1 /*
2 * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmSymbols.hpp"
26 #include "gc/shared/collectedHeap.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/padded.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/markWord.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "runtime/atomicAccess.hpp"
37 #include "runtime/basicLock.inline.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/globals.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/handshake.hpp"
42 #include "runtime/interfaceSupport.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/lockStack.inline.hpp"
45 #include "runtime/mutexLocker.hpp"
46 #include "runtime/objectMonitor.inline.hpp"
47 #include "runtime/objectMonitorTable.hpp"
48 #include "runtime/os.inline.hpp"
49 #include "runtime/osThread.hpp"
50 #include "runtime/safepointMechanism.inline.hpp"
51 #include "runtime/safepointVerifiers.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "runtime/synchronizer.hpp"
55 #include "runtime/threads.hpp"
56 #include "runtime/timer.hpp"
57 #include "runtime/timerTrace.hpp"
58 #include "runtime/trimNativeHeap.hpp"
59 #include "runtime/vframe.hpp"
60 #include "runtime/vmThread.hpp"
61 #include "utilities/align.hpp"
62 #include "utilities/concurrentHashTable.inline.hpp"
63 #include "utilities/concurrentHashTableTasks.inline.hpp"
64 #include "utilities/dtrace.hpp"
65 #include "utilities/events.hpp"
66 #include "utilities/fastHash.hpp"
67 #include "utilities/globalCounter.inline.hpp"
68 #include "utilities/globalDefinitions.hpp"
69 #include "utilities/linkedlist.hpp"
70 #include "utilities/preserveException.hpp"
71
72 class ObjectMonitorDeflationLogging;
73
// Push 'm' onto the front of the lock-free singly-linked in-use list and
// update the count and high-water mark. Safe for concurrent callers: the
// head is updated with a CAS retry loop (classic lock-free stack push).
void MonitorList::add(ObjectMonitor* m) {
  ObjectMonitor* head;
  do {
    head = AtomicAccess::load(&_head);
    m->set_next_om(head);
  } while (AtomicAccess::cmpxchg(&_head, head, m) != head);

  // _count and _max are statistics and are maintained with relaxed ordering;
  // they need not be exactly consistent with the list contents at any instant.
  size_t count = AtomicAccess::add(&_count, 1u, memory_order_relaxed);
  size_t old_max;
  do {
    old_max = AtomicAccess::load(&_max);
    if (count <= old_max) {
      // Another thread already recorded an equal or higher maximum.
      break;
    }
  } while (AtomicAccess::cmpxchg(&_max, old_max, count, memory_order_relaxed) != old_max);
}
90
91 size_t MonitorList::count() const {
92 return AtomicAccess::load(&_count);
93 }
94
95 size_t MonitorList::max() const {
96 return AtomicAccess::load(&_max);
97 }
98
// Helper threaded through the deflation code so that long-running phases
// can periodically check for and block at safepoints/handshakes. Carries
// the current thread and the deflation logging state, both consumed by
// block_for_safepoint() (defined elsewhere in this file).
class ObjectMonitorDeflationSafepointer : public StackObj {
  JavaThread* const _current;
  ObjectMonitorDeflationLogging* const _log;

 public:
  ObjectMonitorDeflationSafepointer(JavaThread* current, ObjectMonitorDeflationLogging* log)
    : _current(current), _log(log) {}

  // Block for a pending safepoint/handshake, if any. op_name, count_name and
  // counter describe the deflation phase and its progress — presumably for
  // logging; confirm against the out-of-line definition.
  void block_for_safepoint(const char* op_name, const char* count_name, size_t counter);
};
109
// Walk the in-use list and unlink deflated ObjectMonitors.
// Returns the number of unlinked ObjectMonitors.
//
// deflated_count - upper bound on the number of deflated (unlinkable) monitors.
// unlinked_list  - receives every monitor removed from the list.
// safepointer    - used to periodically yield to safepoints/handshakes.
//
// Runs concurrently with threads inserting at the head via MonitorList::add(),
// which is why head updates use cmpxchg and why the batch logic re-checks the
// head while gathering.
size_t MonitorList::unlink_deflated(size_t deflated_count,
                                    GrowableArray<ObjectMonitor*>* unlinked_list,
                                    ObjectMonitorDeflationSafepointer* safepointer) {
  size_t unlinked_count = 0;
  ObjectMonitor* prev = nullptr;
  ObjectMonitor* m = AtomicAccess::load_acquire(&_head);

  while (m != nullptr) {
    if (m->is_being_async_deflated()) {
      // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
      // modify the list once per batch. The batch starts at "m".
      size_t unlinked_batch = 0;
      ObjectMonitor* next = m;
      // Look for at most MonitorUnlinkBatch monitors, or the number of
      // deflated and not unlinked monitors, whatever comes first.
      assert(deflated_count >= unlinked_count, "Sanity: underflow");
      size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
      do {
        ObjectMonitor* next_next = next->next_om();
        unlinked_batch++;
        unlinked_list->append(next);
        next = next_next;
        if (unlinked_batch >= unlinked_batch_limit) {
          // Reached the max batch, so bail out of the gathering loop.
          break;
        }
        if (prev == nullptr && AtomicAccess::load(&_head) != m) {
          // Current batch used to be at head, but it is not at head anymore.
          // Bail out and figure out where we currently are. This avoids long
          // walks searching for new prev during unlink under heavy list inserts.
          break;
        }
      } while (next != nullptr && next->is_being_async_deflated());

      // Unlink the found batch. After this, the batch [m, next) is no longer
      // reachable from the list, but each unlinked monitor still points into
      // the list via next_om until it is processed by the caller.
      if (prev == nullptr) {
        // The current batch is the first batch, so there is a chance that it starts at head.
        // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
        ObjectMonitor* prev_head = AtomicAccess::cmpxchg(&_head, m, next);
        if (prev_head != m) {
          // Something must have updated the head. Figure out the actual prev for this batch.
          for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
            prev = n;
          }
          assert(prev != nullptr, "Should have found the prev for the current batch");
          prev->set_next_om(next);
        }
      } else {
        // The current batch is preceded by another batch. This guarantees the current batch
        // does not start at head. Unlink the entire current batch without updating the head.
        assert(AtomicAccess::load(&_head) != m, "Sanity");
        prev->set_next_om(next);
      }

      unlinked_count += unlinked_batch;
      if (unlinked_count >= deflated_count) {
        // Reached the max so bail out of the searching loop.
        // There should be no more deflated monitors left.
        break;
      }
      m = next;
    } else {
      prev = m;
      m = m->next_om();
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("unlinking", "unlinked_count", unlinked_count);
  }

#ifdef ASSERT
  // Invariant: the code above should unlink all deflated monitors.
  // The code that runs after this unlinking does not expect deflated monitors.
  // Notably, attempting to deflate the already deflated monitor would break.
  {
    ObjectMonitor* m = AtomicAccess::load_acquire(&_head);
    while (m != nullptr) {
      assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
      m = m->next_om();
    }
  }
#endif

  AtomicAccess::sub(&_count, unlinked_count);
  return unlinked_count;
}
198
199 MonitorList::Iterator MonitorList::iterator() const {
200 return Iterator(AtomicAccess::load_acquire(&_head));
201 }
202
203 ObjectMonitor* MonitorList::Iterator::next() {
204 ObjectMonitor* current = _current;
205 _current = current->next_om();
206 return current;
207 }
208
209 // The "core" versions of monitor enter and exit reside in this file.
210 // The interpreter and compilers contain specialized transliterated
211 // variants of the enter-exit fast-path operations. See c2_MacroAssembler_x86.cpp
212 // fast_lock(...) for instance. If you make changes here, make sure to modify the
213 // interpreter, and both C1 and C2 fast-path inline locking code emission.
214 //
215 // -----------------------------------------------------------------------------
216
217 #ifdef DTRACE_ENABLED
218
219 // Only bother with this argument setup if dtrace is available
220 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
221
222 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
223 char* bytes = nullptr; \
224 int len = 0; \
225 jlong jtid = SharedRuntime::get_java_tid(thread); \
226 Symbol* klassname = obj->klass()->name(); \
227 if (klassname != nullptr) { \
228 bytes = (char*)klassname->bytes(); \
229 len = klassname->utf8_length(); \
230 }
231
232 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
233 { \
234 if (DTraceMonitorProbes) { \
235 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
236 HOTSPOT_MONITOR_WAIT(jtid, \
237 (uintptr_t)(monitor), bytes, len, (millis)); \
238 } \
239 }
240
241 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
242 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
243 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
244
245 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
246 { \
247 if (DTraceMonitorProbes) { \
248 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
249 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \
250 (uintptr_t)(monitor), bytes, len); \
251 } \
252 }
253
254 #else // ndef DTRACE_ENABLED
255
256 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
257 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
258
259 #endif // ndef DTRACE_ENABLED
260
// This exists only as a workaround of dtrace bug 6254741
static int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
  // Fires the HOTSPOT_MONITOR_WAITED probe; expands to nothing unless
  // DTRACE_ENABLED. The int return only exists so callers have a value
  // to consume (see ObjectSynchronizer::wait()).
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}
266
// Number of statically allocated inflation locks (see _inflation_locks below).
static constexpr size_t inflation_lock_count() {
  constexpr size_t kInflationLockCount = 256;
  return kInflationLockCount;
}
270
// Static storage for an array of PlatformMutex.
// Raw, suitably aligned bytes are used so that no static constructors or
// destructors run for these mutexes; they are constructed in place by
// ObjectSynchronizer::initialize().
alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];

// Return the index-th inflation lock. 'index' must be < inflation_lock_count().
static inline PlatformMutex* inflation_lock(size_t index) {
  return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
}
277
278 void ObjectSynchronizer::initialize() {
279 for (size_t i = 0; i < inflation_lock_count(); i++) {
280 ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
281 }
282 // Start the ceiling with the estimate for one thread.
283 set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
284
285 // Start the timer for deflations, so it does not trigger immediately.
286 _last_async_deflation_time_ns = os::javaTimeNanos();
287
288 ObjectSynchronizer::create_om_table();
289 }
290
MonitorList ObjectSynchronizer::_in_use_list;
// monitors_used_above_threshold() policy is as follows:
//
// The ratio of the current _in_use_list count to the ceiling is used
// to determine if we are above MonitorUsedDeflationThreshold and need
// to do an async monitor deflation cycle. The ceiling is increased by
// AvgMonitorsPerThreadEstimate when a thread is added to the system
// and is decreased by AvgMonitorsPerThreadEstimate when a thread is
// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread count derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
//
// Start the ceiling with the estimate for one thread in initialize()
// which is called after cmd line options are processed.
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Count of successive async deflation cycles that made no progress; compared
// against NoAsyncDeflationProgressMax per the note above.
static uintx _no_progress_cnt = 0;
// When set, the next cycle skips incrementing _no_progress_cnt — presumably
// so certain cycles are not counted as "no progress"; confirm at use sites.
static bool _no_progress_skip_increment = false;
318
319 // =====================> Quick functions
320
321 // The quick_* forms are special fast-path variants used to improve
322 // performance. In the simplest case, a "quick_*" implementation could
323 // simply return false, in which case the caller will perform the necessary
324 // state transitions and call the slow-path form.
325 // The fast-path is designed to handle frequently arising cases in an efficient
326 // manner and is just a degenerate "optimistic" variant of the slow-path.
327 // returns true -- to indicate the call was satisfied.
328 // returns false -- to indicate the call needs the services of the slow-path.
329 // A no-loitering ordinance is in effect for code in the quick_* family
330 // operators: safepoints or indefinite blocking (blocking that might span a
331 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
332 // entry.
333 //
334 // Consider: An interesting optimization is to have the JIT recognize the
335 // following common idiom:
336 // synchronized (someobj) { .... ; notify(); }
337 // That is, we find a notify() or notifyAll() call that immediately precedes
338 // the monitorexit operation. In that case the JIT could fuse the operations
339 // into a single notifyAndExit() runtime primitive.
340
// Fast-path notify/notifyAll ('all' selects notifyAll). Returns true if the
// notification was fully handled here; false means the caller must perform
// the state transitions and take the slow path. Must not block or safepoint
// (enforced by the NoSafepointVerifier); callers arrive in _thread_in_Java.
bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
    // Degenerate notify
    // fast-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = read_monitor(obj, mark);
    if (mon == nullptr) {
      // Racing with inflation/deflation go slow path
      return false;
    }
    assert(mon->object() == oop(obj), "invariant");
    // Only the owner may notify; anything else is an
    // IllegalMonitorStateException case for the slow path.
    if (!mon->has_owner(current)) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != nullptr) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we quickly notify them here and now, avoiding the slow-path.
      if (all) {
        mon->quick_notifyAll(current);
      } else {
        mon->quick_notify(current);
      }
    }
    return true;
  }

  // other IMS exception states take the slow-path
  return false;
}
377
378 // Handle notifications when synchronizing on value based classes
379 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
380 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
381 frame last_frame = locking_thread->last_frame();
382 bool bcp_was_adjusted = false;
383 // Don't decrement bcp if it points to the frame's first instruction. This happens when
384 // handle_sync_on_value_based_class() is called because of a synchronized method. There
385 // is no actual monitorenter instruction in the byte code in this case.
386 if (last_frame.is_interpreted_frame() &&
387 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
388 // adjust bcp to point back to monitorenter so that we print the correct line numbers
389 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
390 bcp_was_adjusted = true;
391 }
392
393 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
394 ResourceMark rm;
395 stringStream ss;
396 locking_thread->print_active_stack_on(&ss);
397 char* base = (char*)strstr(ss.base(), "at");
398 char* newline = (char*)strchr(ss.base(), '\n');
399 if (newline != nullptr) {
400 *newline = '\0';
401 }
402 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
403 } else {
404 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
405 ResourceMark rm;
406 Log(valuebasedclasses) vblog;
407
408 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
409 if (locking_thread->has_last_Java_frame()) {
410 LogStream info_stream(vblog.info());
411 locking_thread->print_active_stack_on(&info_stream);
412 } else {
413 vblog.info("Cannot find the last Java frame");
414 }
415
416 EventSyncOnValueBasedClass event;
417 if (event.should_commit()) {
418 event.set_valueBasedClass(obj->klass());
419 event.commit();
420 }
421 }
422
423 if (bcp_was_adjusted) {
424 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
425 }
426 }
427
428 // -----------------------------------------------------------------------------
429 // JNI locks on java objects
430 // NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  // Top native frames in the stack will not be seen if we attempt
  // preemption, since we start walking from the last Java anchor.
  NoPreemptMark npm(current);

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  // (Here that manifests as inflate_and_enter() returning nullptr.)
  while (true) {
    BasicLock lock;
    if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
      break;
    }
  }
  // Restore the default for subsequent (Java) monitor enters.
  current->set_current_pending_monitor_is_from_java(true);
}
453
454 // NOTE: must use heavy weight monitor to handle jni monitor exit
455 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
456 JavaThread* current = THREAD;
457
458 ObjectMonitor* monitor;
459 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
460 // If this thread has locked the object, exit the monitor. We
461 // intentionally do not use CHECK on check_owner because we must exit the
462 // monitor even if an exception was already pending.
463 if (monitor->check_owner(THREAD)) {
464 monitor->exit(current);
465 }
466 }
467
468 // -----------------------------------------------------------------------------
469 // Internal VM locks on java objects
470 // standard constructor, allows locking failures
// Enter the monitor for 'obj' (if non-null) for the lifetime of this scope
// object; the matching exit happens in ~ObjectLocker unless a preemption
// made it unnecessary (see comment below).
ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
  _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
  assert(!_thread->preempting(), "");

  _thread->check_for_valid_safepoint_state();

  if (_obj() != nullptr) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);

    if (_thread->preempting()) {
      // If preemption was cancelled we acquired the monitor after freezing
      // the frames. Redoing the vm call later in thaw will require us to
      // release it since the call should look like the original one. We
      // do it in ~ObjectLocker to reduce the window of time we hold the
      // monitor since we can't do anything useful with it now, and would
      // otherwise just force other vthreads to preempt in case they try
      // to acquire this monitor.
      _skip_exit = !_thread->preemption_cancelled();
      ObjectSynchronizer::read_monitor(_obj())->set_object_strong();
      _thread->set_pending_preempted_exception();

    }
  }
}
495
496 ObjectLocker::~ObjectLocker() {
497 if (_obj() != nullptr && !_skip_exit) {
498 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
499 }
500 }
501
502 void ObjectLocker::wait_uninterruptibly(TRAPS) {
503 ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
504 if (_thread->preempting()) {
505 _skip_exit = true;
506 ObjectSynchronizer::read_monitor(_obj())->set_object_strong();
507 _thread->set_pending_preempted_exception();
508 }
509 }
510
511 // -----------------------------------------------------------------------------
512 // Wait/Notify/NotifyAll
513 // NOTE: must use heavy weight monitor to handle wait()
514
515 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
516 JavaThread* current = THREAD;
517 if (millis < 0) {
518 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
519 }
520
521 ObjectMonitor* monitor;
522 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
523
524 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
525 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
526
527 // This dummy call is in place to get around dtrace bug 6254741. Once
528 // that's fixed we can uncomment the following line, remove the call
529 // and change this function back into a "void" func.
530 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
531 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
532 return ret_code;
533 }
534
535 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
536 assert(millis >= 0, "timeout value is negative");
537
538 ObjectMonitor* monitor;
539 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
540 monitor->wait(millis, false, THREAD);
541 }
542
543
544 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
545 JavaThread* current = THREAD;
546
547 markWord mark = obj->mark();
548 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
549 // Not inflated so there can't be any waiters to notify.
550 return;
551 }
552 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
553 monitor->notify(CHECK);
554 }
555
556 // NOTE: see comment of notify()
557 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
558 JavaThread* current = THREAD;
559
560 markWord mark = obj->mark();
561 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
562 // Not inflated so there can't be any waiters to notify.
563 return;
564 }
565
566 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
567 monitor->notifyAll(CHECK);
568 }
569
570 // -----------------------------------------------------------------------------
571 // Hash Code handling
572
// Globals shared by the hashCode generation code below. Each hot field is
// padded so it is the sole occupant of its cache line (layout is
// padding-sensitive — do not reorder).
struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
585
586 // hashCode() generation :
587 //
588 // Possibilities:
589 // * MD5Digest of {obj,stw_random}
590 // * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
591 // * A DES- or AES-style SBox[] mechanism
592 // * One of the Phi-based schemes, such as:
593 // 2654435761 = 2^32 * Phi (golden ratio)
594 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
595 // * A variation of Marsaglia's shift-xor RNG scheme.
596 // * (obj ^ stw_random) is appealing, but can result
597 // in undesirable regularity in the hashCode values of adjacent objects
598 // (objects allocated back-to-back, in particular). This could potentially
599 // result in hashtable collisions and reduced hashtable efficiency.
600 // There are simple ways to "diffuse" the middle address bits over the
601 // generated hashCode values:
602
// Compute a new hash value for obj according to the hashCode flag.
// The result is masked to markWord::hash_mask and is non-zero in every
// mode except 6 (compact i-hash), where 0 is a legal value.
intptr_t ObjectSynchronizer::get_next_hash(Thread* current, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else if (hashCode == 5) {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = current->_hashStateX;
    t ^= (t << 11);
    current->_hashStateX = current->_hashStateY;
    current->_hashStateY = current->_hashStateZ;
    current->_hashStateZ = current->_hashStateW;
    unsigned v = current->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    current->_hashStateW = v;
    value = v;
  } else {
    // hashCode == 6: address-based FastHash, only valid with compact headers.
    assert(UseCompactObjectHeaders, "Only with compact i-hash");
#ifdef _LP64
    uint64_t val = cast_from_oop<uint64_t>(obj);
    uint32_t hash = FastHash::get_hash32((uint32_t)val, (uint32_t)(val >> 32));
#else
    uint32_t val = cast_from_oop<uint32_t>(obj);
    uint32_t hash = FastHash::get_hash32(val, UCONST64(0xAAAAAAAA));
#endif
    value= static_cast<intptr_t>(hash);
  }

  value &= markWord::hash_mask;
  // In all modes except 6, 0 is reserved to mean "no hash", so remap it.
  if (hashCode != 6 && value == 0) value = 0xBAD;
  assert(value != markWord::no_hash || hashCode == 6, "invariant");
  return value;
}
652
// Return (installing first if necessary) the identity hash for obj.
// Depending on mode, the hash lives in the object's mark word, in the
// compact-header i-hash field, or in the displaced header stored inside an
// inflated ObjectMonitor. Every install path races with concurrent locking,
// inflation, and async deflation, hence the retry loop.
intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = obj->mark_acquire();
    if (UseCompactObjectHeaders) {
      if (mark.is_hashed()) {
        return get_hash(mark, obj);
      }
      // NOTE: this declaration shadows the outer 'hash' local.
      intptr_t hash = get_next_hash(current, obj);  // get a new hash
      markWord new_mark;
      if (mark.is_not_hashed_expanded()) {
        // Object already has room for the i-hash field: store the hash there.
        new_mark = mark.set_hashed_expanded();
        int offset = mark.klass()->hash_offset_in_bytes(obj, mark);
        obj->int_field_put(offset, (jint) hash);
      } else {
        new_mark = mark.set_hashed_not_expanded();
      }
      markWord old_mark = obj->cas_set_mark(new_mark, mark);
      if (old_mark == mark) {
        return hash;
      }
      // CAS failed, retry.
      continue;
    } else if (UseObjectMonitorTable || !mark.has_monitor()) {
      // If UseObjectMonitorTable is set the hash can simply be installed in the
      // object header, since the monitor isn't in the object header.
      hash = mark.hash();
      if (hash != 0) {                     // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {                  // if the hash was installed, return it
        return hash;
      }
      // CAS failed, retry
      continue;

      // NOTE(review): the comment below appears stale — the "continue" above
      // retries the loop rather than falling through.
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else {
      assert(!mark.is_unlocked() && !mark.is_fast_locked(), "invariant");
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        // It has a hash.

        // Separate load of dmw/header above from the loads in
        // is_being_async_deflated().

        // dmw/header and _contentions may get written by different threads.
        // Make sure to observe them in the same order when having several observers.
        OrderAccess::loadload_for_IRIW();

        if (monitor->is_being_async_deflated()) {
          // But we can't safely use the hash if we detect that async
          // deflation has occurred. So we attempt to restore the
          // header/dmw to the object's header so that we only retry
          // once if the deflater thread happens to be slow.
          monitor->install_displaced_markword_in_object(obj);
          continue;
        }
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    }

    // NOTE: an async deflation can race after we get the monitor and
    // before we can update the ObjectMonitor's header with the hash
    // value below.
    assert(mark.has_monitor(), "must be");
    monitor = mark.monitor();

    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                       // if it does not have a hash
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash) ;    // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = AtomicAccess::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}
768
769
// Retrieve the i-hash of a compact-header object. The klass is passed in
// explicitly — presumably so callers that have already decoded it avoid
// re-decoding; the two-argument overload derives it from the mark.
uint32_t ObjectSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
  assert(UseCompactObjectHeaders, "Only with compact i-hash");
  //assert(mark.is_neutral() | mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
  assert(mark.is_hashed(), "only from hashed or copied object");
  if (mark.is_hashed_expanded()) {
    // Object has been expanded with an i-hash field: read the stored hash.
    return obj->int_field(klass->hash_offset_in_bytes(obj, mark));
  } else {
    assert(mark.is_hashed_not_expanded(), "must be hashed");
    assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
    // Already marked as hashed, but not yet copied. Recompute hash and return it.
    return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
  }
}
783
784 uint32_t ObjectSynchronizer::get_hash(markWord mark, oop obj) {
785 return get_hash(mark, obj, mark.klass());
786 }
787
// Returns true iff 'current' holds the lock on h_obj.
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = obj->mark_acquire();

  if (mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

  // Inflated case. Loop because reading the monitor races with concurrent
  // inflation/deflation, during which read_monitor() can return nullptr.
  while (mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(obj, mark);
    if (monitor != nullptr) {
      return monitor->is_entered(current) != 0;
    }
    // Racing with inflation/deflation, retry
    mark = obj->mark_acquire();

    if (mark.is_fast_locked()) {
      // Some other thread fast_locked, current could not have held the lock
      return false;
    }
  }

  // Unlocked case, header in place
  assert(mark.is_unlocked(), "sanity check");
  return false;
}
818
// Best-effort: returns the JavaThread that owns the lock on h_obj's object,
// or null if unowned/undeterminable. The answer can be stale by the time
// the caller uses it, since other threads may lock/unlock concurrently.
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  oop obj = h_obj();
  markWord mark = obj->mark_acquire();

  if (mark.is_fast_locked()) {
    // fast-locked so get owner from the object.
    // owning_thread_from_object() may also return null here:
    return Threads::owning_thread_from_object(t_list, h_obj());
  }

  // Monitor may be mid inflation/deflation; loop until stable.
  while (mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(obj, mark);
    if (monitor != nullptr) {
      return Threads::owning_thread_from_monitor(t_list, monitor);
    }
    // Racing with inflation/deflation, retry
    mark = obj->mark_acquire();

    if (mark.is_fast_locked()) {
      // Some other thread fast_locked
      return Threads::owning_thread_from_object(t_list, h_obj());
    }
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_unlocked(), "sanity check");

  return nullptr;
}
850
851 // Visitors ...
852
853 // Iterate over all ObjectMonitors.
854 template <typename Function>
855 void ObjectSynchronizer::monitors_iterate(Function function) {
856 MonitorList::Iterator iter = _in_use_list.iterator();
857 while (iter.has_next()) {
858 ObjectMonitor* monitor = iter.next();
859 function(monitor);
860 }
861 }
862
863 // Iterate ObjectMonitors owned by any thread and where the owner `filter`
864 // returns true.
865 template <typename OwnerFilter>
866 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
867 monitors_iterate([&](ObjectMonitor* monitor) {
868 // This function is only called at a safepoint or when the
869 // target thread is suspended or when the target thread is
870 // operating on itself. The current closures in use today are
871 // only interested in an owned ObjectMonitor and ownership
872 // cannot be dropped under the calling contexts so the
873 // ObjectMonitor cannot be async deflated.
874 if (monitor->has_owner() && filter(monitor)) {
875 assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
876
877 closure->do_monitor(monitor);
878 }
879 });
880 }
881
882 // Iterate ObjectMonitors where the owner == thread; this does NOT include
883 // ObjectMonitors where owner is set to a stack-lock address in thread.
884 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
885 int64_t key = ObjectMonitor::owner_id_from(thread);
886 auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
887 return owned_monitors_iterate_filtered(closure, thread_filter);
888 }
889
890 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, oop vthread) {
891 int64_t key = ObjectMonitor::owner_id_from(vthread);
892 auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
893 return owned_monitors_iterate_filtered(closure, thread_filter);
894 }
895
896 // Iterate ObjectMonitors owned by any thread.
897 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
898 auto all_filter = [&](ObjectMonitor* monitor) { return true; };
899 return owned_monitors_iterate_filtered(closure, all_filter);
900 }
901
// Heuristic: returns true when the percentage of in-use monitors relative
// to the (monotonically maintained) ceiling exceeds
// MonitorUsedDeflationThreshold. If too many prior deflation cycles made
// no progress, the ceiling itself is bumped up so the threshold backs off.
static bool monitors_used_above_threshold(MonitorList* list) {
  if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
    return false;
  }
  size_t monitors_used = list->count();
  if (monitors_used == 0) { // empty list is easy
    return false;
  }
  size_t old_ceiling = ObjectSynchronizer::in_use_list_ceiling();
  // Make sure that we use a ceiling value that is not lower than
  // previous, not lower than the recorded max used by the system, and
  // not lower than the current number of monitors in use (which can
  // race ahead of max). The result is guaranteed > 0.
  size_t ceiling = MAX3(old_ceiling, list->max(), monitors_used);

  // Check if our monitor usage is above the threshold:
  size_t monitor_usage = (monitors_used * 100LL) / ceiling;
  if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
    // Deflate monitors if over the threshold percentage, unless no
    // progress on previous deflations.
    bool is_above_threshold = true;

    // Check if it's time to adjust the in_use_list_ceiling up, due
    // to too many async deflation attempts without any progress.
    if (NoAsyncDeflationProgressMax != 0 &&
        _no_progress_cnt >= NoAsyncDeflationProgressMax) {
      // Grow the ceiling by the fraction of it that is above the threshold,
      // so the usage percentage drops back under the threshold.
      double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
      size_t delta = (size_t)(ceiling * remainder) + 1;
      size_t new_ceiling = (ceiling > SIZE_MAX - delta)
        ? SIZE_MAX // Overflow, let's clamp new_ceiling.
        : ceiling + delta;

      ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
      log_info(monitorinflation)("Too many deflations without progress; "
                                 "bumping in_use_list_ceiling from %zu"
                                 " to %zu", old_ceiling, new_ceiling);
      _no_progress_cnt = 0;
      ceiling = new_ceiling;

      // Check if our monitor usage is still above the threshold:
      monitor_usage = (monitors_used * 100LL) / ceiling;
      is_above_threshold = int(monitor_usage) > MonitorUsedDeflationThreshold;
    }
    log_info(monitorinflation)("monitors_used=%zu, ceiling=%zu"
                               ", monitor_usage=%zu, threshold=%d",
                               monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
    return is_above_threshold;
  }

  return false;
}
953
// Current number of ObjectMonitors on the global in-use list.
size_t ObjectSynchronizer::in_use_list_count() {
  return _in_use_list.count();
}
957
// High-water mark of the in-use list since VM start.
size_t ObjectSynchronizer::in_use_list_max() {
  return _in_use_list.max();
}
961
// Soft ceiling used by the deflation threshold heuristic.
size_t ObjectSynchronizer::in_use_list_ceiling() {
  return _in_use_list_ceiling;
}
965
// Atomically shrink the ceiling by one thread's estimated monitor share
// (presumably called on thread exit — confirm against callers).
void ObjectSynchronizer::dec_in_use_list_ceiling() {
  AtomicAccess::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}
969
// Atomically grow the ceiling by one thread's estimated monitor share.
void ObjectSynchronizer::inc_in_use_list_ceiling() {
  AtomicAccess::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}
973
// Plain (non-atomic) store of the ceiling; used by the deflation heuristic.
void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
  _in_use_list_ceiling = new_value;
}
977
// Decides whether the MonitorDeflationThread should run a deflation cycle
// now: either on explicit request, or when monitor usage is above threshold
// (rate-limited by AsyncDeflationInterval), or when the guaranteed interval
// has elapsed.
bool ObjectSynchronizer::is_async_deflation_needed() {
  if (is_async_deflation_requested()) {
    // Async deflation request.
    log_info(monitorinflation)("Async deflation needed: explicit request");
    return true;
  }

  jlong time_since_last = time_since_last_async_deflation_ms();

  if (AsyncDeflationInterval > 0 &&
      time_since_last > AsyncDeflationInterval &&
      monitors_used_above_threshold(&_in_use_list)) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the MonitorDeflationThread.
    log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
    return true;
  }

  if (GuaranteedAsyncDeflationInterval > 0 &&
      time_since_last > GuaranteedAsyncDeflationInterval) {
    // It's been longer than our specified guaranteed deflate interval.
    // We need to clean up the used monitors even if the threshold is
    // not reached, to keep the memory utilization at bay when many threads
    // touched many monitors.
    log_info(monitorinflation)("Async deflation needed: guaranteed interval (%zd ms) "
                               "is greater than time since last deflation (" JLONG_FORMAT " ms)",
                               GuaranteedAsyncDeflationInterval, time_since_last);

    // If this deflation has no progress, then it should not affect the no-progress
    // tracking, otherwise threshold heuristics would think it was triggered, experienced
    // no progress, and needs to backoff more aggressively. In this "no progress" case,
    // the generic code would bump the no-progress counter, and we compensate for that
    // by telling it to skip the update.
    //
    // If this deflation has progress, then it should let non-progress tracking
    // know about this, otherwise the threshold heuristics would kick in, potentially
    // experience no-progress due to aggressive cleanup by this deflation, and think
    // it is still in no-progress stride. In this "progress" case, the generic code would
    // zero the counter, and we allow it to happen.
    _no_progress_skip_increment = true;

    return true;
  }

  return false;
}
1026
// Set the async-deflation-requested flag and wake the
// MonitorDeflationThread, which waits on MonitorDeflation_lock.
void ObjectSynchronizer::request_deflate_idle_monitors() {
  MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
  set_is_async_deflation_requested(true);
  ml.notify_all();
}
1032
1033 bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
1034 JavaThread* current = JavaThread::current();
1035 bool ret_code = false;
1036
1037 jlong last_time = last_async_deflation_time_ns();
1038
1039 request_deflate_idle_monitors();
1040
1041 const int N_CHECKS = 5;
1042 for (int i = 0; i < N_CHECKS; i++) { // sleep for at most 5 seconds
1043 if (last_async_deflation_time_ns() > last_time) {
1044 log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
1045 ret_code = true;
1046 break;
1047 }
1048 {
1049 // JavaThread has to honor the blocking protocol.
1050 ThreadBlockInVM tbivm(current);
1051 os::naked_short_sleep(999); // sleep for almost 1 second
1052 }
1053 }
1054 if (!ret_code) {
1055 log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
1056 }
1057
1058 return ret_code;
1059 }
1060
1061 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1062 return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1063 }
1064
1065 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1066 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1067 //
1068 size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
1069 MonitorList::Iterator iter = _in_use_list.iterator();
1070 size_t deflated_count = 0;
1071 Thread* current = Thread::current();
1072
1073 while (iter.has_next()) {
1074 if (deflated_count >= (size_t)MonitorDeflationMax) {
1075 break;
1076 }
1077 ObjectMonitor* mid = iter.next();
1078 if (mid->deflate_monitor(current)) {
1079 deflated_count++;
1080 }
1081
1082 // Must check for a safepoint/handshake and honor it.
1083 safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
1084 }
1085
1086 return deflated_count;
1087 }
1088
1089 class DeflationHandshakeClosure : public HandshakeClosure {
1090 public:
1091 DeflationHandshakeClosure() : HandshakeClosure("DeflationHandshakeClosure") {}
1092
1093 void do_thread(Thread* thread) {
1094 log_trace(monitorinflation)("DeflationHandshakeClosure::do_thread: thread="
1095 INTPTR_FORMAT, p2i(thread));
1096 if (thread->is_Java_thread()) {
1097 // Clear OM cache
1098 JavaThread* jt = JavaThread::cast(thread);
1099 jt->om_clear_monitor_cache();
1100 }
1101 }
1102 };
1103
1104 class VM_RendezvousGCThreads : public VM_Operation {
1105 public:
1106 bool evaluate_at_safepoint() const override { return false; }
1107 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1108 void doit() override {
1109 Universe::heap()->safepoint_synchronize_begin();
1110 Universe::heap()->safepoint_synchronize_end();
1111 };
1112 };
1113
1114 static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
1115 ObjectMonitorDeflationSafepointer* safepointer) {
1116 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1117 size_t deleted_count = 0;
1118 for (ObjectMonitor* monitor: *delete_list) {
1119 delete monitor;
1120 deleted_count++;
1121 // A JavaThread must check for a safepoint/handshake and honor it.
1122 safepointer->block_for_safepoint("deletion", "deleted_count", deleted_count);
1123 }
1124 return deleted_count;
1125 }
1126
// Scoped logging helper for one deflation cycle. Picks the Debug stream if
// enabled, else the Info stream, else logs nothing. Prints in_use_list
// stats at begin/end and around handshakes and safepoint pauses; the
// elapsed timer is stopped while we are blocked so it measures only
// active deflation work.
class ObjectMonitorDeflationLogging: public StackObj {
  LogStreamHandle(Debug, monitorinflation) _debug;
  LogStreamHandle(Info, monitorinflation)  _info;
  LogStream*                               _stream;  // chosen stream, or null if logging disabled
  elapsedTimer                             _timer;

  size_t ceiling() const { return ObjectSynchronizer::in_use_list_ceiling(); }
  size_t count() const { return ObjectSynchronizer::in_use_list_count(); }
  size_t max() const { return ObjectSynchronizer::in_use_list_max(); }

public:
  ObjectMonitorDeflationLogging()
    : _debug(), _info(), _stream(nullptr) {
    // Prefer the more verbose Debug level when both are enabled.
    if (_debug.is_enabled()) {
      _stream = &_debug;
    } else if (_info.is_enabled()) {
      _stream = &_info;
    }
  }

  // Log the starting stats and start timing the cycle.
  void begin() {
    if (_stream != nullptr) {
      _stream->print_cr("begin deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  // Called just before the thread-cache-clearing handshake; pauses the timer.
  void before_handshake(size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("before handshaking: unlinked_count=%zu"
                        ", in_use_list stats: ceiling=%zu, count="
                        "%zu, max=%zu",
                        unlinked_count, ceiling(), count(), max());
    }
  }

  // Called after the handshake completes; restarts the timer.
  void after_handshake() {
    if (_stream != nullptr) {
      _stream->print_cr("after handshaking: in_use_list stats: ceiling="
                        "%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  // Final summary; the per-cycle counts line is elided at Info level
  // when nothing was deflated or unlinked.
  void end(size_t deflated_count, size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      if (deflated_count != 0 || unlinked_count != 0 || _debug.is_enabled()) {
        _stream->print_cr("deflated_count=%zu, {unlinked,deleted}_count=%zu monitors in %3.7f secs",
                          deflated_count, unlinked_count, _timer.seconds());
      }
      _stream->print_cr("end deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
    }
  }

  // Called when the deflater yields to a safepoint/handshake; pauses the timer.
  void before_block_for_safepoint(const char* op_name, const char* cnt_name, size_t cnt) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("pausing %s: %s=%zu, in_use_list stats: ceiling="
                        "%zu, count=%zu, max=%zu",
                        op_name, cnt_name, cnt, ceiling(), count(), max());
    }
  }

  // Called when the deflater resumes after the safepoint; restarts the timer.
  void after_block_for_safepoint(const char* op_name) {
    if (_stream != nullptr) {
      _stream->print_cr("resuming %s: in_use_list stats: ceiling=%zu"
                        ", count=%zu, max=%zu", op_name,
                        ceiling(), count(), max());
      _timer.start();
    }
  }
};
1204
1205 void ObjectMonitorDeflationSafepointer::block_for_safepoint(const char* op_name, const char* count_name, size_t counter) {
1206 if (!SafepointMechanism::should_process(_current)) {
1207 return;
1208 }
1209
1210 // A safepoint/handshake has started.
1211 _log->before_block_for_safepoint(op_name, count_name, counter);
1212
1213 {
1214 // Honor block request.
1215 ThreadBlockInVM tbivm(_current);
1216 }
1217
1218 _log->after_block_for_safepoint(op_name);
1219 }
1220
// This function is called by the MonitorDeflationThread to deflate
// ObjectMonitors. One full cycle: deflate, unlink from the in-use list,
// handshake all JavaThreads (and rendezvous GC threads) so nobody holds a
// stale reference, then free. Returns the number of deflated monitors.
size_t ObjectSynchronizer::deflate_idle_monitors() {
  JavaThread* current = JavaThread::current();
  assert(current->is_monitor_deflation_thread(), "The only monitor deflater");

  // The async deflation request has been processed.
  _last_async_deflation_time_ns = os::javaTimeNanos();
  set_is_async_deflation_requested(false);

  ObjectMonitorDeflationLogging log;
  ObjectMonitorDeflationSafepointer safepointer(current, &log);

  log.begin();

  // Deflate some idle ObjectMonitors.
  size_t deflated_count = deflate_monitor_list(&safepointer);

  // Unlink the deflated ObjectMonitors from the in-use list.
  size_t unlinked_count = 0;
  size_t deleted_count = 0;
  if (deflated_count > 0) {
    ResourceMark rm(current);
    GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
    unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);

    // Old table instances that become garbage during a rebuild; freed
    // only after the handshake below.
    GrowableArray<ObjectMonitorTable::Table*> table_delete_list;
    if (UseObjectMonitorTable) {
      ObjectMonitorTable::rebuild(&table_delete_list);
    }

    log.before_handshake(unlinked_count);

    // A JavaThread needs to handshake in order to safely free the
    // ObjectMonitors that were deflated in this cycle.
    DeflationHandshakeClosure dhc;
    Handshake::execute(&dhc);
    // Also, we sync and desync GC threads around the handshake, so that they can
    // safely read the mark-word and look-through to the object-monitor, without
    // being afraid that the object-monitor is going away.
    VM_RendezvousGCThreads sync_gc;
    VMThread::execute(&sync_gc);

    log.after_handshake();

    // After the handshake, safely free the ObjectMonitors that were
    // deflated and unlinked in this cycle.

    // Delete the unlinked ObjectMonitors.
    deleted_count = delete_monitors(&delete_list, &safepointer);
    if (UseObjectMonitorTable) {
      ObjectMonitorTable::destroy(&table_delete_list);
    }
    assert(unlinked_count == deleted_count, "must be");
  }

  log.end(deflated_count, unlinked_count);

  GVars.stw_random = os::random();

  // Maintain the no-progress counter for the threshold heuristic; a
  // guaranteed-interval cycle may have asked us to skip the increment.
  if (deflated_count != 0) {
    _no_progress_cnt = 0;
  } else if (_no_progress_skip_increment) {
    _no_progress_skip_increment = false;
  } else {
    _no_progress_cnt++;
  }

  return deflated_count;
}
1291
1292 // Monitor cleanup on JavaThread::exit
1293
// Iterate through monitor cache and attempt to release thread's monitors
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  JavaThread* _thread;  // the exiting thread whose monitors are released

 public:
  ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    // Fully exit the monitor on behalf of _thread, regardless of recursions.
    mid->complete_exit(_thread);
  }
};
1305
1306 // Release all inflated monitors owned by current thread. Lightweight monitors are
1307 // ignored. This is meant to be called during JNI thread detach which assumes
1308 // all remaining monitors are heavyweight. All exceptions are swallowed.
1309 // Scanning the extant monitor list can be time consuming.
1310 // A simple optimization is to add a per-thread flag that indicates a thread
1311 // called jni_monitorenter() during its lifetime.
1312 //
1313 // Instead of NoSafepointVerifier it might be cheaper to
1314 // use an idiom of the form:
1315 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1316 // <code that must not run at safepoint>
1317 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1318 // Since the tests are extremely cheap we could leave them enabled
1319 // for normal product builds.
1320
void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
  assert(current == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(current);
  // Walk all in-use monitors owned by this thread; complete_exit() each one.
  ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
  assert(!current->has_pending_exception(), "Should not be possible");
  // Defensive in product builds: swallow anything raised during the exits.
  current->clear_pending_exception();
}
1329
// Human-readable name for an InflateCause, used by inflation logging and
// JFR event reporting.
const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal: return "VM Internal";
    case inflate_cause_monitor_enter: return "Monitor Enter";
    case inflate_cause_wait: return "Monitor Wait";
    case inflate_cause_notify: return "Monitor Notify";
    case inflate_cause_jni_enter: return "JNI Monitor Enter";
    case inflate_cause_jni_exit: return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  // Unreachable in debug builds; keeps product-build compilers happy.
  return "Unknown";
}
1343
1344 //------------------------------------------------------------------------------
1345 // Debugging code
1346
// Address of the SharedGlobals block — presumably for WhiteBox/diagnostic
// consumers; confirm against callers.
u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}
1350
// Address of the hash-code sequence counter inside SharedGlobals.
u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}
1354
// Size of the SharedGlobals block, for bounds checking by consumers.
size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}
1358
// Address of the stw_random field inside SharedGlobals.
u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}
1362
// Do the final audit and print of ObjectMonitor stats; must be done
// by the VMThread at VM exit time.
void ObjectSynchronizer::do_final_audit_and_print_stats() {
  assert(Thread::current()->is_VM_thread(), "sanity check");

  if (is_final_audit()) { // Only do the audit once.
    return;
  }
  set_is_final_audit();
  log_info(monitorinflation)("Starting the final audit.");

  // Only worth the list walk if Info-level logging is on.
  if (log_is_enabled(Info, monitorinflation)) {
    LogStreamHandle(Info, monitorinflation) ls;
    audit_and_print_stats(&ls, true /* on_exit */);
  }
}
1379
// This function can be called by the MonitorDeflationThread or it can be called when
// we are trying to exit the VM. The list walker functions can run in parallel with
// the other list operations.
// Calls to this function can be added in various places as a debugging
// aid.
//
void ObjectSynchronizer::audit_and_print_stats(outputStream* ls, bool on_exit) {
  int error_cnt = 0;

  ls->print_cr("Checking in_use_list:");
  chk_in_use_list(ls, &error_cnt);

  if (error_cnt == 0) {
    ls->print_cr("No errors found in in_use_list checks.");
  } else {
    log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
  }

  // When exiting, only log the interesting entries at the Info level.
  // When called at intervals by the MonitorDeflationThread, log output
  // at the Trace level since there can be a lot of it.
  if (!on_exit && log_is_enabled(Trace, monitorinflation)) {
    LogStreamHandle(Trace, monitorinflation) ls_tr;
    log_in_use_monitor_details(&ls_tr, true /* log_all */);
  } else if (on_exit) {
    log_in_use_monitor_details(ls, false /* log_all */);
  }

  ls->flush();

  // Any error here indicates monitor-list corruption; abort loudly.
  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}
1412
// Check the in_use_list; log the results of the checks.
// Verifies each entry and cross-checks the walked count/max against the
// list's own bookkeeping (mismatches are warnings, not errors, since the
// list can be mutated concurrently).
void ObjectSynchronizer::chk_in_use_list(outputStream* out, int *error_cnt_p) {
  size_t l_in_use_count = _in_use_list.count();
  size_t l_in_use_max = _in_use_list.max();
  out->print_cr("count=%zu, max=%zu", l_in_use_count,
                l_in_use_max);

  size_t ck_in_use_count = 0;
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* mid = iter.next();
    chk_in_use_entry(mid, out, error_cnt_p);
    ck_in_use_count++;
  }

  if (l_in_use_count == ck_in_use_count) {
    out->print_cr("in_use_count=%zu equals ck_in_use_count=%zu",
                  l_in_use_count, ck_in_use_count);
  } else {
    out->print_cr("WARNING: in_use_count=%zu is not equal to "
                  "ck_in_use_count=%zu", l_in_use_count,
                  ck_in_use_count);
  }

  size_t ck_in_use_max = _in_use_list.max();
  if (l_in_use_max == ck_in_use_max) {
    out->print_cr("in_use_max=%zu equals ck_in_use_max=%zu",
                  l_in_use_max, ck_in_use_max);
  } else {
    out->print_cr("WARNING: in_use_max=%zu is not equal to "
                  "ck_in_use_max=%zu", l_in_use_max, ck_in_use_max);
  }
}
1446
// Check an in-use monitor entry; log any errors.
// Each detected inconsistency increments *error_cnt_p.
void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
                                          int* error_cnt_p) {
  if (n->owner_is_DEFLATER_MARKER()) {
    // This could happen when monitor deflation blocks for a safepoint.
    return;
  }


  if (n->metadata() == 0) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
                  "have non-null _metadata (header/hash) field.", p2i(n));
    *error_cnt_p = *error_cnt_p + 1;
  }

  // A null object means the monitor's referent has been collected; nothing
  // further to check.
  const oop obj = n->object_peek();
  if (obj == nullptr) {
    return;
  }

  const markWord mark = obj->mark();
  // Note: When using ObjectMonitorTable we may observe an intermediate state,
  // where the monitor is globally visible, but no thread has yet transitioned
  // the markWord. To avoid reporting a false positive during this transition, we
  // skip the `!mark.has_monitor()` test if we are using the ObjectMonitorTable.
  if (!UseObjectMonitorTable && !mark.has_monitor()) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                  "object does not think it has a monitor: obj="
                  INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                  p2i(obj), mark.value());
    *error_cnt_p = *error_cnt_p + 1;
    return;
  }

  // The object must point back at this exact monitor.
  ObjectMonitor* const obj_mon = read_monitor(obj, mark);
  if (n != obj_mon) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                  "object does not refer to the same monitor: obj="
                  INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                  INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    *error_cnt_p = *error_cnt_p + 1;
  }
}
1490
// Log details about ObjectMonitors on the in_use_list. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
// When log_all is false, idle monitors (no owner, not busy) are elided.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
  if (_in_use_list.count() > 0) {
    stringStream ss;
    out->print_cr("In-use monitor info%s:", log_all ? "" : " (eliding idle monitors)");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s  %s  %18s  %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("==================  ===  ==================  ==================");

    auto is_interesting = [&](ObjectMonitor* monitor) {
      return log_all || monitor->has_owner() || monitor->is_busy();
    };

    monitors_iterate([&](ObjectMonitor* monitor) {
      if (is_interesting(monitor)) {
        const oop obj = monitor->object_peek();
        // The hash lives in the monitor itself with the table, otherwise in
        // the displaced header held by the monitor.
        const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
        ResourceMark rm;
        out->print(INTPTR_FORMAT "  %d%d%d  " INTPTR_FORMAT "  %s", p2i(monitor),
                   monitor->is_busy(), hash != 0, monitor->has_owner(),
                   p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
        if (monitor->is_busy()) {
          out->print(" (%s)", monitor->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();
      }
    });
  }

  out->flush();
}
1526
1527 ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, bool* inserted) {
1528 ObjectMonitor* monitor = get_monitor_from_table(object);
1529 if (monitor != nullptr) {
1530 *inserted = false;
1531 return monitor;
1532 }
1533
1534 ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
1535 alloced_monitor->set_anonymous_owner();
1536
1537 // Try insert monitor
1538 monitor = add_monitor(alloced_monitor, object);
1539
1540 *inserted = alloced_monitor == monitor;
1541 if (!*inserted) {
1542 delete alloced_monitor;
1543 }
1544
1545 return monitor;
1546 }
1547
// Trace-level log line for a monitor inflation; the ResourceMark covers
// the external_name() allocation.
static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}
1557
// Fill in and commit a JFR JavaMonitorInflate event for obj, unless obj's
// klass is excluded from JFR monitor events.
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  const Klass* monitor_klass = obj->klass();
  if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
    return;
  }
  event->set_monitorClass(monitor_klass);
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}
1571
// Table-based monitor lookup/creation. If we inserted a new monitor (i.e.
// this thread performed the inflation), also emit the inflation log/JFR
// event and add it to the global in-use list.
ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, &inserted);

  if (inserted) {
    // We won the insertion race, so this thread did the inflation.
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}
1592
1593 // Add the hashcode to the monitor to match the object and put it in the hashtable.
1594 ObjectMonitor* ObjectSynchronizer::add_monitor(ObjectMonitor* monitor, oop obj) {
1595 assert(UseObjectMonitorTable, "must be");
1596 assert(obj == monitor->object(), "must be");
1597
1598 markWord mark = obj->mark();
1599 intptr_t hash;
1600 if (UseCompactObjectHeaders) {
1601 hash = static_cast<intptr_t>(get_hash(mark, obj));
1602 } else {
1603 hash = mark.hash();
1604 }
1605 assert(hash != 0, "must be set when claiming the object monitor");
1606 monitor->set_hash(hash);
1607
1608 return ObjectMonitorTable::monitor_put_get(monitor, obj);
1609 }
1610
// Remove a (live-object) monitor from the ObjectMonitorTable; monitors
// whose object has been collected are removed via the table's is_dead path.
void ObjectSynchronizer::remove_monitor(ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  ObjectMonitorTable::remove_monitor_entry(monitor);
}
1617
// After deflation (table mode): CAS the object's mark word back to an
// unlocked state, clearing only the lock bits so the rest of the mark
// (including hash bits) is preserved.
void ObjectSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

  // Retry until the mark no longer refers to a monitor — either our CAS
  // succeeded, or some other thread already transitioned the mark.
  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}
1629
1630 void ObjectSynchronizer::create_om_table() {
1631 if (!UseObjectMonitorTable) {
1632 return;
1633 }
1634 ObjectMonitorTable::create();
1635 }
1636
// Collects the fast-locked objects on the current thread's lock stack that
// already have a monitor (i.e. are contended), then inflates them to free
// lock-stack slots. Capacity is bounded by LockStack::CAPACITY.
class ObjectSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];  // gathered candidates
  int _length;                               // number of valid entries

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  // Lock stacks hold full-width oops, never narrow ones.
  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {};

  // Gather candidates from current's lock stack and inflate each one.
  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      ObjectSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};
1671
1672 void ObjectSynchronizer::ensure_lock_stack_space(JavaThread* current) {
1673 assert(current == JavaThread::current(), "must be");
1674 LockStack& lock_stack = current->lock_stack();
1675
1676 // Make room on lock_stack
1677 if (lock_stack.is_full()) {
1678 // Inflate contended objects
1679 LockStackInflateContendedLocks().inflate(current);
1680 if (lock_stack.is_full()) {
1681 // Inflate the oldest object
1682 inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1683 }
1684 }
1685 }
1686
// RAII helper used by the enter paths: records the ObjectMonitor (if any)
// used for the lock, and on scope exit publishes it to both the
// per-thread OMCache and the BasicLock's monitor cache. Only meaningful
// with UseObjectMonitorTable.
class ObjectSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        // If the monitor is already in the BasicLock cache then it is most
        // likely in the thread cache, do not set it again to avoid reordering.
        if (_monitor != _lock->object_monitor_cache()) {
          _thread->om_set_monitor_cache(_monitor);
          _lock->set_object_monitor_cache(_monitor);
        }
      } else {
        // No monitor was used (e.g. fast-locked): make sure no stale
        // monitor is left behind in the BasicLock cache.
        _lock->clear_object_monitor_cache();
      }
    }
  }

  // Record the monitor to publish on destruction; may be set at most once.
  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }

};
1722
1723 // Reads first from the BasicLock cache then from the OMCache in the current thread.
1724 // C2 fast-path may have put the monitor in the cache in the BasicLock.
1725 inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) {
1726 ObjectMonitor* monitor = lock->object_monitor_cache();
1727 if (monitor == nullptr) {
1728 monitor = current->om_get_from_monitor_cache(object);
1729 }
1730 return monitor;
1731 }
1732
// Debug-only scope guard asserting the locking_thread/current contract:
// either they are the same thread, or locking_thread is suspended for
// deopt. When acting on behalf of another thread, the guard additionally
// forbids safepoints for the scope's duration (debug builds only), since
// a safepoint could let the other thread resume concurrently.
class ObjectSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    // Balance the increment from the constructor.
    if (_no_safepoint){
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};
1750
// Attempt to fast-lock 'obj': CAS the mark word from unlocked to
// fast-locked and push the object on 'current's lock stack. Returns
// false as soon as the mark is observed in any non-unlocked state; the
// caller then falls back to spinning/inflation.
inline bool ObjectSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    // Done inside the loop: may inflate other locks to make room, and a
    // failed CAS retries with the freshly observed mark.
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}
1769
// Spin attempting to fast-lock 'obj' before resorting to inflation.
// Spins with exponential backoff (bounded by FastLockingSpins), checking
// for pending safepoints between bursts. While the mark has a monitor we
// normally stop spinning, except when 'observed_deflation' is set, in
// which case we keep spinning while that monitor is being deflated.
// Returns true iff the lock was acquired via fast-locking.
bool ObjectSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Will spin with exponential backoff with an accumulative O(2^spin_limit) spins.
  const int log_spin_limit = os::is_MP() ? FastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    // One lock attempt per backoff round, even if a safepoint is pending.
    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}
1811
// Monitor-enter on behalf of 'locking_thread', which may differ from the
// executing thread (currently only during deoptimization re-locking).
void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  // When called with locking_thread != Thread::current() some mechanism must synchronize
  // the locking_thread with respect to the current thread. Currently only used when
  // deoptimizing and re-locking locks. See Deoptimization::relock_objects
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    // The locking_thread already fast-locked the object: inflate and
    // take the monitor recursively on its behalf.
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    do {
      // It is assumed that enter_for must enter on an object without contention.
      monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
      // But there may still be a race with deflation.
    } while (monitor == nullptr);
  }

  assert(monitor != nullptr, "ObjectSynchronizer::enter_for must succeed");
  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared");
}
1844
// Monitor-enter for the current thread. Tries, in order: recursive
// fast-lock, inflating a fast-lock this thread already holds, plain
// fast-lock (with optional spinning), and finally inflate-and-enter,
// retrying when a deflating monitor is encountered.
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  // Publishes the monitor (if one is set below) to the caches on scope exit.
  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    // Already fast-locked by this thread, but try_recursive_enter did not
    // apply: inflate and enter the monitor recursively instead.
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fallback to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}
1904
// Monitor-exit / fast-lock release for the current thread. Handles
// recursive fast-lock exits, un-structured exits (by inflating first),
// and inflated-monitor exits including fixing up anonymous ownership.
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails
      // This happens for un-structured unlocks, could potentially
      // fix try_recursive_exit to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  // If we inflated just above, the local 'mark' is stale (still
  // fast-locked): the CAS below fails and refreshes 'mark' to the
  // monitor state, exiting this loop.
  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor;
  if (UseObjectMonitorTable) {
    // Prefer the caches; fall back to a table lookup on a miss.
    monitor = read_caches(current, lock, object);
    if (monitor == nullptr) {
      monitor = get_monitor_from_table(object);
    }
  } else {
    monitor = ObjectSynchronizer::read_monitor(mark);
  }
  if (monitor->has_anonymous_owner()) {
    // Another thread inflated our fast-lock: claim ownership and move the
    // recursion count from the lock stack into the monitor before exiting.
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}
1956
1957 // ObjectSynchronizer::inflate_locked_or_imse is used to get an
1958 // inflated ObjectMonitor* from contexts which require that, such as
1959 // notify/wait and jni_exit. Fast locking keeps the invariant that it
1960 // only inflates if it is already locked by the current thread or the current
1961 // thread is in the process of entering. To maintain this invariant we need to
1962 // throw a java.lang.IllegalMonitorStateException before inflating if the
1963 // current thread is not the owner.
// Returns the inflated monitor for 'obj' if the current thread owns its
// lock, otherwise throws IllegalMonitorStateException (see the comment
// block above for the invariant being maintained).
ObjectMonitor* ObjectSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(obj, mark);
    if (monitor != nullptr) {
      if (monitor->has_anonymous_owner()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
    // monitor == nullptr: the mark had a monitor but the lookup missed
    // (racing deflation); loop and re-read the mark.
  }
}
2006
// Inflate 'object' by installing an ObjectMonitor* directly into its mark
// word (the non-table scheme). 'locking_thread' may be null when called
// from a non-JavaThread (e.g. via FastHashCode); when non-null and it
// holds the fast lock, ownership is transferred to it. Returns the
// installed (or pre-existing) monitor.
ObjectMonitor* ObjectSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {

  // The JavaThread* locking parameter requires that the locking_thread == JavaThread::current,
  // or is suspended throughout the call by some other mechanism.
  // Even with fast locking the thread might be nullptr when called from a non
  // JavaThread. (As may still be the case from FastHashCode). However it is only
  // important for the correctness of the fast locking algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // * inflated     - Just return if using stack-locking.
    //                  If using fast-locking and the ObjectMonitor owner
    //                  is anonymous and the locking_thread owns the
    //                  object lock, then we make the locking_thread
    //                  the ObjectMonitor owner and remove the lock from
    //                  the locking_thread's lock stack.
    // * fast-locked  - Coerce it to inflated from fast-locked.
    // * unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->has_anonymous_owner() &&
          locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(locking_thread);
        size_t removed = locking_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the locking_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the locking_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
      if (own) {
        // Owned by locking_thread.
        monitor->set_owner(locking_thread);
      } else {
        // Owned by somebody else.
        monitor->set_anonymous_owner();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = locking_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header.   A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}
2123
// Inflate an object currently fast-locked by 'locking_thread' into an
// ObjectMonitor owned by that thread, migrating the lock-stack recursion
// count into the monitor. With UseObjectMonitorTable the monitor lives in
// the side table; otherwise inflation goes through the object header.
ObjectMonitor* ObjectSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned, this thread is
    // the current holder of the monitor. So unless the entry is stale and
    // contains a deflating monitor it must be anonymously owned.
    if (monitor->has_anonymous_owner()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflator make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}
2179
// Get (or create) the monitor for 'object' and enter it on behalf of
// 'locking_thread' (which may differ from 'current', e.g. during
// deoptimization). Returns the entered monitor, or nullptr if a deflating
// monitor was encountered, in which case the caller must retry.
ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

  NoSafepointVerifier nsv;

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = read_caches(current, lock, object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    // Lightweight monitors require that hash codes are installed first
    ObjectSynchronizer::FastHashCode(locking_thread, object);
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Holds is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    // Clear the BasicLock cache as it may contain this monitor.
    lock->clear_object_monitor_cache();

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // * inflated     - If the ObjectMonitor owner is anonymous
    //                  and the locking_thread owns the object
    //                  lock, then we make the locking_thread
    //                  the ObjectMonitor owner and remove the
    //                  lock from the locking_thread's lock stack.
    // * fast-locked  - Coerce it to inflated from fast-locked.
    // * neutral      - Inflate the object. Successful CAS is locked

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}
2340
2341 void ObjectSynchronizer::deflate_monitor(oop obj, ObjectMonitor* monitor) {
2342 if (obj != nullptr) {
2343 deflate_mark_word(obj);
2344 remove_monitor(monitor, obj);
2345 }
2346 }
2347
// Look up the ObjectMonitor associated with 'obj' in the global monitor
// table (nullptr if none). Only valid with UseObjectMonitorTable.
ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(obj);
}
2352
// Decode the ObjectMonitor* stored directly in an inflated mark word
// (the non-table scheme; callers use the oop overload when the table is on).
ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
  return mark.monitor();
}
2356
// Convenience overload: read the monitor using obj's current mark word.
ObjectMonitor* ObjectSynchronizer::read_monitor(oop obj) {
  return ObjectSynchronizer::read_monitor(obj, obj->mark());
}
2360
2361 ObjectMonitor* ObjectSynchronizer::read_monitor(oop obj, markWord mark) {
2362 if (!UseObjectMonitorTable) {
2363 return read_monitor(mark);
2364 } else {
2365 return ObjectSynchronizer::get_monitor_from_table(obj);
2366 }
2367 }
2368
// Fast-path monitor enter attempted while still _thread_in_Java (no state
// transition, no safepoint). Returns true if the lock was acquired here;
// false sends the caller to the slow path.
bool ObjectSynchronizer::quick_enter_internal(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32bit which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* monitor;
    if (UseObjectMonitorTable) {
      monitor = read_caches(current, lock, obj);
    } else {
      monitor = ObjectSynchronizer::read_monitor(mark);
    }

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (UseObjectMonitorTable) {
      // Set the monitor regardless of success.
      // Either we successfully lock on the monitor, or we retry with the
      // monitor in the slow path. If the monitor gets deflated, it will be
      // cleared, either by the CacheSetter if we fast lock in enter or in
      // inflate_and_enter when we see that the monitor is deflated.
      lock->set_object_monitor_cache(monitor);
    }

    if (monitor->spin_enter(current)) {
      return true;
    }
  }

  // Slow-path.
  return false;
}
2429
2430 bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
2431 assert(current->thread_state() == _thread_in_Java, "invariant");
2432 NoSafepointVerifier nsv;
2433 if (obj == nullptr) return false; // Need to throw NPE
2434
2435 if (obj->klass()->is_value_based()) {
2436 return false;
2437 }
2438
2439 return ObjectSynchronizer::quick_enter_internal(obj, lock, current);
2440 }