1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmSymbols.hpp"
26 #include "gc/shared/collectedHeap.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/padded.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/markWord.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "runtime/atomicAccess.hpp"
37 #include "runtime/basicLock.inline.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/globals.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/handshake.hpp"
42 #include "runtime/interfaceSupport.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/lockStack.inline.hpp"
45 #include "runtime/mutexLocker.hpp"
46 #include "runtime/objectMonitor.inline.hpp"
47 #include "runtime/objectMonitorTable.hpp"
48 #include "runtime/os.inline.hpp"
49 #include "runtime/osThread.hpp"
50 #include "runtime/safepointMechanism.inline.hpp"
51 #include "runtime/safepointVerifiers.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "runtime/synchronizer.hpp"
55 #include "runtime/threads.hpp"
56 #include "runtime/timer.hpp"
57 #include "runtime/timerTrace.hpp"
58 #include "runtime/trimNativeHeap.hpp"
59 #include "runtime/vframe.hpp"
60 #include "runtime/vmThread.hpp"
61 #include "utilities/align.hpp"
62 #include "utilities/concurrentHashTable.inline.hpp"
63 #include "utilities/concurrentHashTableTasks.inline.hpp"
64 #include "utilities/dtrace.hpp"
65 #include "utilities/events.hpp"
66 #include "utilities/globalCounter.inline.hpp"
67 #include "utilities/globalDefinitions.hpp"
68 #include "utilities/linkedlist.hpp"
69 #include "utilities/preserveException.hpp"
70
71 class ObjectMonitorDeflationLogging;
72
73 void MonitorList::add(ObjectMonitor* m) {
74 ObjectMonitor* head;
75 do {
76 head = AtomicAccess::load(&_head);
77 m->set_next_om(head);
78 } while (AtomicAccess::cmpxchg(&_head, head, m) != head);
79
80 size_t count = AtomicAccess::add(&_count, 1u, memory_order_relaxed);
81 size_t old_max;
82 do {
83 old_max = AtomicAccess::load(&_max);
84 if (count <= old_max) {
85 break;
86 }
87 } while (AtomicAccess::cmpxchg(&_max, old_max, count, memory_order_relaxed) != old_max);
88 }
89
90 size_t MonitorList::count() const {
91 return AtomicAccess::load(&_count);
92 }
93
94 size_t MonitorList::max() const {
95 return AtomicAccess::load(&_max);
96 }
97
98 class ObjectMonitorDeflationSafepointer : public StackObj {
99 JavaThread* const _current;
100 ObjectMonitorDeflationLogging* const _log;
101
102 public:
103 ObjectMonitorDeflationSafepointer(JavaThread* current, ObjectMonitorDeflationLogging* log)
104 : _current(current), _log(log) {}
105
106 void block_for_safepoint(const char* op_name, const char* count_name, size_t counter);
107 };
108
109 // Walk the in-use list and unlink deflated ObjectMonitors.
110 // Returns the number of unlinked ObjectMonitors.
111 size_t MonitorList::unlink_deflated(size_t deflated_count,
112 GrowableArray<ObjectMonitor*>* unlinked_list,
113 ObjectMonitorDeflationSafepointer* safepointer) {
114 size_t unlinked_count = 0;
115 ObjectMonitor* prev = nullptr;
116 ObjectMonitor* m = AtomicAccess::load_acquire(&_head);
117
118 while (m != nullptr) {
119 if (m->is_being_async_deflated()) {
120 // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
121 // modify the list once per batch. The batch starts at "m".
122 size_t unlinked_batch = 0;
123 ObjectMonitor* next = m;
124 // Look for at most MonitorUnlinkBatch monitors, or the number of
125 // deflated and not unlinked monitors, whatever comes first.
126 assert(deflated_count >= unlinked_count, "Sanity: underflow");
127 size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
128 do {
129 ObjectMonitor* next_next = next->next_om();
130 unlinked_batch++;
131 unlinked_list->append(next);
132 next = next_next;
133 if (unlinked_batch >= unlinked_batch_limit) {
134 // Reached the max batch, so bail out of the gathering loop.
135 break;
136 }
137 if (prev == nullptr && AtomicAccess::load(&_head) != m) {
138 // Current batch used to be at head, but it is not at head anymore.
139 // Bail out and figure out where we currently are. This avoids long
140 // walks searching for new prev during unlink under heavy list inserts.
141 break;
142 }
143 } while (next != nullptr && next->is_being_async_deflated());
144
145 // Unlink the found batch.
146 if (prev == nullptr) {
147 // The current batch is the first batch, so there is a chance that it starts at head.
148 // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
149 ObjectMonitor* prev_head = AtomicAccess::cmpxchg(&_head, m, next);
150 if (prev_head != m) {
151 // Something must have updated the head. Figure out the actual prev for this batch.
152 for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
153 prev = n;
154 }
155 assert(prev != nullptr, "Should have found the prev for the current batch");
156 prev->set_next_om(next);
157 }
158 } else {
159 // The current batch is preceded by another batch. This guarantees the current batch
160 // does not start at head. Unlink the entire current batch without updating the head.
161 assert(AtomicAccess::load(&_head) != m, "Sanity");
162 prev->set_next_om(next);
163 }
164
165 unlinked_count += unlinked_batch;
166 if (unlinked_count >= deflated_count) {
167 // Reached the max so bail out of the searching loop.
168 // There should be no more deflated monitors left.
169 break;
170 }
171 m = next;
172 } else {
173 prev = m;
174 m = m->next_om();
175 }
176
177 // Must check for a safepoint/handshake and honor it.
178 safepointer->block_for_safepoint("unlinking", "unlinked_count", unlinked_count);
179 }
180
181 #ifdef ASSERT
182 // Invariant: the code above should unlink all deflated monitors.
183 // The code that runs after this unlinking does not expect deflated monitors.
184 // Notably, attempting to deflate the already deflated monitor would break.
185 {
186 ObjectMonitor* m = AtomicAccess::load_acquire(&_head);
187 while (m != nullptr) {
188 assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
189 m = m->next_om();
190 }
191 }
192 #endif
193
194 AtomicAccess::sub(&_count, unlinked_count);
195 return unlinked_count;
196 }
197
198 MonitorList::Iterator MonitorList::iterator() const {
199 return Iterator(AtomicAccess::load_acquire(&_head));
200 }
201
202 ObjectMonitor* MonitorList::Iterator::next() {
203 ObjectMonitor* current = _current;
204 _current = current->next_om();
205 return current;
206 }
207
208 // The "core" versions of monitor enter and exit reside in this file.
209 // The interpreter and compilers contain specialized transliterated
210 // variants of the enter-exit fast-path operations. See c2_MacroAssembler_x86.cpp
211 // fast_lock(...) for instance. If you make changes here, make sure to modify the
212 // interpreter, and both C1 and C2 fast-path inline locking code emission.
213 //
214 // -----------------------------------------------------------------------------
215
216 #ifdef DTRACE_ENABLED
217
218 // Only bother with this argument setup if dtrace is available
219 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
220
221 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
222 char* bytes = nullptr; \
223 int len = 0; \
224 jlong jtid = SharedRuntime::get_java_tid(thread); \
225 Symbol* klassname = obj->klass()->name(); \
226 if (klassname != nullptr) { \
227 bytes = (char*)klassname->bytes(); \
228 len = klassname->utf8_length(); \
229 }
230
231 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
232 { \
233 if (DTraceMonitorProbes) { \
234 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
235 HOTSPOT_MONITOR_WAIT(jtid, \
236 (uintptr_t)(monitor), bytes, len, (millis)); \
237 } \
238 }
239
240 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
241 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
242 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
243
244 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
245 { \
246 if (DTraceMonitorProbes) { \
247 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
248 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \
249 (uintptr_t)(monitor), bytes, len); \
250 } \
251 }
252
253 #else // ndef DTRACE_ENABLED
254
255 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
256 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
257
258 #endif // ndef DTRACE_ENABLED
259
260 // This exists only as a workaround of dtrace bug 6254741
261 static int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
262 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
263 return 0;
264 }
265
266 static constexpr size_t inflation_lock_count() {
267 return 256;
268 }
269
270 // Static storage for an array of PlatformMutex.
271 alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];
272
273 static inline PlatformMutex* inflation_lock(size_t index) {
274 return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
275 }
276
277 void ObjectSynchronizer::initialize() {
278 for (size_t i = 0; i < inflation_lock_count(); i++) {
279 ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
280 }
281 // Start the ceiling with the estimate for one thread.
282 set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
283
284 // Start the timer for deflations, so it does not trigger immediately.
285 _last_async_deflation_time_ns = os::javaTimeNanos();
286
287 ObjectSynchronizer::create_om_table();
288 }
289
290 MonitorList ObjectSynchronizer::_in_use_list;
291 // monitors_used_above_threshold() policy is as follows:
292 //
293 // The ratio of the current _in_use_list count to the ceiling is used
294 // to determine if we are above MonitorUsedDeflationThreshold and need
295 // to do an async monitor deflation cycle. The ceiling is increased by
296 // AvgMonitorsPerThreadEstimate when a thread is added to the system
297 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
298 // removed from the system.
299 //
300 // Note: If the _in_use_list max exceeds the ceiling, then
301 // monitors_used_above_threshold() will use the in_use_list max instead
302 // of the thread count derived ceiling because we have used more
303 // ObjectMonitors than the estimated average.
304 //
305 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
306 // no-progress async monitor deflation cycles in a row, then the ceiling
307 // is adjusted upwards by monitors_used_above_threshold().
308 //
309 // Start the ceiling with the estimate for one thread in initialize()
310 // which is called after cmd line options are processed.
311 static size_t _in_use_list_ceiling = 0;
312 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
313 bool volatile ObjectSynchronizer::_is_final_audit = false;
314 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
315 static uintx _no_progress_cnt = 0;
316 static bool _no_progress_skip_increment = false;
317
318 // =====================> Quick functions
319
320 // The quick_* forms are special fast-path variants used to improve
321 // performance. In the simplest case, a "quick_*" implementation could
322 // simply return false, in which case the caller will perform the necessary
323 // state transitions and call the slow-path form.
324 // The fast-path is designed to handle frequently arising cases in an efficient
325 // manner and is just a degenerate "optimistic" variant of the slow-path.
326 // returns true -- to indicate the call was satisfied.
327 // returns false -- to indicate the call needs the services of the slow-path.
328 // A no-loitering ordinance is in effect for code in the quick_* family
329 // operators: safepoints or indefinite blocking (blocking that might span a
330 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
331 // entry.
332 //
333 // Consider: An interesting optimization is to have the JIT recognize the
334 // following common idiom:
335 // synchronized (someobj) { .... ; notify(); }
336 // That is, we find a notify() or notifyAll() call that immediately precedes
337 // the monitorexit operation. In that case the JIT could fuse the operations
338 // into a single notifyAndExit() runtime primitive.
339
340 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
341 assert(current->thread_state() == _thread_in_Java, "invariant");
342 NoSafepointVerifier nsv;
343 if (obj == nullptr) return false; // slow-path for invalid obj
344 const markWord mark = obj->mark();
345
346 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
347 // Degenerate notify
348 // fast-locked by caller so by definition the implied waitset is empty.
349 return true;
350 }
351
352 if (mark.has_monitor()) {
353 ObjectMonitor* const mon = read_monitor(current, obj, mark);
354 if (mon == nullptr) {
355 // Racing with inflation/deflation go slow path
356 return false;
357 }
358 assert(mon->object() == oop(obj), "invariant");
359 if (!mon->has_owner(current)) return false; // slow-path for IMS exception
360
361 if (mon->first_waiter() != nullptr) {
362 // We have one or more waiters. Since this is an inflated monitor
363 // that we own, we quickly notify them here and now, avoiding the slow-path.
364 if (all) {
365 mon->quick_notifyAll(current);
366 } else {
367 mon->quick_notify(current);
368 }
369 }
370 return true;
371 }
372
373 // other IMS exception states take the slow-path
374 return false;
375 }
376
377 // Handle notifications when synchronizing on value based classes
378 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
379 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
380 frame last_frame = locking_thread->last_frame();
381 bool bcp_was_adjusted = false;
382 // Don't decrement bcp if it points to the frame's first instruction. This happens when
383 // handle_sync_on_value_based_class() is called because of a synchronized method. There
384 // is no actual monitorenter instruction in the byte code in this case.
385 if (last_frame.is_interpreted_frame() &&
386 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
387 // adjust bcp to point back to monitorenter so that we print the correct line numbers
388 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
389 bcp_was_adjusted = true;
390 }
391
392 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
393 ResourceMark rm;
394 stringStream ss;
395 locking_thread->print_active_stack_on(&ss);
396 char* base = (char*)strstr(ss.base(), "at");
397 char* newline = (char*)strchr(ss.base(), '\n');
398 if (newline != nullptr) {
399 *newline = '\0';
400 }
401 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
402 } else {
403 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
404 ResourceMark rm;
405 Log(valuebasedclasses) vblog;
406
407 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
408 if (locking_thread->has_last_Java_frame()) {
409 LogStream info_stream(vblog.info());
410 locking_thread->print_active_stack_on(&info_stream);
411 } else {
412 vblog.info("Cannot find the last Java frame");
413 }
414
415 EventSyncOnValueBasedClass event;
416 if (event.should_commit()) {
417 event.set_valueBasedClass(obj->klass());
418 event.commit();
419 }
420 }
421
422 if (bcp_was_adjusted) {
423 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
424 }
425 }
426
427 // -----------------------------------------------------------------------------
428 // JNI locks on java objects
429 // NOTE: must use heavy weight monitor to handle jni monitor enter
430 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
431 // Top native frames in the stack will not be seen if we attempt
432 // preemption, since we start walking from the last Java anchor.
433 NoPreemptMark npm(current);
434
435 if (obj->klass()->is_value_based()) {
436 handle_sync_on_value_based_class(obj, current);
437 }
438
439 // the current locking is from JNI instead of Java code
440 current->set_current_pending_monitor_is_from_java(false);
441 // An async deflation can race after the inflate() call and before
442 // enter() can make the ObjectMonitor busy. enter() returns false if
443 // we have lost the race to async deflation and we simply try again.
444 while (true) {
445 BasicLock lock;
446 if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
447 break;
448 }
449 }
450 current->set_current_pending_monitor_is_from_java(true);
451 }
452
453 // NOTE: must use heavy weight monitor to handle jni monitor exit
454 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
455 JavaThread* current = THREAD;
456
457 ObjectMonitor* monitor;
458 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
459 // If this thread has locked the object, exit the monitor. We
460 // intentionally do not use CHECK on check_owner because we must exit the
461 // monitor even if an exception was already pending.
462 if (monitor->check_owner(THREAD)) {
463 monitor->exit(current);
464 }
465 }
466
467 // -----------------------------------------------------------------------------
468 // Internal VM locks on java objects
469 // standard constructor, allows locking failures
470 ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
471 _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
472 assert(!_thread->preempting(), "");
473
474 _thread->check_for_valid_safepoint_state();
475
476 if (_obj() != nullptr) {
477 ObjectSynchronizer::enter(_obj, &_lock, _thread);
478
479 if (_thread->preempting()) {
480 // If preemption was cancelled we acquired the monitor after freezing
481 // the frames. Redoing the vm call laterĀ in thaw will require us to
482 // release it since the call should look like the original one. We
483 // do it in ~ObjectLocker to reduce the window of time we hold the
484 // monitor since we can't do anything useful with it now, and would
485 // otherwise just force other vthreads to preempt in case they try
486 // to acquire this monitor.
487 _skip_exit = !_thread->preemption_cancelled();
488 ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
489 _thread->set_pending_preempted_exception();
490
491 }
492 }
493 }
494
495 ObjectLocker::~ObjectLocker() {
496 if (_obj() != nullptr && !_skip_exit) {
497 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
498 }
499 }
500
501 void ObjectLocker::wait_uninterruptibly(TRAPS) {
502 ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
503 if (_thread->preempting()) {
504 _skip_exit = true;
505 ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
506 _thread->set_pending_preempted_exception();
507 }
508 }
509
510 // -----------------------------------------------------------------------------
511 // Wait/Notify/NotifyAll
512 // NOTE: must use heavy weight monitor to handle wait()
513
514 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
515 JavaThread* current = THREAD;
516 if (millis < 0) {
517 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
518 }
519
520 ObjectMonitor* monitor;
521 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
522
523 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
524 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
525
526 // This dummy call is in place to get around dtrace bug 6254741. Once
527 // that's fixed we can uncomment the following line, remove the call
528 // and change this function back into a "void" func.
529 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
530 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
531 return ret_code;
532 }
533
534 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
535 assert(millis >= 0, "timeout value is negative");
536
537 ObjectMonitor* monitor;
538 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
539 monitor->wait(millis, false, THREAD);
540 }
541
542
543 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
544 JavaThread* current = THREAD;
545
546 markWord mark = obj->mark();
547 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
548 // Not inflated so there can't be any waiters to notify.
549 return;
550 }
551 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
552 monitor->notify(CHECK);
553 }
554
555 // NOTE: see comment of notify()
556 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
557 JavaThread* current = THREAD;
558
559 markWord mark = obj->mark();
560 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
561 // Not inflated so there can't be any waiters to notify.
562 return;
563 }
564
565 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
566 monitor->notifyAll(CHECK);
567 }
568
569 // -----------------------------------------------------------------------------
570 // Hash Code handling
571
572 struct SharedGlobals {
573 char _pad_prefix[OM_CACHE_LINE_SIZE];
574 // This is a highly shared mostly-read variable.
575 // To avoid false-sharing it needs to be the sole occupant of a cache line.
576 volatile int stw_random;
577 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
578 // Hot RW variable -- Sequester to avoid false-sharing
579 volatile int hc_sequence;
580 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
581 };
582
583 static SharedGlobals GVars;
584
585 // hashCode() generation :
586 //
587 // Possibilities:
588 // * MD5Digest of {obj,stw_random}
589 // * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
590 // * A DES- or AES-style SBox[] mechanism
591 // * One of the Phi-based schemes, such as:
592 // 2654435761 = 2^32 * Phi (golden ratio)
593 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
594 // * A variation of Marsaglia's shift-xor RNG scheme.
595 // * (obj ^ stw_random) is appealing, but can result
596 // in undesirable regularity in the hashCode values of adjacent objects
597 // (objects allocated back-to-back, in particular). This could potentially
598 // result in hashtable collisions and reduced hashtable efficiency.
599 // There are simple ways to "diffuse" the middle address bits over the
600 // generated hashCode values:
601
602 static intptr_t get_next_hash(Thread* current, oop obj) {
603 intptr_t value = 0;
604 if (hashCode == 0) {
605 // This form uses global Park-Miller RNG.
606 // On MP system we'll have lots of RW access to a global, so the
607 // mechanism induces lots of coherency traffic.
608 value = os::random();
609 } else if (hashCode == 1) {
610 // This variation has the property of being stable (idempotent)
611 // between STW operations. This can be useful in some of the 1-0
612 // synchronization schemes.
613 intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
614 value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
615 } else if (hashCode == 2) {
616 value = 1; // for sensitivity testing
617 } else if (hashCode == 3) {
618 value = ++GVars.hc_sequence;
619 } else if (hashCode == 4) {
620 value = cast_from_oop<intptr_t>(obj);
621 } else {
622 // Marsaglia's xor-shift scheme with thread-specific state
623 // This is probably the best overall implementation -- we'll
624 // likely make this the default in future releases.
625 unsigned t = current->_hashStateX;
626 t ^= (t << 11);
627 current->_hashStateX = current->_hashStateY;
628 current->_hashStateY = current->_hashStateZ;
629 current->_hashStateZ = current->_hashStateW;
630 unsigned v = current->_hashStateW;
631 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
632 current->_hashStateW = v;
633 value = v;
634 }
635
636 value &= markWord::hash_mask;
637 if (value == 0) value = 0xBAD;
638 assert(value != markWord::no_hash, "invariant");
639 return value;
640 }
641
642 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
643 while (true) {
644 ObjectMonitor* monitor = nullptr;
645 markWord temp, test;
646 intptr_t hash;
647 markWord mark = obj->mark_acquire();
648 // If UseObjectMonitorTable is set the hash can simply be installed in the
649 // object header, since the monitor isn't in the object header.
650 if (UseObjectMonitorTable || !mark.has_monitor()) {
651 hash = mark.hash();
652 if (hash != 0) { // if it has a hash, just return it
653 return hash;
654 }
655 hash = get_next_hash(current, obj); // get a new hash
656 temp = mark.copy_set_hash(hash); // merge the hash into header
657 // try to install the hash
658 test = obj->cas_set_mark(temp, mark);
659 if (test == mark) { // if the hash was installed, return it
660 return hash;
661 }
662 // CAS failed, retry
663 continue;
664
665 // Failed to install the hash. It could be that another thread
666 // installed the hash just before our attempt or inflation has
667 // occurred or... so we fall thru to inflate the monitor for
668 // stability and then install the hash.
669 } else {
670 assert(!mark.is_unlocked() && !mark.is_fast_locked(), "invariant");
671 monitor = mark.monitor();
672 temp = monitor->header();
673 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
674 hash = temp.hash();
675 if (hash != 0) {
676 // It has a hash.
677
678 // Separate load of dmw/header above from the loads in
679 // is_being_async_deflated().
680
681 // dmw/header and _contentions may get written by different threads.
682 // Make sure to observe them in the same order when having several observers.
683 OrderAccess::loadload_for_IRIW();
684
685 if (monitor->is_being_async_deflated()) {
686 // But we can't safely use the hash if we detect that async
687 // deflation has occurred. So we attempt to restore the
688 // header/dmw to the object's header so that we only retry
689 // once if the deflater thread happens to be slow.
690 monitor->install_displaced_markword_in_object(obj);
691 continue;
692 }
693 return hash;
694 }
695 // Fall thru so we only have one place that installs the hash in
696 // the ObjectMonitor.
697 }
698
699 // NOTE: an async deflation can race after we get the monitor and
700 // before we can update the ObjectMonitor's header with the hash
701 // value below.
702 assert(mark.has_monitor(), "must be");
703 monitor = mark.monitor();
704
705 // Load ObjectMonitor's header/dmw field and see if it has a hash.
706 mark = monitor->header();
707 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
708 hash = mark.hash();
709 if (hash == 0) { // if it does not have a hash
710 hash = get_next_hash(current, obj); // get a new hash
711 temp = mark.copy_set_hash(hash) ; // merge the hash into header
712 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
713 uintptr_t v = AtomicAccess::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
714 test = markWord(v);
715 if (test != mark) {
716 // The attempt to update the ObjectMonitor's header/dmw field
717 // did not work. This can happen if another thread managed to
718 // merge in the hash just before our cmpxchg().
719 // If we add any new usages of the header/dmw field, this code
720 // will need to be updated.
721 hash = test.hash();
722 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
723 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
724 }
725 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
726 // If we detect that async deflation has occurred, then we
727 // attempt to restore the header/dmw to the object's header
728 // so that we only retry once if the deflater thread happens
729 // to be slow.
730 monitor->install_displaced_markword_in_object(obj);
731 continue;
732 }
733 }
734 // We finally get the hash.
735 return hash;
736 }
737 }
738
739 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
740 Handle h_obj) {
741 assert(current == JavaThread::current(), "Can only be called on current thread");
742 oop obj = h_obj();
743
744 markWord mark = obj->mark_acquire();
745
746 if (mark.is_fast_locked()) {
747 // fast-locking case, see if lock is in current's lock stack
748 return current->lock_stack().contains(h_obj());
749 }
750
751 while (mark.has_monitor()) {
752 ObjectMonitor* monitor = read_monitor(current, obj, mark);
753 if (monitor != nullptr) {
754 return monitor->is_entered(current) != 0;
755 }
756 // Racing with inflation/deflation, retry
757 mark = obj->mark_acquire();
758
759 if (mark.is_fast_locked()) {
760 // Some other thread fast_locked, current could not have held the lock
761 return false;
762 }
763 }
764
765 // Unlocked case, header in place
766 assert(mark.is_unlocked(), "sanity check");
767 return false;
768 }
769
770 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
771 oop obj = h_obj();
772 markWord mark = obj->mark_acquire();
773
774 if (mark.is_fast_locked()) {
775 // fast-locked so get owner from the object.
776 // owning_thread_from_object() may also return null here:
777 return Threads::owning_thread_from_object(t_list, h_obj());
778 }
779
780 while (mark.has_monitor()) {
781 ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark);
782 if (monitor != nullptr) {
783 return Threads::owning_thread_from_monitor(t_list, monitor);
784 }
785 // Racing with inflation/deflation, retry
786 mark = obj->mark_acquire();
787
788 if (mark.is_fast_locked()) {
789 // Some other thread fast_locked
790 return Threads::owning_thread_from_object(t_list, h_obj());
791 }
792 }
793
794 // Unlocked case, header in place
795 // Cannot have assertion since this object may have been
796 // locked by another thread when reaching here.
797 // assert(mark.is_unlocked(), "sanity check");
798
799 return nullptr;
800 }
801
802 // Visitors ...
803
804 // Iterate over all ObjectMonitors.
805 template <typename Function>
806 void ObjectSynchronizer::monitors_iterate(Function function) {
807 MonitorList::Iterator iter = _in_use_list.iterator();
808 while (iter.has_next()) {
809 ObjectMonitor* monitor = iter.next();
810 function(monitor);
811 }
812 }
813
814 // Iterate ObjectMonitors owned by any thread and where the owner `filter`
815 // returns true.
816 template <typename OwnerFilter>
817 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
818 monitors_iterate([&](ObjectMonitor* monitor) {
819 // This function is only called at a safepoint or when the
820 // target thread is suspended or when the target thread is
821 // operating on itself. The current closures in use today are
822 // only interested in an owned ObjectMonitor and ownership
823 // cannot be dropped under the calling contexts so the
824 // ObjectMonitor cannot be async deflated.
825 if (monitor->has_owner() && filter(monitor)) {
826 assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
827
828 closure->do_monitor(monitor);
829 }
830 });
831 }
832
833 // Iterate ObjectMonitors where the owner == thread; this does NOT include
834 // ObjectMonitors where owner is set to a stack-lock address in thread.
835 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
836 int64_t key = ObjectMonitor::owner_id_from(thread);
837 auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
838 return owned_monitors_iterate_filtered(closure, thread_filter);
839 }
840
841 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, oop vthread) {
842 int64_t key = ObjectMonitor::owner_id_from(vthread);
843 auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
844 return owned_monitors_iterate_filtered(closure, thread_filter);
845 }
846
847 // Iterate ObjectMonitors owned by any thread.
848 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
849 auto all_filter = [&](ObjectMonitor* monitor) { return true; };
850 return owned_monitors_iterate_filtered(closure, all_filter);
851 }
852
853 static bool monitors_used_above_threshold(MonitorList* list) {
854 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
855 return false;
856 }
857 size_t monitors_used = list->count();
858 if (monitors_used == 0) { // empty list is easy
859 return false;
860 }
861 size_t old_ceiling = ObjectSynchronizer::in_use_list_ceiling();
862 // Make sure that we use a ceiling value that is not lower than
863 // previous, not lower than the recorded max used by the system, and
864 // not lower than the current number of monitors in use (which can
865 // race ahead of max). The result is guaranteed > 0.
866 size_t ceiling = MAX3(old_ceiling, list->max(), monitors_used);
867
868 // Check if our monitor usage is above the threshold:
869 size_t monitor_usage = (monitors_used * 100LL) / ceiling;
870 if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
871 // Deflate monitors if over the threshold percentage, unless no
872 // progress on previous deflations.
873 bool is_above_threshold = true;
874
875 // Check if it's time to adjust the in_use_list_ceiling up, due
876 // to too many async deflation attempts without any progress.
877 if (NoAsyncDeflationProgressMax != 0 &&
878 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
879 double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
880 size_t delta = (size_t)(ceiling * remainder) + 1;
881 size_t new_ceiling = (ceiling > SIZE_MAX - delta)
882 ? SIZE_MAX // Overflow, let's clamp new_ceiling.
883 : ceiling + delta;
884
885 ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
886 log_info(monitorinflation)("Too many deflations without progress; "
887 "bumping in_use_list_ceiling from %zu"
888 " to %zu", old_ceiling, new_ceiling);
889 _no_progress_cnt = 0;
890 ceiling = new_ceiling;
891
892 // Check if our monitor usage is still above the threshold:
893 monitor_usage = (monitors_used * 100LL) / ceiling;
894 is_above_threshold = int(monitor_usage) > MonitorUsedDeflationThreshold;
895 }
896 log_info(monitorinflation)("monitors_used=%zu, ceiling=%zu"
897 ", monitor_usage=%zu, threshold=%d",
898 monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
899 return is_above_threshold;
900 }
901
902 return false;
903 }
904
905 size_t ObjectSynchronizer::in_use_list_count() {
906 return _in_use_list.count();
907 }
908
909 size_t ObjectSynchronizer::in_use_list_max() {
910 return _in_use_list.max();
911 }
912
913 size_t ObjectSynchronizer::in_use_list_ceiling() {
914 return _in_use_list_ceiling;
915 }
916
917 void ObjectSynchronizer::dec_in_use_list_ceiling() {
918 AtomicAccess::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
919 }
920
921 void ObjectSynchronizer::inc_in_use_list_ceiling() {
922 AtomicAccess::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
923 }
924
925 void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
926 _in_use_list_ceiling = new_value;
927 }
928
929 bool ObjectSynchronizer::is_async_deflation_needed() {
930 if (is_async_deflation_requested()) {
931 // Async deflation request.
932 log_info(monitorinflation)("Async deflation needed: explicit request");
933 return true;
934 }
935
936 jlong time_since_last = time_since_last_async_deflation_ms();
937
938 if (AsyncDeflationInterval > 0 &&
939 time_since_last > AsyncDeflationInterval &&
940 monitors_used_above_threshold(&_in_use_list)) {
941 // It's been longer than our specified deflate interval and there
942 // are too many monitors in use. We don't deflate more frequently
943 // than AsyncDeflationInterval (unless is_async_deflation_requested)
944 // in order to not swamp the MonitorDeflationThread.
945 log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
946 return true;
947 }
948
949 if (GuaranteedAsyncDeflationInterval > 0 &&
950 time_since_last > GuaranteedAsyncDeflationInterval) {
951 // It's been longer than our specified guaranteed deflate interval.
952 // We need to clean up the used monitors even if the threshold is
953 // not reached, to keep the memory utilization at bay when many threads
954 // touched many monitors.
955 log_info(monitorinflation)("Async deflation needed: guaranteed interval (%zd ms) "
956 "is greater than time since last deflation (" JLONG_FORMAT " ms)",
957 GuaranteedAsyncDeflationInterval, time_since_last);
958
959 // If this deflation has no progress, then it should not affect the no-progress
960 // tracking, otherwise threshold heuristics would think it was triggered, experienced
961 // no progress, and needs to backoff more aggressively. In this "no progress" case,
962 // the generic code would bump the no-progress counter, and we compensate for that
963 // by telling it to skip the update.
964 //
965 // If this deflation has progress, then it should let non-progress tracking
966 // know about this, otherwise the threshold heuristics would kick in, potentially
967 // experience no-progress due to aggressive cleanup by this deflation, and think
968 // it is still in no-progress stride. In this "progress" case, the generic code would
969 // zero the counter, and we allow it to happen.
970 _no_progress_skip_increment = true;
971
972 return true;
973 }
974
975 return false;
976 }
977
978 void ObjectSynchronizer::request_deflate_idle_monitors() {
979 MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
980 set_is_async_deflation_requested(true);
981 ml.notify_all();
982 }
983
984 bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
985 JavaThread* current = JavaThread::current();
986 bool ret_code = false;
987
988 jlong last_time = last_async_deflation_time_ns();
989
990 request_deflate_idle_monitors();
991
992 const int N_CHECKS = 5;
993 for (int i = 0; i < N_CHECKS; i++) { // sleep for at most 5 seconds
994 if (last_async_deflation_time_ns() > last_time) {
995 log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
996 ret_code = true;
997 break;
998 }
999 {
1000 // JavaThread has to honor the blocking protocol.
1001 ThreadBlockInVM tbivm(current);
1002 os::naked_short_sleep(999); // sleep for almost 1 second
1003 }
1004 }
1005 if (!ret_code) {
1006 log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
1007 }
1008
1009 return ret_code;
1010 }
1011
1012 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1013 return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1014 }
1015
1016 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1017 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1018 //
1019 size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
1020 MonitorList::Iterator iter = _in_use_list.iterator();
1021 size_t deflated_count = 0;
1022 Thread* current = Thread::current();
1023
1024 while (iter.has_next()) {
1025 if (deflated_count >= (size_t)MonitorDeflationMax) {
1026 break;
1027 }
1028 ObjectMonitor* mid = iter.next();
1029 if (mid->deflate_monitor(current)) {
1030 deflated_count++;
1031 }
1032
1033 // Must check for a safepoint/handshake and honor it.
1034 safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
1035 }
1036
1037 return deflated_count;
1038 }
1039
1040 class DeflationHandshakeClosure : public HandshakeClosure {
1041 public:
1042 DeflationHandshakeClosure() : HandshakeClosure("DeflationHandshakeClosure") {}
1043
1044 void do_thread(Thread* thread) {
1045 log_trace(monitorinflation)("DeflationHandshakeClosure::do_thread: thread="
1046 INTPTR_FORMAT, p2i(thread));
1047 if (thread->is_Java_thread()) {
1048 // Clear OM cache
1049 JavaThread* jt = JavaThread::cast(thread);
1050 jt->om_clear_monitor_cache();
1051 }
1052 }
1053 };
1054
1055 class VM_RendezvousGCThreads : public VM_Operation {
1056 public:
1057 bool evaluate_at_safepoint() const override { return false; }
1058 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1059 void doit() override {
1060 Universe::heap()->safepoint_synchronize_begin();
1061 Universe::heap()->safepoint_synchronize_end();
1062 };
1063 };
1064
1065 static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
1066 ObjectMonitorDeflationSafepointer* safepointer) {
1067 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1068 size_t deleted_count = 0;
1069 for (ObjectMonitor* monitor: *delete_list) {
1070 delete monitor;
1071 deleted_count++;
1072 // A JavaThread must check for a safepoint/handshake and honor it.
1073 safepointer->block_for_safepoint("deletion", "deleted_count", deleted_count);
1074 }
1075 return deleted_count;
1076 }
1077
1078 class ObjectMonitorDeflationLogging: public StackObj {
1079 LogStreamHandle(Debug, monitorinflation) _debug;
1080 LogStreamHandle(Info, monitorinflation) _info;
1081 LogStream* _stream;
1082 elapsedTimer _timer;
1083
1084 size_t ceiling() const { return ObjectSynchronizer::in_use_list_ceiling(); }
1085 size_t count() const { return ObjectSynchronizer::in_use_list_count(); }
1086 size_t max() const { return ObjectSynchronizer::in_use_list_max(); }
1087
1088 public:
1089 ObjectMonitorDeflationLogging()
1090 : _debug(), _info(), _stream(nullptr) {
1091 if (_debug.is_enabled()) {
1092 _stream = &_debug;
1093 } else if (_info.is_enabled()) {
1094 _stream = &_info;
1095 }
1096 }
1097
1098 void begin() {
1099 if (_stream != nullptr) {
1100 _stream->print_cr("begin deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
1101 ceiling(), count(), max());
1102 _timer.start();
1103 }
1104 }
1105
1106 void before_handshake(size_t unlinked_count) {
1107 if (_stream != nullptr) {
1108 _timer.stop();
1109 _stream->print_cr("before handshaking: unlinked_count=%zu"
1110 ", in_use_list stats: ceiling=%zu, count="
1111 "%zu, max=%zu",
1112 unlinked_count, ceiling(), count(), max());
1113 }
1114 }
1115
1116 void after_handshake() {
1117 if (_stream != nullptr) {
1118 _stream->print_cr("after handshaking: in_use_list stats: ceiling="
1119 "%zu, count=%zu, max=%zu",
1120 ceiling(), count(), max());
1121 _timer.start();
1122 }
1123 }
1124
1125 void end(size_t deflated_count, size_t unlinked_count) {
1126 if (_stream != nullptr) {
1127 _timer.stop();
1128 if (deflated_count != 0 || unlinked_count != 0 || _debug.is_enabled()) {
1129 _stream->print_cr("deflated_count=%zu, {unlinked,deleted}_count=%zu monitors in %3.7f secs",
1130 deflated_count, unlinked_count, _timer.seconds());
1131 }
1132 _stream->print_cr("end deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
1133 ceiling(), count(), max());
1134 }
1135 }
1136
1137 void before_block_for_safepoint(const char* op_name, const char* cnt_name, size_t cnt) {
1138 if (_stream != nullptr) {
1139 _timer.stop();
1140 _stream->print_cr("pausing %s: %s=%zu, in_use_list stats: ceiling="
1141 "%zu, count=%zu, max=%zu",
1142 op_name, cnt_name, cnt, ceiling(), count(), max());
1143 }
1144 }
1145
1146 void after_block_for_safepoint(const char* op_name) {
1147 if (_stream != nullptr) {
1148 _stream->print_cr("resuming %s: in_use_list stats: ceiling=%zu"
1149 ", count=%zu, max=%zu", op_name,
1150 ceiling(), count(), max());
1151 _timer.start();
1152 }
1153 }
1154 };
1155
1156 void ObjectMonitorDeflationSafepointer::block_for_safepoint(const char* op_name, const char* count_name, size_t counter) {
1157 if (!SafepointMechanism::should_process(_current)) {
1158 return;
1159 }
1160
1161 // A safepoint/handshake has started.
1162 _log->before_block_for_safepoint(op_name, count_name, counter);
1163
1164 {
1165 // Honor block request.
1166 ThreadBlockInVM tbivm(_current);
1167 }
1168
1169 _log->after_block_for_safepoint(op_name);
1170 }
1171
1172 // This function is called by the MonitorDeflationThread to deflate
1173 // ObjectMonitors.
1174 size_t ObjectSynchronizer::deflate_idle_monitors() {
1175 JavaThread* current = JavaThread::current();
1176 assert(current->is_monitor_deflation_thread(), "The only monitor deflater");
1177
1178 // The async deflation request has been processed.
1179 _last_async_deflation_time_ns = os::javaTimeNanos();
1180 set_is_async_deflation_requested(false);
1181
1182 ObjectMonitorDeflationLogging log;
1183 ObjectMonitorDeflationSafepointer safepointer(current, &log);
1184
1185 log.begin();
1186
1187 // Deflate some idle ObjectMonitors.
1188 size_t deflated_count = deflate_monitor_list(&safepointer);
1189
1190 // Unlink the deflated ObjectMonitors from the in-use list.
1191 size_t unlinked_count = 0;
1192 size_t deleted_count = 0;
1193 if (deflated_count > 0) {
1194 ResourceMark rm(current);
1195 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1196 unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
1197
1198 #ifdef ASSERT
1199 if (UseObjectMonitorTable) {
1200 for (ObjectMonitor* monitor : delete_list) {
1201 assert(!ObjectSynchronizer::contains_monitor(current, monitor), "Should have been removed");
1202 }
1203 }
1204 #endif
1205
1206 log.before_handshake(unlinked_count);
1207
1208 // A JavaThread needs to handshake in order to safely free the
1209 // ObjectMonitors that were deflated in this cycle.
1210 DeflationHandshakeClosure dhc;
1211 Handshake::execute(&dhc);
1212 // Also, we sync and desync GC threads around the handshake, so that they can
1213 // safely read the mark-word and look-through to the object-monitor, without
1214 // being afraid that the object-monitor is going away.
1215 VM_RendezvousGCThreads sync_gc;
1216 VMThread::execute(&sync_gc);
1217
1218 log.after_handshake();
1219
1220 // After the handshake, safely free the ObjectMonitors that were
1221 // deflated and unlinked in this cycle.
1222
1223 // Delete the unlinked ObjectMonitors.
1224 deleted_count = delete_monitors(&delete_list, &safepointer);
1225 assert(unlinked_count == deleted_count, "must be");
1226 }
1227
1228 log.end(deflated_count, unlinked_count);
1229
1230 GVars.stw_random = os::random();
1231
1232 if (deflated_count != 0) {
1233 _no_progress_cnt = 0;
1234 } else if (_no_progress_skip_increment) {
1235 _no_progress_skip_increment = false;
1236 } else {
1237 _no_progress_cnt++;
1238 }
1239
1240 return deflated_count;
1241 }
1242
1243 // Monitor cleanup on JavaThread::exit
1244
1245 // Iterate through monitor cache and attempt to release thread's monitors
1246 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1247 private:
1248 JavaThread* _thread;
1249
1250 public:
1251 ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1252 void do_monitor(ObjectMonitor* mid) {
1253 mid->complete_exit(_thread);
1254 }
1255 };
1256
1257 // Release all inflated monitors owned by current thread. Lightweight monitors are
1258 // ignored. This is meant to be called during JNI thread detach which assumes
1259 // all remaining monitors are heavyweight. All exceptions are swallowed.
1260 // Scanning the extant monitor list can be time consuming.
1261 // A simple optimization is to add a per-thread flag that indicates a thread
1262 // called jni_monitorenter() during its lifetime.
1263 //
1264 // Instead of NoSafepointVerifier it might be cheaper to
1265 // use an idiom of the form:
1266 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1267 // <code that must not run at safepoint>
1268 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1269 // Since the tests are extremely cheap we could leave them enabled
1270 // for normal product builds.
1271
1272 void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
1273 assert(current == JavaThread::current(), "must be current Java thread");
1274 NoSafepointVerifier nsv;
1275 ReleaseJavaMonitorsClosure rjmc(current);
1276 ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
1277 assert(!current->has_pending_exception(), "Should not be possible");
1278 current->clear_pending_exception();
1279 }
1280
1281 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1282 switch (cause) {
1283 case inflate_cause_vm_internal: return "VM Internal";
1284 case inflate_cause_monitor_enter: return "Monitor Enter";
1285 case inflate_cause_wait: return "Monitor Wait";
1286 case inflate_cause_notify: return "Monitor Notify";
1287 case inflate_cause_jni_enter: return "JNI Monitor Enter";
1288 case inflate_cause_jni_exit: return "JNI Monitor Exit";
1289 default:
1290 ShouldNotReachHere();
1291 }
1292 return "Unknown";
1293 }
1294
1295 //------------------------------------------------------------------------------
1296 // Debugging code
1297
1298 u_char* ObjectSynchronizer::get_gvars_addr() {
1299 return (u_char*)&GVars;
1300 }
1301
1302 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1303 return (u_char*)&GVars.hc_sequence;
1304 }
1305
1306 size_t ObjectSynchronizer::get_gvars_size() {
1307 return sizeof(SharedGlobals);
1308 }
1309
1310 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
1311 return (u_char*)&GVars.stw_random;
1312 }
1313
1314 // Do the final audit and print of ObjectMonitor stats; must be done
1315 // by the VMThread at VM exit time.
1316 void ObjectSynchronizer::do_final_audit_and_print_stats() {
1317 assert(Thread::current()->is_VM_thread(), "sanity check");
1318
1319 if (is_final_audit()) { // Only do the audit once.
1320 return;
1321 }
1322 set_is_final_audit();
1323 log_info(monitorinflation)("Starting the final audit.");
1324
1325 if (log_is_enabled(Info, monitorinflation)) {
1326 LogStreamHandle(Info, monitorinflation) ls;
1327 audit_and_print_stats(&ls, true /* on_exit */);
1328 }
1329 }
1330
1331 // This function can be called by the MonitorDeflationThread or it can be called when
1332 // we are trying to exit the VM. The list walker functions can run in parallel with
1333 // the other list operations.
1334 // Calls to this function can be added in various places as a debugging
1335 // aid.
1336 //
1337 void ObjectSynchronizer::audit_and_print_stats(outputStream* ls, bool on_exit) {
1338 int error_cnt = 0;
1339
1340 ls->print_cr("Checking in_use_list:");
1341 chk_in_use_list(ls, &error_cnt);
1342
1343 if (error_cnt == 0) {
1344 ls->print_cr("No errors found in in_use_list checks.");
1345 } else {
1346 log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
1347 }
1348
1349 // When exiting, only log the interesting entries at the Info level.
1350 // When called at intervals by the MonitorDeflationThread, log output
1351 // at the Trace level since there can be a lot of it.
1352 if (!on_exit && log_is_enabled(Trace, monitorinflation)) {
1353 LogStreamHandle(Trace, monitorinflation) ls_tr;
1354 log_in_use_monitor_details(&ls_tr, true /* log_all */);
1355 } else if (on_exit) {
1356 log_in_use_monitor_details(ls, false /* log_all */);
1357 }
1358
1359 ls->flush();
1360
1361 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1362 }
1363
1364 // Check the in_use_list; log the results of the checks.
1365 void ObjectSynchronizer::chk_in_use_list(outputStream* out, int *error_cnt_p) {
1366 size_t l_in_use_count = _in_use_list.count();
1367 size_t l_in_use_max = _in_use_list.max();
1368 out->print_cr("count=%zu, max=%zu", l_in_use_count,
1369 l_in_use_max);
1370
1371 size_t ck_in_use_count = 0;
1372 MonitorList::Iterator iter = _in_use_list.iterator();
1373 while (iter.has_next()) {
1374 ObjectMonitor* mid = iter.next();
1375 chk_in_use_entry(mid, out, error_cnt_p);
1376 ck_in_use_count++;
1377 }
1378
1379 if (l_in_use_count == ck_in_use_count) {
1380 out->print_cr("in_use_count=%zu equals ck_in_use_count=%zu",
1381 l_in_use_count, ck_in_use_count);
1382 } else {
1383 out->print_cr("WARNING: in_use_count=%zu is not equal to "
1384 "ck_in_use_count=%zu", l_in_use_count,
1385 ck_in_use_count);
1386 }
1387
1388 size_t ck_in_use_max = _in_use_list.max();
1389 if (l_in_use_max == ck_in_use_max) {
1390 out->print_cr("in_use_max=%zu equals ck_in_use_max=%zu",
1391 l_in_use_max, ck_in_use_max);
1392 } else {
1393 out->print_cr("WARNING: in_use_max=%zu is not equal to "
1394 "ck_in_use_max=%zu", l_in_use_max, ck_in_use_max);
1395 }
1396 }
1397
1398 // Check an in-use monitor entry; log any errors.
1399 void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
1400 int* error_cnt_p) {
1401 if (n->owner_is_DEFLATER_MARKER()) {
1402 // This could happen when monitor deflation blocks for a safepoint.
1403 return;
1404 }
1405
1406
1407 if (n->metadata() == 0) {
1408 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
1409 "have non-null _metadata (header/hash) field.", p2i(n));
1410 *error_cnt_p = *error_cnt_p + 1;
1411 }
1412
1413 const oop obj = n->object_peek();
1414 if (obj == nullptr) {
1415 return;
1416 }
1417
1418 const markWord mark = obj->mark();
1419 if (!mark.has_monitor()) {
1420 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1421 "object does not think it has a monitor: obj="
1422 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
1423 p2i(obj), mark.value());
1424 *error_cnt_p = *error_cnt_p + 1;
1425 return;
1426 }
1427
1428 ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
1429 if (n != obj_mon) {
1430 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1431 "object does not refer to the same monitor: obj="
1432 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
1433 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
1434 *error_cnt_p = *error_cnt_p + 1;
1435 }
1436 }
1437
1438 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
1439 // flags indicate why the entry is in-use, 'object' and 'object type'
1440 // indicate the associated object and its type.
1441 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
1442 if (_in_use_list.count() > 0) {
1443 stringStream ss;
1444 out->print_cr("In-use monitor info%s:", log_all ? "" : " (eliding idle monitors)");
1445 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
1446 out->print_cr("%18s %s %18s %18s",
1447 "monitor", "BHL", "object", "object type");
1448 out->print_cr("================== === ================== ==================");
1449
1450 auto is_interesting = [&](ObjectMonitor* monitor) {
1451 return log_all || monitor->has_owner() || monitor->is_busy();
1452 };
1453
1454 monitors_iterate([&](ObjectMonitor* monitor) {
1455 if (is_interesting(monitor)) {
1456 const oop obj = monitor->object_peek();
1457 const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
1458 ResourceMark rm;
1459 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
1460 monitor->is_busy(), hash != 0, monitor->has_owner(),
1461 p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
1462 if (monitor->is_busy()) {
1463 out->print(" (%s)", monitor->is_busy_to_string(&ss));
1464 ss.reset();
1465 }
1466 out->cr();
1467 }
1468 });
1469 }
1470
1471 out->flush();
1472 }
1473
1474 ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
1475 ObjectMonitor* monitor = get_monitor_from_table(current, object);
1476 if (monitor != nullptr) {
1477 *inserted = false;
1478 return monitor;
1479 }
1480
1481 ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
1482 alloced_monitor->set_anonymous_owner();
1483
1484 // Try insert monitor
1485 monitor = add_monitor(current, alloced_monitor, object);
1486
1487 *inserted = alloced_monitor == monitor;
1488 if (!*inserted) {
1489 delete alloced_monitor;
1490 }
1491
1492 return monitor;
1493 }
1494
1495 static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
1496 if (log_is_enabled(Trace, monitorinflation)) {
1497 ResourceMark rm(current);
1498 log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
1499 INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
1500 object->mark().value(), object->klass()->external_name(),
1501 ObjectSynchronizer::inflate_cause_name(cause));
1502 }
1503 }
1504
1505 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1506 const oop obj,
1507 ObjectSynchronizer::InflateCause cause) {
1508 assert(event != nullptr, "invariant");
1509 const Klass* monitor_klass = obj->klass();
1510 if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
1511 return;
1512 }
1513 event->set_monitorClass(monitor_klass);
1514 event->set_address((uintptr_t)(void*)obj);
1515 event->set_cause((u1)cause);
1516 event->commit();
1517 }
1518
1519 ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
1520 assert(UseObjectMonitorTable, "must be");
1521
1522 EventJavaMonitorInflate event;
1523
1524 bool inserted;
1525 ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);
1526
1527 if (inserted) {
1528 log_inflate(current, object, cause);
1529 if (event.should_commit()) {
1530 post_monitor_inflate_event(&event, object, cause);
1531 }
1532
1533 // The monitor has an anonymous owner so it is safe from async deflation.
1534 ObjectSynchronizer::_in_use_list.add(monitor);
1535 }
1536
1537 return monitor;
1538 }
1539
1540 // Add the hashcode to the monitor to match the object and put it in the hashtable.
1541 ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
1542 assert(UseObjectMonitorTable, "must be");
1543 assert(obj == monitor->object(), "must be");
1544
1545 intptr_t hash = obj->mark().hash();
1546 assert(hash != 0, "must be set when claiming the object monitor");
1547 monitor->set_hash(hash);
1548
1549 return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
1550 }
1551
1552 bool ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
1553 assert(UseObjectMonitorTable, "must be");
1554 assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
1555
1556 return ObjectMonitorTable::remove_monitor_entry(current, monitor);
1557 }
1558
1559 void ObjectSynchronizer::deflate_mark_word(oop obj) {
1560 assert(UseObjectMonitorTable, "must be");
1561
1562 markWord mark = obj->mark_acquire();
1563 assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
1564
1565 while (mark.has_monitor()) {
1566 const markWord new_mark = mark.clear_lock_bits().set_unlocked();
1567 mark = obj->cas_set_mark(new_mark, mark);
1568 }
1569 }
1570
1571 void ObjectSynchronizer::create_om_table() {
1572 if (!UseObjectMonitorTable) {
1573 return;
1574 }
1575 ObjectMonitorTable::create();
1576 }
1577
1578 bool ObjectSynchronizer::needs_resize() {
1579 if (!UseObjectMonitorTable) {
1580 return false;
1581 }
1582 return ObjectMonitorTable::should_resize();
1583 }
1584
1585 bool ObjectSynchronizer::resize_table(JavaThread* current) {
1586 if (!UseObjectMonitorTable) {
1587 return true;
1588 }
1589 return ObjectMonitorTable::resize(current);
1590 }
1591
1592 class ObjectSynchronizer::LockStackInflateContendedLocks : private OopClosure {
1593 private:
1594 oop _contended_oops[LockStack::CAPACITY];
1595 int _length;
1596
1597 void do_oop(oop* o) final {
1598 oop obj = *o;
1599 if (obj->mark_acquire().has_monitor()) {
1600 if (_length > 0 && _contended_oops[_length - 1] == obj) {
1601 // Recursive
1602 return;
1603 }
1604 _contended_oops[_length++] = obj;
1605 }
1606 }
1607
1608 void do_oop(narrowOop* o) final {
1609 ShouldNotReachHere();
1610 }
1611
1612 public:
1613 LockStackInflateContendedLocks() :
1614 _contended_oops(),
1615 _length(0) {};
1616
1617 void inflate(JavaThread* current) {
1618 assert(current == JavaThread::current(), "must be");
1619 current->lock_stack().oops_do(this);
1620 for (int i = 0; i < _length; i++) {
1621 ObjectSynchronizer::
1622 inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1623 }
1624 }
1625 };
1626
1627 void ObjectSynchronizer::ensure_lock_stack_space(JavaThread* current) {
1628 assert(current == JavaThread::current(), "must be");
1629 LockStack& lock_stack = current->lock_stack();
1630
1631 // Make room on lock_stack
1632 if (lock_stack.is_full()) {
1633 // Inflate contended objects
1634 LockStackInflateContendedLocks().inflate(current);
1635 if (lock_stack.is_full()) {
1636 // Inflate the oldest object
1637 inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1638 }
1639 }
1640 }
1641
1642 class ObjectSynchronizer::CacheSetter : StackObj {
1643 JavaThread* const _thread;
1644 BasicLock* const _lock;
1645 ObjectMonitor* _monitor;
1646
1647 NONCOPYABLE(CacheSetter);
1648
1649 public:
1650 CacheSetter(JavaThread* thread, BasicLock* lock) :
1651 _thread(thread),
1652 _lock(lock),
1653 _monitor(nullptr) {}
1654
1655 ~CacheSetter() {
1656 // Only use the cache if using the table.
1657 if (UseObjectMonitorTable) {
1658 if (_monitor != nullptr) {
1659 // If the monitor is already in the BasicLock cache then it is most
1660 // likely in the thread cache, do not set it again to avoid reordering.
1661 if (_monitor != _lock->object_monitor_cache()) {
1662 _thread->om_set_monitor_cache(_monitor);
1663 _lock->set_object_monitor_cache(_monitor);
1664 }
1665 } else {
1666 _lock->clear_object_monitor_cache();
1667 }
1668 }
1669 }
1670
1671 void set_monitor(ObjectMonitor* monitor) {
1672 assert(_monitor == nullptr, "only set once");
1673 _monitor = monitor;
1674 }
1675
1676 };
1677
1678 // Reads first from the BasicLock cache then from the OMCache in the current thread.
1679 // C2 fast-path may have put the monitor in the cache in the BasicLock.
1680 inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) {
1681 ObjectMonitor* monitor = lock->object_monitor_cache();
1682 if (monitor == nullptr) {
1683 monitor = current->om_get_from_monitor_cache(object);
1684 }
1685 return monitor;
1686 }
1687
1688 class ObjectSynchronizer::VerifyThreadState {
1689 bool _no_safepoint;
1690
1691 public:
1692 VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
1693 assert(current == Thread::current(), "must be");
1694 assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
1695 if (_no_safepoint) {
1696 DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
1697 }
1698 }
1699 ~VerifyThreadState() {
1700 if (_no_safepoint){
1701 DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
1702 }
1703 }
1704 };
1705
1706 inline bool ObjectSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
1707 markWord mark = obj->mark();
1708 while (mark.is_unlocked()) {
1709 ensure_lock_stack_space(current);
1710 assert(!lock_stack.is_full(), "must have made room on the lock stack");
1711 assert(!lock_stack.contains(obj), "thread must not already hold the lock");
1712 // Try to swing into 'fast-locked' state.
1713 markWord locked_mark = mark.set_fast_locked();
1714 markWord old_mark = mark;
1715 mark = obj->cas_set_mark(locked_mark, old_mark);
1716 if (old_mark == mark) {
1717 // Successfully fast-locked, push object to lock-stack and return.
1718 lock_stack.push(obj);
1719 return true;
1720 }
1721 }
1722 return false;
1723 }
1724
1725 bool ObjectSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
1726 assert(UseObjectMonitorTable, "must be");
1727 // Will spin with exponential backoff with an accumulative O(2^spin_limit) spins.
1728 const int log_spin_limit = os::is_MP() ? FastLockingSpins : 1;
1729 const int log_min_safepoint_check_interval = 10;
1730
1731 markWord mark = obj->mark();
1732 const auto should_spin = [&]() {
1733 if (!mark.has_monitor()) {
1734 // Spin while not inflated.
1735 return true;
1736 } else if (observed_deflation) {
1737 // Spin while monitor is being deflated.
1738 ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
1739 return monitor == nullptr || monitor->is_being_async_deflated();
1740 }
1741 // Else stop spinning.
1742 return false;
1743 };
1744 // Always attempt to lock once even when safepoint synchronizing.
1745 bool should_process = false;
1746 for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
1747 // Spin with exponential backoff.
1748 const int total_spin_count = 1 << i;
1749 const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
1750 const int outer_spin_count = total_spin_count / inner_spin_count;
1751 for (int outer = 0; outer < outer_spin_count; outer++) {
1752 should_process = SafepointMechanism::should_process(current);
1753 if (should_process) {
1754 // Stop spinning for safepoint.
1755 break;
1756 }
1757 for (int inner = 1; inner < inner_spin_count; inner++) {
1758 SpinPause();
1759 }
1760 }
1761
1762 if (fast_lock_try_enter(obj, lock_stack, current)) return true;
1763 }
1764 return false;
1765 }
1766
1767 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
1768 // When called with locking_thread != Thread::current() some mechanism must synchronize
1769 // the locking_thread with respect to the current thread. Currently only used when
1770 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
1771 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
1772
1773 assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
1774 JavaThread* current = JavaThread::current();
1775 VerifyThreadState vts(locking_thread, current);
1776
1777 if (obj->klass()->is_value_based()) {
1778 ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
1779 }
1780
1781 LockStack& lock_stack = locking_thread->lock_stack();
1782
1783 ObjectMonitor* monitor = nullptr;
1784 if (lock_stack.contains(obj())) {
1785 monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
1786 bool entered = monitor->enter_for(locking_thread);
1787 assert(entered, "recursive ObjectMonitor::enter_for must succeed");
1788 } else {
1789 do {
1790 // It is assumed that enter_for must enter on an object without contention.
1791 monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
1792 // But there may still be a race with deflation.
1793 } while (monitor == nullptr);
1794 }
1795
1796 assert(monitor != nullptr, "ObjectSynchronizer::enter_for must succeed");
1797 assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared");
1798 }
1799
1800 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
1801 assert(current == JavaThread::current(), "must be");
1802
1803 if (obj->klass()->is_value_based()) {
1804 ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
1805 }
1806
1807 CacheSetter cache_setter(current, lock);
1808
1809 // Used when deflation is observed. Progress here requires progress
1810 // from the deflator. After observing that the deflator is not
1811 // making progress (after two yields), switch to sleeping.
1812 SpinYield spin_yield(0, 2);
1813 bool observed_deflation = false;
1814
1815 LockStack& lock_stack = current->lock_stack();
1816
1817 if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
1818 // Recursively fast locked
1819 return;
1820 }
1821
1822 if (lock_stack.contains(obj())) {
1823 ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
1824 bool entered = monitor->enter(current);
1825 assert(entered, "recursive ObjectMonitor::enter must succeed");
1826 cache_setter.set_monitor(monitor);
1827 return;
1828 }
1829
1830 while (true) {
1831 // Fast-locking does not use the 'lock' argument.
1832 // Fast-lock spinning to avoid inflating for short critical sections.
1833 // The goal is to only inflate when the extra cost of using ObjectMonitors
1834 // is worth it.
1835 // If deflation has been observed we also spin while deflation is ongoing.
1836 if (fast_lock_try_enter(obj(), lock_stack, current)) {
1837 return;
1838 } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
1839 return;
1840 }
1841
1842 if (observed_deflation) {
1843 spin_yield.wait();
1844 }
1845
1846 ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
1847 if (monitor != nullptr) {
1848 cache_setter.set_monitor(monitor);
1849 return;
1850 }
1851
1852 // If inflate_and_enter returns nullptr it is because a deflated monitor
1853 // was encountered. Fallback to fast locking. The deflater is responsible
1854 // for clearing out the monitor and transitioning the markWord back to
1855 // fast locking.
1856 observed_deflation = true;
1857 }
1858 }
1859
1860 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
1861 assert(current == Thread::current(), "must be");
1862
1863 markWord mark = object->mark();
1864 assert(!mark.is_unlocked(), "must be");
1865
1866 LockStack& lock_stack = current->lock_stack();
1867 if (mark.is_fast_locked()) {
1868 if (lock_stack.try_recursive_exit(object)) {
1869 // This is a recursive exit which succeeded
1870 return;
1871 }
1872 if (lock_stack.is_recursive(object)) {
1873 // Must inflate recursive locks if try_recursive_exit fails
1874 // This happens for un-structured unlocks, could potentially
1875 // fix try_recursive_exit to handle these.
1876 inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1877 }
1878 }
1879
1880 while (mark.is_fast_locked()) {
1881 markWord unlocked_mark = mark.set_unlocked();
1882 markWord old_mark = mark;
1883 mark = object->cas_set_mark(unlocked_mark, old_mark);
1884 if (old_mark == mark) {
1885 // CAS successful, remove from lock_stack
1886 size_t recursion = lock_stack.remove(object) - 1;
1887 assert(recursion == 0, "Should not have unlocked here");
1888 return;
1889 }
1890 }
1891
1892 assert(mark.has_monitor(), "must be");
1893 // The monitor exists
1894 ObjectMonitor* monitor;
1895 if (UseObjectMonitorTable) {
1896 monitor = read_caches(current, lock, object);
1897 if (monitor == nullptr) {
1898 monitor = get_monitor_from_table(current, object);
1899 }
1900 } else {
1901 monitor = ObjectSynchronizer::read_monitor(mark);
1902 }
1903 if (monitor->has_anonymous_owner()) {
1904 assert(current->lock_stack().contains(object), "current must have object on its lock stack");
1905 monitor->set_owner_from_anonymous(current);
1906 monitor->set_recursions(current->lock_stack().remove(object) - 1);
1907 }
1908
1909 monitor->exit(current);
1910 }
1911
1912 // ObjectSynchronizer::inflate_locked_or_imse is used to get an
1913 // inflated ObjectMonitor* from contexts which require that, such as
1914 // notify/wait and jni_exit. Fast locking keeps the invariant that it
1915 // only inflates if it is already locked by the current thread or the current
1916 // thread is in the process of entering. To maintain this invariant we need to
1917 // throw a java.lang.IllegalMonitorStateException before inflating if the
1918 // current thread is not the owner.
1919 ObjectMonitor* ObjectSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
1920 JavaThread* current = THREAD;
1921
1922 for (;;) {
1923 markWord mark = obj->mark_acquire();
1924 if (mark.is_unlocked()) {
1925 // No lock, IMSE.
1926 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1927 "current thread is not owner", nullptr);
1928 }
1929
1930 if (mark.is_fast_locked()) {
1931 if (!current->lock_stack().contains(obj)) {
1932 // Fast locked by other thread, IMSE.
1933 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1934 "current thread is not owner", nullptr);
1935 } else {
1936 // Current thread owns the lock, must inflate
1937 return inflate_fast_locked_object(obj, cause, current, current);
1938 }
1939 }
1940
1941 assert(mark.has_monitor(), "must be");
1942 ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
1943 if (monitor != nullptr) {
1944 if (monitor->has_anonymous_owner()) {
1945 LockStack& lock_stack = current->lock_stack();
1946 if (lock_stack.contains(obj)) {
1947 // Current thread owns the lock but someone else inflated it.
1948 // Fix owner and pop lock stack.
1949 monitor->set_owner_from_anonymous(current);
1950 monitor->set_recursions(lock_stack.remove(obj) - 1);
1951 } else {
1952 // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
1953 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1954 "current thread is not owner", nullptr);
1955 }
1956 }
1957 return monitor;
1958 }
1959 }
1960 }
1961
1962 ObjectMonitor* ObjectSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {
1963
1964 // The JavaThread* locking parameter requires that the locking_thread == JavaThread::current,
1965 // or is suspended throughout the call by some other mechanism.
1966 // Even with fast locking the thread might be nullptr when called from a non
1967 // JavaThread. (As may still be the case from FastHashCode). However it is only
1968 // important for the correctness of the fast locking algorithm that the thread
1969 // is set when called from ObjectSynchronizer::enter from the owning thread,
1970 // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1971 EventJavaMonitorInflate event;
1972
1973 for (;;) {
1974 const markWord mark = object->mark_acquire();
1975
1976 // The mark can be in one of the following states:
1977 // * inflated - Just return if using stack-locking.
1978 // If using fast-locking and the ObjectMonitor owner
1979 // is anonymous and the locking_thread owns the
1980 // object lock, then we make the locking_thread
1981 // the ObjectMonitor owner and remove the lock from
1982 // the locking_thread's lock stack.
1983 // * fast-locked - Coerce it to inflated from fast-locked.
1984 // * unlocked - Aggressively inflate the object.
1985
1986 // CASE: inflated
1987 if (mark.has_monitor()) {
1988 ObjectMonitor* inf = mark.monitor();
1989 markWord dmw = inf->header();
1990 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1991 if (inf->has_anonymous_owner() &&
1992 locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
1993 inf->set_owner_from_anonymous(locking_thread);
1994 size_t removed = locking_thread->lock_stack().remove(object);
1995 inf->set_recursions(removed - 1);
1996 }
1997 return inf;
1998 }
1999
2000 // CASE: fast-locked
2001 // Could be fast-locked either by the locking_thread or by some other thread.
2002 //
2003 // Note that we allocate the ObjectMonitor speculatively, _before_
2004 // attempting to set the object's mark to the new ObjectMonitor. If
2005 // the locking_thread owns the monitor, then we set the ObjectMonitor's
2006 // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
2007 // to anonymous. If we lose the race to set the object's mark to the
2008 // new ObjectMonitor, then we just delete it and loop around again.
2009 //
2010 if (mark.is_fast_locked()) {
2011 ObjectMonitor* monitor = new ObjectMonitor(object);
2012 monitor->set_header(mark.set_unlocked());
2013 bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
2014 if (own) {
2015 // Owned by locking_thread.
2016 monitor->set_owner(locking_thread);
2017 } else {
2018 // Owned by somebody else.
2019 monitor->set_anonymous_owner();
2020 }
2021 markWord monitor_mark = markWord::encode(monitor);
2022 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
2023 if (old_mark == mark) {
2024 // Success! Return inflated monitor.
2025 if (own) {
2026 size_t removed = locking_thread->lock_stack().remove(object);
2027 monitor->set_recursions(removed - 1);
2028 }
2029 // Once the ObjectMonitor is configured and object is associated
2030 // with the ObjectMonitor, it is safe to allow async deflation:
2031 ObjectSynchronizer::_in_use_list.add(monitor);
2032
2033 log_inflate(current, object, cause);
2034 if (event.should_commit()) {
2035 post_monitor_inflate_event(&event, object, cause);
2036 }
2037 return monitor;
2038 } else {
2039 delete monitor;
2040 continue; // Interference -- just retry
2041 }
2042 }
2043
2044 // CASE: unlocked
2045 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2046 // If we know we're inflating for entry it's better to inflate by swinging a
2047 // pre-locked ObjectMonitor pointer into the object header. A successful
2048 // CAS inflates the object *and* confers ownership to the inflating thread.
2049 // In the current implementation we use a 2-step mechanism where we CAS()
2050 // to inflate and then CAS() again to try to swing _owner from null to current.
2051 // An inflateTry() method that we could call from enter() would be useful.
2052
2053 assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
2054 ObjectMonitor* m = new ObjectMonitor(object);
2055 // prepare m for installation - set monitor to initial state
2056 m->set_header(mark);
2057
2058 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2059 delete m;
2060 m = nullptr;
2061 continue;
2062 // interference - the markword changed - just retry.
2063 // The state-transitions are one-way, so there's no chance of
2064 // live-lock -- "Inflated" is an absorbing state.
2065 }
2066
2067 // Once the ObjectMonitor is configured and object is associated
2068 // with the ObjectMonitor, it is safe to allow async deflation:
2069 ObjectSynchronizer::_in_use_list.add(m);
2070
2071 log_inflate(current, object, cause);
2072 if (event.should_commit()) {
2073 post_monitor_inflate_event(&event, object, cause);
2074 }
2075 return m;
2076 }
2077 }
2078
2079 ObjectMonitor* ObjectSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
2080 VerifyThreadState vts(locking_thread, current);
2081 assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");
2082
2083 ObjectMonitor* monitor;
2084
2085 if (!UseObjectMonitorTable) {
2086 return inflate_into_object_header(object, cause, locking_thread, current);
2087 }
2088
2089 // Inflating requires a hash code
2090 ObjectSynchronizer::FastHashCode(current, object);
2091
2092 markWord mark = object->mark_acquire();
2093 assert(!mark.is_unlocked(), "Cannot be unlocked");
2094
2095 for (;;) {
2096 // Fetch the monitor from the table
2097 monitor = get_or_insert_monitor(object, current, cause);
2098
2099 // ObjectMonitors are always inserted as anonymously owned, this thread is
2100 // the current holder of the monitor. So unless the entry is stale and
2101 // contains a deflating monitor it must be anonymously owned.
2102 if (monitor->has_anonymous_owner()) {
2103 // The monitor must be anonymously owned if it was added
2104 assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
2105 // New fresh monitor
2106 break;
2107 }
2108
2109 // If the monitor was not anonymously owned then we got a deflating monitor
2110 // from the table. We need to let the deflator make progress and remove this
2111 // entry before we are allowed to add a new one.
2112 os::naked_yield();
2113 assert(monitor->is_being_async_deflated(), "Should be the reason");
2114 }
2115
2116 // Set the mark word; loop to handle concurrent updates to other parts of the mark word
2117 while (mark.is_fast_locked()) {
2118 mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2119 }
2120
2121 // Indicate that the monitor now has a known owner
2122 monitor->set_owner_from_anonymous(locking_thread);
2123
2124 // Remove the entry from the thread's lock stack
2125 monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);
2126
2127 if (locking_thread == current) {
2128 // Only change the thread local state of the current thread.
2129 locking_thread->om_set_monitor_cache(monitor);
2130 }
2131
2132 return monitor;
2133 }
2134
2135 ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
2136 VerifyThreadState vts(locking_thread, current);
2137
2138 // Note: In some paths (deoptimization) the 'current' thread inflates and
2139 // enters the lock on behalf of the 'locking_thread' thread.
2140
2141 ObjectMonitor* monitor = nullptr;
2142
2143 if (!UseObjectMonitorTable) {
2144 // Do the old inflate and enter.
2145 monitor = inflate_into_object_header(object, cause, locking_thread, current);
2146
2147 bool entered;
2148 if (locking_thread == current) {
2149 entered = monitor->enter(locking_thread);
2150 } else {
2151 entered = monitor->enter_for(locking_thread);
2152 }
2153
2154 // enter returns false for deflation found.
2155 return entered ? monitor : nullptr;
2156 }
2157
2158 NoSafepointVerifier nsv;
2159
2160 // Try to get the monitor from the thread-local cache.
2161 // There's no need to use the cache if we are locking
2162 // on behalf of another thread.
2163 if (current == locking_thread) {
2164 monitor = read_caches(current, lock, object);
2165 }
2166
2167 // Get or create the monitor
2168 if (monitor == nullptr) {
2169 // Lightweight monitors require that hash codes are installed first
2170 ObjectSynchronizer::FastHashCode(locking_thread, object);
2171 monitor = get_or_insert_monitor(object, current, cause);
2172 }
2173
2174 if (monitor->try_enter(locking_thread)) {
2175 return monitor;
2176 }
2177
2178 // Holds is_being_async_deflated() stable throughout this function.
2179 ObjectMonitorContentionMark contention_mark(monitor);
2180
2181 /// First handle the case where the monitor from the table is deflated
2182 if (monitor->is_being_async_deflated()) {
2183 // The MonitorDeflation thread is deflating the monitor. The locking thread
2184 // must spin until further progress has been made.
2185
2186 // Clear the BasicLock cache as it may contain this monitor.
2187 lock->clear_object_monitor_cache();
2188
2189 const markWord mark = object->mark_acquire();
2190
2191 if (mark.has_monitor()) {
2192 // Waiting on the deflation thread to remove the deflated monitor from the table.
2193 os::naked_yield();
2194
2195 } else if (mark.is_fast_locked()) {
2196 // Some other thread managed to fast-lock the lock, or this is a
2197 // recursive lock from the same thread; yield for the deflation
2198 // thread to remove the deflated monitor from the table.
2199 os::naked_yield();
2200
2201 } else {
2202 assert(mark.is_unlocked(), "Implied");
2203 // Retry immediately
2204 }
2205
2206 // Retry
2207 return nullptr;
2208 }
2209
2210 for (;;) {
2211 const markWord mark = object->mark_acquire();
2212 // The mark can be in one of the following states:
2213 // * inflated - If the ObjectMonitor owner is anonymous
2214 // and the locking_thread owns the object
2215 // lock, then we make the locking_thread
2216 // the ObjectMonitor owner and remove the
2217 // lock from the locking_thread's lock stack.
2218 // * fast-locked - Coerce it to inflated from fast-locked.
2219 // * neutral - Inflate the object. Successful CAS is locked
2220
2221 // CASE: inflated
2222 if (mark.has_monitor()) {
2223 LockStack& lock_stack = locking_thread->lock_stack();
2224 if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
2225 // The lock is fast-locked by the locking thread,
2226 // convert it to a held monitor with a known owner.
2227 monitor->set_owner_from_anonymous(locking_thread);
2228 monitor->set_recursions(lock_stack.remove(object) - 1);
2229 }
2230
2231 break; // Success
2232 }
2233
2234 // CASE: fast-locked
2235 // Could be fast-locked either by locking_thread or by some other thread.
2236 //
2237 if (mark.is_fast_locked()) {
2238 markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2239 if (old_mark != mark) {
2240 // CAS failed
2241 continue;
2242 }
2243
2244 // Success! Return inflated monitor.
2245 LockStack& lock_stack = locking_thread->lock_stack();
2246 if (lock_stack.contains(object)) {
2247 // The lock is fast-locked by the locking thread,
2248 // convert it to a held monitor with a known owner.
2249 monitor->set_owner_from_anonymous(locking_thread);
2250 monitor->set_recursions(lock_stack.remove(object) - 1);
2251 }
2252
2253 break; // Success
2254 }
2255
2256 // CASE: neutral (unlocked)
2257
2258 // Catch if the object's header is not neutral (not locked and
2259 // not marked is what we care about here).
2260 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2261 markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2262 if (old_mark != mark) {
2263 // CAS failed
2264 continue;
2265 }
2266
2267 // Transitioned from unlocked to monitor means locking_thread owns the lock.
2268 monitor->set_owner_from_anonymous(locking_thread);
2269
2270 return monitor;
2271 }
2272
2273 if (current == locking_thread) {
2274 // One round of spinning
2275 if (monitor->spin_enter(locking_thread)) {
2276 return monitor;
2277 }
2278
2279 // Monitor is contended, take the time before entering to fix the lock stack.
2280 LockStackInflateContendedLocks().inflate(current);
2281 }
2282
2283 // enter can block for safepoints; clear the unhandled object oop
2284 PauseNoSafepointVerifier pnsv(&nsv);
2285 object = nullptr;
2286
2287 if (current == locking_thread) {
2288 monitor->enter_with_contention_mark(locking_thread, contention_mark);
2289 } else {
2290 monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
2291 }
2292
2293 return monitor;
2294 }
2295
2296 void ObjectSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
2297 if (obj != nullptr) {
2298 deflate_mark_word(obj);
2299 }
2300 bool removed = remove_monitor(current, monitor, obj);
2301 if (obj != nullptr) {
2302 assert(removed, "Should have removed the entry if obj was alive");
2303 }
2304 }
2305
2306 ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
2307 assert(UseObjectMonitorTable, "must be");
2308 return ObjectMonitorTable::monitor_get(current, obj);
2309 }
2310
2311 bool ObjectSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
2312 assert(UseObjectMonitorTable, "must be");
2313 return ObjectMonitorTable::contains_monitor(current, monitor);
2314 }
2315
2316 ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
2317 return mark.monitor();
2318 }
2319
2320 ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) {
2321 return ObjectSynchronizer::read_monitor(current, obj, obj->mark());
2322 }
2323
2324 ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
2325 if (!UseObjectMonitorTable) {
2326 return read_monitor(mark);
2327 } else {
2328 return ObjectSynchronizer::get_monitor_from_table(current, obj);
2329 }
2330 }
2331
2332 bool ObjectSynchronizer::quick_enter_internal(oop obj, BasicLock* lock, JavaThread* current) {
2333 assert(current->thread_state() == _thread_in_Java, "must be");
2334 assert(obj != nullptr, "must be");
2335 NoSafepointVerifier nsv;
2336
2337 LockStack& lock_stack = current->lock_stack();
2338 if (lock_stack.is_full()) {
2339 // Always go into runtime if the lock stack is full.
2340 return false;
2341 }
2342
2343 const markWord mark = obj->mark();
2344
2345 #ifndef _LP64
2346 // Only for 32bit which has limited support for fast locking outside the runtime.
2347 if (lock_stack.try_recursive_enter(obj)) {
2348 // Recursive lock successful.
2349 return true;
2350 }
2351
2352 if (mark.is_unlocked()) {
2353 markWord locked_mark = mark.set_fast_locked();
2354 if (obj->cas_set_mark(locked_mark, mark) == mark) {
2355 // Successfully fast-locked, push object to lock-stack and return.
2356 lock_stack.push(obj);
2357 return true;
2358 }
2359 }
2360 #endif
2361
2362 if (mark.has_monitor()) {
2363 ObjectMonitor* monitor;
2364 if (UseObjectMonitorTable) {
2365 monitor = read_caches(current, lock, obj);
2366 } else {
2367 monitor = ObjectSynchronizer::read_monitor(mark);
2368 }
2369
2370 if (monitor == nullptr) {
2371 // Take the slow-path on a cache miss.
2372 return false;
2373 }
2374
2375 if (UseObjectMonitorTable) {
2376 // Set the monitor regardless of success.
2377 // Either we successfully lock on the monitor, or we retry with the
2378 // monitor in the slow path. If the monitor gets deflated, it will be
2379 // cleared, either by the CacheSetter if we fast lock in enter or in
2380 // inflate_and_enter when we see that the monitor is deflated.
2381 lock->set_object_monitor_cache(monitor);
2382 }
2383
2384 if (monitor->spin_enter(current)) {
2385 return true;
2386 }
2387 }
2388
2389 // Slow-path.
2390 return false;
2391 }
2392
2393 bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
2394 assert(current->thread_state() == _thread_in_Java, "invariant");
2395 NoSafepointVerifier nsv;
2396 if (obj == nullptr) return false; // Need to throw NPE
2397
2398 if (obj->klass()->is_value_based()) {
2399 return false;
2400 }
2401
2402 return ObjectSynchronizer::quick_enter_internal(obj, lock, current);
2403 }