/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/objectMonitorTable.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"

class ObjectMonitorDeflationLogging;

void MonitorList::add(ObjectMonitor* m) {
  ObjectMonitor* head;
  do {
    head = AtomicAccess::load(&_head);
    m->set_next_om(head);
  } while (AtomicAccess::cmpxchg(&_head, head, m) != head);

  size_t count = AtomicAccess::add(&_count, 1u, memory_order_relaxed);
  size_t old_max;
  do {
    old_max = AtomicAccess::load(&_max);
    if (count <= old_max) {
      break;
    }
  } while (AtomicAccess::cmpxchg(&_max, old_max, count, memory_order_relaxed) != old_max);
}

size_t MonitorList::count() const {
  return AtomicAccess::load(&_count);
}

size_t MonitorList::max() const {
  return AtomicAccess::load(&_max);
}
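
// Illustrative usage sketch (not part of the VM; names invented for the
// example): add() above is a lock-free "Treiber stack" push, so multiple
// threads may publish monitors concurrently without holding a lock:
//
//   MonitorList list;
//   list.add(new ObjectMonitor(obj));  // CAS loop swings _head to the new node
//   size_t in_use = list.count();      // relaxed counter, may briefly lag
//   size_t high_water = list.max();    // only ever ratchets upwards
//
// The second CAS loop in add() updates _max only when the new count exceeds
// the old maximum, so concurrent adders can never move the high-water mark down.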

class ObjectMonitorDeflationSafepointer : public StackObj {
  JavaThread* const _current;
  ObjectMonitorDeflationLogging* const _log;

 public:
  ObjectMonitorDeflationSafepointer(JavaThread* current, ObjectMonitorDeflationLogging* log)
    : _current(current), _log(log) {}

  void block_for_safepoint(const char* op_name, const char* count_name, size_t counter);
};

// Walk the in-use list and unlink deflated ObjectMonitors.
// Returns the number of unlinked ObjectMonitors.
size_t MonitorList::unlink_deflated(size_t deflated_count,
                                    GrowableArray<ObjectMonitor*>* unlinked_list,
                                    ObjectMonitorDeflationSafepointer* safepointer) {
  size_t unlinked_count = 0;
  ObjectMonitor* prev = nullptr;
  ObjectMonitor* m = AtomicAccess::load_acquire(&_head);

  while (m != nullptr) {
    if (m->is_being_async_deflated()) {
      // Find the next live ObjectMonitor. Batch up the unlinkable monitors, so we can
      // modify the list once per batch. The batch starts at "m".
      size_t unlinked_batch = 0;
      ObjectMonitor* next = m;
      // Look for at most MonitorUnlinkBatch monitors, or the number of
      // deflated but not yet unlinked monitors, whichever comes first.
      assert(deflated_count >= unlinked_count, "Sanity: underflow");
      size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
      do {
        ObjectMonitor* next_next = next->next_om();
        unlinked_batch++;
        unlinked_list->append(next);
        next = next_next;
        if (unlinked_batch >= unlinked_batch_limit) {
          // Reached the max batch, so bail out of the gathering loop.
          break;
        }
        if (prev == nullptr && AtomicAccess::load(&_head) != m) {
          // Current batch used to be at head, but it is not at head anymore.
          // Bail out and figure out where we currently are. This avoids long
          // walks searching for the new prev during unlink under heavy list inserts.
          break;
        }
      } while (next != nullptr && next->is_being_async_deflated());

      // Unlink the found batch.
      if (prev == nullptr) {
        // The current batch is the first batch, so there is a chance that it starts at head.
        // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
        ObjectMonitor* prev_head = AtomicAccess::cmpxchg(&_head, m, next);
        if (prev_head != m) {
          // Something must have updated the head. Figure out the actual prev for this batch.
          for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
            prev = n;
          }
          assert(prev != nullptr, "Should have found the prev for the current batch");
          prev->set_next_om(next);
        }
      } else {
        // The current batch is preceded by another batch. This guarantees the current batch
        // does not start at head. Unlink the entire current batch without updating the head.
        assert(AtomicAccess::load(&_head) != m, "Sanity");
        prev->set_next_om(next);
      }

      unlinked_count += unlinked_batch;
      if (unlinked_count >= deflated_count) {
        // Reached the max, so bail out of the searching loop.
        // There should be no more deflated monitors left.
        break;
      }
      m = next;
    } else {
      prev = m;
      m = m->next_om();
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("unlinking", "unlinked_count", unlinked_count);
  }

#ifdef ASSERT
  // Invariant: the code above should unlink all deflated monitors.
  // The code that runs after this unlinking does not expect deflated monitors.
  // Notably, attempting to deflate an already deflated monitor would break.
  {
    ObjectMonitor* m = AtomicAccess::load_acquire(&_head);
    while (m != nullptr) {
      assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
      m = m->next_om();
    }
  }
#endif

  AtomicAccess::sub(&_count, unlinked_count);
  return unlinked_count;
}
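
// Worked example of the batch limit above (illustrative numbers): if
// MonitorUnlinkBatch is 500 and 1200 monitors were deflated of which 800
// are already unlinked, the next batch gathers at most
// MIN2(1200 - 800, 500) == 400 monitors. The list is then spliced once per
// batch instead of once per monitor, bounding both the work done between
// safepoint checks and the length of any restarted head search.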

MonitorList::Iterator MonitorList::iterator() const {
  return Iterator(AtomicAccess::load_acquire(&_head));
}

ObjectMonitor* MonitorList::Iterator::next() {
  ObjectMonitor* current = _current;
  _current = current->next_om();
  return current;
}

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See c2_MacroAssembler_x86.cpp
// fast_lock(...) for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = nullptr;                                                   \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = obj->klass()->name();                                \
  if (klassname != nullptr) {                                              \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid,                                  \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
static int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

static constexpr size_t inflation_lock_count() {
  return 256;
}

// Static storage for an array of PlatformMutex.
alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];

static inline PlatformMutex* inflation_lock(size_t index) {
  return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
}
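
// Usage sketch (an assumption for illustration; this excerpt does not show
// the call sites): callers would pick one of the 256 stripes by hashing the
// object, serializing rare inflation-related operations without one global
// lock, e.g.:
//
//   size_t idx = (cast_from_oop<intptr_t>(obj) >> 5) % inflation_lock_count();
//   inflation_lock(idx)->lock();
//   // ... rare inflation-related work on obj ...
//   inflation_lock(idx)->unlock();
//
// The locks live in raw aligned storage and are constructed with placement
// new in initialize() below, so no static constructor/destructor ordering
// issues arise at VM startup or shutdown.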

void ObjectSynchronizer::initialize() {
  for (size_t i = 0; i < inflation_lock_count(); i++) {
    ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
  }
  // Start the ceiling with the estimate for one thread.
  set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);

  // Start the timer for deflations, so it does not trigger immediately.
  _last_async_deflation_time_ns = os::javaTimeNanos();

  ObjectSynchronizer::create_om_table();
}

MonitorList ObjectSynchronizer::_in_use_list;
// monitors_used_above_threshold() policy is as follows:
//
// The ratio of the current _in_use_list count to the ceiling is used
// to determine if we are above MonitorUsedDeflationThreshold and need
// to do an async monitor deflation cycle. The ceiling is increased by
// AvgMonitorsPerThreadEstimate when a thread is added to the system
// and is decreased by AvgMonitorsPerThreadEstimate when a thread is
// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread count derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
//
// Start the ceiling with the estimate for one thread in initialize()
// which is called after cmd line options are processed.
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;

// These checks are required for wait, notify and exit to avoid inflating the
// monitor, only to find out that this inline type object cannot be locked.
#define CHECK_THROW_NOSYNC_IMSE(obj)                                       \
  if ((obj)->mark().is_inline_type()) {                                    \
    JavaThread* THREAD = current;                                          \
    ResourceMark rm(THREAD);                                               \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

#define CHECK_THROW_NOSYNC_IMSE_0(obj)                                     \
  if ((obj)->mark().is_inline_type()) {                                    \
    JavaThread* THREAD = current;                                          \
    ResourceMark rm(THREAD);                                               \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  assert(!obj->klass()->is_inline_klass(), "monitor op on inline type");
  const markWord mark = obj->mark();

  if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
    // Degenerate notify
    // fast-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = read_monitor(current, obj, mark);
    if (mon == nullptr) {
      // Racing with inflation/deflation; take the slow path.
      return false;
    }
    assert(mon->object() == oop(obj), "invariant");
    if (!mon->has_owner(current)) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != nullptr) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can quickly notify them here and now, avoiding the slow-path.
      if (all) {
        mon->quick_notifyAll(current);
      } else {
        mon->quick_notify(current);
      }
    }
    return true;
  }

  // other IMS exception states take the slow-path
  return false;
}

// Handle notifications when synchronizing on value based classes
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
  frame last_frame = locking_thread->last_frame();
  bool bcp_was_adjusted = false;
  // Don't decrement bcp if it points to the frame's first instruction. This happens when
  // handle_sync_on_value_based_class() is called because of a synchronized method. There
  // is no actual monitorenter instruction in the byte code in this case.
  if (last_frame.is_interpreted_frame() &&
      (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
    // adjust bcp to point back to monitorenter so that we print the correct line numbers
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
    bcp_was_adjusted = true;
  }

  if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
    ResourceMark rm;
    stringStream ss;
    locking_thread->print_active_stack_on(&ss);
    char* base = (char*)strstr(ss.base(), "at");
    char* newline = (char*)strchr(ss.base(), '\n');
    if (newline != nullptr) {
      *newline = '\0';
    }
    fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
  } else {
    assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
    ResourceMark rm;
    Log(valuebasedclasses) vblog;

    vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
    if (locking_thread->has_last_Java_frame()) {
      LogStream info_stream(vblog.info());
      locking_thread->print_active_stack_on(&info_stream);
    } else {
      vblog.info("Cannot find the last Java frame");
    }

    EventSyncOnValueBasedClass event;
    if (event.should_commit()) {
      event.set_valueBasedClass(obj->klass());
      event.commit();
    }
  }

  if (bcp_was_adjusted) {
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
  }
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  JavaThread* THREAD = current;
  // Top native frames in the stack will not be seen if we attempt
  // preemption, since we start walking from the last Java anchor.
  NoPreemptMark npm(current);

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  if (obj->klass()->is_inline_klass()) {
    ResourceMark rm(THREAD);
    const char* desc = "Cannot synchronize on an instance of value class ";
    const char* className = obj->klass()->external_name();
    size_t msglen = strlen(desc) + strlen(className) + 1;
    char* message = NEW_RESOURCE_ARRAY(char, msglen);
    assert(message != nullptr, "NEW_RESOURCE_ARRAY should have called vm_exit_out_of_memory and not return nullptr");
    jio_snprintf(message, msglen, "%s%s", desc, className);
    THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    BasicLock lock;
    if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
      break;
    }
  }
  current->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  ObjectMonitor* monitor;
  monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
    _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
  assert(!_thread->preempting(), "");

  _thread->check_for_valid_safepoint_state();

  if (_obj() != nullptr) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);

    if (_thread->preempting()) {
      // If preemption was cancelled we acquired the monitor after freezing
      // the frames. Redoing the vm call later in thaw will require us to
      // release it since the call should look like the original one. We
      // do it in ~ObjectLocker to reduce the window of time we hold the
      // monitor since we can't do anything useful with it now, and would
      // otherwise just force other vthreads to preempt in case they try
      // to acquire this monitor.
      _skip_exit = !_thread->preemption_cancelled();
      ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
      _thread->set_pending_preempted_exception();
    }
  }
}

ObjectLocker::~ObjectLocker() {
  if (_obj() != nullptr && !_skip_exit) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}

void ObjectLocker::wait_uninterruptibly(TRAPS) {
  ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
  if (_thread->preempting()) {
    _skip_exit = true;
    ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
    _thread->set_pending_preempted_exception();
  }
}

// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()

int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE_0(obj);
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }

  ObjectMonitor* monitor;
  monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD);  // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  assert(millis >= 0, "timeout value is negative");

  ObjectMonitor* monitor;
  monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
  monitor->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
    // Not inflated so there can't be any waiters to notify.
    return;
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  monitor->notify(CHECK);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
    // Not inflated so there can't be any waiters to notify.
    return;
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  monitor->notifyAll(CHECK);
}

// -----------------------------------------------------------------------------
// Hash Code handling

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};
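
// Padding arithmetic (illustrative): DEFINE_PAD_MINUS_SIZE(n, line, size)
// emits a filler of (line - size) bytes. With OM_CACHE_LINE_SIZE == 64 and
// sizeof(volatile int) == 4, stw_random is followed by 60 bytes of padding,
// so stw_random and hc_sequence land on different cache lines and an update
// to one never invalidates readers of the other (exact sizes are platform
// dependent; this is the intent, not a guarantee).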

static SharedGlobals GVars;

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//     2654435761 = 2^32 * Phi (golden ratio)
//     HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
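//
// Minimal standalone sketch of the Marsaglia xor-shift scheme used by the
// default (hashCode == 5) branch below, with the four thread-local state
// words written out as plain variables (any non-zero seed works):
//
//   unsigned t = x;
//   t ^= (t << 11);
//   x = y; y = z; z = w;
//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//   return w;   // xorshift128: well diffused, period 2^128 - 1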

static intptr_t get_next_hash(Thread* current, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;  // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = current->_hashStateX;
    t ^= (t << 11);
    current->_hashStateX = current->_hashStateY;
    current->_hashStateY = current->_hashStateZ;
    current->_hashStateZ = current->_hashStateW;
    unsigned v = current->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    current->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  // VM should be calling bootstrap method.
  assert(!obj->klass()->is_inline_klass(), "FastHashCode should not be called for inline classes");

  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = obj->mark_acquire();
    // If UseObjectMonitorTable is set the hash can simply be installed in the
    // object header, since the monitor isn't in the object header.
    if (UseObjectMonitorTable || !mark.has_monitor()) {
      hash = mark.hash();
      if (hash != 0) {                     // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {                  // if the hash was installed, return it
        return hash;
      }
      // The CAS failed: another thread must have installed a hash just
      // before our attempt, or the mark word changed shape. Retry from
      // the top.
      continue;
    } else {
      assert(!mark.is_unlocked() && !mark.is_fast_locked(), "invariant");
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        // It has a hash.

        // Separate load of dmw/header above from the loads in
        // is_being_async_deflated().

        // dmw/header and _contentions may get written by different threads.
        // Make sure to observe them in the same order when having several observers.
        OrderAccess::loadload_for_IRIW();

        if (monitor->is_being_async_deflated()) {
          // But we can't safely use the hash if we detect that async
          // deflation has occurred. So we attempt to restore the
          // header/dmw to the object's header so that we only retry
          // once if the deflater thread happens to be slow.
          monitor->install_displaced_markword_in_object(obj);
          continue;
        }
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    }

    // NOTE: an async deflation can race after we get the monitor and
    // before we can update the ObjectMonitor's header with the hash
    // value below.
    assert(mark.has_monitor(), "must be");
    monitor = mark.monitor();

    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                       // if it does not have a hash
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = AtomicAccess::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  if (h_obj->mark().is_inline_type()) {
    return false;
  }
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = obj->mark_acquire();

  if (mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

  while (mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      return monitor->is_entered(current) != 0;
    }
    // Racing with inflation/deflation, retry
    mark = obj->mark_acquire();

    if (mark.is_fast_locked()) {
      // Some other thread fast_locked, current could not have held the lock
      return false;
    }
  }

  // Unlocked case, header in place
  assert(mark.is_unlocked(), "sanity check");
  return false;
}

JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList* t_list, Handle h_obj) {
  oop obj = h_obj();
  markWord mark = obj->mark_acquire();

  if (mark.is_fast_locked()) {
    // fast-locked so get owner from the object.
    // owning_thread_from_object() may also return null here:
    return Threads::owning_thread_from_object(t_list, h_obj());
  }

  while (mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark);
    if (monitor != nullptr) {
      return Threads::owning_thread_from_monitor(t_list, monitor);
    }
    // Racing with inflation/deflation, retry
    mark = obj->mark_acquire();

    if (mark.is_fast_locked()) {
      // Some other thread fast_locked
      return Threads::owning_thread_from_object(t_list, h_obj());
    }
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_unlocked(), "sanity check");

  return nullptr;
}

// Visitors ...

// Iterate over all ObjectMonitors.
template <typename Function>
void ObjectSynchronizer::monitors_iterate(Function function) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* monitor = iter.next();
    function(monitor);
  }
}

// Iterate ObjectMonitors owned by any thread and where the owner `filter`
// returns true.
template <typename OwnerFilter>
void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
  monitors_iterate([&](ObjectMonitor* monitor) {
    // This function is only called at a safepoint or when the
    // target thread is suspended or when the target thread is
    // operating on itself. The current closures in use today are
    // only interested in an owned ObjectMonitor and ownership
    // cannot be dropped under the calling contexts so the
    // ObjectMonitor cannot be async deflated.
    if (monitor->has_owner() && filter(monitor)) {
      assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");

      closure->do_monitor(monitor);
    }
  });
}

// Iterate ObjectMonitors where the owner == thread; this does NOT include
// ObjectMonitors where owner is set to a stack-lock address in thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
  int64_t key = ObjectMonitor::owner_id_from(thread);
  auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
  return owned_monitors_iterate_filtered(closure, thread_filter);
}

void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, oop vthread) {
  int64_t key = ObjectMonitor::owner_id_from(vthread);
  auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
  return owned_monitors_iterate_filtered(closure, thread_filter);
}

// Iterate ObjectMonitors owned by any thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
  auto all_filter = [&](ObjectMonitor* monitor) { return true; };
  return owned_monitors_iterate_filtered(closure, all_filter);
}

static bool monitors_used_above_threshold(MonitorList* list) {
  if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
    return false;
  }
  size_t monitors_used = list->count();
  if (monitors_used == 0) {  // empty list is easy
    return false;
  }
  size_t old_ceiling = ObjectSynchronizer::in_use_list_ceiling();
  // Make sure that we use a ceiling value that is not lower than
  // previous, not lower than the recorded max used by the system, and
  // not lower than the current number of monitors in use (which can
  // race ahead of max). The result is guaranteed > 0.
  size_t ceiling = MAX3(old_ceiling, list->max(), monitors_used);

  // Check if our monitor usage is above the threshold:
  size_t monitor_usage = (monitors_used * 100LL) / ceiling;
  if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
    // Deflate monitors if over the threshold percentage, unless no
    // progress on previous deflations.
    bool is_above_threshold = true;

    // Check if it's time to adjust the in_use_list_ceiling up, due
    // to too many async deflation attempts without any progress.
    if (NoAsyncDeflationProgressMax != 0 &&
        _no_progress_cnt >= NoAsyncDeflationProgressMax) {
      double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
      size_t delta = (size_t)(ceiling * remainder) + 1;
      size_t new_ceiling = (ceiling > SIZE_MAX - delta)
                           ? SIZE_MAX  // Overflow, let's clamp new_ceiling.
                           : ceiling + delta;

      ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
      log_info(monitorinflation)("Too many deflations without progress; "
                                 "bumping in_use_list_ceiling from %zu"
                                 " to %zu", old_ceiling, new_ceiling);
      _no_progress_cnt = 0;
      ceiling = new_ceiling;

      // Check if our monitor usage is still above the threshold:
      monitor_usage = (monitors_used * 100LL) / ceiling;
      is_above_threshold = int(monitor_usage) > MonitorUsedDeflationThreshold;
    }
    log_info(monitorinflation)("monitors_used=%zu, ceiling=%zu"
                               ", monitor_usage=%zu, threshold=%d",
                               monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
    return is_above_threshold;
  }

  return false;
}
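
// Worked example (illustrative numbers): with ceiling == 1000,
// monitors_used == 950 and MonitorUsedDeflationThreshold == 90, usage is
// (950 * 100) / 1000 == 95 > 90, so deflation is requested. After
// NoAsyncDeflationProgressMax no-progress cycles, the ceiling is bumped by
// 1000 * (100 - 90) / 100 + 1 == 101 to 1101, which lowers usage to about
// 86% and quiets the trigger until usage genuinely grows again.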

size_t ObjectSynchronizer::in_use_list_count() {
  return _in_use_list.count();
}

size_t ObjectSynchronizer::in_use_list_max() {
  return _in_use_list.max();
}

size_t ObjectSynchronizer::in_use_list_ceiling() {
  return _in_use_list_ceiling;
}

void ObjectSynchronizer::dec_in_use_list_ceiling() {
  AtomicAccess::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}

void ObjectSynchronizer::inc_in_use_list_ceiling() {
  AtomicAccess::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}

void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
  _in_use_list_ceiling = new_value;
}

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (is_async_deflation_requested()) {
    // Async deflation request.
    log_info(monitorinflation)("Async deflation needed: explicit request");
    return true;
  }

  jlong time_since_last = time_since_last_async_deflation_ms();

  if (AsyncDeflationInterval > 0 &&
      time_since_last > AsyncDeflationInterval &&
      monitors_used_above_threshold(&_in_use_list)) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the MonitorDeflationThread.
    log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
    return true;
  }

  if (GuaranteedAsyncDeflationInterval > 0 &&
      time_since_last > GuaranteedAsyncDeflationInterval) {
    // It's been longer than our specified guaranteed deflate interval.
    // We need to clean up the used monitors even if the threshold is
    // not reached, to keep the memory utilization at bay when many threads
    // touched many monitors.
    log_info(monitorinflation)("Async deflation needed: guaranteed interval (%zd ms) "
                               "is greater than time since last deflation (" JLONG_FORMAT " ms)",
                               GuaranteedAsyncDeflationInterval, time_since_last);

    // If this deflation has no progress, then it should not affect the no-progress
    // tracking, otherwise threshold heuristics would think it was triggered, experienced
    // no progress, and needs to backoff more aggressively. In this "no progress" case,
    // the generic code would bump the no-progress counter, and we compensate for that
    // by telling it to skip the update.
    //
    // If this deflation has progress, then it should let non-progress tracking
    // know about this, otherwise the threshold heuristics would kick in, potentially
    // experience no-progress due to aggressive cleanup by this deflation, and think
    // it is still in no-progress stride. In this "progress" case, the generic code would
    // zero the counter, and we allow it to happen.
    _no_progress_skip_increment = true;

    return true;
  }

  return false;
}
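
// Illustrative timeline, assuming AsyncDeflationInterval == 250 ms and
// GuaranteedAsyncDeflationInterval == 60000 ms: a usage spike more than
// 250 ms after the last cycle triggers threshold-based deflation, while a
// quiet VM still deflates at least once a minute via the guaranteed path,
// which sets _no_progress_skip_increment as described above so an empty
// guaranteed cycle does not count against the threshold heuristics.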

void ObjectSynchronizer::request_deflate_idle_monitors() {
  MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
  set_is_async_deflation_requested(true);
  ml.notify_all();
}

bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
  JavaThread* current = JavaThread::current();
  bool ret_code = false;

  jlong last_time = last_async_deflation_time_ns();

  request_deflate_idle_monitors();

  const int N_CHECKS = 5;
  for (int i = 0; i < N_CHECKS; i++) {  // sleep for at most 5 seconds
    if (last_async_deflation_time_ns() > last_time) {
      log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
      ret_code = true;
      break;
    }
    {
      // JavaThread has to honor the blocking protocol.
      ThreadBlockInVM tbivm(current);
      os::naked_short_sleep(999);  // sleep for almost 1 second
    }
  }
  if (!ret_code) {
    log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
  }

  return ret_code;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
}

// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  size_t deflated_count = 0;
  Thread* current = Thread::current();

  while (iter.has_next()) {
    if (deflated_count >= (size_t)MonitorDeflationMax) {
      break;
    }
    ObjectMonitor* mid = iter.next();
    if (mid->deflate_monitor(current)) {
      deflated_count++;
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
  }

  return deflated_count;
}

class DeflationHandshakeClosure : public HandshakeClosure {
 public:
  DeflationHandshakeClosure() : HandshakeClosure("DeflationHandshakeClosure") {}

  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("DeflationHandshakeClosure::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
    if (thread->is_Java_thread()) {
      // Clear OM cache
      JavaThread* jt = JavaThread::cast(thread);
      jt->om_clear_monitor_cache();
    }
  }
};

class VM_RendezvousGCThreads : public VM_Operation {
 public:
  bool evaluate_at_safepoint() const override { return false; }
  VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
  void doit() override {
    Universe::heap()->safepoint_synchronize_begin();
    Universe::heap()->safepoint_synchronize_end();
  }
};
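
// Explanatory note: begin() immediately followed by end() is not a no-op.
// It forces a rendezvous: every concurrent GC thread must pass the
// synchronization point before end() returns, so any GC thread that was
// examining a mark-word (and potentially following it to an ObjectMonitor)
// when the operation started has finished doing so. Monitors unlinked
// before this VM operation can therefore be deleted without racing GC.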

static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
                              ObjectMonitorDeflationSafepointer* safepointer) {
  NativeHeapTrimmer::SuspendMark sm("monitor deletion");
  size_t deleted_count = 0;
  for (ObjectMonitor* monitor: *delete_list) {
    delete monitor;
    deleted_count++;
    // A JavaThread must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("deletion", "deleted_count", deleted_count);
  }
  return deleted_count;
}

class ObjectMonitorDeflationLogging : public StackObj {
  LogStreamHandle(Debug, monitorinflation) _debug;
  LogStreamHandle(Info, monitorinflation)  _info;
  LogStream*   _stream;
  elapsedTimer _timer;

  size_t ceiling() const { return ObjectSynchronizer::in_use_list_ceiling(); }
  size_t count() const   { return ObjectSynchronizer::in_use_list_count(); }
  size_t max() const     { return ObjectSynchronizer::in_use_list_max(); }

 public:
  ObjectMonitorDeflationLogging()
    : _debug(), _info(), _stream(nullptr) {
    if (_debug.is_enabled()) {
      _stream = &_debug;
    } else if (_info.is_enabled()) {
      _stream = &_info;
    }
  }

  void begin() {
    if (_stream != nullptr) {
      _stream->print_cr("begin deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  void before_handshake(size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("before handshaking: unlinked_count=%zu"
                        ", in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        unlinked_count, ceiling(), count(), max());
    }
  }

  void after_handshake() {
    if (_stream != nullptr) {
      _stream->print_cr("after handshaking: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  void end(size_t deflated_count, size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      if (deflated_count != 0 || unlinked_count != 0 || _debug.is_enabled()) {
        _stream->print_cr("deflated_count=%zu, {unlinked,deleted}_count=%zu monitors in %3.7f secs",
                          deflated_count, unlinked_count, _timer.seconds());
      }
      _stream->print_cr("end deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
    }
  }

  void before_block_for_safepoint(const char* op_name, const char* cnt_name, size_t cnt) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("pausing %s: %s=%zu, in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        op_name, cnt_name, cnt, ceiling(), count(), max());
    }
  }

  void after_block_for_safepoint(const char* op_name) {
    if (_stream != nullptr) {
      _stream->print_cr("resuming %s: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        op_name, ceiling(), count(), max());
      _timer.start();
    }
  }
};

void ObjectMonitorDeflationSafepointer::block_for_safepoint(const char* op_name, const char* count_name, size_t counter) {
  if (!SafepointMechanism::should_process(_current)) {
    return;
  }

  // A safepoint/handshake has started.
  _log->before_block_for_safepoint(op_name, count_name, counter);

  {
    // Honor the block request.
    ThreadBlockInVM tbivm(_current);
  }

  _log->after_block_for_safepoint(op_name);
}
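
// Note: the empty ThreadBlockInVM scope above is the entire blocking
// mechanism. Its constructor transitions the JavaThread to a blocked state,
// which lets the pending safepoint or handshake proceed, and its destructor
// transitions back, waiting until the VM operation has completed. No
// statements are needed inside the scope.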

// This function is called by the MonitorDeflationThread to deflate
// ObjectMonitors.
size_t ObjectSynchronizer::deflate_idle_monitors() {
  JavaThread* current = JavaThread::current();
  assert(current->is_monitor_deflation_thread(), "The only monitor deflater");

  // The async deflation request has been processed.
  _last_async_deflation_time_ns = os::javaTimeNanos();
  set_is_async_deflation_requested(false);

  ObjectMonitorDeflationLogging log;
  ObjectMonitorDeflationSafepointer safepointer(current, &log);

  log.begin();

  // Deflate some idle ObjectMonitors.
  size_t deflated_count = deflate_monitor_list(&safepointer);

  // Unlink the deflated ObjectMonitors from the in-use list.
  size_t unlinked_count = 0;
  size_t deleted_count = 0;
  if (deflated_count > 0) {
    ResourceMark rm(current);
    GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
    unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);

#ifdef ASSERT
    if (UseObjectMonitorTable) {
      for (ObjectMonitor* monitor : delete_list) {
        assert(!ObjectSynchronizer::contains_monitor(current, monitor), "Should have been removed");
      }
    }
#endif

    log.before_handshake(unlinked_count);

    // A JavaThread needs to handshake in order to safely free the
    // ObjectMonitors that were deflated in this cycle.
    DeflationHandshakeClosure dhc;
    Handshake::execute(&dhc);
    // Also, we sync and desync GC threads around the handshake, so that they can
    // safely read the mark-word and look-through to the object-monitor, without
    // being afraid that the object-monitor is going away.
    VM_RendezvousGCThreads sync_gc;
    VMThread::execute(&sync_gc);

    log.after_handshake();

    // After the handshake, safely free the ObjectMonitors that were
    // deflated and unlinked in this cycle.

    // Delete the unlinked ObjectMonitors.
    deleted_count = delete_monitors(&delete_list, &safepointer);
    assert(unlinked_count == deleted_count, "must be");
  }

  log.end(deflated_count, unlinked_count);

  GVars.stw_random = os::random();

  if (deflated_count != 0) {
    _no_progress_cnt = 0;
  } else if (_no_progress_skip_increment) {
    _no_progress_skip_increment = false;
  } else {
    _no_progress_cnt++;
  }

  return deflated_count;
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
class ReleaseJavaMonitorsClosure : public MonitorClosure {
 private:
  JavaThread* _thread;

 public:
  ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    mid->complete_exit(_thread);
  }
};

// Release all inflated monitors owned by current thread. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
  assert(current == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(current);
  ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
  assert(!current->has_pending_exception(), "Should not be possible");
  current->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// Do the final audit and print of ObjectMonitor stats; must be done
// by the VMThread at VM exit time.
void ObjectSynchronizer::do_final_audit_and_print_stats() {
  assert(Thread::current()->is_VM_thread(), "sanity check");

  if (is_final_audit()) {  // Only do the audit once.
    return;
  }
  set_is_final_audit();
  log_info(monitorinflation)("Starting the final audit.");

  if (log_is_enabled(Info, monitorinflation)) {
    LogStreamHandle(Info, monitorinflation) ls;
    audit_and_print_stats(&ls, true /* on_exit */);
  }
}

// This function can be called by the MonitorDeflationThread or it can be called when
// we are trying to exit the VM. The list walker functions can run in parallel with
// the other list operations.
// Calls to this function can be added in various places as a debugging
// aid.
//
void ObjectSynchronizer::audit_and_print_stats(outputStream* ls, bool on_exit) {
  int error_cnt = 0;

  ls->print_cr("Checking in_use_list:");
  chk_in_use_list(ls, &error_cnt);

  if (error_cnt == 0) {
    ls->print_cr("No errors found in in_use_list checks.");
  } else {
    log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
  }

  // When exiting, only log the interesting entries at the Info level.
  // When called at intervals by the MonitorDeflationThread, log output
  // at the Trace level since there can be a lot of it.
  if (!on_exit && log_is_enabled(Trace, monitorinflation)) {
    LogStreamHandle(Trace, monitorinflation) ls_tr;
    log_in_use_monitor_details(&ls_tr, true /* log_all */);
  } else if (on_exit) {
    log_in_use_monitor_details(ls, false /* log_all */);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check the in_use_list; log the results of the checks.
void ObjectSynchronizer::chk_in_use_list(outputStream* out, int* error_cnt_p) {
  size_t l_in_use_count = _in_use_list.count();
  size_t l_in_use_max = _in_use_list.max();
  out->print_cr("count=%zu, max=%zu", l_in_use_count, l_in_use_max);

  size_t ck_in_use_count = 0;
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* mid = iter.next();
    chk_in_use_entry(mid, out, error_cnt_p);
    ck_in_use_count++;
  }

  if (l_in_use_count == ck_in_use_count) {
    out->print_cr("in_use_count=%zu equals ck_in_use_count=%zu",
                  l_in_use_count, ck_in_use_count);
  } else {
    out->print_cr("WARNING: in_use_count=%zu is not equal to "
                  "ck_in_use_count=%zu", l_in_use_count, ck_in_use_count);
  }

  size_t ck_in_use_max = _in_use_list.max();
  if (l_in_use_max == ck_in_use_max) {
    out->print_cr("in_use_max=%zu equals ck_in_use_max=%zu",
                  l_in_use_max, ck_in_use_max);
  } else {
    out->print_cr("WARNING: in_use_max=%zu is not equal to "
                  "ck_in_use_max=%zu", l_in_use_max, ck_in_use_max);
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
                                          int* error_cnt_p) {
  if (n->owner_is_DEFLATER_MARKER()) {
    // This could happen when monitor deflation blocks for a safepoint.
    return;
  }

  if (n->metadata() == 0) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
                  "have non-null _metadata (header/hash) field.", p2i(n));
    *error_cnt_p = *error_cnt_p + 1;
  }

  const oop obj = n->object_peek();
  if (obj == nullptr) {
    return;
  }

  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                  "object does not think it has a monitor: obj="
                  INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                  p2i(obj), mark.value());
    *error_cnt_p = *error_cnt_p + 1;
    return;
  }

  ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
  if (n != obj_mon) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                  "object does not refer to the same monitor: obj="
                  INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                  INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    *error_cnt_p = *error_cnt_p + 1;
  }
}
1475
1476 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
1477 // flags indicate why the entry is in-use, 'object' and 'object type'
1478 // indicate the associated object and its type.
1479 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
1480 if (_in_use_list.count() > 0) {
1481 stringStream ss;
1482 out->print_cr("In-use monitor info%s:", log_all ? "" : " (eliding idle monitors)");
1483 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
1484 out->print_cr("%18s %s %18s %18s",
1485 "monitor", "BHL", "object", "object type");
1486 out->print_cr("================== === ================== ==================");
1487
1488 auto is_interesting = [&](ObjectMonitor* monitor) {
1489 return log_all || monitor->has_owner() || monitor->is_busy();
1490 };
1491
1492 monitors_iterate([&](ObjectMonitor* monitor) {
1493 if (is_interesting(monitor)) {
1494 const oop obj = monitor->object_peek();
1495 const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
1496 ResourceMark rm;
1497 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
1498 monitor->is_busy(), hash != 0, monitor->has_owner(),
1499 p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
1500 if (monitor->is_busy()) {
1501 out->print(" (%s)", monitor->is_busy_to_string(&ss));
1502 ss.reset();
1503 }
1504 out->cr();
1505 }
1506 });
1507 }
1508
1509 out->flush();
1510 }
1511
1512 ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
1513 ObjectMonitor* monitor = get_monitor_from_table(current, object);
1514 if (monitor != nullptr) {
1515 *inserted = false;
1516 return monitor;
1517 }
1518
1519 ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
1520 alloced_monitor->set_anonymous_owner();
1521
  // Try to insert the monitor.
1523 monitor = add_monitor(current, alloced_monitor, object);
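  // add_monitor returns the winning entry: our freshly allocated monitor if
  // the insert succeeded, or the monitor a racing thread inserted first.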
1524
1525 *inserted = alloced_monitor == monitor;
1526 if (!*inserted) {
1527 delete alloced_monitor;
1528 }
1529
1530 return monitor;
1531 }
1532
1533 static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
1534 if (log_is_enabled(Trace, monitorinflation)) {
1535 ResourceMark rm(current);
1536 log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
1537 INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
1538 object->mark().value(), object->klass()->external_name(),
1539 ObjectSynchronizer::inflate_cause_name(cause));
1540 }
1541 }
1542
1543 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1544 const oop obj,
1545 ObjectSynchronizer::InflateCause cause) {
1546 assert(event != nullptr, "invariant");
1547 const Klass* monitor_klass = obj->klass();
1548 if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
1549 return;
1550 }
1551 event->set_monitorClass(monitor_klass);
1552 event->set_address((uintptr_t)(void*)obj);
1553 event->set_cause((u1)cause);
1554 event->commit();
1555 }
1556
1557 ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
1558 assert(UseObjectMonitorTable, "must be");
1559
1560 EventJavaMonitorInflate event;
1561
1562 bool inserted;
1563 ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);
1564
1565 if (inserted) {
1566 log_inflate(current, object, cause);
1567 if (event.should_commit()) {
1568 post_monitor_inflate_event(&event, object, cause);
1569 }
1570
1571 // The monitor has an anonymous owner so it is safe from async deflation.
1572 ObjectSynchronizer::_in_use_list.add(monitor);
1573 }
1574
1575 return monitor;
1576 }
1577
1578 // Add the hashcode to the monitor to match the object and put it in the hashtable.
1579 ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
1580 assert(UseObjectMonitorTable, "must be");
1581 assert(obj == monitor->object(), "must be");
1582
1583 intptr_t hash = obj->mark().hash();
1584 assert(hash != 0, "must be set when claiming the object monitor");
1585 monitor->set_hash(hash);
1586
1587 return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
1588 }
1589
1590 bool ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
1591 assert(UseObjectMonitorTable, "must be");
1592 assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
1593
1594 return ObjectMonitorTable::remove_monitor_entry(current, monitor);
1595 }
1596
1597 void ObjectSynchronizer::deflate_mark_word(oop obj) {
1598 assert(UseObjectMonitorTable, "must be");
1599
1600 markWord mark = obj->mark_acquire();
1601 assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
1602
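  // The CAS below can fail when other mark word bits change concurrently; mark
  // is then reloaded and we retry until the monitor bits are observed cleared.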
1603 while (mark.has_monitor()) {
1604 const markWord new_mark = mark.clear_lock_bits().set_unlocked();
1605 mark = obj->cas_set_mark(new_mark, mark);
1606 }
1607 }
1608
1609 void ObjectSynchronizer::create_om_table() {
1610 if (!UseObjectMonitorTable) {
1611 return;
1612 }
1613 ObjectMonitorTable::create();
1614 }
1615
1616 bool ObjectSynchronizer::needs_resize() {
1617 if (!UseObjectMonitorTable) {
1618 return false;
1619 }
1620 return ObjectMonitorTable::should_resize();
1621 }
1622
1623 bool ObjectSynchronizer::resize_table(JavaThread* current) {
1624 if (!UseObjectMonitorTable) {
1625 return true;
1626 }
1627 return ObjectMonitorTable::resize(current);
1628 }
1629
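// Collects the oops on the current thread's lock stack whose mark words
// already point at a monitor (i.e. contended fast-locked objects) and
// inflates them. Adjacent recursive lock stack entries are recorded only once.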
1630 class ObjectSynchronizer::LockStackInflateContendedLocks : private OopClosure {
1631 private:
1632 oop _contended_oops[LockStack::CAPACITY];
1633 int _length;
1634
1635 void do_oop(oop* o) final {
1636 oop obj = *o;
1637 if (obj->mark_acquire().has_monitor()) {
1638 if (_length > 0 && _contended_oops[_length - 1] == obj) {
1639 // Recursive
1640 return;
1641 }
1642 _contended_oops[_length++] = obj;
1643 }
1644 }
1645
1646 void do_oop(narrowOop* o) final {
1647 ShouldNotReachHere();
1648 }
1649
1650 public:
1651 LockStackInflateContendedLocks() :
1652 _contended_oops(),
    _length(0) {}
1654
1655 void inflate(JavaThread* current) {
1656 assert(current == JavaThread::current(), "must be");
1657 current->lock_stack().oops_do(this);
1658 for (int i = 0; i < _length; i++) {
1659 ObjectSynchronizer::
1660 inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1661 }
1662 }
1663 };
1664
1665 void ObjectSynchronizer::ensure_lock_stack_space(JavaThread* current) {
1666 assert(current == JavaThread::current(), "must be");
1667 LockStack& lock_stack = current->lock_stack();
1668
1669 // Make room on lock_stack
1670 if (lock_stack.is_full()) {
1671 // Inflate contended objects
1672 LockStackInflateContendedLocks().inflate(current);
1673 if (lock_stack.is_full()) {
1674 // Inflate the oldest object
1675 inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1676 }
1677 }
1678 }
1679
1680 class ObjectSynchronizer::CacheSetter : StackObj {
1681 JavaThread* const _thread;
1682 BasicLock* const _lock;
1683 ObjectMonitor* _monitor;
1684
1685 NONCOPYABLE(CacheSetter);
1686
1687 public:
1688 CacheSetter(JavaThread* thread, BasicLock* lock) :
1689 _thread(thread),
1690 _lock(lock),
1691 _monitor(nullptr) {}
1692
1693 ~CacheSetter() {
1694 // Only use the cache if using the table.
1695 if (UseObjectMonitorTable) {
1696 if (_monitor != nullptr) {
1697 // If the monitor is already in the BasicLock cache then it is most
1698 // likely in the thread cache, do not set it again to avoid reordering.
1699 if (_monitor != _lock->object_monitor_cache()) {
1700 _thread->om_set_monitor_cache(_monitor);
1701 _lock->set_object_monitor_cache(_monitor);
1702 }
1703 } else {
1704 _lock->clear_object_monitor_cache();
1705 }
1706 }
1707 }
1708
1709 void set_monitor(ObjectMonitor* monitor) {
1710 assert(_monitor == nullptr, "only set once");
1711 _monitor = monitor;
1712 }
1713
1714 };
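
// A usage sketch (mirroring ObjectSynchronizer::enter below): the destructor
// publishes the monitor, if one was set, to both caches when the scope exits.
//
//   CacheSetter cache_setter(current, lock);
//   ...
//   cache_setter.set_monitor(monitor);  // cached by ~CacheSetter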
1715
// Reads first from the BasicLock cache, then from the OMCache of the current
// thread. The C2 fast path may have put the monitor in the BasicLock cache.
1718 inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) {
1719 ObjectMonitor* monitor = lock->object_monitor_cache();
1720 if (monitor == nullptr) {
1721 monitor = current->om_get_from_monitor_cache(object);
1722 }
1723 return monitor;
1724 }
1725
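// RAII helper verifying the threading invariants of operations performed on
// behalf of another thread: when locking_thread != current, debug builds bump
// the current thread's no-safepoint counter for the duration of the scope.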
1726 class ObjectSynchronizer::VerifyThreadState {
1727 bool _no_safepoint;
1728
1729 public:
1730 VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
1731 assert(current == Thread::current(), "must be");
1732 assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
1733 if (_no_safepoint) {
1734 DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
1735 }
1736 }
1737 ~VerifyThreadState() {
    if (_no_safepoint) {
1739 DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
1740 }
1741 }
1742 };
1743
1744 inline bool ObjectSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
1745 markWord mark = obj->mark();
1746 while (mark.is_unlocked()) {
1747 ensure_lock_stack_space(current);
1748 assert(!lock_stack.is_full(), "must have made room on the lock stack");
1749 assert(!lock_stack.contains(obj), "thread must not already hold the lock");
1750 // Try to swing into 'fast-locked' state.
1751 markWord locked_mark = mark.set_fast_locked();
1752 markWord old_mark = mark;
1753 mark = obj->cas_set_mark(locked_mark, old_mark);
1754 if (old_mark == mark) {
1755 // Successfully fast-locked, push object to lock-stack and return.
1756 lock_stack.push(obj);
1757 return true;
1758 }
1759 }
1760 return false;
1761 }
1762
1763 bool ObjectSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
1764 assert(UseObjectMonitorTable, "must be");
  // Spins with exponential backoff, accumulating O(2^log_spin_limit) spins in total.
1766 const int log_spin_limit = os::is_MP() ? FastLockingSpins : 1;
1767 const int log_min_safepoint_check_interval = 10;
1768
1769 markWord mark = obj->mark();
1770 const auto should_spin = [&]() {
1771 if (!mark.has_monitor()) {
1772 // Spin while not inflated.
1773 return true;
1774 } else if (observed_deflation) {
1775 // Spin while monitor is being deflated.
1776 ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
1777 return monitor == nullptr || monitor->is_being_async_deflated();
1778 }
1779 // Else stop spinning.
1780 return false;
1781 };
1782 // Always attempt to lock once even when safepoint synchronizing.
1783 bool should_process = false;
1784 for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
1785 // Spin with exponential backoff.
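      // Iteration i performs on the order of 2^i pauses, so the loop as a
      // whole accumulates O(2^log_spin_limit) pauses.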
1786 const int total_spin_count = 1 << i;
1787 const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
1788 const int outer_spin_count = total_spin_count / inner_spin_count;
1789 for (int outer = 0; outer < outer_spin_count; outer++) {
1790 should_process = SafepointMechanism::should_process(current);
1791 if (should_process) {
1792 // Stop spinning for safepoint.
1793 break;
1794 }
1795 for (int inner = 1; inner < inner_spin_count; inner++) {
1796 SpinPause();
1797 }
1798 }
1799
1800 if (fast_lock_try_enter(obj, lock_stack, current)) return true;
1801 }
1802 return false;
1803 }
1804
1805 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
1806 // When called with locking_thread != Thread::current() some mechanism must synchronize
1807 // the locking_thread with respect to the current thread. Currently only used when
1808 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
1809 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
1810
1811 assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
1812 JavaThread* current = JavaThread::current();
1813 VerifyThreadState vts(locking_thread, current);
1814
1815 if (obj->klass()->is_value_based()) {
1816 ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
1817 }
1818
1819 LockStack& lock_stack = locking_thread->lock_stack();
1820
1821 ObjectMonitor* monitor = nullptr;
1822 if (lock_stack.contains(obj())) {
1823 monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
1824 bool entered = monitor->enter_for(locking_thread);
1825 assert(entered, "recursive ObjectMonitor::enter_for must succeed");
1826 } else {
1827 do {
1828 // It is assumed that enter_for must enter on an object without contention.
1829 monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
1830 // But there may still be a race with deflation.
1831 } while (monitor == nullptr);
1832 }
1833
1834 assert(monitor != nullptr, "ObjectSynchronizer::enter_for must succeed");
1835 assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared");
1836 }
1837
1838 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
1839 assert(current == JavaThread::current(), "must be");
1840
1841 if (obj->klass()->is_value_based()) {
1842 ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
1843 }
1844
1845 CacheSetter cache_setter(current, lock);
1846
  // Used when deflation is observed. Progress here requires progress
  // from the deflater. After observing that the deflater is not
  // making progress (after two yields), switch to sleeping.
1850 SpinYield spin_yield(0, 2);
1851 bool observed_deflation = false;
1852
1853 LockStack& lock_stack = current->lock_stack();
1854
1855 if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
1856 // Recursively fast locked
1857 return;
1858 }
1859
1860 if (lock_stack.contains(obj())) {
1861 ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
1862 bool entered = monitor->enter(current);
1863 assert(entered, "recursive ObjectMonitor::enter must succeed");
1864 cache_setter.set_monitor(monitor);
1865 return;
1866 }
1867
1868 while (true) {
1869 // Fast-locking does not use the 'lock' argument.
1870 // Fast-lock spinning to avoid inflating for short critical sections.
1871 // The goal is to only inflate when the extra cost of using ObjectMonitors
1872 // is worth it.
1873 // If deflation has been observed we also spin while deflation is ongoing.
1874 if (fast_lock_try_enter(obj(), lock_stack, current)) {
1875 return;
1876 } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
1877 return;
1878 }
1879
1880 if (observed_deflation) {
1881 spin_yield.wait();
1882 }
1883
1884 ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
1885 if (monitor != nullptr) {
1886 cache_setter.set_monitor(monitor);
1887 return;
1888 }
1889
1890 // If inflate_and_enter returns nullptr it is because a deflated monitor
1891 // was encountered. Fallback to fast locking. The deflater is responsible
1892 // for clearing out the monitor and transitioning the markWord back to
1893 // fast locking.
1894 observed_deflation = true;
1895 }
1896 }
1897
1898 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
1899 assert(current == Thread::current(), "must be");
1900
1901 markWord mark = object->mark();
1902 assert(!mark.is_unlocked(), "must be");
1903
1904 LockStack& lock_stack = current->lock_stack();
1905 if (mark.is_fast_locked()) {
1906 if (lock_stack.try_recursive_exit(object)) {
1907 // This is a recursive exit which succeeded
1908 return;
1909 }
1910 if (lock_stack.is_recursive(object)) {
1911 // Must inflate recursive locks if try_recursive_exit fails
      // This happens for unstructured unlocks; try_recursive_exit could
      // potentially be fixed to handle these.
1914 inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1915 }
1916 }
1917
1918 while (mark.is_fast_locked()) {
1919 markWord unlocked_mark = mark.set_unlocked();
1920 markWord old_mark = mark;
1921 mark = object->cas_set_mark(unlocked_mark, old_mark);
1922 if (old_mark == mark) {
1923 // CAS successful, remove from lock_stack
1924 size_t recursion = lock_stack.remove(object) - 1;
1925 assert(recursion == 0, "Should not have unlocked here");
1926 return;
1927 }
1928 }
1929
1930 assert(mark.has_monitor(), "must be");
1931 // The monitor exists
1932 ObjectMonitor* monitor;
1933 if (UseObjectMonitorTable) {
1934 monitor = read_caches(current, lock, object);
1935 if (monitor == nullptr) {
1936 monitor = get_monitor_from_table(current, object);
1937 }
1938 } else {
1939 monitor = ObjectSynchronizer::read_monitor(mark);
1940 }
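  // If this thread fast-locked the object but another thread inflated the lock,
  // the monitor is still anonymously owned: claim ownership and carry over the
  // recursion count from the lock stack before exiting.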
1941 if (monitor->has_anonymous_owner()) {
1942 assert(current->lock_stack().contains(object), "current must have object on its lock stack");
1943 monitor->set_owner_from_anonymous(current);
1944 monitor->set_recursions(current->lock_stack().remove(object) - 1);
1945 }
1946
1947 monitor->exit(current);
1948 }
1949
1950 // ObjectSynchronizer::inflate_locked_or_imse is used to get an
1951 // inflated ObjectMonitor* from contexts which require that, such as
1952 // notify/wait and jni_exit. Fast locking keeps the invariant that it
1953 // only inflates if it is already locked by the current thread or the current
1954 // thread is in the process of entering. To maintain this invariant we need to
1955 // throw a java.lang.IllegalMonitorStateException before inflating if the
1956 // current thread is not the owner.
1957 ObjectMonitor* ObjectSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
1958 JavaThread* current = THREAD;
1959
1960 for (;;) {
1961 markWord mark = obj->mark_acquire();
1962 if (mark.is_unlocked()) {
1963 // No lock, IMSE.
1964 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1965 "current thread is not owner", nullptr);
1966 }
1967
1968 if (mark.is_fast_locked()) {
1969 if (!current->lock_stack().contains(obj)) {
1970 // Fast locked by other thread, IMSE.
1971 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1972 "current thread is not owner", nullptr);
1973 } else {
1974 // Current thread owns the lock, must inflate
1975 return inflate_fast_locked_object(obj, cause, current, current);
1976 }
1977 }
1978
1979 assert(mark.has_monitor(), "must be");
1980 ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
1981 if (monitor != nullptr) {
1982 if (monitor->has_anonymous_owner()) {
1983 LockStack& lock_stack = current->lock_stack();
1984 if (lock_stack.contains(obj)) {
1985 // Current thread owns the lock but someone else inflated it.
1986 // Fix owner and pop lock stack.
1987 monitor->set_owner_from_anonymous(current);
1988 monitor->set_recursions(lock_stack.remove(obj) - 1);
1989 } else {
1990 // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
1991 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1992 "current thread is not owner", nullptr);
1993 }
1994 }
1995 return monitor;
1996 }
1997 }
1998 }
1999
2000 ObjectMonitor* ObjectSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {
2001
  // The locking_thread parameter requires that locking_thread == JavaThread::current()
  // or that locking_thread is suspended throughout the call by some other mechanism.
  // Even with fast locking, locking_thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However it is only
2006 // important for the correctness of the fast locking algorithm that the thread
2007 // is set when called from ObjectSynchronizer::enter from the owning thread,
2008 // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
2009 EventJavaMonitorInflate event;
2010
2011 for (;;) {
2012 const markWord mark = object->mark_acquire();
2013
2014 // The mark can be in one of the following states:
2015 // * inflated - Just return if using stack-locking.
2016 // If using fast-locking and the ObjectMonitor owner
2017 // is anonymous and the locking_thread owns the
2018 // object lock, then we make the locking_thread
2019 // the ObjectMonitor owner and remove the lock from
2020 // the locking_thread's lock stack.
2021 // * fast-locked - Coerce it to inflated from fast-locked.
2022 // * unlocked - Aggressively inflate the object.
2023
2024 // CASE: inflated
2025 if (mark.has_monitor()) {
2026 ObjectMonitor* inf = mark.monitor();
2027 markWord dmw = inf->header();
2028 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2029 if (inf->has_anonymous_owner() &&
2030 locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
2031 inf->set_owner_from_anonymous(locking_thread);
2032 size_t removed = locking_thread->lock_stack().remove(object);
2033 inf->set_recursions(removed - 1);
2034 }
2035 return inf;
2036 }
2037
2038 // CASE: fast-locked
2039 // Could be fast-locked either by the locking_thread or by some other thread.
2040 //
2041 // Note that we allocate the ObjectMonitor speculatively, _before_
2042 // attempting to set the object's mark to the new ObjectMonitor. If
2043 // the locking_thread owns the monitor, then we set the ObjectMonitor's
2044 // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
2045 // to anonymous. If we lose the race to set the object's mark to the
2046 // new ObjectMonitor, then we just delete it and loop around again.
2047 //
2048 if (mark.is_fast_locked()) {
2049 ObjectMonitor* monitor = new ObjectMonitor(object);
2050 monitor->set_header(mark.set_unlocked());
2051 bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
2052 if (own) {
2053 // Owned by locking_thread.
2054 monitor->set_owner(locking_thread);
2055 } else {
2056 // Owned by somebody else.
2057 monitor->set_anonymous_owner();
2058 }
2059 markWord monitor_mark = markWord::encode(monitor);
2060 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
2061 if (old_mark == mark) {
2062 // Success! Return inflated monitor.
2063 if (own) {
2064 size_t removed = locking_thread->lock_stack().remove(object);
2065 monitor->set_recursions(removed - 1);
2066 }
2067 // Once the ObjectMonitor is configured and object is associated
2068 // with the ObjectMonitor, it is safe to allow async deflation:
2069 ObjectSynchronizer::_in_use_list.add(monitor);
2070
2071 log_inflate(current, object, cause);
2072 if (event.should_commit()) {
2073 post_monitor_inflate_event(&event, object, cause);
2074 }
2075 return monitor;
2076 } else {
2077 delete monitor;
2078 continue; // Interference -- just retry
2079 }
2080 }
2081
2082 // CASE: unlocked
2083 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2084 // If we know we're inflating for entry it's better to inflate by swinging a
2085 // pre-locked ObjectMonitor pointer into the object header. A successful
2086 // CAS inflates the object *and* confers ownership to the inflating thread.
2087 // In the current implementation we use a 2-step mechanism where we CAS()
2088 // to inflate and then CAS() again to try to swing _owner from null to current.
2089 // An inflateTry() method that we could call from enter() would be useful.
2090
2091 assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
2092 ObjectMonitor* m = new ObjectMonitor(object);
    // Prepare m for installation - set monitor to initial state.
2094 m->set_header(mark);
2095
2096 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2097 delete m;
2098 m = nullptr;
2099 continue;
2100 // interference - the markword changed - just retry.
2101 // The state-transitions are one-way, so there's no chance of
2102 // live-lock -- "Inflated" is an absorbing state.
2103 }
2104
2105 // Once the ObjectMonitor is configured and object is associated
2106 // with the ObjectMonitor, it is safe to allow async deflation:
2107 ObjectSynchronizer::_in_use_list.add(m);
2108
2109 log_inflate(current, object, cause);
2110 if (event.should_commit()) {
2111 post_monitor_inflate_event(&event, object, cause);
2112 }
2113 return m;
2114 }
2115 }
2116
2117 ObjectMonitor* ObjectSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
2118 VerifyThreadState vts(locking_thread, current);
2119 assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");
2120
2121 ObjectMonitor* monitor;
2122
2123 if (!UseObjectMonitorTable) {
2124 return inflate_into_object_header(object, cause, locking_thread, current);
2125 }
2126
  // Inflating requires a hash code: with UseObjectMonitorTable the hash is
  // stored in the monitor when it is added to the table (see add_monitor).
2128 ObjectSynchronizer::FastHashCode(current, object);
2129
2130 markWord mark = object->mark_acquire();
2131 assert(!mark.is_unlocked(), "Cannot be unlocked");
2132
2133 for (;;) {
2134 // Fetch the monitor from the table
2135 monitor = get_or_insert_monitor(object, current, cause);
2136
    // ObjectMonitors are always inserted as anonymously owned; this thread is
    // the current holder of the monitor. So unless the entry is stale and
    // contains a deflating monitor, it must be anonymously owned.
2140 if (monitor->has_anonymous_owner()) {
2141 // The monitor must be anonymously owned if it was added
2142 assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
2143 // New fresh monitor
2144 break;
2145 }
2146
    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflater make progress and remove this
    // entry before we are allowed to add a new one.
2150 os::naked_yield();
2151 assert(monitor->is_being_async_deflated(), "Should be the reason");
2152 }
2153
2154 // Set the mark word; loop to handle concurrent updates to other parts of the mark word
2155 while (mark.is_fast_locked()) {
2156 mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2157 }
2158
2159 // Indicate that the monitor now has a known owner
2160 monitor->set_owner_from_anonymous(locking_thread);
2161
2162 // Remove the entry from the thread's lock stack
2163 monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);
2164
2165 if (locking_thread == current) {
2166 // Only change the thread local state of the current thread.
2167 locking_thread->om_set_monitor_cache(monitor);
2168 }
2169
2170 return monitor;
2171 }
2172
2173 ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
2174 VerifyThreadState vts(locking_thread, current);
2175
  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of 'locking_thread'.
2178
2179 ObjectMonitor* monitor = nullptr;
2180
2181 if (!UseObjectMonitorTable) {
2182 // Do the old inflate and enter.
2183 monitor = inflate_into_object_header(object, cause, locking_thread, current);
2184
2185 bool entered;
2186 if (locking_thread == current) {
2187 entered = monitor->enter(locking_thread);
2188 } else {
2189 entered = monitor->enter_for(locking_thread);
2190 }
2191
2192 // enter returns false for deflation found.
2193 return entered ? monitor : nullptr;
2194 }
2195
2196 NoSafepointVerifier nsv;
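  // 'object' is used as a naked oop below, so no safepoint may occur until we
  // either return or pause the verifier just before the blocking enter (see
  // PauseNoSafepointVerifier below).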
2197
2198 // Try to get the monitor from the thread-local cache.
2199 // There's no need to use the cache if we are locking
2200 // on behalf of another thread.
2201 if (current == locking_thread) {
2202 monitor = read_caches(current, lock, object);
2203 }
2204
2205 // Get or create the monitor
2206 if (monitor == nullptr) {
2207 // Lightweight monitors require that hash codes are installed first
2208 ObjectSynchronizer::FastHashCode(locking_thread, object);
2209 monitor = get_or_insert_monitor(object, current, cause);
2210 }
2211
2212 if (monitor->try_enter(locking_thread)) {
2213 return monitor;
2214 }
2215
2216 // Holds is_being_async_deflated() stable throughout this function.
2217 ObjectMonitorContentionMark contention_mark(monitor);
2218
  // First handle the case where the monitor from the table is deflated.
2220 if (monitor->is_being_async_deflated()) {
2221 // The MonitorDeflation thread is deflating the monitor. The locking thread
2222 // must spin until further progress has been made.
2223
2224 // Clear the BasicLock cache as it may contain this monitor.
2225 lock->clear_object_monitor_cache();
2226
2227 const markWord mark = object->mark_acquire();
2228
2229 if (mark.has_monitor()) {
2230 // Waiting on the deflation thread to remove the deflated monitor from the table.
2231 os::naked_yield();
2232
2233 } else if (mark.is_fast_locked()) {
2234 // Some other thread managed to fast-lock the lock, or this is a
2235 // recursive lock from the same thread; yield for the deflation
2236 // thread to remove the deflated monitor from the table.
2237 os::naked_yield();
2238
2239 } else {
2240 assert(mark.is_unlocked(), "Implied");
2241 // Retry immediately
2242 }
2243
    // Returning nullptr causes the caller to retry.
2245 return nullptr;
2246 }
2247
2248 for (;;) {
2249 const markWord mark = object->mark_acquire();
2250 // The mark can be in one of the following states:
2251 // * inflated - If the ObjectMonitor owner is anonymous
2252 // and the locking_thread owns the object
2253 // lock, then we make the locking_thread
2254 // the ObjectMonitor owner and remove the
2255 // lock from the locking_thread's lock stack.
2256 // * fast-locked - Coerce it to inflated from fast-locked.
    //   * neutral     - Inflate the object. A successful CAS means the
    //                   inflating thread owns the lock.
2258
2259 // CASE: inflated
2260 if (mark.has_monitor()) {
2261 LockStack& lock_stack = locking_thread->lock_stack();
2262 if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
2263 // The lock is fast-locked by the locking thread,
2264 // convert it to a held monitor with a known owner.
2265 monitor->set_owner_from_anonymous(locking_thread);
2266 monitor->set_recursions(lock_stack.remove(object) - 1);
2267 }
2268
2269 break; // Success
2270 }
2271
2272 // CASE: fast-locked
2273 // Could be fast-locked either by locking_thread or by some other thread.
2274 //
2275 if (mark.is_fast_locked()) {
2276 markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2277 if (old_mark != mark) {
2278 // CAS failed
2279 continue;
2280 }
2281
2282 // Success! Return inflated monitor.
2283 LockStack& lock_stack = locking_thread->lock_stack();
2284 if (lock_stack.contains(object)) {
2285 // The lock is fast-locked by the locking thread,
2286 // convert it to a held monitor with a known owner.
2287 monitor->set_owner_from_anonymous(locking_thread);
2288 monitor->set_recursions(lock_stack.remove(object) - 1);
2289 }
2290
2291 break; // Success
2292 }
2293
2294 // CASE: neutral (unlocked)
2295
    // Catch if the object's header is not neutral (i.e. not locked and
    // not marked).
2298 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2299 markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2300 if (old_mark != mark) {
2301 // CAS failed
2302 continue;
2303 }
2304
2305 // Transitioned from unlocked to monitor means locking_thread owns the lock.
2306 monitor->set_owner_from_anonymous(locking_thread);
2307
2308 return monitor;
2309 }
2310
2311 if (current == locking_thread) {
2312 // One round of spinning
2313 if (monitor->spin_enter(locking_thread)) {
2314 return monitor;
2315 }
2316
2317 // Monitor is contended, take the time before entering to fix the lock stack.
2318 LockStackInflateContendedLocks().inflate(current);
2319 }
2320
2321 // enter can block for safepoints; clear the unhandled object oop
2322 PauseNoSafepointVerifier pnsv(&nsv);
2323 object = nullptr;
2324
2325 if (current == locking_thread) {
2326 monitor->enter_with_contention_mark(locking_thread, contention_mark);
2327 } else {
2328 monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
2329 }
2330
2331 return monitor;
2332 }
2333
2334 void ObjectSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
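  // Restore the object's mark word before dropping the table entry. obj may be
  // nullptr if the object has died; stale entries for dead objects are removed
  // by is_dead (see remove_monitor), so removal here is allowed to fail.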
2335 if (obj != nullptr) {
2336 deflate_mark_word(obj);
2337 }
2338 bool removed = remove_monitor(current, monitor, obj);
2339 if (obj != nullptr) {
2340 assert(removed, "Should have removed the entry if obj was alive");
2341 }
2342 }
2343
2344 ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
2345 assert(UseObjectMonitorTable, "must be");
2346 return ObjectMonitorTable::monitor_get(current, obj);
2347 }
2348
2349 bool ObjectSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
2350 assert(UseObjectMonitorTable, "must be");
2351 return ObjectMonitorTable::contains_monitor(current, monitor);
2352 }
2353
2354 ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
2355 return mark.monitor();
2356 }
2357
2358 ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) {
2359 return ObjectSynchronizer::read_monitor(current, obj, obj->mark());
2360 }
2361
2362 ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
2363 if (!UseObjectMonitorTable) {
2364 return read_monitor(mark);
2365 } else {
2366 return ObjectSynchronizer::get_monitor_from_table(current, obj);
2367 }
2368 }
2369
2370 bool ObjectSynchronizer::quick_enter_internal(oop obj, BasicLock* lock, JavaThread* current) {
2371 assert(current->thread_state() == _thread_in_Java, "must be");
2372 assert(obj != nullptr, "must be");
2373 NoSafepointVerifier nsv;
2374
2375 LockStack& lock_stack = current->lock_stack();
2376 if (lock_stack.is_full()) {
2377 // Always go into runtime if the lock stack is full.
2378 return false;
2379 }
2380
2381 const markWord mark = obj->mark();
2382
2383 #ifndef _LP64
  // Only for 32-bit platforms, which have limited support for fast locking outside the runtime.
2385 if (lock_stack.try_recursive_enter(obj)) {
2386 // Recursive lock successful.
2387 return true;
2388 }
2389
2390 if (mark.is_unlocked()) {
2391 markWord locked_mark = mark.set_fast_locked();
2392 if (obj->cas_set_mark(locked_mark, mark) == mark) {
2393 // Successfully fast-locked, push object to lock-stack and return.
2394 lock_stack.push(obj);
2395 return true;
2396 }
2397 }
2398 #endif
2399
2400 if (mark.has_monitor()) {
2401 ObjectMonitor* monitor;
2402 if (UseObjectMonitorTable) {
2403 monitor = read_caches(current, lock, obj);
2404 } else {
2405 monitor = ObjectSynchronizer::read_monitor(mark);
2406 }
2407
2408 if (monitor == nullptr) {
2409 // Take the slow-path on a cache miss.
2410 return false;
2411 }
2412
2413 if (UseObjectMonitorTable) {
2414 // Set the monitor regardless of success.
2415 // Either we successfully lock on the monitor, or we retry with the
2416 // monitor in the slow path. If the monitor gets deflated, it will be
2417 // cleared, either by the CacheSetter if we fast lock in enter or in
2418 // inflate_and_enter when we see that the monitor is deflated.
2419 lock->set_object_monitor_cache(monitor);
2420 }
2421
2422 if (monitor->spin_enter(current)) {
2423 return true;
2424 }
2425 }
2426
2427 // Slow-path.
2428 return false;
2429 }
2430
2431 bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
2432 assert(current->thread_state() == _thread_in_Java, "invariant");
2433 NoSafepointVerifier nsv;
2434 if (obj == nullptr) return false; // Need to throw NPE
2435
2436 if (obj->klass()->is_value_based()) {
2437 return false;
2438 }
2439
2440 return ObjectSynchronizer::quick_enter_internal(obj, lock, current);
2441 }