1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/vmSymbols.hpp"
26 #include "gc/shared/collectedHeap.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/padded.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/markWord.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "runtime/atomicAccess.hpp"
37 #include "runtime/basicLock.inline.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/globals.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/handshake.hpp"
42 #include "runtime/interfaceSupport.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/lockStack.inline.hpp"
45 #include "runtime/mutexLocker.hpp"
46 #include "runtime/objectMonitor.inline.hpp"
47 #include "runtime/os.inline.hpp"
48 #include "runtime/osThread.hpp"
49 #include "runtime/safepointMechanism.inline.hpp"
50 #include "runtime/safepointVerifiers.hpp"
51 #include "runtime/sharedRuntime.hpp"
52 #include "runtime/stubRoutines.hpp"
53 #include "runtime/synchronizer.hpp"
54 #include "runtime/threads.hpp"
55 #include "runtime/timer.hpp"
56 #include "runtime/timerTrace.hpp"
57 #include "runtime/trimNativeHeap.hpp"
58 #include "runtime/vframe.hpp"
59 #include "runtime/vmThread.hpp"
60 #include "utilities/align.hpp"
61 #include "utilities/concurrentHashTable.inline.hpp"
62 #include "utilities/concurrentHashTableTasks.inline.hpp"
63 #include "utilities/dtrace.hpp"
64 #include "utilities/events.hpp"
65 #include "utilities/globalCounter.inline.hpp"
66 #include "utilities/globalDefinitions.hpp"
67 #include "utilities/linkedlist.hpp"
68 #include "utilities/preserveException.hpp"
69
70 class ObjectMonitorDeflationLogging;
71
72 void MonitorList::add(ObjectMonitor* m) {
73 ObjectMonitor* head;
74 do {
75 head = AtomicAccess::load(&_head);
76 m->set_next_om(head);
77 } while (AtomicAccess::cmpxchg(&_head, head, m) != head);
78
79 size_t count = AtomicAccess::add(&_count, 1u, memory_order_relaxed);
80 size_t old_max;
81 do {
82 old_max = AtomicAccess::load(&_max);
83 if (count <= old_max) {
84 break;
85 }
86 } while (AtomicAccess::cmpxchg(&_max, old_max, count, memory_order_relaxed) != old_max);
87 }
88
89 size_t MonitorList::count() const {
90 return AtomicAccess::load(&_count);
91 }
92
93 size_t MonitorList::max() const {
94 return AtomicAccess::load(&_max);
95 }
96
97 class ObjectMonitorDeflationSafepointer : public StackObj {
98 JavaThread* const _current;
99 ObjectMonitorDeflationLogging* const _log;
100
101 public:
102 ObjectMonitorDeflationSafepointer(JavaThread* current, ObjectMonitorDeflationLogging* log)
103 : _current(current), _log(log) {}
104
105 void block_for_safepoint(const char* op_name, const char* count_name, size_t counter);
106 };
107
108 // Walk the in-use list and unlink deflated ObjectMonitors.
109 // Returns the number of unlinked ObjectMonitors.
110 size_t MonitorList::unlink_deflated(size_t deflated_count,
111 GrowableArray<ObjectMonitor*>* unlinked_list,
112 ObjectMonitorDeflationSafepointer* safepointer) {
113 size_t unlinked_count = 0;
114 ObjectMonitor* prev = nullptr;
115 ObjectMonitor* m = AtomicAccess::load_acquire(&_head);
116
117 while (m != nullptr) {
118 if (m->is_being_async_deflated()) {
119 // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
120 // modify the list once per batch. The batch starts at "m".
121 size_t unlinked_batch = 0;
122 ObjectMonitor* next = m;
123 // Look for at most MonitorUnlinkBatch monitors, or the number of
124 // deflated and not unlinked monitors, whatever comes first.
125 assert(deflated_count >= unlinked_count, "Sanity: underflow");
126 size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
127 do {
128 ObjectMonitor* next_next = next->next_om();
129 unlinked_batch++;
130 unlinked_list->append(next);
131 next = next_next;
132 if (unlinked_batch >= unlinked_batch_limit) {
133 // Reached the max batch, so bail out of the gathering loop.
134 break;
135 }
136 if (prev == nullptr && AtomicAccess::load(&_head) != m) {
137 // Current batch used to be at head, but it is not at head anymore.
138 // Bail out and figure out where we currently are. This avoids long
139 // walks searching for new prev during unlink under heavy list inserts.
140 break;
141 }
142 } while (next != nullptr && next->is_being_async_deflated());
143
144 // Unlink the found batch.
145 if (prev == nullptr) {
146 // The current batch is the first batch, so there is a chance that it starts at head.
147 // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
148 ObjectMonitor* prev_head = AtomicAccess::cmpxchg(&_head, m, next);
149 if (prev_head != m) {
150 // Something must have updated the head. Figure out the actual prev for this batch.
151 for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
152 prev = n;
153 }
154 assert(prev != nullptr, "Should have found the prev for the current batch");
155 prev->set_next_om(next);
156 }
157 } else {
158 // The current batch is preceded by another batch. This guarantees the current batch
159 // does not start at head. Unlink the entire current batch without updating the head.
160 assert(AtomicAccess::load(&_head) != m, "Sanity");
161 prev->set_next_om(next);
162 }
163
164 unlinked_count += unlinked_batch;
165 if (unlinked_count >= deflated_count) {
166 // Reached the max so bail out of the searching loop.
167 // There should be no more deflated monitors left.
168 break;
169 }
170 m = next;
171 } else {
172 prev = m;
173 m = m->next_om();
174 }
175
176 // Must check for a safepoint/handshake and honor it.
177 safepointer->block_for_safepoint("unlinking", "unlinked_count", unlinked_count);
178 }
179
180 #ifdef ASSERT
181 // Invariant: the code above should unlink all deflated monitors.
182 // The code that runs after this unlinking does not expect deflated monitors.
183 // Notably, attempting to deflate the already deflated monitor would break.
184 {
185 ObjectMonitor* m = AtomicAccess::load_acquire(&_head);
186 while (m != nullptr) {
187 assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
188 m = m->next_om();
189 }
190 }
191 #endif
192
193 AtomicAccess::sub(&_count, unlinked_count);
194 return unlinked_count;
195 }
196
197 MonitorList::Iterator MonitorList::iterator() const {
198 return Iterator(AtomicAccess::load_acquire(&_head));
199 }
200
201 ObjectMonitor* MonitorList::Iterator::next() {
202 ObjectMonitor* current = _current;
203 _current = current->next_om();
204 return current;
205 }
206
207 // The "core" versions of monitor enter and exit reside in this file.
208 // The interpreter and compilers contain specialized transliterated
209 // variants of the enter-exit fast-path operations. See c2_MacroAssembler_x86.cpp
210 // fast_lock(...) for instance. If you make changes here, make sure to modify the
211 // interpreter, and both C1 and C2 fast-path inline locking code emission.
212 //
213 // -----------------------------------------------------------------------------
214
215 #ifdef DTRACE_ENABLED
216
217 // Only bother with this argument setup if dtrace is available
218 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
219
220 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
221 char* bytes = nullptr; \
222 int len = 0; \
223 jlong jtid = SharedRuntime::get_java_tid(thread); \
224 Symbol* klassname = obj->klass()->name(); \
225 if (klassname != nullptr) { \
226 bytes = (char*)klassname->bytes(); \
227 len = klassname->utf8_length(); \
228 }
229
230 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
231 { \
232 if (DTraceMonitorProbes) { \
233 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
234 HOTSPOT_MONITOR_WAIT(jtid, \
235 (uintptr_t)(monitor), bytes, len, (millis)); \
236 } \
237 }
238
239 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
240 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
241 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
242
243 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
244 { \
245 if (DTraceMonitorProbes) { \
246 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
247 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \
248 (uintptr_t)(monitor), bytes, len); \
249 } \
250 }
251
252 #else // ndef DTRACE_ENABLED
253
254 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
255 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
256
257 #endif // ndef DTRACE_ENABLED
258
259 // This exists only as a workaround of dtrace bug 6254741
260 static int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
261 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
262 return 0;
263 }
264
265 static constexpr size_t inflation_lock_count() {
266 return 256;
267 }
268
269 // Static storage for an array of PlatformMutex.
270 alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];
271
272 static inline PlatformMutex* inflation_lock(size_t index) {
273 return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
274 }
275
276 void ObjectSynchronizer::initialize() {
277 for (size_t i = 0; i < inflation_lock_count(); i++) {
278 ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
279 }
280 // Start the ceiling with the estimate for one thread.
281 set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
282
283 // Start the timer for deflations, so it does not trigger immediately.
284 _last_async_deflation_time_ns = os::javaTimeNanos();
285
286 ObjectSynchronizer::create_om_table();
287 }
288
289 MonitorList ObjectSynchronizer::_in_use_list;
290 // monitors_used_above_threshold() policy is as follows:
291 //
292 // The ratio of the current _in_use_list count to the ceiling is used
293 // to determine if we are above MonitorUsedDeflationThreshold and need
294 // to do an async monitor deflation cycle. The ceiling is increased by
295 // AvgMonitorsPerThreadEstimate when a thread is added to the system
296 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
297 // removed from the system.
298 //
299 // Note: If the _in_use_list max exceeds the ceiling, then
300 // monitors_used_above_threshold() will use the in_use_list max instead
301 // of the thread count derived ceiling because we have used more
302 // ObjectMonitors than the estimated average.
303 //
304 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
305 // no-progress async monitor deflation cycles in a row, then the ceiling
306 // is adjusted upwards by monitors_used_above_threshold().
307 //
308 // Start the ceiling with the estimate for one thread in initialize()
309 // which is called after cmd line options are processed.
310 static size_t _in_use_list_ceiling = 0;
311 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
312 bool volatile ObjectSynchronizer::_is_final_audit = false;
313 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
314 static uintx _no_progress_cnt = 0;
315 static bool _no_progress_skip_increment = false;
316
317 // =====================> Quick functions
318
319 // The quick_* forms are special fast-path variants used to improve
320 // performance. In the simplest case, a "quick_*" implementation could
321 // simply return false, in which case the caller will perform the necessary
322 // state transitions and call the slow-path form.
323 // The fast-path is designed to handle frequently arising cases in an efficient
324 // manner and is just a degenerate "optimistic" variant of the slow-path.
325 // returns true -- to indicate the call was satisfied.
326 // returns false -- to indicate the call needs the services of the slow-path.
327 // A no-loitering ordinance is in effect for code in the quick_* family
328 // operators: safepoints or indefinite blocking (blocking that might span a
329 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
330 // entry.
331 //
332 // Consider: An interesting optimization is to have the JIT recognize the
333 // following common idiom:
334 // synchronized (someobj) { .... ; notify(); }
335 // That is, we find a notify() or notifyAll() call that immediately precedes
336 // the monitorexit operation. In that case the JIT could fuse the operations
337 // into a single notifyAndExit() runtime primitive.
338
339 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
340 assert(current->thread_state() == _thread_in_Java, "invariant");
341 NoSafepointVerifier nsv;
342 if (obj == nullptr) return false; // slow-path for invalid obj
343 const markWord mark = obj->mark();
344
345 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
346 // Degenerate notify
347 // fast-locked by caller so by definition the implied waitset is empty.
348 return true;
349 }
350
351 if (mark.has_monitor()) {
352 ObjectMonitor* const mon = read_monitor(current, obj, mark);
353 if (mon == nullptr) {
354 // Racing with inflation/deflation go slow path
355 return false;
356 }
357 assert(mon->object() == oop(obj), "invariant");
358 if (!mon->has_owner(current)) return false; // slow-path for IMS exception
359
360 if (mon->first_waiter() != nullptr) {
361 // We have one or more waiters. Since this is an inflated monitor
362 // that we own, we quickly notify them here and now, avoiding the slow-path.
363 if (all) {
364 mon->quick_notifyAll(current);
365 } else {
366 mon->quick_notify(current);
367 }
368 }
369 return true;
370 }
371
372 // other IMS exception states take the slow-path
373 return false;
374 }
375
376 // Handle notifications when synchronizing on value based classes
377 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
378 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
379 frame last_frame = locking_thread->last_frame();
380 bool bcp_was_adjusted = false;
381 // Don't decrement bcp if it points to the frame's first instruction. This happens when
382 // handle_sync_on_value_based_class() is called because of a synchronized method. There
383 // is no actual monitorenter instruction in the byte code in this case.
384 if (last_frame.is_interpreted_frame() &&
385 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
386 // adjust bcp to point back to monitorenter so that we print the correct line numbers
387 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
388 bcp_was_adjusted = true;
389 }
390
391 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
392 ResourceMark rm;
393 stringStream ss;
394 locking_thread->print_active_stack_on(&ss);
395 char* base = (char*)strstr(ss.base(), "at");
396 char* newline = (char*)strchr(ss.base(), '\n');
397 if (newline != nullptr) {
398 *newline = '\0';
399 }
400 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
401 } else {
402 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
403 ResourceMark rm;
404 Log(valuebasedclasses) vblog;
405
406 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
407 if (locking_thread->has_last_Java_frame()) {
408 LogStream info_stream(vblog.info());
409 locking_thread->print_active_stack_on(&info_stream);
410 } else {
411 vblog.info("Cannot find the last Java frame");
412 }
413
414 EventSyncOnValueBasedClass event;
415 if (event.should_commit()) {
416 event.set_valueBasedClass(obj->klass());
417 event.commit();
418 }
419 }
420
421 if (bcp_was_adjusted) {
422 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
423 }
424 }
425
426 // -----------------------------------------------------------------------------
427 // JNI locks on java objects
428 // NOTE: must use heavy weight monitor to handle jni monitor enter
429 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
430 // Top native frames in the stack will not be seen if we attempt
431 // preemption, since we start walking from the last Java anchor.
432 NoPreemptMark npm(current);
433
434 if (obj->klass()->is_value_based()) {
435 handle_sync_on_value_based_class(obj, current);
436 }
437
438 // the current locking is from JNI instead of Java code
439 current->set_current_pending_monitor_is_from_java(false);
440 // An async deflation can race after the inflate() call and before
441 // enter() can make the ObjectMonitor busy. enter() returns false if
442 // we have lost the race to async deflation and we simply try again.
443 while (true) {
444 BasicLock lock;
445 if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
446 break;
447 }
448 }
449 current->set_current_pending_monitor_is_from_java(true);
450 }
451
452 // NOTE: must use heavy weight monitor to handle jni monitor exit
453 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
454 JavaThread* current = THREAD;
455
456 ObjectMonitor* monitor;
457 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
458 // If this thread has locked the object, exit the monitor. We
459 // intentionally do not use CHECK on check_owner because we must exit the
460 // monitor even if an exception was already pending.
461 if (monitor->check_owner(THREAD)) {
462 monitor->exit(current);
463 }
464 }
465
466 // -----------------------------------------------------------------------------
467 // Internal VM locks on java objects
468 // standard constructor, allows locking failures
469 ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
470 _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
471 assert(!_thread->preempting(), "");
472
473 _thread->check_for_valid_safepoint_state();
474
475 if (_obj() != nullptr) {
476 ObjectSynchronizer::enter(_obj, &_lock, _thread);
477
478 if (_thread->preempting()) {
479 // If preemption was cancelled we acquired the monitor after freezing
480 // the frames. Redoing the vm call laterĀ in thaw will require us to
481 // release it since the call should look like the original one. We
482 // do it in ~ObjectLocker to reduce the window of time we hold the
483 // monitor since we can't do anything useful with it now, and would
484 // otherwise just force other vthreads to preempt in case they try
485 // to acquire this monitor.
486 _skip_exit = !_thread->preemption_cancelled();
487 ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
488 _thread->set_pending_preempted_exception();
489
490 }
491 }
492 }
493
494 ObjectLocker::~ObjectLocker() {
495 if (_obj() != nullptr && !_skip_exit) {
496 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
497 }
498 }
499
500 void ObjectLocker::wait_uninterruptibly(TRAPS) {
501 ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
502 if (_thread->preempting()) {
503 _skip_exit = true;
504 ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
505 _thread->set_pending_preempted_exception();
506 }
507 }
508
509 // -----------------------------------------------------------------------------
510 // Wait/Notify/NotifyAll
511 // NOTE: must use heavy weight monitor to handle wait()
512
513 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
514 JavaThread* current = THREAD;
515 if (millis < 0) {
516 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
517 }
518
519 ObjectMonitor* monitor;
520 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
521
522 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
523 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
524
525 // This dummy call is in place to get around dtrace bug 6254741. Once
526 // that's fixed we can uncomment the following line, remove the call
527 // and change this function back into a "void" func.
528 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
529 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
530 return ret_code;
531 }
532
533 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
534 assert(millis >= 0, "timeout value is negative");
535
536 ObjectMonitor* monitor;
537 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
538 monitor->wait(millis, false, THREAD);
539 }
540
541
542 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
543 JavaThread* current = THREAD;
544
545 markWord mark = obj->mark();
546 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
547 // Not inflated so there can't be any waiters to notify.
548 return;
549 }
550 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
551 monitor->notify(CHECK);
552 }
553
554 // NOTE: see comment of notify()
555 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
556 JavaThread* current = THREAD;
557
558 markWord mark = obj->mark();
559 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
560 // Not inflated so there can't be any waiters to notify.
561 return;
562 }
563
564 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
565 monitor->notifyAll(CHECK);
566 }
567
568 // -----------------------------------------------------------------------------
569 // Hash Code handling
570
571 struct SharedGlobals {
572 char _pad_prefix[OM_CACHE_LINE_SIZE];
573 // This is a highly shared mostly-read variable.
574 // To avoid false-sharing it needs to be the sole occupant of a cache line.
575 volatile int stw_random;
576 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
577 // Hot RW variable -- Sequester to avoid false-sharing
578 volatile int hc_sequence;
579 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
580 };
581
582 static SharedGlobals GVars;
583
584 // hashCode() generation :
585 //
586 // Possibilities:
587 // * MD5Digest of {obj,stw_random}
588 // * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
589 // * A DES- or AES-style SBox[] mechanism
590 // * One of the Phi-based schemes, such as:
591 // 2654435761 = 2^32 * Phi (golden ratio)
592 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
593 // * A variation of Marsaglia's shift-xor RNG scheme.
594 // * (obj ^ stw_random) is appealing, but can result
595 // in undesirable regularity in the hashCode values of adjacent objects
596 // (objects allocated back-to-back, in particular). This could potentially
597 // result in hashtable collisions and reduced hashtable efficiency.
598 // There are simple ways to "diffuse" the middle address bits over the
599 // generated hashCode values:
600
601 static intptr_t get_next_hash(Thread* current, oop obj) {
602 intptr_t value = 0;
603 if (hashCode == 0) {
604 // This form uses global Park-Miller RNG.
605 // On MP system we'll have lots of RW access to a global, so the
606 // mechanism induces lots of coherency traffic.
607 value = os::random();
608 } else if (hashCode == 1) {
609 // This variation has the property of being stable (idempotent)
610 // between STW operations. This can be useful in some of the 1-0
611 // synchronization schemes.
612 intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
613 value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
614 } else if (hashCode == 2) {
615 value = 1; // for sensitivity testing
616 } else if (hashCode == 3) {
617 value = ++GVars.hc_sequence;
618 } else if (hashCode == 4) {
619 value = cast_from_oop<intptr_t>(obj);
620 } else {
621 // Marsaglia's xor-shift scheme with thread-specific state
622 // This is probably the best overall implementation -- we'll
623 // likely make this the default in future releases.
624 unsigned t = current->_hashStateX;
625 t ^= (t << 11);
626 current->_hashStateX = current->_hashStateY;
627 current->_hashStateY = current->_hashStateZ;
628 current->_hashStateZ = current->_hashStateW;
629 unsigned v = current->_hashStateW;
630 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
631 current->_hashStateW = v;
632 value = v;
633 }
634
635 value &= markWord::hash_mask;
636 if (value == 0) value = 0xBAD;
637 assert(value != markWord::no_hash, "invariant");
638 return value;
639 }
640
641 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
642 while (true) {
643 ObjectMonitor* monitor = nullptr;
644 markWord temp, test;
645 intptr_t hash;
646 markWord mark = obj->mark_acquire();
647 // If UseObjectMonitorTable is set the hash can simply be installed in the
648 // object header, since the monitor isn't in the object header.
649 if (UseObjectMonitorTable || !mark.has_monitor()) {
650 hash = mark.hash();
651 if (hash != 0) { // if it has a hash, just return it
652 return hash;
653 }
654 hash = get_next_hash(current, obj); // get a new hash
655 temp = mark.copy_set_hash(hash); // merge the hash into header
656 // try to install the hash
657 test = obj->cas_set_mark(temp, mark);
658 if (test == mark) { // if the hash was installed, return it
659 return hash;
660 }
661 // CAS failed, retry
662 continue;
663
664 // Failed to install the hash. It could be that another thread
665 // installed the hash just before our attempt or inflation has
666 // occurred or... so we fall thru to inflate the monitor for
667 // stability and then install the hash.
668 } else {
669 assert(!mark.is_unlocked() && !mark.is_fast_locked(), "invariant");
670 monitor = mark.monitor();
671 temp = monitor->header();
672 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
673 hash = temp.hash();
674 if (hash != 0) {
675 // It has a hash.
676
677 // Separate load of dmw/header above from the loads in
678 // is_being_async_deflated().
679
680 // dmw/header and _contentions may get written by different threads.
681 // Make sure to observe them in the same order when having several observers.
682 OrderAccess::loadload_for_IRIW();
683
684 if (monitor->is_being_async_deflated()) {
685 // But we can't safely use the hash if we detect that async
686 // deflation has occurred. So we attempt to restore the
687 // header/dmw to the object's header so that we only retry
688 // once if the deflater thread happens to be slow.
689 monitor->install_displaced_markword_in_object(obj);
690 continue;
691 }
692 return hash;
693 }
694 // Fall thru so we only have one place that installs the hash in
695 // the ObjectMonitor.
696 }
697
698 // NOTE: an async deflation can race after we get the monitor and
699 // before we can update the ObjectMonitor's header with the hash
700 // value below.
701 assert(mark.has_monitor(), "must be");
702 monitor = mark.monitor();
703
704 // Load ObjectMonitor's header/dmw field and see if it has a hash.
705 mark = monitor->header();
706 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
707 hash = mark.hash();
708 if (hash == 0) { // if it does not have a hash
709 hash = get_next_hash(current, obj); // get a new hash
710 temp = mark.copy_set_hash(hash) ; // merge the hash into header
711 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
712 uintptr_t v = AtomicAccess::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
713 test = markWord(v);
714 if (test != mark) {
715 // The attempt to update the ObjectMonitor's header/dmw field
716 // did not work. This can happen if another thread managed to
717 // merge in the hash just before our cmpxchg().
718 // If we add any new usages of the header/dmw field, this code
719 // will need to be updated.
720 hash = test.hash();
721 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
722 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
723 }
724 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
725 // If we detect that async deflation has occurred, then we
726 // attempt to restore the header/dmw to the object's header
727 // so that we only retry once if the deflater thread happens
728 // to be slow.
729 monitor->install_displaced_markword_in_object(obj);
730 continue;
731 }
732 }
733 // We finally get the hash.
734 return hash;
735 }
736 }
737
738 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
739 Handle h_obj) {
740 assert(current == JavaThread::current(), "Can only be called on current thread");
741 oop obj = h_obj();
742
743 markWord mark = obj->mark_acquire();
744
745 if (mark.is_fast_locked()) {
746 // fast-locking case, see if lock is in current's lock stack
747 return current->lock_stack().contains(h_obj());
748 }
749
750 while (mark.has_monitor()) {
751 ObjectMonitor* monitor = read_monitor(current, obj, mark);
752 if (monitor != nullptr) {
753 return monitor->is_entered(current) != 0;
754 }
755 // Racing with inflation/deflation, retry
756 mark = obj->mark_acquire();
757
758 if (mark.is_fast_locked()) {
759 // Some other thread fast_locked, current could not have held the lock
760 return false;
761 }
762 }
763
764 // Unlocked case, header in place
765 assert(mark.is_unlocked(), "sanity check");
766 return false;
767 }
768
769 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
770 oop obj = h_obj();
771 markWord mark = obj->mark_acquire();
772
773 if (mark.is_fast_locked()) {
774 // fast-locked so get owner from the object.
775 // owning_thread_from_object() may also return null here:
776 return Threads::owning_thread_from_object(t_list, h_obj());
777 }
778
779 while (mark.has_monitor()) {
780 ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark);
781 if (monitor != nullptr) {
782 return Threads::owning_thread_from_monitor(t_list, monitor);
783 }
784 // Racing with inflation/deflation, retry
785 mark = obj->mark_acquire();
786
787 if (mark.is_fast_locked()) {
788 // Some other thread fast_locked
789 return Threads::owning_thread_from_object(t_list, h_obj());
790 }
791 }
792
793 // Unlocked case, header in place
794 // Cannot have assertion since this object may have been
795 // locked by another thread when reaching here.
796 // assert(mark.is_unlocked(), "sanity check");
797
798 return nullptr;
799 }
800
801 // Visitors ...
802
803 // Iterate over all ObjectMonitors.
804 template <typename Function>
805 void ObjectSynchronizer::monitors_iterate(Function function) {
806 MonitorList::Iterator iter = _in_use_list.iterator();
807 while (iter.has_next()) {
808 ObjectMonitor* monitor = iter.next();
809 function(monitor);
810 }
811 }
812
813 // Iterate ObjectMonitors owned by any thread and where the owner `filter`
814 // returns true.
815 template <typename OwnerFilter>
816 void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
817 monitors_iterate([&](ObjectMonitor* monitor) {
818 // This function is only called at a safepoint or when the
819 // target thread is suspended or when the target thread is
820 // operating on itself. The current closures in use today are
821 // only interested in an owned ObjectMonitor and ownership
822 // cannot be dropped under the calling contexts so the
823 // ObjectMonitor cannot be async deflated.
824 if (monitor->has_owner() && filter(monitor)) {
825 assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
826
827 closure->do_monitor(monitor);
828 }
829 });
830 }
831
832 // Iterate ObjectMonitors where the owner == thread; this does NOT include
833 // ObjectMonitors where owner is set to a stack-lock address in thread.
834 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
835 int64_t key = ObjectMonitor::owner_id_from(thread);
836 auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
837 return owned_monitors_iterate_filtered(closure, thread_filter);
838 }
839
840 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, oop vthread) {
841 int64_t key = ObjectMonitor::owner_id_from(vthread);
842 auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
843 return owned_monitors_iterate_filtered(closure, thread_filter);
844 }
845
846 // Iterate ObjectMonitors owned by any thread.
847 void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
848 auto all_filter = [&](ObjectMonitor* monitor) { return true; };
849 return owned_monitors_iterate_filtered(closure, all_filter);
850 }
851
852 static bool monitors_used_above_threshold(MonitorList* list) {
853 if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
854 return false;
855 }
856 size_t monitors_used = list->count();
857 if (monitors_used == 0) { // empty list is easy
858 return false;
859 }
860 size_t old_ceiling = ObjectSynchronizer::in_use_list_ceiling();
861 // Make sure that we use a ceiling value that is not lower than
862 // previous, not lower than the recorded max used by the system, and
863 // not lower than the current number of monitors in use (which can
864 // race ahead of max). The result is guaranteed > 0.
865 size_t ceiling = MAX3(old_ceiling, list->max(), monitors_used);
866
867 // Check if our monitor usage is above the threshold:
868 size_t monitor_usage = (monitors_used * 100LL) / ceiling;
869 if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
870 // Deflate monitors if over the threshold percentage, unless no
871 // progress on previous deflations.
872 bool is_above_threshold = true;
873
874 // Check if it's time to adjust the in_use_list_ceiling up, due
875 // to too many async deflation attempts without any progress.
876 if (NoAsyncDeflationProgressMax != 0 &&
877 _no_progress_cnt >= NoAsyncDeflationProgressMax) {
878 double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
879 size_t delta = (size_t)(ceiling * remainder) + 1;
880 size_t new_ceiling = (ceiling > SIZE_MAX - delta)
881 ? SIZE_MAX // Overflow, let's clamp new_ceiling.
882 : ceiling + delta;
883
884 ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
885 log_info(monitorinflation)("Too many deflations without progress; "
886 "bumping in_use_list_ceiling from %zu"
887 " to %zu", old_ceiling, new_ceiling);
888 _no_progress_cnt = 0;
889 ceiling = new_ceiling;
890
891 // Check if our monitor usage is still above the threshold:
892 monitor_usage = (monitors_used * 100LL) / ceiling;
893 is_above_threshold = int(monitor_usage) > MonitorUsedDeflationThreshold;
894 }
895 log_info(monitorinflation)("monitors_used=%zu, ceiling=%zu"
896 ", monitor_usage=%zu, threshold=%d",
897 monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
898 return is_above_threshold;
899 }
900
901 return false;
902 }
903
904 size_t ObjectSynchronizer::in_use_list_count() {
905 return _in_use_list.count();
906 }
907
908 size_t ObjectSynchronizer::in_use_list_max() {
909 return _in_use_list.max();
910 }
911
912 size_t ObjectSynchronizer::in_use_list_ceiling() {
913 return _in_use_list_ceiling;
914 }
915
916 void ObjectSynchronizer::dec_in_use_list_ceiling() {
917 AtomicAccess::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
918 }
919
920 void ObjectSynchronizer::inc_in_use_list_ceiling() {
921 AtomicAccess::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
922 }
923
924 void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
925 _in_use_list_ceiling = new_value;
926 }
927
928 bool ObjectSynchronizer::is_async_deflation_needed() {
929 if (is_async_deflation_requested()) {
930 // Async deflation request.
931 log_info(monitorinflation)("Async deflation needed: explicit request");
932 return true;
933 }
934
935 jlong time_since_last = time_since_last_async_deflation_ms();
936
937 if (AsyncDeflationInterval > 0 &&
938 time_since_last > AsyncDeflationInterval &&
939 monitors_used_above_threshold(&_in_use_list)) {
940 // It's been longer than our specified deflate interval and there
941 // are too many monitors in use. We don't deflate more frequently
942 // than AsyncDeflationInterval (unless is_async_deflation_requested)
943 // in order to not swamp the MonitorDeflationThread.
944 log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
945 return true;
946 }
947
948 if (GuaranteedAsyncDeflationInterval > 0 &&
949 time_since_last > GuaranteedAsyncDeflationInterval) {
950 // It's been longer than our specified guaranteed deflate interval.
951 // We need to clean up the used monitors even if the threshold is
952 // not reached, to keep the memory utilization at bay when many threads
953 // touched many monitors.
954 log_info(monitorinflation)("Async deflation needed: guaranteed interval (%zd ms) "
955 "is greater than time since last deflation (" JLONG_FORMAT " ms)",
956 GuaranteedAsyncDeflationInterval, time_since_last);
957
958 // If this deflation has no progress, then it should not affect the no-progress
959 // tracking, otherwise threshold heuristics would think it was triggered, experienced
960 // no progress, and needs to backoff more aggressively. In this "no progress" case,
961 // the generic code would bump the no-progress counter, and we compensate for that
962 // by telling it to skip the update.
963 //
964 // If this deflation has progress, then it should let non-progress tracking
965 // know about this, otherwise the threshold heuristics would kick in, potentially
966 // experience no-progress due to aggressive cleanup by this deflation, and think
967 // it is still in no-progress stride. In this "progress" case, the generic code would
968 // zero the counter, and we allow it to happen.
969 _no_progress_skip_increment = true;
970
971 return true;
972 }
973
974 return false;
975 }
976
977 void ObjectSynchronizer::request_deflate_idle_monitors() {
978 MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
979 set_is_async_deflation_requested(true);
980 ml.notify_all();
981 }
982
983 bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
984 JavaThread* current = JavaThread::current();
985 bool ret_code = false;
986
987 jlong last_time = last_async_deflation_time_ns();
988
989 request_deflate_idle_monitors();
990
991 const int N_CHECKS = 5;
992 for (int i = 0; i < N_CHECKS; i++) { // sleep for at most 5 seconds
993 if (last_async_deflation_time_ns() > last_time) {
994 log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
995 ret_code = true;
996 break;
997 }
998 {
999 // JavaThread has to honor the blocking protocol.
1000 ThreadBlockInVM tbivm(current);
1001 os::naked_short_sleep(999); // sleep for almost 1 second
1002 }
1003 }
1004 if (!ret_code) {
1005 log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
1006 }
1007
1008 return ret_code;
1009 }
1010
1011 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1012 return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
1013 }
1014
1015 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
1016 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
1017 //
1018 size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
1019 MonitorList::Iterator iter = _in_use_list.iterator();
1020 size_t deflated_count = 0;
1021 Thread* current = Thread::current();
1022
1023 while (iter.has_next()) {
1024 if (deflated_count >= (size_t)MonitorDeflationMax) {
1025 break;
1026 }
1027 ObjectMonitor* mid = iter.next();
1028 if (mid->deflate_monitor(current)) {
1029 deflated_count++;
1030 }
1031
1032 // Must check for a safepoint/handshake and honor it.
1033 safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
1034 }
1035
1036 return deflated_count;
1037 }
1038
1039 class DeflationHandshakeClosure : public HandshakeClosure {
1040 public:
1041 DeflationHandshakeClosure() : HandshakeClosure("DeflationHandshakeClosure") {}
1042
1043 void do_thread(Thread* thread) {
1044 log_trace(monitorinflation)("DeflationHandshakeClosure::do_thread: thread="
1045 INTPTR_FORMAT, p2i(thread));
1046 if (thread->is_Java_thread()) {
1047 // Clear OM cache
1048 JavaThread* jt = JavaThread::cast(thread);
1049 jt->om_clear_monitor_cache();
1050 }
1051 }
1052 };
1053
1054 class VM_RendezvousGCThreads : public VM_Operation {
1055 public:
1056 bool evaluate_at_safepoint() const override { return false; }
1057 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1058 void doit() override {
1059 Universe::heap()->safepoint_synchronize_begin();
1060 Universe::heap()->safepoint_synchronize_end();
1061 };
1062 };
1063
1064 static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
1065 ObjectMonitorDeflationSafepointer* safepointer) {
1066 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1067 size_t deleted_count = 0;
1068 for (ObjectMonitor* monitor: *delete_list) {
1069 delete monitor;
1070 deleted_count++;
1071 // A JavaThread must check for a safepoint/handshake and honor it.
1072 safepointer->block_for_safepoint("deletion", "deleted_count", deleted_count);
1073 }
1074 return deleted_count;
1075 }
1076
1077 class ObjectMonitorDeflationLogging: public StackObj {
1078 LogStreamHandle(Debug, monitorinflation) _debug;
1079 LogStreamHandle(Info, monitorinflation) _info;
1080 LogStream* _stream;
1081 elapsedTimer _timer;
1082
1083 size_t ceiling() const { return ObjectSynchronizer::in_use_list_ceiling(); }
1084 size_t count() const { return ObjectSynchronizer::in_use_list_count(); }
1085 size_t max() const { return ObjectSynchronizer::in_use_list_max(); }
1086
1087 public:
1088 ObjectMonitorDeflationLogging()
1089 : _debug(), _info(), _stream(nullptr) {
1090 if (_debug.is_enabled()) {
1091 _stream = &_debug;
1092 } else if (_info.is_enabled()) {
1093 _stream = &_info;
1094 }
1095 }
1096
1097 void begin() {
1098 if (_stream != nullptr) {
1099 _stream->print_cr("begin deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
1100 ceiling(), count(), max());
1101 _timer.start();
1102 }
1103 }
1104
1105 void before_handshake(size_t unlinked_count) {
1106 if (_stream != nullptr) {
1107 _timer.stop();
1108 _stream->print_cr("before handshaking: unlinked_count=%zu"
1109 ", in_use_list stats: ceiling=%zu, count="
1110 "%zu, max=%zu",
1111 unlinked_count, ceiling(), count(), max());
1112 }
1113 }
1114
1115 void after_handshake() {
1116 if (_stream != nullptr) {
1117 _stream->print_cr("after handshaking: in_use_list stats: ceiling="
1118 "%zu, count=%zu, max=%zu",
1119 ceiling(), count(), max());
1120 _timer.start();
1121 }
1122 }
1123
1124 void end(size_t deflated_count, size_t unlinked_count) {
1125 if (_stream != nullptr) {
1126 _timer.stop();
1127 if (deflated_count != 0 || unlinked_count != 0 || _debug.is_enabled()) {
1128 _stream->print_cr("deflated_count=%zu, {unlinked,deleted}_count=%zu monitors in %3.7f secs",
1129 deflated_count, unlinked_count, _timer.seconds());
1130 }
1131 _stream->print_cr("end deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
1132 ceiling(), count(), max());
1133 }
1134 }
1135
1136 void before_block_for_safepoint(const char* op_name, const char* cnt_name, size_t cnt) {
1137 if (_stream != nullptr) {
1138 _timer.stop();
1139 _stream->print_cr("pausing %s: %s=%zu, in_use_list stats: ceiling="
1140 "%zu, count=%zu, max=%zu",
1141 op_name, cnt_name, cnt, ceiling(), count(), max());
1142 }
1143 }
1144
1145 void after_block_for_safepoint(const char* op_name) {
1146 if (_stream != nullptr) {
1147 _stream->print_cr("resuming %s: in_use_list stats: ceiling=%zu"
1148 ", count=%zu, max=%zu", op_name,
1149 ceiling(), count(), max());
1150 _timer.start();
1151 }
1152 }
1153 };
1154
1155 void ObjectMonitorDeflationSafepointer::block_for_safepoint(const char* op_name, const char* count_name, size_t counter) {
1156 if (!SafepointMechanism::should_process(_current)) {
1157 return;
1158 }
1159
1160 // A safepoint/handshake has started.
1161 _log->before_block_for_safepoint(op_name, count_name, counter);
1162
1163 {
1164 // Honor block request.
1165 ThreadBlockInVM tbivm(_current);
1166 }
1167
1168 _log->after_block_for_safepoint(op_name);
1169 }
1170
1171 // This function is called by the MonitorDeflationThread to deflate
1172 // ObjectMonitors.
1173 size_t ObjectSynchronizer::deflate_idle_monitors() {
1174 JavaThread* current = JavaThread::current();
1175 assert(current->is_monitor_deflation_thread(), "The only monitor deflater");
1176
1177 // The async deflation request has been processed.
1178 _last_async_deflation_time_ns = os::javaTimeNanos();
1179 set_is_async_deflation_requested(false);
1180
1181 ObjectMonitorDeflationLogging log;
1182 ObjectMonitorDeflationSafepointer safepointer(current, &log);
1183
1184 log.begin();
1185
1186 // Deflate some idle ObjectMonitors.
1187 size_t deflated_count = deflate_monitor_list(&safepointer);
1188
1189 // Unlink the deflated ObjectMonitors from the in-use list.
1190 size_t unlinked_count = 0;
1191 size_t deleted_count = 0;
1192 if (deflated_count > 0) {
1193 ResourceMark rm(current);
1194 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1195 unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
1196
1197 #ifdef ASSERT
1198 if (UseObjectMonitorTable) {
1199 for (ObjectMonitor* monitor : delete_list) {
1200 assert(!ObjectSynchronizer::contains_monitor(current, monitor), "Should have been removed");
1201 }
1202 }
1203 #endif
1204
1205 log.before_handshake(unlinked_count);
1206
1207 // A JavaThread needs to handshake in order to safely free the
1208 // ObjectMonitors that were deflated in this cycle.
1209 DeflationHandshakeClosure dhc;
1210 Handshake::execute(&dhc);
1211 // Also, we sync and desync GC threads around the handshake, so that they can
1212 // safely read the mark-word and look-through to the object-monitor, without
1213 // being afraid that the object-monitor is going away.
1214 VM_RendezvousGCThreads sync_gc;
1215 VMThread::execute(&sync_gc);
1216
1217 log.after_handshake();
1218
1219 // After the handshake, safely free the ObjectMonitors that were
1220 // deflated and unlinked in this cycle.
1221
1222 // Delete the unlinked ObjectMonitors.
1223 deleted_count = delete_monitors(&delete_list, &safepointer);
1224 assert(unlinked_count == deleted_count, "must be");
1225 }
1226
1227 log.end(deflated_count, unlinked_count);
1228
1229 GVars.stw_random = os::random();
1230
1231 if (deflated_count != 0) {
1232 _no_progress_cnt = 0;
1233 } else if (_no_progress_skip_increment) {
1234 _no_progress_skip_increment = false;
1235 } else {
1236 _no_progress_cnt++;
1237 }
1238
1239 return deflated_count;
1240 }
1241
1242 // Monitor cleanup on JavaThread::exit
1243
1244 // Iterate through monitor cache and attempt to release thread's monitors
1245 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1246 private:
1247 JavaThread* _thread;
1248
1249 public:
1250 ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
1251 void do_monitor(ObjectMonitor* mid) {
1252 mid->complete_exit(_thread);
1253 }
1254 };
1255
1256 // Release all inflated monitors owned by current thread. Lightweight monitors are
1257 // ignored. This is meant to be called during JNI thread detach which assumes
1258 // all remaining monitors are heavyweight. All exceptions are swallowed.
1259 // Scanning the extant monitor list can be time consuming.
1260 // A simple optimization is to add a per-thread flag that indicates a thread
1261 // called jni_monitorenter() during its lifetime.
1262 //
1263 // Instead of NoSafepointVerifier it might be cheaper to
1264 // use an idiom of the form:
1265 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1266 // <code that must not run at safepoint>
1267 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1268 // Since the tests are extremely cheap we could leave them enabled
1269 // for normal product builds.
1270
1271 void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
1272 assert(current == JavaThread::current(), "must be current Java thread");
1273 NoSafepointVerifier nsv;
1274 ReleaseJavaMonitorsClosure rjmc(current);
1275 ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
1276 assert(!current->has_pending_exception(), "Should not be possible");
1277 current->clear_pending_exception();
1278 }
1279
1280 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1281 switch (cause) {
1282 case inflate_cause_vm_internal: return "VM Internal";
1283 case inflate_cause_monitor_enter: return "Monitor Enter";
1284 case inflate_cause_wait: return "Monitor Wait";
1285 case inflate_cause_notify: return "Monitor Notify";
1286 case inflate_cause_jni_enter: return "JNI Monitor Enter";
1287 case inflate_cause_jni_exit: return "JNI Monitor Exit";
1288 default:
1289 ShouldNotReachHere();
1290 }
1291 return "Unknown";
1292 }
1293
1294 //------------------------------------------------------------------------------
1295 // Debugging code
1296
1297 u_char* ObjectSynchronizer::get_gvars_addr() {
1298 return (u_char*)&GVars;
1299 }
1300
1301 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
1302 return (u_char*)&GVars.hc_sequence;
1303 }
1304
1305 size_t ObjectSynchronizer::get_gvars_size() {
1306 return sizeof(SharedGlobals);
1307 }
1308
1309 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
1310 return (u_char*)&GVars.stw_random;
1311 }
1312
1313 // Do the final audit and print of ObjectMonitor stats; must be done
1314 // by the VMThread at VM exit time.
1315 void ObjectSynchronizer::do_final_audit_and_print_stats() {
1316 assert(Thread::current()->is_VM_thread(), "sanity check");
1317
1318 if (is_final_audit()) { // Only do the audit once.
1319 return;
1320 }
1321 set_is_final_audit();
1322 log_info(monitorinflation)("Starting the final audit.");
1323
1324 if (log_is_enabled(Info, monitorinflation)) {
1325 LogStreamHandle(Info, monitorinflation) ls;
1326 audit_and_print_stats(&ls, true /* on_exit */);
1327 }
1328 }
1329
1330 // This function can be called by the MonitorDeflationThread or it can be called when
1331 // we are trying to exit the VM. The list walker functions can run in parallel with
1332 // the other list operations.
1333 // Calls to this function can be added in various places as a debugging
1334 // aid.
1335 //
1336 void ObjectSynchronizer::audit_and_print_stats(outputStream* ls, bool on_exit) {
1337 int error_cnt = 0;
1338
1339 ls->print_cr("Checking in_use_list:");
1340 chk_in_use_list(ls, &error_cnt);
1341
1342 if (error_cnt == 0) {
1343 ls->print_cr("No errors found in in_use_list checks.");
1344 } else {
1345 log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
1346 }
1347
1348 // When exiting, only log the interesting entries at the Info level.
1349 // When called at intervals by the MonitorDeflationThread, log output
1350 // at the Trace level since there can be a lot of it.
1351 if (!on_exit && log_is_enabled(Trace, monitorinflation)) {
1352 LogStreamHandle(Trace, monitorinflation) ls_tr;
1353 log_in_use_monitor_details(&ls_tr, true /* log_all */);
1354 } else if (on_exit) {
1355 log_in_use_monitor_details(ls, false /* log_all */);
1356 }
1357
1358 ls->flush();
1359
1360 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1361 }
1362
1363 // Check the in_use_list; log the results of the checks.
1364 void ObjectSynchronizer::chk_in_use_list(outputStream* out, int *error_cnt_p) {
1365 size_t l_in_use_count = _in_use_list.count();
1366 size_t l_in_use_max = _in_use_list.max();
1367 out->print_cr("count=%zu, max=%zu", l_in_use_count,
1368 l_in_use_max);
1369
1370 size_t ck_in_use_count = 0;
1371 MonitorList::Iterator iter = _in_use_list.iterator();
1372 while (iter.has_next()) {
1373 ObjectMonitor* mid = iter.next();
1374 chk_in_use_entry(mid, out, error_cnt_p);
1375 ck_in_use_count++;
1376 }
1377
1378 if (l_in_use_count == ck_in_use_count) {
1379 out->print_cr("in_use_count=%zu equals ck_in_use_count=%zu",
1380 l_in_use_count, ck_in_use_count);
1381 } else {
1382 out->print_cr("WARNING: in_use_count=%zu is not equal to "
1383 "ck_in_use_count=%zu", l_in_use_count,
1384 ck_in_use_count);
1385 }
1386
1387 size_t ck_in_use_max = _in_use_list.max();
1388 if (l_in_use_max == ck_in_use_max) {
1389 out->print_cr("in_use_max=%zu equals ck_in_use_max=%zu",
1390 l_in_use_max, ck_in_use_max);
1391 } else {
1392 out->print_cr("WARNING: in_use_max=%zu is not equal to "
1393 "ck_in_use_max=%zu", l_in_use_max, ck_in_use_max);
1394 }
1395 }
1396
1397 // Check an in-use monitor entry; log any errors.
1398 void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
1399 int* error_cnt_p) {
1400 if (n->owner_is_DEFLATER_MARKER()) {
1401 // This could happen when monitor deflation blocks for a safepoint.
1402 return;
1403 }
1404
1405
1406 if (n->metadata() == 0) {
1407 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
1408 "have non-null _metadata (header/hash) field.", p2i(n));
1409 *error_cnt_p = *error_cnt_p + 1;
1410 }
1411
1412 const oop obj = n->object_peek();
1413 if (obj == nullptr) {
1414 return;
1415 }
1416
1417 const markWord mark = obj->mark();
1418 if (!mark.has_monitor()) {
1419 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1420 "object does not think it has a monitor: obj="
1421 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
1422 p2i(obj), mark.value());
1423 *error_cnt_p = *error_cnt_p + 1;
1424 return;
1425 }
1426
1427 ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
1428 if (n != obj_mon) {
1429 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1430 "object does not refer to the same monitor: obj="
1431 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
1432 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
1433 *error_cnt_p = *error_cnt_p + 1;
1434 }
1435 }
1436
1437 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
1438 // flags indicate why the entry is in-use, 'object' and 'object type'
1439 // indicate the associated object and its type.
1440 void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
1441 if (_in_use_list.count() > 0) {
1442 stringStream ss;
1443 out->print_cr("In-use monitor info%s:", log_all ? "" : " (eliding idle monitors)");
1444 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
1445 out->print_cr("%18s %s %18s %18s",
1446 "monitor", "BHL", "object", "object type");
1447 out->print_cr("================== === ================== ==================");
1448
1449 auto is_interesting = [&](ObjectMonitor* monitor) {
1450 return log_all || monitor->has_owner() || monitor->is_busy();
1451 };
1452
1453 monitors_iterate([&](ObjectMonitor* monitor) {
1454 if (is_interesting(monitor)) {
1455 const oop obj = monitor->object_peek();
1456 const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
1457 ResourceMark rm;
1458 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
1459 monitor->is_busy(), hash != 0, monitor->has_owner(),
1460 p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
1461 if (monitor->is_busy()) {
1462 out->print(" (%s)", monitor->is_busy_to_string(&ss));
1463 ss.reset();
1464 }
1465 out->cr();
1466 }
1467 });
1468 }
1469
1470 out->flush();
1471 }
1472
1473 // -----------------------------------------------------------------------------
1474 // ConcurrentHashTable storing links from objects to ObjectMonitors
1475 class ObjectMonitorTable : AllStatic {
1476 struct Config {
1477 using Value = ObjectMonitor*;
1478 static uintx get_hash(Value const& value, bool* is_dead) {
1479 return (uintx)value->hash();
1480 }
1481 static void* allocate_node(void* context, size_t size, Value const& value) {
1482 ObjectMonitorTable::inc_items_count();
1483 return AllocateHeap(size, mtObjectMonitor);
1484 };
1485 static void free_node(void* context, void* memory, Value const& value) {
1486 ObjectMonitorTable::dec_items_count();
1487 FreeHeap(memory);
1488 }
1489 };
1490 using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
1491
1492 static ConcurrentTable* _table;
1493 static volatile size_t _items_count;
1494 static size_t _table_size;
1495 static volatile bool _resize;
1496
1497 class Lookup : public StackObj {
1498 oop _obj;
1499
1500 public:
1501 explicit Lookup(oop obj) : _obj(obj) {}
1502
1503 uintx get_hash() const {
1504 uintx hash = _obj->mark().hash();
1505 assert(hash != 0, "should have a hash");
1506 return hash;
1507 }
1508
1509 bool equals(ObjectMonitor** value) {
1510 assert(*value != nullptr, "must be");
1511 return (*value)->object_refers_to(_obj);
1512 }
1513
1514 bool is_dead(ObjectMonitor** value) {
1515 assert(*value != nullptr, "must be");
1516 return false;
1517 }
1518 };
1519
1520 class LookupMonitor : public StackObj {
1521 ObjectMonitor* _monitor;
1522
1523 public:
1524 explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
1525
1526 uintx get_hash() const {
1527 return _monitor->hash();
1528 }
1529
1530 bool equals(ObjectMonitor** value) {
1531 return (*value) == _monitor;
1532 }
1533
1534 bool is_dead(ObjectMonitor** value) {
1535 assert(*value != nullptr, "must be");
1536 return (*value)->object_is_dead();
1537 }
1538 };
1539
1540 static void inc_items_count() {
1541 AtomicAccess::inc(&_items_count, memory_order_relaxed);
1542 }
1543
1544 static void dec_items_count() {
1545 AtomicAccess::dec(&_items_count, memory_order_relaxed);
1546 }
1547
1548 static double get_load_factor() {
1549 size_t count = AtomicAccess::load(&_items_count);
1550 return (double)count / (double)_table_size;
1551 }
1552
1553 static size_t table_size(Thread* current = Thread::current()) {
1554 return ((size_t)1) << _table->get_size_log2(current);
1555 }
1556
1557 static size_t max_log_size() {
1558 // TODO[OMTable]: Evaluate the max size.
1559 // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
1560 // Using MaxHeapSize directly this early may be wrong, and there
1561 // are definitely rounding errors (alignment).
1562 const size_t max_capacity = MaxHeapSize;
1563 const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
1564 const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
1565 const size_t log_max_objects = log2i_graceful(max_objects);
1566
1567 return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
1568 }
1569
1570 static size_t min_log_size() {
1571 // ~= log(AvgMonitorsPerThreadEstimate default)
1572 return 10;
1573 }
1574
1575 template<typename V>
1576 static size_t clamp_log_size(V log_size) {
1577 return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
1578 }
1579
1580 static size_t initial_log_size() {
1581 const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
1582 return clamp_log_size(estimate);
1583 }
1584
1585 static size_t grow_hint () {
1586 return ConcurrentTable::DEFAULT_GROW_HINT;
1587 }
1588
1589 public:
1590 static void create() {
1591 _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
1592 _items_count = 0;
1593 _table_size = table_size();
1594 _resize = false;
1595 }
1596
1597 static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
1598 #ifdef ASSERT
1599 if (SafepointSynchronize::is_at_safepoint()) {
1600 bool has_monitor = obj->mark().has_monitor();
1601 assert(has_monitor == (monitor != nullptr),
1602 "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
1603 BOOL_TO_STR(has_monitor), p2i(monitor));
1604 }
1605 #endif
1606 }
1607
1608 static ObjectMonitor* monitor_get(Thread* current, oop obj) {
1609 ObjectMonitor* result = nullptr;
1610 Lookup lookup_f(obj);
1611 auto found_f = [&](ObjectMonitor** found) {
1612 assert((*found)->object_peek() == obj, "must be");
1613 result = *found;
1614 };
1615 _table->get(current, lookup_f, found_f);
1616 verify_monitor_get_result(obj, result);
1617 return result;
1618 }
1619
1620 static void try_notify_grow() {
1621 if (!_table->is_max_size_reached() && !AtomicAccess::load(&_resize)) {
1622 AtomicAccess::store(&_resize, true);
1623 if (Service_lock->try_lock()) {
1624 Service_lock->notify();
1625 Service_lock->unlock();
1626 }
1627 }
1628 }
1629
1630 static bool should_shrink() {
1631 // Not implemented;
1632 return false;
1633 }
1634
1635 static constexpr double GROW_LOAD_FACTOR = 0.75;
1636
1637 static bool should_grow() {
1638 return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
1639 }
1640
1641 static bool should_resize() {
1642 return should_grow() || should_shrink() || AtomicAccess::load(&_resize);
1643 }
1644
1645 template<typename Task, typename... Args>
1646 static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
1647 if (task.prepare(current)) {
1648 log_trace(monitortable)("Started to %s", task_name);
1649 TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
1650 while (task.do_task(current, args...)) {
1651 task.pause(current);
1652 {
1653 ThreadBlockInVM tbivm(current);
1654 }
1655 task.cont(current);
1656 }
1657 task.done(current);
1658 return true;
1659 }
1660 return false;
1661 }
1662
1663 static bool grow(JavaThread* current) {
1664 ConcurrentTable::GrowTask grow_task(_table);
1665 if (run_task(current, grow_task, "Grow")) {
1666 _table_size = table_size(current);
1667 log_info(monitortable)("Grown to size: %zu", _table_size);
1668 return true;
1669 }
1670 return false;
1671 }
1672
1673 static bool clean(JavaThread* current) {
1674 ConcurrentTable::BulkDeleteTask clean_task(_table);
1675 auto is_dead = [&](ObjectMonitor** monitor) {
1676 return (*monitor)->object_is_dead();
1677 };
1678 auto do_nothing = [&](ObjectMonitor** monitor) {};
1679 NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
1680 return run_task(current, clean_task, "Clean", is_dead, do_nothing);
1681 }
1682
1683 static bool resize(JavaThread* current) {
1684 LogTarget(Info, monitortable) lt;
1685 bool success = false;
1686
1687 if (should_grow()) {
1688 lt.print("Start growing with load factor %f", get_load_factor());
1689 success = grow(current);
1690 } else {
1691 if (!_table->is_max_size_reached() && AtomicAccess::load(&_resize)) {
1692 lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
1693 }
1694 lt.print("Start cleaning with load factor %f", get_load_factor());
1695 success = clean(current);
1696 }
1697
1698 AtomicAccess::store(&_resize, false);
1699
1700 return success;
1701 }
1702
1703 static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
1704 // Enter the monitor into the concurrent hashtable.
1705 ObjectMonitor* result = monitor;
1706 Lookup lookup_f(obj);
1707 auto found_f = [&](ObjectMonitor** found) {
1708 assert((*found)->object_peek() == obj, "must be");
1709 result = *found;
1710 };
1711 bool grow;
1712 _table->insert_get(current, lookup_f, monitor, found_f, &grow);
1713 verify_monitor_get_result(obj, result);
1714 if (grow) {
1715 try_notify_grow();
1716 }
1717 return result;
1718 }
1719
1720 static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
1721 LookupMonitor lookup_f(monitor);
1722 return _table->remove(current, lookup_f);
1723 }
1724
1725 static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
1726 LookupMonitor lookup_f(monitor);
1727 bool result = false;
1728 auto found_f = [&](ObjectMonitor** found) {
1729 result = true;
1730 };
1731 _table->get(current, lookup_f, found_f);
1732 return result;
1733 }
1734
1735 static void print_on(outputStream* st) {
1736 auto printer = [&] (ObjectMonitor** entry) {
1737 ObjectMonitor* om = *entry;
1738 oop obj = om->object_peek();
1739 st->print("monitor=" PTR_FORMAT ", ", p2i(om));
1740 st->print("object=" PTR_FORMAT, p2i(obj));
1741 assert(obj->mark().hash() == om->hash(), "hash must match");
1742 st->cr();
1743 return true;
1744 };
1745 if (SafepointSynchronize::is_at_safepoint()) {
1746 _table->do_safepoint_scan(printer);
1747 } else {
1748 _table->do_scan(Thread::current(), printer);
1749 }
1750 }
1751 };
1752
1753 ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
1754 volatile size_t ObjectMonitorTable::_items_count = 0;
1755 size_t ObjectMonitorTable::_table_size = 0;
1756 volatile bool ObjectMonitorTable::_resize = false;
1757
1758 ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
1759 ObjectMonitor* monitor = get_monitor_from_table(current, object);
1760 if (monitor != nullptr) {
1761 *inserted = false;
1762 return monitor;
1763 }
1764
1765 ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
1766 alloced_monitor->set_anonymous_owner();
1767
1768 // Try insert monitor
1769 monitor = add_monitor(current, alloced_monitor, object);
1770
1771 *inserted = alloced_monitor == monitor;
1772 if (!*inserted) {
1773 delete alloced_monitor;
1774 }
1775
1776 return monitor;
1777 }
1778
1779 static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
1780 if (log_is_enabled(Trace, monitorinflation)) {
1781 ResourceMark rm(current);
1782 log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
1783 INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
1784 object->mark().value(), object->klass()->external_name(),
1785 ObjectSynchronizer::inflate_cause_name(cause));
1786 }
1787 }
1788
1789 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1790 const oop obj,
1791 ObjectSynchronizer::InflateCause cause) {
1792 assert(event != nullptr, "invariant");
1793 const Klass* monitor_klass = obj->klass();
1794 if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
1795 return;
1796 }
1797 event->set_monitorClass(monitor_klass);
1798 event->set_address((uintptr_t)(void*)obj);
1799 event->set_cause((u1)cause);
1800 event->commit();
1801 }
1802
1803 ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
1804 assert(UseObjectMonitorTable, "must be");
1805
1806 EventJavaMonitorInflate event;
1807
1808 bool inserted;
1809 ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);
1810
1811 if (inserted) {
1812 log_inflate(current, object, cause);
1813 if (event.should_commit()) {
1814 post_monitor_inflate_event(&event, object, cause);
1815 }
1816
1817 // The monitor has an anonymous owner so it is safe from async deflation.
1818 ObjectSynchronizer::_in_use_list.add(monitor);
1819 }
1820
1821 return monitor;
1822 }
1823
1824 // Add the hashcode to the monitor to match the object and put it in the hashtable.
1825 ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
1826 assert(UseObjectMonitorTable, "must be");
1827 assert(obj == monitor->object(), "must be");
1828
1829 intptr_t hash = obj->mark().hash();
1830 assert(hash != 0, "must be set when claiming the object monitor");
1831 monitor->set_hash(hash);
1832
1833 return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
1834 }
1835
1836 bool ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
1837 assert(UseObjectMonitorTable, "must be");
1838 assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
1839
1840 return ObjectMonitorTable::remove_monitor_entry(current, monitor);
1841 }
1842
1843 void ObjectSynchronizer::deflate_mark_word(oop obj) {
1844 assert(UseObjectMonitorTable, "must be");
1845
1846 markWord mark = obj->mark_acquire();
1847 assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
1848
1849 while (mark.has_monitor()) {
1850 const markWord new_mark = mark.clear_lock_bits().set_unlocked();
1851 mark = obj->cas_set_mark(new_mark, mark);
1852 }
1853 }
1854
1855 void ObjectSynchronizer::create_om_table() {
1856 if (!UseObjectMonitorTable) {
1857 return;
1858 }
1859 ObjectMonitorTable::create();
1860 }
1861
1862 bool ObjectSynchronizer::needs_resize() {
1863 if (!UseObjectMonitorTable) {
1864 return false;
1865 }
1866 return ObjectMonitorTable::should_resize();
1867 }
1868
1869 bool ObjectSynchronizer::resize_table(JavaThread* current) {
1870 if (!UseObjectMonitorTable) {
1871 return true;
1872 }
1873 return ObjectMonitorTable::resize(current);
1874 }
1875
1876 class ObjectSynchronizer::LockStackInflateContendedLocks : private OopClosure {
1877 private:
1878 oop _contended_oops[LockStack::CAPACITY];
1879 int _length;
1880
1881 void do_oop(oop* o) final {
1882 oop obj = *o;
1883 if (obj->mark_acquire().has_monitor()) {
1884 if (_length > 0 && _contended_oops[_length - 1] == obj) {
1885 // Recursive
1886 return;
1887 }
1888 _contended_oops[_length++] = obj;
1889 }
1890 }
1891
1892 void do_oop(narrowOop* o) final {
1893 ShouldNotReachHere();
1894 }
1895
1896 public:
1897 LockStackInflateContendedLocks() :
1898 _contended_oops(),
1899 _length(0) {};
1900
1901 void inflate(JavaThread* current) {
1902 assert(current == JavaThread::current(), "must be");
1903 current->lock_stack().oops_do(this);
1904 for (int i = 0; i < _length; i++) {
1905 ObjectSynchronizer::
1906 inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1907 }
1908 }
1909 };
1910
1911 void ObjectSynchronizer::ensure_lock_stack_space(JavaThread* current) {
1912 assert(current == JavaThread::current(), "must be");
1913 LockStack& lock_stack = current->lock_stack();
1914
1915 // Make room on lock_stack
1916 if (lock_stack.is_full()) {
1917 // Inflate contended objects
1918 LockStackInflateContendedLocks().inflate(current);
1919 if (lock_stack.is_full()) {
1920 // Inflate the oldest object
1921 inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
1922 }
1923 }
1924 }
1925
1926 class ObjectSynchronizer::CacheSetter : StackObj {
1927 JavaThread* const _thread;
1928 BasicLock* const _lock;
1929 ObjectMonitor* _monitor;
1930
1931 NONCOPYABLE(CacheSetter);
1932
1933 public:
1934 CacheSetter(JavaThread* thread, BasicLock* lock) :
1935 _thread(thread),
1936 _lock(lock),
1937 _monitor(nullptr) {}
1938
1939 ~CacheSetter() {
1940 // Only use the cache if using the table.
1941 if (UseObjectMonitorTable) {
1942 if (_monitor != nullptr) {
1943 // If the monitor is already in the BasicLock cache then it is most
1944 // likely in the thread cache, do not set it again to avoid reordering.
1945 if (_monitor != _lock->object_monitor_cache()) {
1946 _thread->om_set_monitor_cache(_monitor);
1947 _lock->set_object_monitor_cache(_monitor);
1948 }
1949 } else {
1950 _lock->clear_object_monitor_cache();
1951 }
1952 }
1953 }
1954
1955 void set_monitor(ObjectMonitor* monitor) {
1956 assert(_monitor == nullptr, "only set once");
1957 _monitor = monitor;
1958 }
1959
1960 };
1961
1962 // Reads first from the BasicLock cache then from the OMCache in the current thread.
1963 // C2 fast-path may have put the monitor in the cache in the BasicLock.
1964 inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) {
1965 ObjectMonitor* monitor = lock->object_monitor_cache();
1966 if (monitor == nullptr) {
1967 monitor = current->om_get_from_monitor_cache(object);
1968 }
1969 return monitor;
1970 }
1971
1972 class ObjectSynchronizer::VerifyThreadState {
1973 bool _no_safepoint;
1974
1975 public:
1976 VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
1977 assert(current == Thread::current(), "must be");
1978 assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
1979 if (_no_safepoint) {
1980 DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
1981 }
1982 }
1983 ~VerifyThreadState() {
1984 if (_no_safepoint){
1985 DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
1986 }
1987 }
1988 };
1989
1990 inline bool ObjectSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
1991 markWord mark = obj->mark();
1992 while (mark.is_unlocked()) {
1993 ensure_lock_stack_space(current);
1994 assert(!lock_stack.is_full(), "must have made room on the lock stack");
1995 assert(!lock_stack.contains(obj), "thread must not already hold the lock");
1996 // Try to swing into 'fast-locked' state.
1997 markWord locked_mark = mark.set_fast_locked();
1998 markWord old_mark = mark;
1999 mark = obj->cas_set_mark(locked_mark, old_mark);
2000 if (old_mark == mark) {
2001 // Successfully fast-locked, push object to lock-stack and return.
2002 lock_stack.push(obj);
2003 return true;
2004 }
2005 }
2006 return false;
2007 }
2008
2009 bool ObjectSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
2010 assert(UseObjectMonitorTable, "must be");
2011 // Will spin with exponential backoff with an accumulative O(2^spin_limit) spins.
2012 const int log_spin_limit = os::is_MP() ? FastLockingSpins : 1;
2013 const int log_min_safepoint_check_interval = 10;
2014
2015 markWord mark = obj->mark();
2016 const auto should_spin = [&]() {
2017 if (!mark.has_monitor()) {
2018 // Spin while not inflated.
2019 return true;
2020 } else if (observed_deflation) {
2021 // Spin while monitor is being deflated.
2022 ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
2023 return monitor == nullptr || monitor->is_being_async_deflated();
2024 }
2025 // Else stop spinning.
2026 return false;
2027 };
2028 // Always attempt to lock once even when safepoint synchronizing.
2029 bool should_process = false;
2030 for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
2031 // Spin with exponential backoff.
2032 const int total_spin_count = 1 << i;
2033 const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
2034 const int outer_spin_count = total_spin_count / inner_spin_count;
2035 for (int outer = 0; outer < outer_spin_count; outer++) {
2036 should_process = SafepointMechanism::should_process(current);
2037 if (should_process) {
2038 // Stop spinning for safepoint.
2039 break;
2040 }
2041 for (int inner = 1; inner < inner_spin_count; inner++) {
2042 SpinPause();
2043 }
2044 }
2045
2046 if (fast_lock_try_enter(obj, lock_stack, current)) return true;
2047 }
2048 return false;
2049 }
2050
2051 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
2052 // When called with locking_thread != Thread::current() some mechanism must synchronize
2053 // the locking_thread with respect to the current thread. Currently only used when
2054 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
2055 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
2056
2057 assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
2058 JavaThread* current = JavaThread::current();
2059 VerifyThreadState vts(locking_thread, current);
2060
2061 if (obj->klass()->is_value_based()) {
2062 ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
2063 }
2064
2065 LockStack& lock_stack = locking_thread->lock_stack();
2066
2067 ObjectMonitor* monitor = nullptr;
2068 if (lock_stack.contains(obj())) {
2069 monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
2070 bool entered = monitor->enter_for(locking_thread);
2071 assert(entered, "recursive ObjectMonitor::enter_for must succeed");
2072 } else {
2073 do {
2074 // It is assumed that enter_for must enter on an object without contention.
2075 monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
2076 // But there may still be a race with deflation.
2077 } while (monitor == nullptr);
2078 }
2079
2080 assert(monitor != nullptr, "ObjectSynchronizer::enter_for must succeed");
2081 assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared");
2082 }
2083
2084 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
2085 assert(current == JavaThread::current(), "must be");
2086
2087 if (obj->klass()->is_value_based()) {
2088 ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
2089 }
2090
2091 CacheSetter cache_setter(current, lock);
2092
2093 // Used when deflation is observed. Progress here requires progress
2094 // from the deflator. After observing that the deflator is not
2095 // making progress (after two yields), switch to sleeping.
2096 SpinYield spin_yield(0, 2);
2097 bool observed_deflation = false;
2098
2099 LockStack& lock_stack = current->lock_stack();
2100
2101 if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
2102 // Recursively fast locked
2103 return;
2104 }
2105
2106 if (lock_stack.contains(obj())) {
2107 ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
2108 bool entered = monitor->enter(current);
2109 assert(entered, "recursive ObjectMonitor::enter must succeed");
2110 cache_setter.set_monitor(monitor);
2111 return;
2112 }
2113
2114 while (true) {
2115 // Fast-locking does not use the 'lock' argument.
2116 // Fast-lock spinning to avoid inflating for short critical sections.
2117 // The goal is to only inflate when the extra cost of using ObjectMonitors
2118 // is worth it.
2119 // If deflation has been observed we also spin while deflation is ongoing.
2120 if (fast_lock_try_enter(obj(), lock_stack, current)) {
2121 return;
2122 } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
2123 return;
2124 }
2125
2126 if (observed_deflation) {
2127 spin_yield.wait();
2128 }
2129
2130 ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
2131 if (monitor != nullptr) {
2132 cache_setter.set_monitor(monitor);
2133 return;
2134 }
2135
2136 // If inflate_and_enter returns nullptr it is because a deflated monitor
2137 // was encountered. Fallback to fast locking. The deflater is responsible
2138 // for clearing out the monitor and transitioning the markWord back to
2139 // fast locking.
2140 observed_deflation = true;
2141 }
2142 }
2143
2144 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
2145 assert(current == Thread::current(), "must be");
2146
2147 markWord mark = object->mark();
2148 assert(!mark.is_unlocked(), "must be");
2149
2150 LockStack& lock_stack = current->lock_stack();
2151 if (mark.is_fast_locked()) {
2152 if (lock_stack.try_recursive_exit(object)) {
2153 // This is a recursive exit which succeeded
2154 return;
2155 }
2156 if (lock_stack.is_recursive(object)) {
2157 // Must inflate recursive locks if try_recursive_exit fails
2158 // This happens for un-structured unlocks, could potentially
2159 // fix try_recursive_exit to handle these.
2160 inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
2161 }
2162 }
2163
2164 while (mark.is_fast_locked()) {
2165 markWord unlocked_mark = mark.set_unlocked();
2166 markWord old_mark = mark;
2167 mark = object->cas_set_mark(unlocked_mark, old_mark);
2168 if (old_mark == mark) {
2169 // CAS successful, remove from lock_stack
2170 size_t recursion = lock_stack.remove(object) - 1;
2171 assert(recursion == 0, "Should not have unlocked here");
2172 return;
2173 }
2174 }
2175
2176 assert(mark.has_monitor(), "must be");
2177 // The monitor exists
2178 ObjectMonitor* monitor;
2179 if (UseObjectMonitorTable) {
2180 monitor = read_caches(current, lock, object);
2181 if (monitor == nullptr) {
2182 monitor = get_monitor_from_table(current, object);
2183 }
2184 } else {
2185 monitor = ObjectSynchronizer::read_monitor(mark);
2186 }
2187 if (monitor->has_anonymous_owner()) {
2188 assert(current->lock_stack().contains(object), "current must have object on its lock stack");
2189 monitor->set_owner_from_anonymous(current);
2190 monitor->set_recursions(current->lock_stack().remove(object) - 1);
2191 }
2192
2193 monitor->exit(current);
2194 }
2195
2196 // ObjectSynchronizer::inflate_locked_or_imse is used to get an
2197 // inflated ObjectMonitor* from contexts which require that, such as
2198 // notify/wait and jni_exit. Fast locking keeps the invariant that it
2199 // only inflates if it is already locked by the current thread or the current
2200 // thread is in the process of entering. To maintain this invariant we need to
2201 // throw a java.lang.IllegalMonitorStateException before inflating if the
2202 // current thread is not the owner.
2203 ObjectMonitor* ObjectSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
2204 JavaThread* current = THREAD;
2205
2206 for (;;) {
2207 markWord mark = obj->mark_acquire();
2208 if (mark.is_unlocked()) {
2209 // No lock, IMSE.
2210 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
2211 "current thread is not owner", nullptr);
2212 }
2213
2214 if (mark.is_fast_locked()) {
2215 if (!current->lock_stack().contains(obj)) {
2216 // Fast locked by other thread, IMSE.
2217 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
2218 "current thread is not owner", nullptr);
2219 } else {
2220 // Current thread owns the lock, must inflate
2221 return inflate_fast_locked_object(obj, cause, current, current);
2222 }
2223 }
2224
2225 assert(mark.has_monitor(), "must be");
2226 ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
2227 if (monitor != nullptr) {
2228 if (monitor->has_anonymous_owner()) {
2229 LockStack& lock_stack = current->lock_stack();
2230 if (lock_stack.contains(obj)) {
2231 // Current thread owns the lock but someone else inflated it.
2232 // Fix owner and pop lock stack.
2233 monitor->set_owner_from_anonymous(current);
2234 monitor->set_recursions(lock_stack.remove(obj) - 1);
2235 } else {
2236 // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
2237 THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
2238 "current thread is not owner", nullptr);
2239 }
2240 }
2241 return monitor;
2242 }
2243 }
2244 }
2245
2246 ObjectMonitor* ObjectSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {
2247
2248 // The JavaThread* locking parameter requires that the locking_thread == JavaThread::current,
2249 // or is suspended throughout the call by some other mechanism.
2250 // Even with fast locking the thread might be nullptr when called from a non
2251 // JavaThread. (As may still be the case from FastHashCode). However it is only
2252 // important for the correctness of the fast locking algorithm that the thread
2253 // is set when called from ObjectSynchronizer::enter from the owning thread,
2254 // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
2255 EventJavaMonitorInflate event;
2256
2257 for (;;) {
2258 const markWord mark = object->mark_acquire();
2259
2260 // The mark can be in one of the following states:
2261 // * inflated - Just return if using stack-locking.
2262 // If using fast-locking and the ObjectMonitor owner
2263 // is anonymous and the locking_thread owns the
2264 // object lock, then we make the locking_thread
2265 // the ObjectMonitor owner and remove the lock from
2266 // the locking_thread's lock stack.
2267 // * fast-locked - Coerce it to inflated from fast-locked.
2268 // * unlocked - Aggressively inflate the object.
2269
2270 // CASE: inflated
2271 if (mark.has_monitor()) {
2272 ObjectMonitor* inf = mark.monitor();
2273 markWord dmw = inf->header();
2274 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2275 if (inf->has_anonymous_owner() &&
2276 locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
2277 inf->set_owner_from_anonymous(locking_thread);
2278 size_t removed = locking_thread->lock_stack().remove(object);
2279 inf->set_recursions(removed - 1);
2280 }
2281 return inf;
2282 }
2283
2284 // CASE: fast-locked
2285 // Could be fast-locked either by the locking_thread or by some other thread.
2286 //
2287 // Note that we allocate the ObjectMonitor speculatively, _before_
2288 // attempting to set the object's mark to the new ObjectMonitor. If
2289 // the locking_thread owns the monitor, then we set the ObjectMonitor's
2290 // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
2291 // to anonymous. If we lose the race to set the object's mark to the
2292 // new ObjectMonitor, then we just delete it and loop around again.
2293 //
2294 if (mark.is_fast_locked()) {
2295 ObjectMonitor* monitor = new ObjectMonitor(object);
2296 monitor->set_header(mark.set_unlocked());
2297 bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
2298 if (own) {
2299 // Owned by locking_thread.
2300 monitor->set_owner(locking_thread);
2301 } else {
2302 // Owned by somebody else.
2303 monitor->set_anonymous_owner();
2304 }
2305 markWord monitor_mark = markWord::encode(monitor);
2306 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
2307 if (old_mark == mark) {
2308 // Success! Return inflated monitor.
2309 if (own) {
2310 size_t removed = locking_thread->lock_stack().remove(object);
2311 monitor->set_recursions(removed - 1);
2312 }
2313 // Once the ObjectMonitor is configured and object is associated
2314 // with the ObjectMonitor, it is safe to allow async deflation:
2315 ObjectSynchronizer::_in_use_list.add(monitor);
2316
2317 log_inflate(current, object, cause);
2318 if (event.should_commit()) {
2319 post_monitor_inflate_event(&event, object, cause);
2320 }
2321 return monitor;
2322 } else {
2323 delete monitor;
2324 continue; // Interference -- just retry
2325 }
2326 }
2327
2328 // CASE: unlocked
2329 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2330 // If we know we're inflating for entry it's better to inflate by swinging a
2331 // pre-locked ObjectMonitor pointer into the object header. A successful
2332 // CAS inflates the object *and* confers ownership to the inflating thread.
2333 // In the current implementation we use a 2-step mechanism where we CAS()
2334 // to inflate and then CAS() again to try to swing _owner from null to current.
2335 // An inflateTry() method that we could call from enter() would be useful.
2336
2337 assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
2338 ObjectMonitor* m = new ObjectMonitor(object);
2339 // prepare m for installation - set monitor to initial state
2340 m->set_header(mark);
2341
2342 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2343 delete m;
2344 m = nullptr;
2345 continue;
2346 // interference - the markword changed - just retry.
2347 // The state-transitions are one-way, so there's no chance of
2348 // live-lock -- "Inflated" is an absorbing state.
2349 }
2350
2351 // Once the ObjectMonitor is configured and object is associated
2352 // with the ObjectMonitor, it is safe to allow async deflation:
2353 ObjectSynchronizer::_in_use_list.add(m);
2354
2355 log_inflate(current, object, cause);
2356 if (event.should_commit()) {
2357 post_monitor_inflate_event(&event, object, cause);
2358 }
2359 return m;
2360 }
2361 }
2362
2363 ObjectMonitor* ObjectSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
2364 VerifyThreadState vts(locking_thread, current);
2365 assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");
2366
2367 ObjectMonitor* monitor;
2368
2369 if (!UseObjectMonitorTable) {
2370 return inflate_into_object_header(object, cause, locking_thread, current);
2371 }
2372
2373 // Inflating requires a hash code
2374 ObjectSynchronizer::FastHashCode(current, object);
2375
2376 markWord mark = object->mark_acquire();
2377 assert(!mark.is_unlocked(), "Cannot be unlocked");
2378
2379 for (;;) {
2380 // Fetch the monitor from the table
2381 monitor = get_or_insert_monitor(object, current, cause);
2382
2383 // ObjectMonitors are always inserted as anonymously owned, this thread is
2384 // the current holder of the monitor. So unless the entry is stale and
2385 // contains a deflating monitor it must be anonymously owned.
2386 if (monitor->has_anonymous_owner()) {
2387 // The monitor must be anonymously owned if it was added
2388 assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
2389 // New fresh monitor
2390 break;
2391 }
2392
2393 // If the monitor was not anonymously owned then we got a deflating monitor
2394 // from the table. We need to let the deflator make progress and remove this
2395 // entry before we are allowed to add a new one.
2396 os::naked_yield();
2397 assert(monitor->is_being_async_deflated(), "Should be the reason");
2398 }
2399
2400 // Set the mark word; loop to handle concurrent updates to other parts of the mark word
2401 while (mark.is_fast_locked()) {
2402 mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2403 }
2404
2405 // Indicate that the monitor now has a known owner
2406 monitor->set_owner_from_anonymous(locking_thread);
2407
2408 // Remove the entry from the thread's lock stack
2409 monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);
2410
2411 if (locking_thread == current) {
2412 // Only change the thread local state of the current thread.
2413 locking_thread->om_set_monitor_cache(monitor);
2414 }
2415
2416 return monitor;
2417 }
2418
2419 ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
2420 VerifyThreadState vts(locking_thread, current);
2421
2422 // Note: In some paths (deoptimization) the 'current' thread inflates and
2423 // enters the lock on behalf of the 'locking_thread' thread.
2424
2425 ObjectMonitor* monitor = nullptr;
2426
2427 if (!UseObjectMonitorTable) {
2428 // Do the old inflate and enter.
2429 monitor = inflate_into_object_header(object, cause, locking_thread, current);
2430
2431 bool entered;
2432 if (locking_thread == current) {
2433 entered = monitor->enter(locking_thread);
2434 } else {
2435 entered = monitor->enter_for(locking_thread);
2436 }
2437
2438 // enter returns false for deflation found.
2439 return entered ? monitor : nullptr;
2440 }
2441
2442 NoSafepointVerifier nsv;
2443
2444 // Try to get the monitor from the thread-local cache.
2445 // There's no need to use the cache if we are locking
2446 // on behalf of another thread.
2447 if (current == locking_thread) {
2448 monitor = read_caches(current, lock, object);
2449 }
2450
2451 // Get or create the monitor
2452 if (monitor == nullptr) {
2453 // Lightweight monitors require that hash codes are installed first
2454 ObjectSynchronizer::FastHashCode(locking_thread, object);
2455 monitor = get_or_insert_monitor(object, current, cause);
2456 }
2457
2458 if (monitor->try_enter(locking_thread)) {
2459 return monitor;
2460 }
2461
2462 // Holds is_being_async_deflated() stable throughout this function.
2463 ObjectMonitorContentionMark contention_mark(monitor);
2464
2465 /// First handle the case where the monitor from the table is deflated
2466 if (monitor->is_being_async_deflated()) {
2467 // The MonitorDeflation thread is deflating the monitor. The locking thread
2468 // must spin until further progress has been made.
2469
2470 // Clear the BasicLock cache as it may contain this monitor.
2471 lock->clear_object_monitor_cache();
2472
2473 const markWord mark = object->mark_acquire();
2474
2475 if (mark.has_monitor()) {
2476 // Waiting on the deflation thread to remove the deflated monitor from the table.
2477 os::naked_yield();
2478
2479 } else if (mark.is_fast_locked()) {
2480 // Some other thread managed to fast-lock the lock, or this is a
2481 // recursive lock from the same thread; yield for the deflation
2482 // thread to remove the deflated monitor from the table.
2483 os::naked_yield();
2484
2485 } else {
2486 assert(mark.is_unlocked(), "Implied");
2487 // Retry immediately
2488 }
2489
2490 // Retry
2491 return nullptr;
2492 }
2493
2494 for (;;) {
2495 const markWord mark = object->mark_acquire();
2496 // The mark can be in one of the following states:
2497 // * inflated - If the ObjectMonitor owner is anonymous
2498 // and the locking_thread owns the object
2499 // lock, then we make the locking_thread
2500 // the ObjectMonitor owner and remove the
2501 // lock from the locking_thread's lock stack.
2502 // * fast-locked - Coerce it to inflated from fast-locked.
2503 // * neutral - Inflate the object. Successful CAS is locked
2504
2505 // CASE: inflated
2506 if (mark.has_monitor()) {
2507 LockStack& lock_stack = locking_thread->lock_stack();
2508 if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
2509 // The lock is fast-locked by the locking thread,
2510 // convert it to a held monitor with a known owner.
2511 monitor->set_owner_from_anonymous(locking_thread);
2512 monitor->set_recursions(lock_stack.remove(object) - 1);
2513 }
2514
2515 break; // Success
2516 }
2517
2518 // CASE: fast-locked
2519 // Could be fast-locked either by locking_thread or by some other thread.
2520 //
2521 if (mark.is_fast_locked()) {
2522 markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2523 if (old_mark != mark) {
2524 // CAS failed
2525 continue;
2526 }
2527
2528 // Success! Return inflated monitor.
2529 LockStack& lock_stack = locking_thread->lock_stack();
2530 if (lock_stack.contains(object)) {
2531 // The lock is fast-locked by the locking thread,
2532 // convert it to a held monitor with a known owner.
2533 monitor->set_owner_from_anonymous(locking_thread);
2534 monitor->set_recursions(lock_stack.remove(object) - 1);
2535 }
2536
2537 break; // Success
2538 }
2539
2540 // CASE: neutral (unlocked)
2541
2542 // Catch if the object's header is not neutral (not locked and
2543 // not marked is what we care about here).
2544 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2545 markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
2546 if (old_mark != mark) {
2547 // CAS failed
2548 continue;
2549 }
2550
2551 // Transitioned from unlocked to monitor means locking_thread owns the lock.
2552 monitor->set_owner_from_anonymous(locking_thread);
2553
2554 return monitor;
2555 }
2556
2557 if (current == locking_thread) {
2558 // One round of spinning
2559 if (monitor->spin_enter(locking_thread)) {
2560 return monitor;
2561 }
2562
2563 // Monitor is contended, take the time before entering to fix the lock stack.
2564 LockStackInflateContendedLocks().inflate(current);
2565 }
2566
2567 // enter can block for safepoints; clear the unhandled object oop
2568 PauseNoSafepointVerifier pnsv(&nsv);
2569 object = nullptr;
2570
2571 if (current == locking_thread) {
2572 monitor->enter_with_contention_mark(locking_thread, contention_mark);
2573 } else {
2574 monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
2575 }
2576
2577 return monitor;
2578 }
2579
2580 void ObjectSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
2581 if (obj != nullptr) {
2582 deflate_mark_word(obj);
2583 }
2584 bool removed = remove_monitor(current, monitor, obj);
2585 if (obj != nullptr) {
2586 assert(removed, "Should have removed the entry if obj was alive");
2587 }
2588 }
2589
2590 ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
2591 assert(UseObjectMonitorTable, "must be");
2592 return ObjectMonitorTable::monitor_get(current, obj);
2593 }
2594
2595 bool ObjectSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
2596 assert(UseObjectMonitorTable, "must be");
2597 return ObjectMonitorTable::contains_monitor(current, monitor);
2598 }
2599
2600 ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
2601 return mark.monitor();
2602 }
2603
2604 ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) {
2605 return ObjectSynchronizer::read_monitor(current, obj, obj->mark());
2606 }
2607
2608 ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
2609 if (!UseObjectMonitorTable) {
2610 return read_monitor(mark);
2611 } else {
2612 return ObjectSynchronizer::get_monitor_from_table(current, obj);
2613 }
2614 }
2615
2616 bool ObjectSynchronizer::quick_enter_internal(oop obj, BasicLock* lock, JavaThread* current) {
2617 assert(current->thread_state() == _thread_in_Java, "must be");
2618 assert(obj != nullptr, "must be");
2619 NoSafepointVerifier nsv;
2620
2621 LockStack& lock_stack = current->lock_stack();
2622 if (lock_stack.is_full()) {
2623 // Always go into runtime if the lock stack is full.
2624 return false;
2625 }
2626
2627 const markWord mark = obj->mark();
2628
2629 #ifndef _LP64
2630 // Only for 32bit which has limited support for fast locking outside the runtime.
2631 if (lock_stack.try_recursive_enter(obj)) {
2632 // Recursive lock successful.
2633 return true;
2634 }
2635
2636 if (mark.is_unlocked()) {
2637 markWord locked_mark = mark.set_fast_locked();
2638 if (obj->cas_set_mark(locked_mark, mark) == mark) {
2639 // Successfully fast-locked, push object to lock-stack and return.
2640 lock_stack.push(obj);
2641 return true;
2642 }
2643 }
2644 #endif
2645
2646 if (mark.has_monitor()) {
2647 ObjectMonitor* monitor;
2648 if (UseObjectMonitorTable) {
2649 monitor = read_caches(current, lock, obj);
2650 } else {
2651 monitor = ObjectSynchronizer::read_monitor(mark);
2652 }
2653
2654 if (monitor == nullptr) {
2655 // Take the slow-path on a cache miss.
2656 return false;
2657 }
2658
2659 if (UseObjectMonitorTable) {
2660 // Set the monitor regardless of success.
2661 // Either we successfully lock on the monitor, or we retry with the
2662 // monitor in the slow path. If the monitor gets deflated, it will be
2663 // cleared, either by the CacheSetter if we fast lock in enter or in
2664 // inflate_and_enter when we see that the monitor is deflated.
2665 lock->set_object_monitor_cache(monitor);
2666 }
2667
2668 if (monitor->spin_enter(current)) {
2669 return true;
2670 }
2671 }
2672
2673 // Slow-path.
2674 return false;
2675 }
2676
2677 bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
2678 assert(current->thread_state() == _thread_in_Java, "invariant");
2679 NoSafepointVerifier nsv;
2680 if (obj == nullptr) return false; // Need to throw NPE
2681
2682 if (obj->klass()->is_value_based()) {
2683 return false;
2684 }
2685
2686 return ObjectSynchronizer::quick_enter_internal(obj, lock, current);
2687 }