/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/fastHash.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"

class ObjectMonitorDeflationLogging;

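// Lock-free push of 'm' onto the head of the in-use list. The _count and
// _max counters are updated with relaxed atomics: they feed the deflation
// heuristics and logging, and do not need to be exact snapshots of the
// list contents at any instant.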
void MonitorList::add(ObjectMonitor* m) {
  ObjectMonitor* head;
  do {
    head = AtomicAccess::load(&_head);
    m->set_next_om(head);
  } while (AtomicAccess::cmpxchg(&_head, head, m) != head);

  size_t count = AtomicAccess::add(&_count, 1u, memory_order_relaxed);
  size_t old_max;
  do {
    old_max = AtomicAccess::load(&_max);
    if (count <= old_max) {
      break;
    }
  } while (AtomicAccess::cmpxchg(&_max, old_max, count, memory_order_relaxed) != old_max);
}

size_t MonitorList::count() const {
  return AtomicAccess::load(&_count);
}

size_t MonitorList::max() const {
  return AtomicAccess::load(&_max);
}

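// Helper for the monitor deflation thread: checks for pending
// safepoints/handshakes at convenient points during long deflation
// operations and blocks for them, logging the pause and resume.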
class ObjectMonitorDeflationSafepointer : public StackObj {
  JavaThread* const _current;
  ObjectMonitorDeflationLogging* const _log;

 public:
  ObjectMonitorDeflationSafepointer(JavaThread* current, ObjectMonitorDeflationLogging* log)
    : _current(current), _log(log) {}

  void block_for_safepoint(const char* op_name, const char* count_name, size_t counter);
};

// Walk the in-use list and unlink deflated ObjectMonitors.
// Returns the number of unlinked ObjectMonitors.
size_t MonitorList::unlink_deflated(size_t deflated_count,
                                    GrowableArray<ObjectMonitor*>* unlinked_list,
                                    ObjectMonitorDeflationSafepointer* safepointer) {
  size_t unlinked_count = 0;
  ObjectMonitor* prev = nullptr;
  ObjectMonitor* m = AtomicAccess::load_acquire(&_head);

  while (m != nullptr) {
    if (m->is_being_async_deflated()) {
      // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
      // modify the list once per batch. The batch starts at "m".
      size_t unlinked_batch = 0;
      ObjectMonitor* next = m;
      // Look for at most MonitorUnlinkBatch monitors, or the number of
      // deflated and not yet unlinked monitors, whichever comes first.
      assert(deflated_count >= unlinked_count, "Sanity: underflow");
      size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
      do {
        ObjectMonitor* next_next = next->next_om();
        unlinked_batch++;
        unlinked_list->append(next);
        next = next_next;
        if (unlinked_batch >= unlinked_batch_limit) {
          // Reached the max batch, so bail out of the gathering loop.
          break;
        }
        if (prev == nullptr && AtomicAccess::load(&_head) != m) {
          // Current batch used to be at head, but it is not at head anymore.
          // Bail out and figure out where we currently are. This avoids long
          // walks searching for new prev during unlink under heavy list inserts.
          break;
        }
      } while (next != nullptr && next->is_being_async_deflated());

      // Unlink the found batch.
      if (prev == nullptr) {
        // The current batch is the first batch, so there is a chance that it starts at head.
        // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
        ObjectMonitor* prev_head = AtomicAccess::cmpxchg(&_head, m, next);
        if (prev_head != m) {
          // Something must have updated the head. Figure out the actual prev for this batch.
          for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
            prev = n;
          }
          assert(prev != nullptr, "Should have found the prev for the current batch");
          prev->set_next_om(next);
        }
      } else {
        // The current batch is preceded by another batch. This guarantees the current batch
        // does not start at head. Unlink the entire current batch without updating the head.
        assert(AtomicAccess::load(&_head) != m, "Sanity");
        prev->set_next_om(next);
      }

      unlinked_count += unlinked_batch;
      if (unlinked_count >= deflated_count) {
        // Reached the max so bail out of the searching loop.
        // There should be no more deflated monitors left.
        break;
      }
      m = next;
    } else {
      prev = m;
      m = m->next_om();
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("unlinking", "unlinked_count", unlinked_count);
  }

#ifdef ASSERT
  // Invariant: the code above should unlink all deflated monitors.
  // The code that runs after this unlinking does not expect deflated monitors.
  // Notably, attempting to deflate an already deflated monitor would break.
  {
    ObjectMonitor* m = AtomicAccess::load_acquire(&_head);
    while (m != nullptr) {
      assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
      m = m->next_om();
    }
  }
#endif

  AtomicAccess::sub(&_count, unlinked_count);
  return unlinked_count;
}

MonitorList::Iterator MonitorList::iterator() const {
  return Iterator(AtomicAccess::load_acquire(&_head));
}

ObjectMonitor* MonitorList::Iterator::next() {
  ObjectMonitor* current = _current;
  _current = current->next_om();
  return current;
}

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See c2_MacroAssembler_x86.cpp
// fast_lock(...) for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = nullptr;                                                   \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = obj->klass()->name();                                \
  if (klassname != nullptr) {                                              \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
static int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

static constexpr size_t inflation_lock_count() {
  return 256;
}

// Static storage for an array of PlatformMutex.
alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];

static inline PlatformMutex* inflation_lock(size_t index) {
  return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
}

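// The PlatformMutex objects are constructed in place at VM startup;
// placement new into raw static storage avoids C++ static initialization
// and destruction ordering issues.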
void ObjectSynchronizer::initialize() {
  for (size_t i = 0; i < inflation_lock_count(); i++) {
    ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
  }
  // Start the ceiling with the estimate for one thread.
  set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);

  // Start the timer for deflations, so it does not trigger immediately.
  _last_async_deflation_time_ns = os::javaTimeNanos();

  ObjectSynchronizer::create_om_table();
}

MonitorList ObjectSynchronizer::_in_use_list;
// monitors_used_above_threshold() policy is as follows:
//
// The ratio of the current _in_use_list count to the ceiling is used
// to determine if we are above MonitorUsedDeflationThreshold and need
// to do an async monitor deflation cycle. The ceiling is increased by
// AvgMonitorsPerThreadEstimate when a thread is added to the system
// and is decreased by AvgMonitorsPerThreadEstimate when a thread is
// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread-count-derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
//
// Start the ceiling with the estimate for one thread in initialize()
// which is called after cmd line options are processed.
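//
// Example (assuming the default AvgMonitorsPerThreadEstimate of 1024 and
// MonitorUsedDeflationThreshold of 90): with 8 threads registered the
// ceiling is 8 * 1024 = 8192. With 7600 monitors in use, the usage is
// 7600 * 100 / 8192 = 92%, which is above the threshold, so
// monitors_used_above_threshold() reports true and an async deflation
// cycle is requested (subject to AsyncDeflationInterval).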
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;

// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
    // Degenerate notify
    // fast-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = read_monitor(current, obj, mark);
    if (mon == nullptr) {
      // Racing with inflation/deflation; go slow path
      return false;
    }
    assert(mon->object() == oop(obj), "invariant");
    if (!mon->has_owner(current)) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != nullptr) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we quickly notify them here and now, avoiding the slow-path.
      if (all) {
        mon->quick_notifyAll(current);
      } else {
        mon->quick_notify(current);
      }
    }
    return true;
  }

  // other IMS exception states take the slow-path
  return false;
}

// Handle notifications when synchronizing on value based classes
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
  frame last_frame = locking_thread->last_frame();
  bool bcp_was_adjusted = false;
  // Don't decrement bcp if it points to the frame's first instruction. This happens when
  // handle_sync_on_value_based_class() is called because of a synchronized method. There
  // is no actual monitorenter instruction in the byte code in this case.
  if (last_frame.is_interpreted_frame() &&
      (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
    // adjust bcp to point back to monitorenter so that we print the correct line numbers
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
    bcp_was_adjusted = true;
  }

  if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
    ResourceMark rm;
    stringStream ss;
    locking_thread->print_active_stack_on(&ss);
    char* base = (char*)strstr(ss.base(), "at");
    char* newline = (char*)strchr(ss.base(), '\n');
    if (newline != nullptr) {
      *newline = '\0';
    }
    fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
  } else {
    assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
    ResourceMark rm;
    Log(valuebasedclasses) vblog;

    vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
    if (locking_thread->has_last_Java_frame()) {
      LogStream info_stream(vblog.info());
      locking_thread->print_active_stack_on(&info_stream);
    } else {
      vblog.info("Cannot find the last Java frame");
    }

    EventSyncOnValueBasedClass event;
    if (event.should_commit()) {
      event.set_valueBasedClass(obj->klass());
      event.commit();
    }
  }

  if (bcp_was_adjusted) {
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
  }
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  // Top native frames in the stack will not be seen if we attempt
  // preemption, since we start walking from the last Java anchor.
  NoPreemptMark npm(current);

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    BasicLock lock;
    if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
      break;
    }
  }
  current->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
    _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
  assert(!_thread->preempting(), "");

  _thread->check_for_valid_safepoint_state();

  if (_obj() != nullptr) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);

    if (_thread->preempting()) {
      // If preemption was cancelled we acquired the monitor after freezing
      // the frames. Redoing the vm call later in thaw will require us to
      // release it since the call should look like the original one. We
      // do it in ~ObjectLocker to reduce the window of time we hold the
      // monitor since we can't do anything useful with it now, and would
      // otherwise just force other vthreads to preempt in case they try
      // to acquire this monitor.
      _skip_exit = !_thread->preemption_cancelled();
      ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
      _thread->set_pending_preempted_exception();
    }
  }
}

ObjectLocker::~ObjectLocker() {
  if (_obj() != nullptr && !_skip_exit) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}

void ObjectLocker::wait_uninterruptibly(TRAPS) {
  ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
  if (_thread->preempting()) {
    _skip_exit = true;
    ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
    _thread->set_pending_preempted_exception();
  }
}

// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()

int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD);  // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  assert(millis >= 0, "timeout value is negative");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
  monitor->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
    // Not inflated so there can't be any waiters to notify.
    return;
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  monitor->notify(CHECK);
}

// NOTE: see the comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
    // Not inflated so there can't be any waiters to notify.
    return;
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  monitor->notifyAll(CHECK);
}

// -----------------------------------------------------------------------------
// Hash Code handling

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

intptr_t ObjectSynchronizer::get_next_hash(Thread* current, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;  // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else if (hashCode == 5) {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = current->_hashStateX;
    t ^= (t << 11);
    current->_hashStateX = current->_hashStateY;
    current->_hashStateY = current->_hashStateZ;
    current->_hashStateZ = current->_hashStateW;
    unsigned v = current->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    current->_hashStateW = v;
    value = v;
  } else {
    assert(UseCompactObjectHeaders, "Only with compact i-hash");
#ifdef _LP64
    uint64_t val = cast_from_oop<uint64_t>(obj);
    uint32_t hash = FastHash::get_hash32((uint32_t)val, (uint32_t)(val >> 32));
#else
    uint32_t val = cast_from_oop<uint32_t>(obj);
    uint32_t hash = FastHash::get_hash32(val, UCONST64(0xAAAAAAAA));
#endif
    value = static_cast<intptr_t>(hash);
  }

  value &= markWord::hash_mask;
  if (hashCode != 6 && value == 0) value = 0xBAD;
  assert(value != markWord::no_hash || hashCode == 6, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = obj->mark_acquire();
    if (UseCompactObjectHeaders) {
      if (mark.is_hashed()) {
        return get_hash(mark, obj);
      }
      hash = get_next_hash(current, obj);  // get a new hash
      markWord new_mark;
      if (mark.is_not_hashed_expanded()) {
        new_mark = mark.set_hashed_expanded();
        int offset = mark.klass()->hash_offset_in_bytes(obj, mark);
        obj->int_field_put(offset, (jint) hash);
      } else {
        new_mark = mark.set_hashed_not_expanded();
      }
      markWord old_mark = obj->cas_set_mark(new_mark, mark);
      if (old_mark == mark) {
        return hash;
      }
      // CAS failed, retry.
      continue;
    } else if (UseObjectMonitorTable || !mark.has_monitor()) {
      // If UseObjectMonitorTable is set the hash can simply be installed in the
      // object header, since the monitor isn't in the object header.
      hash = mark.hash();
      if (hash != 0) {                     // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {                  // if the hash was installed, return it
        return hash;
      }
      // CAS failed. Another thread may have installed the hash just
      // before our attempt, or the object may have been inflated. Retry
      // from the top so we re-read the mark and take the appropriate path.
      continue;
    } else {
      assert(!mark.is_unlocked() && !mark.is_fast_locked(), "invariant");
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        // It has a hash.

        // Separate load of dmw/header above from the loads in
        // is_being_async_deflated().

        // dmw/header and _contentions may get written by different threads.
        // Make sure to observe them in the same order when having several observers.
        OrderAccess::loadload_for_IRIW();

        if (monitor->is_being_async_deflated()) {
          // But we can't safely use the hash if we detect that async
          // deflation has occurred. So we attempt to restore the
          // header/dmw to the object's header so that we only retry
          // once if the deflater thread happens to be slow.
          monitor->install_displaced_markword_in_object(obj);
          continue;
        }
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    }

    // NOTE: an async deflation can race after we get the monitor and
    // before we can update the ObjectMonitor's header with the hash
    // value below.
    assert(mark.has_monitor(), "must be");
    monitor = mark.monitor();

    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                       // if it does not have a hash
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = AtomicAccess::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}

uint32_t ObjectSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
  assert(UseCompactObjectHeaders, "Only with compact i-hash");
  // assert(mark.is_neutral() || mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
  assert(mark.is_hashed(), "only from hashed or copied object");
  if (mark.is_hashed_expanded()) {
    return obj->int_field(klass->hash_offset_in_bytes(obj, mark));
  } else {
    assert(mark.is_hashed_not_expanded(), "must be hashed");
    assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
    // Already marked as hashed, but not yet copied. Recompute hash and return it.
    return ObjectSynchronizer::get_next_hash(nullptr, obj);  // recompute hash
  }
}

uint32_t ObjectSynchronizer::get_hash(markWord mark, oop obj) {
  return get_hash(mark, obj, mark.klass());
}

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = obj->mark_acquire();

  if (mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

  while (mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      return monitor->is_entered(current) != 0;
    }
    // Racing with inflation/deflation, retry
    mark = obj->mark_acquire();

    if (mark.is_fast_locked()) {
      // Some other thread fast_locked, current could not have held the lock
      return false;
    }
  }

  // Unlocked case, header in place
  assert(mark.is_unlocked(), "sanity check");
  return false;
}

JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList* t_list, Handle h_obj) {
  oop obj = h_obj();
  markWord mark = obj->mark_acquire();

  if (mark.is_fast_locked()) {
    // fast-locked so get owner from the object.
    // owning_thread_from_object() may also return null here:
    return Threads::owning_thread_from_object(t_list, h_obj());
  }

  while (mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark);
    if (monitor != nullptr) {
      return Threads::owning_thread_from_monitor(t_list, monitor);
    }
    // Racing with inflation/deflation, retry
    mark = obj->mark_acquire();

    if (mark.is_fast_locked()) {
      // Some other thread fast_locked
      return Threads::owning_thread_from_object(t_list, h_obj());
    }
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_unlocked(), "sanity check");

  return nullptr;
}

// Visitors ...

// Iterate over all ObjectMonitors.
template <typename Function>
void ObjectSynchronizer::monitors_iterate(Function function) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* monitor = iter.next();
    function(monitor);
  }
}

// Iterate ObjectMonitors owned by any thread and where the owner `filter`
// returns true.
template <typename OwnerFilter>
void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
  monitors_iterate([&](ObjectMonitor* monitor) {
    // This function is only called at a safepoint or when the
    // target thread is suspended or when the target thread is
    // operating on itself. The current closures in use today are
    // only interested in an owned ObjectMonitor and ownership
    // cannot be dropped under the calling contexts so the
    // ObjectMonitor cannot be async deflated.
    if (monitor->has_owner() && filter(monitor)) {
      assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");

      closure->do_monitor(monitor);
    }
  });
}

// Iterate ObjectMonitors where the owner == thread; this does NOT include
// ObjectMonitors where owner is set to a stack-lock address in thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
  int64_t key = ObjectMonitor::owner_id_from(thread);
  auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
  return owned_monitors_iterate_filtered(closure, thread_filter);
}

void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, oop vthread) {
  int64_t key = ObjectMonitor::owner_id_from(vthread);
  auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
  return owned_monitors_iterate_filtered(closure, thread_filter);
}

// Iterate ObjectMonitors owned by any thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
  auto all_filter = [&](ObjectMonitor* monitor) { return true; };
  return owned_monitors_iterate_filtered(closure, all_filter);
}

static bool monitors_used_above_threshold(MonitorList* list) {
  if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
    return false;
  }
  size_t monitors_used = list->count();
  if (monitors_used == 0) {  // empty list is easy
    return false;
  }
  size_t old_ceiling = ObjectSynchronizer::in_use_list_ceiling();
  // Make sure that we use a ceiling value that is not lower than
  // previous, not lower than the recorded max used by the system, and
  // not lower than the current number of monitors in use (which can
  // race ahead of max). The result is guaranteed > 0.
  size_t ceiling = MAX3(old_ceiling, list->max(), monitors_used);

  // Check if our monitor usage is above the threshold:
  size_t monitor_usage = (monitors_used * 100LL) / ceiling;
  if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
    // Deflate monitors if over the threshold percentage, unless no
    // progress on previous deflations.
    bool is_above_threshold = true;

    // Check if it's time to adjust the in_use_list_ceiling up, due
    // to too many async deflation attempts without any progress.
    if (NoAsyncDeflationProgressMax != 0 &&
        _no_progress_cnt >= NoAsyncDeflationProgressMax) {
      double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
      size_t delta = (size_t)(ceiling * remainder) + 1;
      size_t new_ceiling = (ceiling > SIZE_MAX - delta)
                           ? SIZE_MAX           // Overflow, let's clamp new_ceiling.
                           : ceiling + delta;

      ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
      log_info(monitorinflation)("Too many deflations without progress; "
                                 "bumping in_use_list_ceiling from %zu"
                                 " to %zu", old_ceiling, new_ceiling);
      _no_progress_cnt = 0;
      ceiling = new_ceiling;

      // Check if our monitor usage is still above the threshold:
      monitor_usage = (monitors_used * 100LL) / ceiling;
      is_above_threshold = int(monitor_usage) > MonitorUsedDeflationThreshold;
    }
    log_info(monitorinflation)("monitors_used=%zu, ceiling=%zu"
                               ", monitor_usage=%zu, threshold=%d",
                               monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
    return is_above_threshold;
  }

  return false;
}

size_t ObjectSynchronizer::in_use_list_count() {
  return _in_use_list.count();
}

size_t ObjectSynchronizer::in_use_list_max() {
  return _in_use_list.max();
}

size_t ObjectSynchronizer::in_use_list_ceiling() {
  return _in_use_list_ceiling;
}

void ObjectSynchronizer::dec_in_use_list_ceiling() {
  AtomicAccess::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}

void ObjectSynchronizer::inc_in_use_list_ceiling() {
  AtomicAccess::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}

void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
  _in_use_list_ceiling = new_value;
}

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (is_async_deflation_requested()) {
    // Async deflation request.
    log_info(monitorinflation)("Async deflation needed: explicit request");
    return true;
  }

  jlong time_since_last = time_since_last_async_deflation_ms();

  if (AsyncDeflationInterval > 0 &&
      time_since_last > AsyncDeflationInterval &&
      monitors_used_above_threshold(&_in_use_list)) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the MonitorDeflationThread.
    log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
    return true;
  }

  if (GuaranteedAsyncDeflationInterval > 0 &&
      time_since_last > GuaranteedAsyncDeflationInterval) {
    // It's been longer than our specified guaranteed deflate interval.
    // We need to clean up the used monitors even if the threshold is
    // not reached, to keep the memory utilization at bay when many threads
    // touched many monitors.
    log_info(monitorinflation)("Async deflation needed: time since last deflation (" JLONG_FORMAT " ms) "
                               "is greater than guaranteed interval (%zd ms)",
                               time_since_last, GuaranteedAsyncDeflationInterval);

    // If this deflation has no progress, then it should not affect the no-progress
    // tracking, otherwise threshold heuristics would think it was triggered, experienced
    // no progress, and needs to backoff more aggressively. In this "no progress" case,
    // the generic code would bump the no-progress counter, and we compensate for that
    // by telling it to skip the update.
    //
    // If this deflation has progress, then it should let non-progress tracking
    // know about this, otherwise the threshold heuristics would kick in, potentially
    // experience no-progress due to aggressive cleanup by this deflation, and think
    // it is still in no-progress stride. In this "progress" case, the generic code would
    // zero the counter, and we allow it to happen.
    _no_progress_skip_increment = true;

    return true;
  }

  return false;
}

void ObjectSynchronizer::request_deflate_idle_monitors() {
  MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
  set_is_async_deflation_requested(true);
  ml.notify_all();
}

bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
  JavaThread* current = JavaThread::current();
  bool ret_code = false;

  jlong last_time = last_async_deflation_time_ns();

  request_deflate_idle_monitors();

  const int N_CHECKS = 5;
  for (int i = 0; i < N_CHECKS; i++) {  // sleep for at most 5 seconds
    if (last_async_deflation_time_ns() > last_time) {
      log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
      ret_code = true;
      break;
    }
    {
      // JavaThread has to honor the blocking protocol.
      ThreadBlockInVM tbivm(current);
      os::naked_short_sleep(999);  // sleep for almost 1 second
    }
  }
  if (!ret_code) {
    log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
  }

  return ret_code;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
}

// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  size_t deflated_count = 0;
  Thread* current = Thread::current();

  while (iter.has_next()) {
    if (deflated_count >= (size_t)MonitorDeflationMax) {
      break;
    }
    ObjectMonitor* mid = iter.next();
    if (mid->deflate_monitor(current)) {
      deflated_count++;
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
  }

  return deflated_count;
}

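// Handshake closure that clears each JavaThread's ObjectMonitor cache.
// This runs after deflated monitors have been unlinked from the in-use
// list and before they are deleted, so no thread can retain a cached
// pointer to a monitor that is about to be freed.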
class DeflationHandshakeClosure : public HandshakeClosure {
 public:
  DeflationHandshakeClosure() : HandshakeClosure("DeflationHandshakeClosure") {}

  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("DeflationHandshakeClosure::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
    if (thread->is_Java_thread()) {
      // Clear OM cache
      JavaThread* jt = JavaThread::cast(thread);
      jt->om_clear_monitor_cache();
    }
  }
};

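// An empty VM operation that acts as a rendezvous with concurrent GC
// threads: cycling them through safepoint_synchronize_begin()/end()
// ensures no GC thread is still reading through a mark-word to an
// ObjectMonitor that is about to be deleted.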
class VM_RendezvousGCThreads : public VM_Operation {
 public:
  bool evaluate_at_safepoint() const override { return false; }
  VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
  void doit() override {
    Universe::heap()->safepoint_synchronize_begin();
    Universe::heap()->safepoint_synchronize_end();
  }
};

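// Delete the unlinked ObjectMonitors, honoring safepoint/handshake
// requests between deletions. Periodic native heap trimming is paused
// for the duration of the bulk free.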
static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
                              ObjectMonitorDeflationSafepointer* safepointer) {
  NativeHeapTrimmer::SuspendMark sm("monitor deletion");
  size_t deleted_count = 0;
  for (ObjectMonitor* monitor: *delete_list) {
    delete monitor;
    deleted_count++;
    // A JavaThread must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("deletion", "deleted_count", deleted_count);
  }
  return deleted_count;
}

class ObjectMonitorDeflationLogging : public StackObj {
  LogStreamHandle(Debug, monitorinflation) _debug;
  LogStreamHandle(Info, monitorinflation)  _info;
  LogStream*                               _stream;
  elapsedTimer                             _timer;

  size_t ceiling() const { return ObjectSynchronizer::in_use_list_ceiling(); }
  size_t count() const   { return ObjectSynchronizer::in_use_list_count(); }
  size_t max() const     { return ObjectSynchronizer::in_use_list_max(); }

 public:
  ObjectMonitorDeflationLogging()
    : _debug(), _info(), _stream(nullptr) {
    if (_debug.is_enabled()) {
      _stream = &_debug;
    } else if (_info.is_enabled()) {
      _stream = &_info;
    }
  }

  void begin() {
    if (_stream != nullptr) {
      _stream->print_cr("begin deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  void before_handshake(size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("before handshaking: unlinked_count=%zu"
                        ", in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        unlinked_count, ceiling(), count(), max());
    }
  }

  void after_handshake() {
    if (_stream != nullptr) {
      _stream->print_cr("after handshaking: in_use_list stats: ceiling="
                        "%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  void end(size_t deflated_count, size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      if (deflated_count != 0 || unlinked_count != 0 || _debug.is_enabled()) {
        _stream->print_cr("deflated_count=%zu, {unlinked,deleted}_count=%zu monitors in %3.7f secs",
                          deflated_count, unlinked_count, _timer.seconds());
      }
      _stream->print_cr("end deflating: in_use_list stats: ceiling=%zu, count=%zu, max=%zu",
                        ceiling(), count(), max());
    }
  }

  void before_block_for_safepoint(const char* op_name, const char* cnt_name, size_t cnt) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("pausing %s: %s=%zu, in_use_list stats: ceiling="
                        "%zu, count=%zu, max=%zu",
                        op_name, cnt_name, cnt, ceiling(), count(), max());
    }
  }

  void after_block_for_safepoint(const char* op_name) {
    if (_stream != nullptr) {
      _stream->print_cr("resuming %s: in_use_list stats: ceiling=%zu"
                        ", count=%zu, max=%zu", op_name,
                        ceiling(), count(), max());
      _timer.start();
    }
  }
};

void ObjectMonitorDeflationSafepointer::block_for_safepoint(const char* op_name, const char* count_name, size_t counter) {
  if (!SafepointMechanism::should_process(_current)) {
    return;
  }

  // A safepoint/handshake has started.
  _log->before_block_for_safepoint(op_name, count_name, counter);

  {
    // Honor block request.
    ThreadBlockInVM tbivm(_current);
  }

  _log->after_block_for_safepoint(op_name);
}

// This function is called by the MonitorDeflationThread to deflate
// ObjectMonitors.
size_t ObjectSynchronizer::deflate_idle_monitors() {
  JavaThread* current = JavaThread::current();
  assert(current->is_monitor_deflation_thread(), "The only monitor deflater");

  // The async deflation request has been processed.
  _last_async_deflation_time_ns = os::javaTimeNanos();
  set_is_async_deflation_requested(false);

  ObjectMonitorDeflationLogging log;
  ObjectMonitorDeflationSafepointer safepointer(current, &log);

  log.begin();

  // Deflate some idle ObjectMonitors.
  size_t deflated_count = deflate_monitor_list(&safepointer);

  // Unlink the deflated ObjectMonitors from the in-use list.
  size_t unlinked_count = 0;
  size_t deleted_count = 0;
  if (deflated_count > 0) {
    ResourceMark rm(current);
    GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
    unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);

#ifdef ASSERT
    if (UseObjectMonitorTable) {
      for (ObjectMonitor* monitor : delete_list) {
        assert(!ObjectSynchronizer::contains_monitor(current, monitor), "Should have been removed");
      }
    }
#endif

    log.before_handshake(unlinked_count);

    // A JavaThread needs to handshake in order to safely free the
    // ObjectMonitors that were deflated in this cycle.
    DeflationHandshakeClosure dhc;
    Handshake::execute(&dhc);
    // Also, we sync and desync GC threads around the handshake, so that they can
    // safely read the mark-word and look-through to the object-monitor, without
    // being afraid that the object-monitor is going away.
    VM_RendezvousGCThreads sync_gc;
    VMThread::execute(&sync_gc);

    log.after_handshake();

    // After the handshake, safely free the ObjectMonitors that were
    // deflated and unlinked in this cycle.

    // Delete the unlinked ObjectMonitors.
    deleted_count = delete_monitors(&delete_list, &safepointer);
    assert(unlinked_count == deleted_count, "must be");
  }

  log.end(deflated_count, unlinked_count);

  GVars.stw_random = os::random();

  if (deflated_count != 0) {
    _no_progress_cnt = 0;
  } else if (_no_progress_skip_increment) {
    _no_progress_skip_increment = false;
  } else {
    _no_progress_cnt++;
  }

  return deflated_count;
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
class ReleaseJavaMonitorsClosure : public MonitorClosure {
 private:
  JavaThread* _thread;

 public:
  ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    mid->complete_exit(_thread);
  }
};

// Release all inflated monitors owned by current thread. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
  assert(current == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(current);
  ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
  assert(!current->has_pending_exception(), "Should not be possible");
  current->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// Do the final audit and print of ObjectMonitor stats; must be done
// by the VMThread at VM exit time.
void ObjectSynchronizer::do_final_audit_and_print_stats() {
  assert(Thread::current()->is_VM_thread(), "sanity check");

  if (is_final_audit()) {  // Only do the audit once.
    return;
  }
  set_is_final_audit();
  log_info(monitorinflation)("Starting the final audit.");

  if (log_is_enabled(Info, monitorinflation)) {
    LogStreamHandle(Info, monitorinflation) ls;
    audit_and_print_stats(&ls, true /* on_exit */);
  }
}

// This function can be called by the MonitorDeflationThread or it can be called when
// we are trying to exit the VM. The list walker functions can run in parallel with
// the other list operations.
// Calls to this function can be added in various places as a debugging
// aid.
//
void ObjectSynchronizer::audit_and_print_stats(outputStream* ls, bool on_exit) {
  int error_cnt = 0;

  ls->print_cr("Checking in_use_list:");
  chk_in_use_list(ls, &error_cnt);

  if (error_cnt == 0) {
    ls->print_cr("No errors found in in_use_list checks.");
  } else {
    log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
  }

  // When exiting, only log the interesting entries at the Info level.
  // When called at intervals by the MonitorDeflationThread, log output
  // at the Trace level since there can be a lot of it.
  if (!on_exit && log_is_enabled(Trace, monitorinflation)) {
    LogStreamHandle(Trace, monitorinflation) ls_tr;
    log_in_use_monitor_details(&ls_tr, true /* log_all */);
  } else if (on_exit) {
    log_in_use_monitor_details(ls, false /* log_all */);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check the in_use_list; log the results of the checks.
void ObjectSynchronizer::chk_in_use_list(outputStream* out, int* error_cnt_p) {
  size_t l_in_use_count = _in_use_list.count();
  size_t l_in_use_max = _in_use_list.max();
  out->print_cr("count=%zu, max=%zu", l_in_use_count,
                l_in_use_max);

  size_t ck_in_use_count = 0;
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* mid = iter.next();
    chk_in_use_entry(mid, out, error_cnt_p);
    ck_in_use_count++;
  }

  if (l_in_use_count == ck_in_use_count) {
    out->print_cr("in_use_count=%zu equals ck_in_use_count=%zu",
                  l_in_use_count, ck_in_use_count);
  } else {
    out->print_cr("WARNING: in_use_count=%zu is not equal to "
                  "ck_in_use_count=%zu", l_in_use_count,
                  ck_in_use_count);
  }

  size_t ck_in_use_max = _in_use_list.max();
  if (l_in_use_max == ck_in_use_max) {
    out->print_cr("in_use_max=%zu equals ck_in_use_max=%zu",
                  l_in_use_max, ck_in_use_max);
  } else {
    out->print_cr("WARNING: in_use_max=%zu is not equal to "
                  "ck_in_use_max=%zu", l_in_use_max, ck_in_use_max);
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
                                          int* error_cnt_p) {
  if (n->owner_is_DEFLATER_MARKER()) {
    // This could happen when monitor deflation blocks for a safepoint.
    return;
  }

1455 if (n->metadata() == 0) {
1456 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
1457 "have non-null _metadata (header/hash) field.", p2i(n));
1458 *error_cnt_p = *error_cnt_p + 1;
1459 }
1460
1461 const oop obj = n->object_peek();
1462 if (obj == nullptr) {
1463 return;
1464 }
1465
1466 const markWord mark = obj->mark();
1467 if (!mark.has_monitor()) {
1468 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1469 "object does not think it has a monitor: obj="
1470 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
1471 p2i(obj), mark.value());
1472 *error_cnt_p = *error_cnt_p + 1;
1473 return;
1474 }
1475
1476 ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
1477 if (n != obj_mon) {
1478 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
1479 "object does not refer to the same monitor: obj="
1480 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
1481 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
1482 *error_cnt_p = *error_cnt_p + 1;
1483 }
1484 }
1485
1486 // Log details about ObjectMonitors on the in_use_list. The 'BHL'
1487 // flags indicate why the entry is in-use, 'object' and 'object type'
1488 // indicate the associated object and its type.
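// An illustrative example row (format only, not actual output), for a busy,
// hashed, owned monitor on a java.lang.Object instance:
//   0x00007f10d4013800 111 0x00000007d5c01df8 java.lang.Object (is_busy: ...)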
void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
  if (_in_use_list.count() > 0) {
    stringStream ss;
    out->print_cr("In-use monitor info%s:", log_all ? "" : " (eliding idle monitors)");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");

    auto is_interesting = [&](ObjectMonitor* monitor) {
      return log_all || monitor->has_owner() || monitor->is_busy();
    };

    monitors_iterate([&](ObjectMonitor* monitor) {
      if (is_interesting(monitor)) {
        const oop obj = monitor->object_peek();
        const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
                   monitor->is_busy(), hash != 0, monitor->has_owner(),
                   p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
        if (monitor->is_busy()) {
          out->print(" (%s)", monitor->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();
      }
    });
  }

  out->flush();
}

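// Returns the identity hash of obj, which must already be installed. With
// compact object headers the hash is read via ObjectSynchronizer::get_hash(),
// since it is not necessarily stored directly in the mark word; otherwise it
// is read from the mark word itself.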
static uintx objhash(oop obj) {
  if (UseCompactObjectHeaders) {
    uintx hash = ObjectSynchronizer::get_hash(obj->mark(), obj);
    assert(hash != 0, "should have a hash");
    return hash;
  } else {
    uintx hash = obj->mark().hash();
    assert(hash != 0, "should have a hash");
    return hash;
  }
}

// -----------------------------------------------------------------------------
// ConcurrentHashTable storing links from objects to ObjectMonitors
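//
// A sketch of the scheme implemented below: entries are keyed by the
// object's identity hash, which therefore must be installed before a
// monitor can be inserted, and each entry's value is the ObjectMonitor*
// for that object. The table is only used when UseObjectMonitorTable is
// enabled, in which case the mark word does not point at the monitor.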
class ObjectMonitorTable : AllStatic {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;

  static ConcurrentTable* _table;
  static volatile size_t _items_count;
  static size_t _table_size;
  static volatile bool _resize;

  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      return objhash(_obj);
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };

  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

   public:
    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };
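
  // The two lookup types above serve different callers: Lookup keys on the
  // object (via its identity hash) and is used to map an object to its
  // monitor, e.g. on enter; LookupMonitor keys on the monitor pointer itself
  // and is used to remove a specific monitor, e.g. during deflation. Only
  // LookupMonitor reports entries as dead, which lets bulk operations reclaim
  // entries whose underlying object has been collected.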

  static void inc_items_count() {
    AtomicAccess::inc(&_items_count, memory_order_relaxed);
  }

  static void dec_items_count() {
    AtomicAccess::dec(&_items_count, memory_order_relaxed);
  }

  static double get_load_factor() {
    size_t count = AtomicAccess::load(&_items_count);
    return (double)count / (double)_table_size;
  }

  static size_t table_size(Thread* current = Thread::current()) {
    return ((size_t)1) << _table->get_size_log2(current);
  }

  static size_t max_log_size() {
    // TODO[OMTable]: Evaluate the max size.
    // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
    // Using MaxHeapSize directly this early may be wrong, and there
    // are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

  static size_t initial_log_size() {
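    // Example (illustrative): with 8 hardware threads and the default
    // AvgMonitorsPerThreadEstimate of 1024, the estimate below is
    // log2(8) + log2(1024) = 3 + 10 = 13, i.e. 2^13 buckets, and the result
    // is clamped to [min_log_size(), max_log_size()].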
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }

  static size_t grow_hint() {
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

 public:
  static void create() {
    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
    _items_count = 0;
    _table_size = table_size();
    _resize = false;
  }

  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
             "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
             BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

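  // Sets the _resize hint and gives whoever is waiting on Service_lock
  // (presumably the machinery that eventually calls resize_table()) a
  // best-effort notification: if the lock is contended the notify is simply
  // skipped, since the _resize flag is still observed via should_resize().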
  static void try_notify_grow() {
    if (!_table->is_max_size_reached() && !AtomicAccess::load(&_resize)) {
      AtomicAccess::store(&_resize, true);
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }

  static bool should_shrink() {
    // Not implemented.
    return false;
  }

  static constexpr double GROW_LOAD_FACTOR = 0.75;

  static bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  static bool should_resize() {
    return should_grow() || should_shrink() || AtomicAccess::load(&_resize);
  }

  template<typename Task, typename... Args>
  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
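        // The empty ThreadBlockInVM scope below transiently transitions the
        // thread to a safepoint-safe state while the task is paused, giving
        // a pending safepoint a chance to proceed between task chunks.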
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  static bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  static bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  static bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && AtomicAccess::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    AtomicAccess::store(&_resize, false);

    return success;
  }

  static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
      ObjectMonitor* om = *entry;
      oop obj = om->object_peek();
      st->print("monitor=" PTR_FORMAT ", ", p2i(om));
      st->print("object=" PTR_FORMAT, p2i(obj));
      assert(objhash(obj) == (uintx)om->hash(), "hash must match");
      st->cr();
      return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;

ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
  ObjectMonitor* monitor = get_monitor_from_table(current, object);
  if (monitor != nullptr) {
    *inserted = false;
    return monitor;
  }

  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_anonymous_owner();

  // Try to insert the monitor. A racing thread may have installed one
  // first, in which case the existing monitor is returned instead.
  monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  const Klass* monitor_klass = obj->klass();
  if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
    return;
  }
  event->set_monitorClass(monitor_klass);
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* ObjectSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);

  if (inserted) {
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Add the hashcode to the monitor to match the object and put it in the hashtable.
ObjectMonitor* ObjectSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = objhash(obj);
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}

bool ObjectSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}

void ObjectSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

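  // Clear the monitor bits with a CAS loop: a concurrent update to another
  // part of the mark word just causes another iteration, and the loop exits
  // once the mark no longer encodes a monitor.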
  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void ObjectSynchronizer::create_om_table() {
  if (!UseObjectMonitorTable) {
    return;
  }
  ObjectMonitorTable::create();
}

bool ObjectSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return ObjectMonitorTable::should_resize();
}

bool ObjectSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return ObjectMonitorTable::resize(current);
}

class ObjectSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive entry: the lock stack holds recursive locks adjacently,
        // so this object has already been recorded.
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      ObjectSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};

void ObjectSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
}

class ObjectSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

 public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        // If the monitor is already in the BasicLock cache then it is most
        // likely in the thread cache, do not set it again to avoid reordering.
        if (_monitor != _lock->object_monitor_cache()) {
          _thread->om_set_monitor_cache(_monitor);
          _lock->set_object_monitor_cache(_monitor);
        }
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }

};
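
// Typical CacheSetter usage, as in enter() below: construct one on entry,
// call set_monitor() on the path that obtained an inflated monitor, and let
// the destructor publish the monitor to (or clear it from) the BasicLock and
// per-thread caches on every return path.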

// Reads first from the BasicLock cache then from the OMCache in the current thread.
// C2 fast-path may have put the monitor in the cache in the BasicLock.
inline static ObjectMonitor* read_caches(JavaThread* current, BasicLock* lock, oop object) {
  ObjectMonitor* monitor = lock->object_monitor_cache();
  if (monitor == nullptr) {
    monitor = current->om_get_from_monitor_cache(object);
  }
  return monitor;
}

class ObjectSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};

inline bool ObjectSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool ObjectSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Spins with exponential backoff, for a cumulative O(2^log_spin_limit) spins.
  const int log_spin_limit = os::is_MP() ? FastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;
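  // Illustrative schedule, assuming log_spin_limit == 4: successive rounds
  // spin 1, 2, 4 and 8 times, re-attempting the fast lock after each round
  // and checking for a pending safepoint at least every
  // 2^log_min_safepoint_check_interval spins.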

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once even when safepoint synchronizing.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  // When called with locking_thread != Thread::current() some mechanism must synchronize
  // the locking_thread with respect to the current thread. Currently only used when
  // deoptimizing and re-locking locks. See Deoptimization::relock_objects
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "must be cleared");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    do {
      // It is assumed that enter_for must enter on an object without contention.
      monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
      // But there may still be a race with deflation.
    } while (monitor == nullptr);
  }

  assert(monitor != nullptr, "ObjectSynchronizer::enter_for must succeed");
  assert(!UseObjectMonitorTable || lock->object_monitor_cache() == nullptr, "unused. already cleared");
}

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflater. After observing that the deflater is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), lock, ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for unstructured unlocks; try_recursive_exit could
      // potentially be extended to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor;
  if (UseObjectMonitorTable) {
    monitor = read_caches(current, lock, object);
    if (monitor == nullptr) {
      monitor = get_monitor_from_table(current, object);
    }
  } else {
    monitor = ObjectSynchronizer::read_monitor(mark);
  }
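  // If another thread inflated the lock while this thread held it fast-locked,
  // the monitor's owner is still anonymous. Claim ownership and fold the
  // lock-stack entries into the monitor's recursion count before exiting.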
  if (monitor->has_anonymous_owner()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

// ObjectSynchronizer::inflate_locked_or_imse is used to get an
// inflated ObjectMonitor* from contexts which require that, such as
// notify/wait and jni_exit. Fast locking keeps the invariant that it
// only inflates if it is already locked by the current thread or the current
// thread is in the process of entering. To maintain this invariant we need to
// throw a java.lang.IllegalMonitorStateException before inflating if the
// current thread is not the owner.
ObjectMonitor* ObjectSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->has_anonymous_owner()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* ObjectSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {

  // The JavaThread* locking parameter requires that locking_thread == JavaThread::current,
  // or that it is suspended throughout the call by some other mechanism.
  // Even with fast locking the thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However, it is only
  // important for the correctness of the fast locking algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // * inflated    - Just return if using stack-locking.
    //                 If using fast-locking and the ObjectMonitor owner
    //                 is anonymous and the locking_thread owns the
    //                 object lock, then we make the locking_thread
    //                 the ObjectMonitor owner and remove the lock from
    //                 the locking_thread's lock stack.
    // * fast-locked - Coerce it to inflated from fast-locked.
    // * unlocked    - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->has_anonymous_owner() &&
          locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(locking_thread);
        size_t removed = locking_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the locking_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the locking_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
      if (own) {
        // Owned by locking_thread.
        monitor->set_owner(locking_thread);
      } else {
        // Owned by somebody else.
        monitor->set_anonymous_owner();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = locking_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue; // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* ObjectSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned, and this
    // thread is the current holder of the lock. So unless the entry is stale
    // and contains a deflating monitor it must be anonymously owned.
    if (monitor->has_anonymous_owner()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflater make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

ObjectMonitor* ObjectSynchronizer::inflate_and_enter(oop object, BasicLock* lock, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

  NoSafepointVerifier nsv;

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = read_caches(current, lock, object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    // Lightweight monitors require that hash codes are installed first
    ObjectSynchronizer::FastHashCode(locking_thread, object);
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Keeps is_being_async_deflated() stable throughout this function by
  // holding a contention mark on the monitor.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    // Clear the BasicLock cache as it may contain this monitor.
    lock->clear_object_monitor_cache();

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // * inflated    - If the ObjectMonitor owner is anonymous
    //                 and the locking_thread owns the object
    //                 lock, then we make the locking_thread
    //                 the ObjectMonitor owner and remove the
    //                 lock from the locking_thread's lock stack.
    // * fast-locked - Coerce it to inflated from fast-locked.
    // * neutral     - Inflate the object; a successful CAS means the
    //                 locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch the case where the object's header is not neutral; not locked
    // and not marked is what we care about here.
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

void ObjectSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, monitor, obj);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* ObjectSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(current, obj);
}

bool ObjectSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::contains_monitor(current, monitor);
}

ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
  return mark.monitor();
}

ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj) {
  return ObjectSynchronizer::read_monitor(current, obj, obj->mark());
}

ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
  if (!UseObjectMonitorTable) {
    return read_monitor(mark);
  } else {
    return ObjectSynchronizer::get_monitor_from_table(current, obj);
  }
}

bool ObjectSynchronizer::quick_enter_internal(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32-bit, which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* monitor;
    if (UseObjectMonitorTable) {
      monitor = read_caches(current, lock, obj);
    } else {
      monitor = ObjectSynchronizer::read_monitor(mark);
    }

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (UseObjectMonitorTable) {
      // Set the monitor regardless of success.
      // Either we successfully lock on the monitor, or we retry with the
      // monitor in the slow path. If the monitor gets deflated, it will be
      // cleared, either by the CacheSetter if we fast lock in enter or in
      // inflate_and_enter when we see that the monitor is deflated.
      lock->set_object_monitor_cache(monitor);
    }

    if (monitor->spin_enter(current)) {
      return true;
    }
  }

  // Slow-path.
  return false;
}

bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false; // Need to throw NPE

  if (obj->klass()->is_value_based()) {
    return false;
  }

  return ObjectSynchronizer::quick_enter_internal(obj, lock, current);
}