/*
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"

class ObjectMonitorDeflationLogging;

void MonitorList::add(ObjectMonitor* m) {
  ObjectMonitor* head;
  do {
    head = Atomic::load(&_head);
    m->set_next_om(head);
  } while (Atomic::cmpxchg(&_head, head, m) != head);

  size_t count = Atomic::add(&_count, 1u);
  if (count > max()) {
    Atomic::inc(&_max);
  }
}

size_t MonitorList::count() const {
  return Atomic::load(&_count);
}

size_t MonitorList::max() const {
  return Atomic::load(&_max);
}

class ObjectMonitorDeflationSafepointer : public StackObj {
  JavaThread* const                    _current;
  ObjectMonitorDeflationLogging* const _log;

 public:
  ObjectMonitorDeflationSafepointer(JavaThread* current, ObjectMonitorDeflationLogging* log)
    : _current(current), _log(log) {}

  void block_for_safepoint(const char* op_name, const char* count_name,
                           size_t counter);
};

// Walk the in-use list and unlink deflated ObjectMonitors.
// Returns the number of unlinked ObjectMonitors.
size_t MonitorList::unlink_deflated(size_t deflated_count,
                                    GrowableArray<ObjectMonitor*>* unlinked_list,
                                    ObjectMonitorDeflationSafepointer* safepointer) {
  size_t unlinked_count = 0;
  ObjectMonitor* prev = nullptr;
  ObjectMonitor* m = Atomic::load_acquire(&_head);

  while (m != nullptr) {
    if (m->is_being_async_deflated()) {
      // Find next live ObjectMonitor. Batch up the unlinkable monitors, so we can
      // modify the list once per batch. The batch starts at "m".
      size_t unlinked_batch = 0;
      ObjectMonitor* next = m;
      // Look for at most MonitorUnlinkBatch monitors, or the number of
      // deflated and not unlinked monitors, whichever comes first.
      assert(deflated_count >= unlinked_count, "Sanity: underflow");
      size_t unlinked_batch_limit = MIN2<size_t>(deflated_count - unlinked_count, MonitorUnlinkBatch);
      do {
        ObjectMonitor* next_next = next->next_om();
        unlinked_batch++;
        unlinked_list->append(next);
        next = next_next;
        if (unlinked_batch >= unlinked_batch_limit) {
          // Reached the max batch, so bail out of the gathering loop.
          break;
        }
        if (prev == nullptr && Atomic::load(&_head) != m) {
          // Current batch used to be at head, but it is not at head anymore.
          // Bail out and figure out where we currently are. This avoids long
          // walks searching for new prev during unlink under heavy list inserts.
          break;
        }
      } while (next != nullptr && next->is_being_async_deflated());

      // Unlink the found batch.
      if (prev == nullptr) {
        // The current batch is the first batch, so there is a chance that it starts at head.
        // Optimistically assume no inserts happened, and try to unlink the entire batch from the head.
        ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, m, next);
        if (prev_head != m) {
          // Something must have updated the head. Figure out the actual prev for this batch.
          for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) {
            prev = n;
          }
          assert(prev != nullptr, "Should have found the prev for the current batch");
          prev->set_next_om(next);
        }
      } else {
        // The current batch is preceded by another batch. This guarantees the current batch
        // does not start at head. Unlink the entire current batch without updating the head.
        assert(Atomic::load(&_head) != m, "Sanity");
        prev->set_next_om(next);
      }

      unlinked_count += unlinked_batch;
      if (unlinked_count >= deflated_count) {
        // Reached the max so bail out of the searching loop.
        // There should be no more deflated monitors left.
        break;
      }
      m = next;
    } else {
      prev = m;
      m = m->next_om();
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("unlinking", "unlinked_count", unlinked_count);
  }

#ifdef ASSERT
  // Invariant: the code above should unlink all deflated monitors.
  // The code that runs after this unlinking does not expect deflated monitors.
  // Notably, attempting to deflate an already deflated monitor would break.
  {
    ObjectMonitor* m = Atomic::load_acquire(&_head);
    while (m != nullptr) {
      assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked");
      m = m->next_om();
    }
  }
#endif

  Atomic::sub(&_count, unlinked_count);
  return unlinked_count;
}

MonitorList::Iterator MonitorList::iterator() const {
  return Iterator(Atomic::load_acquire(&_head));
}

ObjectMonitor* MonitorList::Iterator::next() {
  ObjectMonitor* current = _current;
  _current = current->next_om();
  return current;
}

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See c2_MacroAssembler_x86.cpp
// fast_lock(...) for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                \
  char* bytes = nullptr;                                        \
  int len = 0;                                                  \
  jlong jtid = SharedRuntime::get_java_tid(thread);             \
  Symbol* klassname = obj->klass()->name();                     \
  if (klassname != nullptr) {                                   \
    bytes = (char*)klassname->bytes();                          \
    len = klassname->utf8_length();                             \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
  {                                                             \
    if (DTraceMonitorProbes) {                                  \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                 \
      HOTSPOT_MONITOR_WAIT(jtid,                                \
                           (uintptr_t)(monitor), bytes, len, (millis)); \
    }                                                           \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       \
  {                                                             \
    if (DTraceMonitorProbes) {                                  \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                 \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */  \
                                    (uintptr_t)(monitor), bytes, len); \
    }                                                           \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround for dtrace bug 6254741
static int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, JavaThread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

static constexpr size_t inflation_lock_count() {
  return 256;
}

// Static storage for an array of PlatformMutex.
alignas(PlatformMutex) static uint8_t _inflation_locks[inflation_lock_count()][sizeof(PlatformMutex)];

static inline PlatformMutex* inflation_lock(size_t index) {
  return reinterpret_cast<PlatformMutex*>(_inflation_locks[index]);
}

void ObjectSynchronizer::initialize() {
  for (size_t i = 0; i < inflation_lock_count(); i++) {
    ::new(static_cast<void*>(inflation_lock(i))) PlatformMutex();
  }
  // Start the ceiling with the estimate for one thread.
  set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);

  // Start the timer for deflations, so it does not trigger immediately.
  _last_async_deflation_time_ns = os::javaTimeNanos();
}

MonitorList ObjectSynchronizer::_in_use_list;
// monitors_used_above_threshold() policy is as follows:
//
// The ratio of the current _in_use_list count to the ceiling is used
// to determine if we are above MonitorUsedDeflationThreshold and need
// to do an async monitor deflation cycle. The ceiling is increased by
// AvgMonitorsPerThreadEstimate when a thread is added to the system
// and is decreased by AvgMonitorsPerThreadEstimate when a thread is
// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread-count-derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
//
// Start the ceiling with the estimate for one thread in initialize()
// which is called after cmd line options are processed.
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;

// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (LockingMode == LM_LIGHTWEIGHT) {
    if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
      // Degenerate notify
      // fast-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Degenerate notify
      // stack-locked by caller so by definition the implied waitset is empty.
      return true;
    }
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == oop(obj), "invariant");
    if (!mon->is_owner(current)) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != nullptr) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, current);
      }
      int free_count = 0;
      do {
        mon->INotify(current);
        ++free_count;
      } while (mon->first_waiter() != nullptr && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
                                     BasicLock * lock) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;       // Need to throw NPE

  if (obj->klass()->is_value_based()) {
    return false;
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    LockStack& lock_stack = current->lock_stack();
    if (lock_stack.is_full()) {
      // Always go into runtime if the lock stack is full.
      return false;
    }
    if (lock_stack.try_recursive_enter(obj)) {
      // Recursive lock successful.
      NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
      return true;
    }
  }

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      return false;
    }

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (m->is_owner(current)) {
      m->_recursions++;
      NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
      return true;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-null so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers.
      // For performance reasons, stack walkers generally first check for
      // stack-locking in the object's header, the second check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markWord::unused_mark());
    }

    if (!m->has_owner() && m->try_set_owner_from(nullptr, current) == nullptr) {
      assert(m->_recursions == 0, "invariant");
      NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// Handle notifications when synchronizing on value based classes
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
  frame last_frame = locking_thread->last_frame();
  bool bcp_was_adjusted = false;
  // Don't decrement bcp if it points to the frame's first instruction. This happens when
  // handle_sync_on_value_based_class() is called because of a synchronized method. There
  // is no actual monitorenter instruction in the byte code in this case.
  if (last_frame.is_interpreted_frame() &&
      (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
    // adjust bcp to point back to monitorenter so that we print the correct line numbers
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
    bcp_was_adjusted = true;
  }

  if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
    ResourceMark rm;
    stringStream ss;
    locking_thread->print_active_stack_on(&ss);
    char* base = (char*)strstr(ss.base(), "at");
    char* newline = (char*)strchr(ss.base(), '\n');
    if (newline != nullptr) {
      *newline = '\0';
    }
    fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
  } else {
    assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
    ResourceMark rm;
    Log(valuebasedclasses) vblog;

    vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
    if (locking_thread->has_last_Java_frame()) {
      LogStream info_stream(vblog.info());
      locking_thread->print_active_stack_on(&info_stream);
    } else {
      vblog.info("Cannot find the last Java frame");
    }

    EventSyncOnValueBasedClass event;
    if (event.should_commit()) {
      event.set_valueBasedClass(obj->klass());
      event.commit();
    }
  }

  if (bcp_was_adjusted) {
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
  }
}

static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  return false;
#endif
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit

void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  // When called with locking_thread != Thread::current() some mechanism must
  // synchronize the locking_thread with respect to the current thread.
  // Currently only used when deoptimizing and re-locking locks.
  // See Deoptimization::relock_objects.
  assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

  NOT_LOOM_MONITOR_SUPPORT(locking_thread->inc_held_monitor_count();)

  if (!enter_fast_impl(obj, lock, locking_thread)) {
    // Inflated ObjectMonitor::enter_for is required

    // An async deflation can race after the inflate_for() call and before
    // enter_for() can make the ObjectMonitor busy. enter_for() returns false
    // if we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
      if (monitor->enter_for(locking_thread)) {
        return;
      }
      assert(monitor->is_being_async_deflated(), "must be");
    }
  }
}

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(current == Thread::current(), "must be");

  NOT_LOOM_MONITOR_SUPPORT(current->inc_held_monitor_count();)

  if (!enter_fast_impl(obj, lock, current)) {
    // Inflated ObjectMonitor::enter is required

    // An async deflation can race after the inflate() call and before
    // enter() can make the ObjectMonitor busy. enter() returns false if
    // we have lost the race to async deflation and we simply try again.
    while (true) {
      ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
      if (monitor->enter(current)) {
        return;
      }
    }
  }
}

// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, locking_thread);
  }

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.is_full()) {
        // We unconditionally make room on the lock stack by inflating
        // the least recently locked object on the lock stack.

        // About the choice to inflate the least recently locked object:
        // First, we must choose to inflate some lock, either a lock on
        // the lock-stack or the lock that is currently being entered
        // (which may or may not be on the lock-stack).
        // Second, the best lock to inflate is one that is entered in a
        // control flow where only a few locks are in use, as the costly
        // part of inflated locking is inflation, not locking. But this
        // property is entirely program dependent.
        // Third, inflating the lock currently being entered, when it is
        // not present on the lock-stack, will result in a still-full
        // lock-stack. This creates a scenario where every deeper nested
        // monitorenter must call into the runtime.
        // The rationale is therefore as follows: because we cannot
        // (currently) determine the second, and want to avoid the third,
        // we inflate a lock on the lock-stack. The least recently locked
        // lock is chosen as it is the lock with the longest critical section.
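        //
        // For example (illustrative, not from the original comments): with
        // a full lock-stack [A, B, ..., Z], where A is the bottom and thus
        // the least recently locked entry, entering a new lock L inflates A
        // via inflate_for(..., lock_stack.bottom(), ...) below, which frees
        // one slot so that L can then be fast-locked.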

        log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
        ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
        assert(monitor->is_owner(JavaThread::current()), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
               p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
        assert(!lock_stack.is_full(), "must have made room here");
      }

      markWord mark = obj()->mark_acquire();
      while (mark.is_neutral()) {
        // Retry until a lock state change has been observed. cas_set_mark()
        // may collide with non-lock-bit modifications.
        // Try to swing into 'fast-locked' state.
        assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
        const markWord locked_mark = mark.set_fast_locked();
        const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
        if (old_mark == mark) {
          // Successfully fast-locked, push object to lock-stack and return.
          lock_stack.push(obj());
          return true;
        }
        mark = old_mark;
      }

      if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
        // Recursive lock successful.
        return true;
      }

      // Failed to fast lock.
      return false;
    } else if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_neutral()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          LOOM_MONITOR_SUPPORT_ONLY(locking_thread->inc_held_monitor_count();)
          return true;
        }
      } else if (mark.has_locker() &&
                 locking_thread->is_lock_owned((address) mark.locker())) {
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return true;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());

      // Failed to fast lock.
      return false;
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  return false;
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  NOT_LOOM_MONITOR_SUPPORT(current->dec_held_monitor_count();)

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = current->lock_stack();
      if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
        // Recursively unlocked.
        return;
      }

      if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
        // This lock is recursive but is not at the top of the lock stack so we're
        // doing an unbalanced exit. We have to fall thru to inflation below and
        // let ObjectMonitor::exit() do the unlock.
      } else {
        while (mark.is_fast_locked()) {
          // Retry until a lock state change has been observed.
          // cas_set_mark() may collide with non-lock-bit modifications.
          const markWord unlocked_mark = mark.set_unlocked();
          const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
          if (old_mark == mark) {
            size_t recursions = lock_stack.remove(object) - 1;
            assert(recursions == 0, "must not be recursive here");
            return;
          }
          mark = old_mark;
        }
      }
    } else if (LockingMode == LM_LEGACY) {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_neutral(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
            // Java Monitor can be asynchronously inflated by a thread that
            // does not own the Java Monitor.
            ObjectMonitor* m = mark.monitor();
            assert(m->object()->mark() == mark, "invariant");
            assert(m->is_entered(current), "invariant");
          }
        }
#endif
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          LOOM_MONITOR_SUPPORT_ONLY(current->dec_held_monitor_count();)
          return;
        }
      }
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  assert(!monitor->is_owner_anonymous(), "must not be");
  monitor->exit(current);
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
    if (monitor->enter(current)) {
      current->inc_held_monitor_count(1, true);
      break;
    }
  }
  current->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
    current->dec_held_monitor_count(1, true);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  DEBUG_ONLY(_thread->inc_obj_locker_count();)
  _obj = obj;

  if (_obj() != nullptr) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  DEBUG_ONLY(_thread->dec_obj_locker_count();)
  if (_obj() != nullptr) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD); // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
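  // Note (descriptive, assuming standard ObjectMonitor semantics): notify()
  // on the inflated monitor checks ownership itself and raises
  // IllegalMonitorStateException if the calling thread is not the owner.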
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  monitor->notify(CHECK);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

  markWord mark = obj->mark();
  if (LockingMode == LM_LIGHTWEIGHT) {
    if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  } else if (LockingMode == LM_LEGACY) {
    if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // Not inflated so there can't be any waiters to notify.
      return;
    }
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
  monitor->notifyAll(CHECK);
}

// -----------------------------------------------------------------------------
// Hash Code handling

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark_acquire();
  if (!mark.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT) {
    // New lightweight locking does not use the markWord::INFLATING() protocol.
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark_acquire();
    if (!mark.is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflation lock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.

        // Index into the lock array based on the current object address.
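        // For example (illustrative): with inflation_lock_count() == 256,
        // the expression below maps an object at address A to stripe
        // (A >> 5) & 255, so objects at least 32 bytes apart tend to use
        // different mutexes.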
        static_assert(is_power_of_2(inflation_lock_count()), "must be");
        size_t ix = (cast_from_oop<intptr_t>(obj) >> 5) & (inflation_lock_count() - 1);
        int YieldThenBlock = 0;
        assert(ix < inflation_lock_count(), "invariant");
        inflation_lock(ix)->lock();
        while (obj->mark_acquire() == markWord::INFLATING()) {
          // Beware: naked_yield() is advisory and has almost no effect on some platforms
          // so we periodically call current->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        inflation_lock(ix)->unlock();
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation:
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* current, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
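    // The shifts below (11, 19, 8) follow Marsaglia's xor128 generator;
    // the four state words (_hashStateX.._hashStateW) live in the Thread,
    // so, unlike the hashCode == 0 scheme, no cross-CPU coherency traffic
    // is generated.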
    unsigned t = current->_hashStateX;
    t ^= (t << 11);
    current->_hashStateX = current->_hashStateY;
    current->_hashStateY = current->_hashStateZ;
    current->_hashStateZ = current->_hashStateW;
    unsigned v = current->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    current->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {

  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);
    if (VerifyHeavyMonitors) {
      assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
      guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
    }
    if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
      hash = mark.hash();
      if (hash != 0) {                     // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
                                           // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {                  // if the hash was installed, return it
        return hash;
      }
      if (LockingMode == LM_LIGHTWEIGHT) {
        // CAS failed, retry
        continue;
      }
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else if (mark.has_monitor()) {
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        // It has a hash.

        // Separate load of dmw/header above from the loads in
        // is_being_async_deflated().

        // dmw/header and _contentions may get written by different threads.
        // Make sure to observe them in the same order when having several observers.
        OrderAccess::loadload_for_IRIW();

        if (monitor->is_being_async_deflated()) {
          // But we can't safely use the hash if we detect that async
          // deflation has occurred. So we attempt to restore the
          // header/dmw to the object's header so that we only retry
          // once if the deflater thread happens to be slow.
          monitor->install_displaced_markword_in_object(obj);
          continue;
        }
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    } else if (LockingMode == LM_LEGACY && mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
      // This is a stack-lock owned by the calling thread so fetch the
      // displaced markWord from the BasicLock on the stack.
      temp = mark.displaced_mark_helper();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // WARNING:
      //   The displaced header in the BasicLock on a thread's stack
      //   is strictly immutable. It CANNOT be changed in ANY cases.
      //   So we have to inflate the stack-lock into an ObjectMonitor
      //   even if the current thread owns the lock. The BasicLock on
      //   a thread's stack can be asynchronously read by other threads
      //   during an inflate() call so any change to that stack memory
      //   may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.

    // An async deflation can race after the inflate() call and before we
    // can update the ObjectMonitor's header with the hash value below.
    monitor = inflate(current, obj, inflate_cause_hash_code);
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                       // if it does not have a hash
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated()) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  if (LockingMode == LM_LEGACY && mark.has_locker()) {
    // stack-locked case, header points into owner's stack
    return current->is_lock_owned((address)mark.locker());
  }

  if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

  if (mark.has_monitor()) {
    // Inflated monitor so header points to ObjectMonitor (tagged pointer).
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(current) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}

JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  if (LockingMode == LM_LEGACY && mark.has_locker()) {
    // stack-locked so header points into owner's stack.
    // owning_thread_from_stacklock() may also return null here:
    return Threads::owning_thread_from_stacklock(t_list, (address) mark.locker());
  }

  if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
    // fast-locked so get owner from the object.
    // owning_thread_from_object() may also return null here:
    return Threads::owning_thread_from_object(t_list, h_obj());
  }

  if (mark.has_monitor()) {
    // Inflated monitor so header points to ObjectMonitor (tagged pointer).
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != nullptr, "monitor should be non-null");
    // owning_thread_from_monitor() may also return null here:
    return Threads::owning_thread_from_monitor(t_list, monitor);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return nullptr;
}

// Visitors ...

// Iterate over all ObjectMonitors.
template <typename Function>
void ObjectSynchronizer::monitors_iterate(Function function) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* monitor = iter.next();
    function(monitor);
  }
}

// Iterate ObjectMonitors owned by any thread and where the owner `filter`
// returns true.
template <typename OwnerFilter>
void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
  monitors_iterate([&](ObjectMonitor* monitor) {
    // This function is only called at a safepoint or when the
    // target thread is suspended or when the target thread is
    // operating on itself. The current closures in use today are
    // only interested in an owned ObjectMonitor and ownership
    // cannot be dropped under the calling contexts so the
    // ObjectMonitor cannot be async deflated.
    if (monitor->has_owner() && filter(monitor)) {
      assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");

      closure->do_monitor(monitor);
    }
  });
}

// Iterate ObjectMonitors where the owner == thread; this does NOT include
// ObjectMonitors where owner is set to a stack-lock address in thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
  auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->is_owner(thread); };
  return owned_monitors_iterate_filtered(closure, thread_filter);
}

// Iterate ObjectMonitors owned by any thread.
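// (Any predicate over ObjectMonitor* can serve as the OwnerFilter; the
// thread_filter above and the all_filter below are the two instantiations
// used in this file.)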
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
  auto all_filter = [&](ObjectMonitor* monitor) { return true; };
  return owned_monitors_iterate_filtered(closure, all_filter);
}

static bool monitors_used_above_threshold(MonitorList* list) {
  if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
    return false;
  }
  // Start with ceiling based on a per-thread estimate:
  size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
  size_t old_ceiling = ceiling;
  if (ceiling < list->max()) {
    // The max used by the system has exceeded the ceiling so use that:
    ceiling = list->max();
  }
  size_t monitors_used = list->count();
  if (monitors_used == 0) {  // empty list is easy
    return false;
  }
  if (NoAsyncDeflationProgressMax != 0 &&
      _no_progress_cnt >= NoAsyncDeflationProgressMax) {
    double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
    size_t new_ceiling = ceiling + (size_t)((double)ceiling * remainder) + 1;
    ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
    log_info(monitorinflation)("Too many deflations without progress; "
                               "bumping in_use_list_ceiling from " SIZE_FORMAT
                               " to " SIZE_FORMAT, old_ceiling, new_ceiling);
    _no_progress_cnt = 0;
    ceiling = new_ceiling;
  }

  // Check if our monitor usage is above the threshold:
  size_t monitor_usage = (monitors_used * 100LL) / ceiling;
  if (int(monitor_usage) > MonitorUsedDeflationThreshold) {
    log_info(monitorinflation)("monitors_used=" SIZE_FORMAT ", ceiling=" SIZE_FORMAT
                               ", monitor_usage=" SIZE_FORMAT ", threshold=%d",
                               monitors_used, ceiling, monitor_usage, MonitorUsedDeflationThreshold);
    return true;
  }

  return false;
}

size_t ObjectSynchronizer::in_use_list_ceiling() {
  return _in_use_list_ceiling;
}

void ObjectSynchronizer::dec_in_use_list_ceiling() {
  Atomic::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}

void ObjectSynchronizer::inc_in_use_list_ceiling() {
  Atomic::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate);
}

void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) {
  _in_use_list_ceiling = new_value;
}

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (is_async_deflation_requested()) {
    // Async deflation request.
    log_info(monitorinflation)("Async deflation needed: explicit request");
    return true;
  }

  jlong time_since_last = time_since_last_async_deflation_ms();

  if (AsyncDeflationInterval > 0 &&
      time_since_last > AsyncDeflationInterval &&
      monitors_used_above_threshold(&_in_use_list)) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the MonitorDeflationThread.
    log_info(monitorinflation)("Async deflation needed: monitors used are above the threshold");
    return true;
  }

  if (GuaranteedAsyncDeflationInterval > 0 &&
      time_since_last > GuaranteedAsyncDeflationInterval) {
    // It's been longer than our specified guaranteed deflate interval.
    // We need to clean up the used monitors even if the threshold is
    // not reached, to keep memory utilization at bay when many threads
    // have touched many monitors.
    log_info(monitorinflation)("Async deflation needed: guaranteed interval (" INTX_FORMAT " ms) "
                               "is greater than time since last deflation (" JLONG_FORMAT " ms)",
                               GuaranteedAsyncDeflationInterval, time_since_last);

    // If this deflation makes no progress, then it should not affect the no-progress
    // tracking, otherwise the threshold heuristics would think it was triggered, experienced
    // no progress, and needs to back off more aggressively. In this "no progress" case,
    // the generic code would bump the no-progress counter, and we compensate for that
    // by telling it to skip the update.
    //
    // If this deflation makes progress, then it should let the no-progress tracking
    // know about this, otherwise the threshold heuristics would kick in, potentially
    // experience no-progress due to aggressive cleanup by this deflation, and think
    // it is still in a no-progress stride. In this "progress" case, the generic code
    // would zero the counter, and we allow it to happen.
    _no_progress_skip_increment = true;

    return true;
  }

  return false;
}

void ObjectSynchronizer::request_deflate_idle_monitors() {
  MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
  set_is_async_deflation_requested(true);
  ml.notify_all();
}

bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
  JavaThread* current = JavaThread::current();
  bool ret_code = false;

  jlong last_time = last_async_deflation_time_ns();

  request_deflate_idle_monitors();

  const int N_CHECKS = 5;
  for (int i = 0; i < N_CHECKS; i++) {  // sleep for at most 5 seconds
    if (last_async_deflation_time_ns() > last_time) {
      log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
      ret_code = true;
      break;
    }
    {
      // JavaThread has to honor the blocking protocol.
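      // (ThreadBlockInVM transitions this thread to a blocked state for the
      // duration of the sleep, so a safepoint can proceed while we wait.)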
      ThreadBlockInVM tbivm(current);
      os::naked_short_sleep(999);  // sleep for almost 1 second
    }
  }
  if (!ret_code) {
    log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
  }

  return ret_code;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark_acquire();
  if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    markWord dmw = monitor->header();
    assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
    return;
  }
  (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
  assert(current == Thread::current(), "must be");
  return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  return inflate_impl(thread, obj, cause);
}

ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
  // The JavaThread* inflating_thread requires that the inflating_thread == Thread::current() or
  // is suspended throughout the call by some other mechanism.
  // The thread might be nullptr when called from a non-JavaThread. (As may still be
  // the case from FastHashCode). However it is only important for correctness that the
  // thread is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // * inflated     - If the ObjectMonitor owner is anonymous and the
    //                  inflating_thread owns the object lock, then we
    //                  make the inflating_thread the ObjectMonitor owner.
    //                  For LM_LIGHTWEIGHT we also remove the lock from
    //                  the inflating_thread's lock stack.
    // * fast-locked  - Coerce it to inflated from fast-locked.
    // * stack-locked - Coerce it to inflated from stack-locked.
    // * INFLATING    - Busy wait for conversion from stack-locked to
    //                  inflated.
    // * neutral      - Aggressively inflate the object.
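    //
    // For example (illustrative summary of the cases below): a contended
    // LM_LEGACY lock moves
    //   neutral -> stack-locked -> INFLATING (0) -> inflated (monitor),
    // while under LM_LIGHTWEIGHT the INFLATING step is skipped:
    //   neutral -> fast-locked -> inflated (monitor).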

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->is_owner_anonymous() && inflating_thread != nullptr) {
        if (LockingMode == LM_LIGHTWEIGHT) {
          if (inflating_thread->lock_stack().contains(object)) {
            inf->set_owner_from_anonymous(inflating_thread);
            size_t removed = inflating_thread->lock_stack().remove(object);
            inf->set_recursions(removed - 1);
          }
        } else {
          assert(LockingMode == LM_LEGACY, "invariant");
          if (inflating_thread->is_lock_owned((address)inf->stack_locker())) {
            inf->set_owner_from_BasicLock(inflating_thread);
            // Decrement monitor count now since this monitor is okay for freezing
            LOOM_MONITOR_SUPPORT_ONLY(inflating_thread->dec_held_monitor_count();)
          }
        }
      }
      return inf;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // New lightweight locking does not use INFLATING.
      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markWord::INFLATING()) {
        read_stable_mark(object);
        continue;
      }
    }

    // CASE: fast-locked
    // Could be fast-locked either by the inflating_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the inflating_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    LogStreamHandle(Trace, monitorinflation) lsh;
    if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
      if (own) {
        // Owned by inflating_thread.
        monitor->set_owner_from(nullptr, inflating_thread);
      } else {
        // Owned by somebody else.
        monitor->set_owner_anonymous();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = inflating_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and the object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        _in_use_list.add(monitor);

        // Hopefully the performance counters are allocated on distinct
        // cache lines to avoid false sharing on MP systems ...
        OM_PERFDATA_OP(Inflations, inc());
        if (log_is_enabled(Trace, monitorinflation)) {
          ResourceMark rm;
          lsh.print_cr("inflate(fast_locked): object=" INTPTR_FORMAT ", mark="
                       INTPTR_FORMAT ", type='%s'", p2i(object),
                       object->mark().value(), object->klass()->external_name());
        }
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: stack-locked
    // Could be stack-locked either by current or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the ObjectMonitor, and then finally stored the address of the
    // ObjectMonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention. If we lose the race to set INFLATING,
    // then we just delete the ObjectMonitor and loop around again.
    //
    if (LockingMode == LM_LEGACY && mark.has_locker()) {
      assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking");
      ObjectMonitor* m = new ObjectMonitor(object);
      // Optimistically prepare the ObjectMonitor -- anticipate a successful CAS.
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        delete m;
        continue;  // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack BasicLock back into the object header. Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an ObjectMonitor. The inflate() routine must copy the header
      // value from the BasicLock on the owner's stack to the ObjectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the BasicLock to the object)
      // while inflation is in progress. This protocol avoids races that would
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator; a condensed example
      // interleaving follows.
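
      // A condensed, hypothetical interleaving of the race described above
      // (T1 inflates an object that T2 has stack-locked); this is only an
      // illustration of the protocol, not code:
      //
      //   T2: stack-locks obj                 mark = [BasicLock* | 00]
      //   T1: CAS(mark -> INFLATING)          mark = 0; dmw lives in T2's BasicLock
      //   T2: unlocks: the CAS restoring dmw into the mark fails (mark is 0),
      //       so T2 enters the slow path and spins until the mark is != 0
      //   T1: copies dmw into the ObjectMonitor, then release-stores
      //                                       mark = [ObjectMonitor* | 10]
      //   T2: observes the inflated mark and completes the exit via the monitor
      //
      // At every step dmw (and any hash code stored in it) has exactly one
      // authoritative home, which is what keeps the hash from "flickering".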

      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Set up the monitor fields to proper values -- prepare the monitor.
      m->set_header(dmw);

      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-nullptr ... ST idiom.
      if (inflating_thread != nullptr && inflating_thread->is_lock_owned((address)mark.locker())) {
        m->set_owner_from(nullptr, inflating_thread);
        // Decrement monitor count now since this monitor is okay for freezing
        LOOM_MONITOR_SUPPORT_ONLY(inflating_thread->dec_held_monitor_count();)
      } else {
        // Use ANONYMOUS_OWNER to indicate that the owner is the BasicLock on the stack,
        // and set the stack locker field in the monitor.
        m->set_stack_locker(mark.locker());
        m->set_owner_anonymous();  // second: after set_stack_locker()
      }
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      // Release semantics so that the stores above are seen first.
      object->release_set_mark(markWord::encode(m));

      // Once the ObjectMonitor is configured and the object is associated
      // with the ObjectMonitor, it is safe to allow async deflation:
      _in_use_list.add(m);

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm;
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // Prepare m for installation -- set the monitor to its initial state.
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // Interference -- the mark word changed, so just retry.
      // The state transitions are one-way, so there's no chance of
      // live-lock -- "inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and the object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    _in_use_list.add(m);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
  MonitorList::Iterator iter = _in_use_list.iterator();
  size_t deflated_count = 0;

  while (iter.has_next()) {
    if (deflated_count >= (size_t)MonitorDeflationMax) {
      break;
    }
    ObjectMonitor* mid = iter.next();
    if (mid->deflate_monitor()) {
      deflated_count++;
    }

    // Must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
  }

  return deflated_count;
}

class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};

class VM_RendezvousGCThreads : public VM_Operation {
 public:
  bool evaluate_at_safepoint() const override { return false; }
  VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
  void doit() override {
    Universe::heap()->safepoint_synchronize_begin();
    Universe::heap()->safepoint_synchronize_end();
  }
};
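
// Deletes the ObjectMonitors on the given delete_list. Native heap trimming
// is suspended across the loop (monitor deletion is a burst of frees), and
// the loop polls for a safepoint/handshake after every deletion.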
static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list,
                              ObjectMonitorDeflationSafepointer* safepointer) {
  NativeHeapTrimmer::SuspendMark sm("monitor deletion");
  size_t deleted_count = 0;
  for (ObjectMonitor* monitor: *delete_list) {
    delete monitor;
    deleted_count++;
    // A JavaThread must check for a safepoint/handshake and honor it.
    safepointer->block_for_safepoint("deletion", "deleted_count", deleted_count);
  }
  return deleted_count;
}

class ObjectMonitorDeflationLogging : public StackObj {
  LogStreamHandle(Debug, monitorinflation) _debug;
  LogStreamHandle(Info, monitorinflation)  _info;
  LogStream*   _stream;
  elapsedTimer _timer;

  size_t ceiling() const { return ObjectSynchronizer::in_use_list_ceiling(); }
  size_t count() const   { return ObjectSynchronizer::_in_use_list.count(); }
  size_t max() const     { return ObjectSynchronizer::_in_use_list.max(); }

 public:
  ObjectMonitorDeflationLogging()
    : _debug(), _info(), _stream(nullptr) {
    if (_debug.is_enabled()) {
      _stream = &_debug;
    } else if (_info.is_enabled()) {
      _stream = &_info;
    }
  }

  void begin() {
    if (_stream != nullptr) {
      _stream->print_cr("begin deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  void before_handshake(size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
                        ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
                        SIZE_FORMAT ", max=" SIZE_FORMAT,
                        unlinked_count, ceiling(), count(), max());
    }
  }

  void after_handshake() {
    if (_stream != nullptr) {
      _stream->print_cr("after handshaking: in_use_list stats: ceiling="
                        SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
                        ceiling(), count(), max());
      _timer.start();
    }
  }

  void end(size_t deflated_count, size_t unlinked_count) {
    if (_stream != nullptr) {
      _timer.stop();
      if (deflated_count != 0 || unlinked_count != 0 || _debug.is_enabled()) {
        _stream->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs",
                          deflated_count, unlinked_count, _timer.seconds());
      }
      _stream->print_cr("end deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
                        ceiling(), count(), max());
    }
  }

  void before_block_for_safepoint(const char* op_name, const char* cnt_name, size_t cnt) {
    if (_stream != nullptr) {
      _timer.stop();
      _stream->print_cr("pausing %s: %s=" SIZE_FORMAT ", in_use_list stats: ceiling="
                        SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
                        op_name, cnt_name, cnt, ceiling(), count(), max());
    }
  }

  void after_block_for_safepoint(const char* op_name) {
    if (_stream != nullptr) {
      _stream->print_cr("resuming %s: in_use_list stats: ceiling=" SIZE_FORMAT
                        ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, op_name,
                        ceiling(), count(), max());
      _timer.start();
    }
  }
};

void ObjectMonitorDeflationSafepointer::block_for_safepoint(const char* op_name, const char* count_name, size_t counter) {
  if (!SafepointMechanism::should_process(_current)) {
    return;
  }

  // A safepoint/handshake has started.
  _log->before_block_for_safepoint(op_name, count_name, counter);

  {
    // Honor the block request: ThreadBlockInVM transitions this thread to
    // the blocked state, which lets the safepoint/handshake proceed.
    ThreadBlockInVM tbivm(_current);
  }

  _log->after_block_for_safepoint(op_name);
}

// This function is called by the MonitorDeflationThread to deflate
// ObjectMonitors.
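// The cycle below proceeds in four phases:
//   1. deflate_monitor_list()    -- deflate idle monitors in place (at
//                                   most MonitorDeflationMax per cycle);
//   2. unlink_deflated()         -- unlink the deflated monitors from
//                                   the in-use list;
//   3. handshake + GC rendezvous -- make sure no JavaThread and no GC
//                                   thread can still be looking through
//                                   a stale mark-word at an unlinked
//                                   monitor;
//   4. delete_monitors()         -- free the unlinked ObjectMonitors.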
size_t ObjectSynchronizer::deflate_idle_monitors() {
  JavaThread* current = JavaThread::current();
  assert(current->is_monitor_deflation_thread(), "The only monitor deflater");

  // The async deflation request has been processed.
  _last_async_deflation_time_ns = os::javaTimeNanos();
  set_is_async_deflation_requested(false);

  ObjectMonitorDeflationLogging log;
  ObjectMonitorDeflationSafepointer safepointer(current, &log);

  log.begin();

  // Deflate some idle ObjectMonitors.
  size_t deflated_count = deflate_monitor_list(&safepointer);

  // Unlink the deflated ObjectMonitors from the in-use list.
  size_t unlinked_count = 0;
  size_t deleted_count = 0;
  if (deflated_count > 0) {
    ResourceMark rm(current);
    GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
    unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);

    log.before_handshake(unlinked_count);

    // A JavaThread needs to handshake in order to safely free the
    // ObjectMonitors that were deflated in this cycle.
    HandshakeForDeflation hfd_hc;
    Handshake::execute(&hfd_hc);
    // Also, we sync and desync GC threads around the handshake, so that they can
    // safely read the mark-word and look-through to the object-monitor, without
    // being afraid that the object-monitor is going away.
    VM_RendezvousGCThreads sync_gc;
    VMThread::execute(&sync_gc);

    log.after_handshake();

    // After the handshake, safely free the ObjectMonitors that were
    // deflated and unlinked in this cycle.

    // Delete the unlinked ObjectMonitors.
    deleted_count = delete_monitors(&delete_list, &safepointer);
    assert(unlinked_count == deleted_count, "must be");
  }

  log.end(deflated_count, unlinked_count);

  OM_PERFDATA_OP(MonExtant, set_value(_in_use_list.count()));
  OM_PERFDATA_OP(Deflations, inc(deflated_count));

  GVars.stw_random = os::random();

  if (deflated_count != 0) {
    _no_progress_cnt = 0;
  } else if (_no_progress_skip_increment) {
    _no_progress_skip_increment = false;
  } else {
    _no_progress_cnt++;
  }

  return deflated_count;
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
class ReleaseJavaMonitorsClosure : public MonitorClosure {
 private:
  JavaThread* _thread;

 public:
  ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    intx rec = mid->complete_exit(_thread);
    _thread->dec_held_monitor_count(NOT_LOOM_MONITOR_SUPPORT((rec + 1)));
  }
};

// Release all inflated monitors owned by the current thread. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at a safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// The counter is incremented when a safepoint begins and again when it ends,
// so an odd value means a safepoint is in progress; the guarantee checks both
// that the counter is unchanged and that it was even (no safepoint) to begin with.
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
  assert(current == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(current);
  ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
  assert(!current->has_pending_exception(), "Should not be possible");
  current->clear_pending_exception();
  assert(current->held_monitor_count() == 0, "Should not be possible");
  // All monitors (including those entered via JNI) have been unlocked above,
  // so we need to clear the JNI monitor count.
  current->clear_jni_monitor_count();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:   return "VM Internal";
    case inflate_cause_monitor_enter: return "Monitor Enter";
    case inflate_cause_wait:          return "Monitor Wait";
    case inflate_cause_notify:        return "Monitor Notify";
    case inflate_cause_hash_code:     return "Monitor Hash Code";
    case inflate_cause_jni_enter:     return "JNI Monitor Enter";
    case inflate_cause_jni_exit:      return "JNI Monitor Exit";
    case inflate_cause_cont_freeze:   return "Continuation Freeze";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// Do the final audit and print of ObjectMonitor stats; must be done
// by the VMThread at VM exit time.
void ObjectSynchronizer::do_final_audit_and_print_stats() {
  assert(Thread::current()->is_VM_thread(), "sanity check");

  if (is_final_audit()) {  // Only do the audit once.
    return;
  }
  set_is_final_audit();
  log_info(monitorinflation)("Starting the final audit.");

  if (log_is_enabled(Info, monitorinflation)) {
    // The other audit_and_print_stats() call is done at the Debug
    // level at a safepoint in SafepointSynchronize::do_cleanup_tasks.
    audit_and_print_stats(true /* on_exit */);
  }
}

// This function can be called at a safepoint or it can be called when
// we are trying to exit the VM. When we are trying to exit the VM, the
// list walker functions can run in parallel with the other list
// operations, so spin-locking is used for safety.
//
// Calls to this function can be added in various places as a debugging
// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
// details logged at the Info level and 'false' for the 'on_exit'
// parameter to have in-use monitor details logged at the Trace level.
//
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = nullptr;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != nullptr, "sanity check");

  int error_cnt = 0;

  ls->print_cr("Checking in_use_list:");
  chk_in_use_list(ls, &error_cnt);

  if (error_cnt == 0) {
    ls->print_cr("No errors found in in_use_list checks.");
  } else {
    log_error(monitorinflation)("found in_use_list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting, this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls, !on_exit /* log_all */);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check the in_use_list; log the results of the checks.
void ObjectSynchronizer::chk_in_use_list(outputStream* out, int* error_cnt_p) {
  size_t l_in_use_count = _in_use_list.count();
  size_t l_in_use_max = _in_use_list.max();
  out->print_cr("count=" SIZE_FORMAT ", max=" SIZE_FORMAT, l_in_use_count,
                l_in_use_max);

  size_t ck_in_use_count = 0;
  MonitorList::Iterator iter = _in_use_list.iterator();
  while (iter.has_next()) {
    ObjectMonitor* mid = iter.next();
    chk_in_use_entry(mid, out, error_cnt_p);
    ck_in_use_count++;
  }

  if (l_in_use_count == ck_in_use_count) {
    out->print_cr("in_use_count=" SIZE_FORMAT " equals ck_in_use_count="
                  SIZE_FORMAT, l_in_use_count, ck_in_use_count);
  } else {
    out->print_cr("WARNING: in_use_count=" SIZE_FORMAT " is not equal to "
                  "ck_in_use_count=" SIZE_FORMAT, l_in_use_count,
                  ck_in_use_count);
  }

  size_t ck_in_use_max = _in_use_list.max();
  if (l_in_use_max == ck_in_use_max) {
    out->print_cr("in_use_max=" SIZE_FORMAT " equals ck_in_use_max="
                  SIZE_FORMAT, l_in_use_max, ck_in_use_max);
  } else {
    out->print_cr("WARNING: in_use_max=" SIZE_FORMAT " is not equal to "
                  "ck_in_use_max=" SIZE_FORMAT, l_in_use_max, ck_in_use_max);
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
                                          int* error_cnt_p) {
  if (n->owner_is_DEFLATER_MARKER()) {
    // This could happen when monitor deflation blocks for a safepoint.
    return;
  }

  if (n->header().value() == 0) {
    out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
                  "have non-null _header field.", p2i(n));
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = n->object_peek();
  if (obj != nullptr) {
    const markWord mark = obj->mark();
    if (!mark.has_monitor()) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                    "object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
      *error_cnt_p = *error_cnt_p + 1;
    } else {
      // Only decode the monitor from the mark when the mark actually has one.
      ObjectMonitor* const obj_mon = mark.monitor();
      if (n != obj_mon) {
        out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
                      "object does not refer to the same monitor: obj="
                      INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                      INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
        *error_cnt_p = *error_cnt_p + 1;
      }
    }
  }
}

// Log details about ObjectMonitors on the in_use_list. The 'BHL'
// flags indicate why the entry is in-use ('B' -> is_busy, 'H' -> has a
// hash code, 'L' -> has an owner); 'object' and 'object type' identify
// the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_all) {
  if (_in_use_list.count() > 0) {
    stringStream ss;
    out->print_cr("In-use monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");

    auto is_interesting = [&](ObjectMonitor* monitor) {
      return log_all || monitor->has_owner() || monitor->is_busy();
    };

    monitors_iterate([&](ObjectMonitor* monitor) {
      if (is_interesting(monitor)) {
        const oop obj = monitor->object_peek();
        const markWord mark = monitor->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
                   monitor->is_busy(), mark.hash() != 0, monitor->owner() != nullptr,
                   p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
        if (monitor->is_busy()) {
          out->print(" (%s)", monitor->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();
      }
    });
  }

  out->flush();
}
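
// For reference: all of the monitorinflation output above goes through
// unified logging, so it is selected with e.g. -Xlog:monitorinflation=info
// (or =debug / =trace for the progressively more verbose audit output).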