1 /*
2 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "jfr/jfrEvents.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/handshake.hpp"
41 #include "runtime/interfaceSupport.inline.hpp"
42 #include "runtime/javaThread.hpp"
43 #include "runtime/lockStack.inline.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/objectMonitor.hpp"
46 #include "runtime/objectMonitor.inline.hpp"
47 #include "runtime/os.inline.hpp"
48 #include "runtime/osThread.hpp"
49 #include "runtime/perfData.hpp"
50 #include "runtime/safepointMechanism.inline.hpp"
51 #include "runtime/safepointVerifiers.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "runtime/synchronizer.hpp"
55 #include "runtime/threads.hpp"
56 #include "runtime/timer.hpp"
57 #include "runtime/trimNativeHeap.hpp"
58 #include "runtime/vframe.hpp"
59 #include "runtime/vmThread.hpp"
60 #include "utilities/align.hpp"
61 #include "utilities/dtrace.hpp"
62 #include "utilities/events.hpp"
63 #include "utilities/linkedlist.hpp"
64 #include "utilities/preserveException.hpp"
65
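// MonitorList is a lock-free singly-linked list of all in-use ObjectMonitors.
// add() prepends with a classic load/CAS retry loop; _count is maintained with
// atomic arithmetic, and _max is a statistics-only high-water mark whose
// check-then-increment below can race, so it is only approximate under contention.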
66 void MonitorList::add(ObjectMonitor* m) {
67 ObjectMonitor* head;
68 do {
69 head = Atomic::load(&_head);
70 m->set_next_om(head);
71 } while (Atomic::cmpxchg(&_head, head, m) != head);
72
73 size_t count = Atomic::add(&_count, 1u);
74 if (count > max()) {
75 Atomic::inc(&_max);
76 }
77 }
78
79 size_t MonitorList::count() const {
80 return Atomic::load(&_count);
81 }
82
367 return false;
368 }
369
370
371 // The LockNode emitted directly at the synchronization site would have
372 // been too big if it were to have included support for the cases of inflated
373 // recursive enter and exit, so they go here instead.
374 // Note that we can't safely call AsyncPrintJavaStack() from within
375 // quick_enter() as our thread state remains _in_Java.
376
377 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
378 BasicLock * lock) {
379 assert(current->thread_state() == _thread_in_Java, "invariant");
380 NoSafepointVerifier nsv;
381 if (obj == nullptr) return false; // Need to throw NPE
382
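// Bail out for value-based classes: the slow path runs the
// DiagnoseSyncOnValueBasedClasses diagnostics via
// handle_sync_on_value_based_class() before locking.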
383 if (obj->klass()->is_value_based()) {
384 return false;
385 }
386
387 const markWord mark = obj->mark();
388
389 if (mark.has_monitor()) {
390 ObjectMonitor* const m = mark.monitor();
391 // An async deflation or GC can race us before we manage to make
392 // the ObjectMonitor busy by setting the owner below. If we detect
393 // that race we just bail out to the slow-path here.
394 if (m->object_peek() == nullptr) {
395 return false;
396 }
397 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
398
399 // Lock contention and Transactional Lock Elision (TLE) diagnostics
400 // and observability
401 // Case: light contention possibly amenable to TLE
402 // Case: TLE-inimical operations such as nested/recursive synchronization
403
404 if (owner == current) {
405 m->_recursions++;
406 current->inc_held_monitor_count();
420 lock->set_displaced_header(markWord::unused_mark());
421 }
422
423 if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
424 assert(m->_recursions == 0, "invariant");
425 current->inc_held_monitor_count();
426 return true;
427 }
428 }
429
430 // Note that we could inflate in quick_enter.
431 // This is likely a useful optimization.
432 // Critically, in quick_enter() we must not:
433 // -- block indefinitely, or
434 // -- reach a safepoint
435
436 return false; // revert to slow-path
437 }
438
439 // Handle notifications when synchronizing on value-based classes
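// Behavior is controlled by DiagnoseSyncOnValueBasedClasses: LOG_WARNING logs
// at info level and posts an EventSyncOnValueBasedClass JFR event, while
// FATAL_EXIT terminates the VM, in both cases reporting the offending frame.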
440 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
441 frame last_frame = current->last_frame();
442 bool bcp_was_adjusted = false;
443 // Don't decrement bcp if it points to the frame's first instruction. This happens when
444 // handle_sync_on_value_based_class() is called because of a synchronized method. There
445 // is no actual monitorenter instruction in the bytecode in this case.
446 if (last_frame.is_interpreted_frame() &&
447 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
448 // adjust bcp to point back to monitorenter so that we print the correct line numbers
449 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
450 bcp_was_adjusted = true;
451 }
452
453 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
454 ResourceMark rm(current);
455 stringStream ss;
456 current->print_active_stack_on(&ss);
457 char* base = (char*)strstr(ss.base(), "at");
458 char* newline = (char*)strchr(ss.base(), '\n');
459 if (newline != nullptr) {
460 *newline = '\0';
461 }
462 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
463 } else {
464 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
465 ResourceMark rm(current);
466 Log(valuebasedclasses) vblog;
467
468 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
469 if (current->has_last_Java_frame()) {
470 LogStream info_stream(vblog.info());
471 current->print_active_stack_on(&info_stream);
472 } else {
473 vblog.info("Cannot find the last Java frame");
474 }
475
476 EventSyncOnValueBasedClass event;
477 if (event.should_commit()) {
478 event.set_valueBasedClass(obj->klass());
479 event.commit();
480 }
481 }
482
483 if (bcp_was_adjusted) {
484 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
485 }
486 }
487
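// Note: LockingMode == LM_MONITOR is only honored on the platforms listed in
// the #if below; everywhere else the fast-locking paths remain in use.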
488 static bool useHeavyMonitors() {
489 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
490 return LockingMode == LM_MONITOR;
491 #else
492 return false;
493 #endif
494 }
495
496 // -----------------------------------------------------------------------------
497 // Monitor Enter/Exit
498 // The interpreter and compiler assembly code tries to lock using the fast path
499 // of this algorithm. Make sure to update that code if the following function is
500 // changed. The implementation is extremely sensitive to race conditions. Be careful.
501
502 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
503 if (obj->klass()->is_value_based()) {
504 handle_sync_on_value_based_class(obj, current);
505 }
506
507 current->inc_held_monitor_count();
508
509 if (!useHeavyMonitors()) {
510 if (LockingMode == LM_LIGHTWEIGHT) {
511 // Fast-locking does not use the 'lock' argument.
512 LockStack& lock_stack = current->lock_stack();
513 if (lock_stack.can_push()) {
514 markWord mark = obj()->mark_acquire();
515 while (mark.is_neutral()) {
516 // Retry until a lock state change has been observed: cas_set_mark() may collide with modifications of the non-lock bits.
517 // Try to swing into 'fast-locked' state.
518 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
519 const markWord locked_mark = mark.set_fast_locked();
520 const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
521 if (old_mark == mark) {
522 // Successfully fast-locked, push object to lock-stack and return.
523 lock_stack.push(obj());
524 return;
525 }
526 mark = old_mark;
527 }
528 }
529 // All other paths fall-through to inflate-enter.
530 } else if (LockingMode == LM_LEGACY) {
531 markWord mark = obj->mark();
532 if (mark.is_neutral()) {
533 // Anticipate successful CAS -- the ST of the displaced mark must
534 // be visible <= the ST performed by the CAS.
535 lock->set_displaced_header(mark);
536 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
537 return;
538 }
539 // Fall through to inflate() ...
540 } else if (mark.has_locker() &&
541 current->is_lock_owned((address) mark.locker())) {
542 assert(lock != mark.locker(), "must not re-lock the same lock");
543 assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
544 lock->set_displaced_header(markWord::from_pointer(nullptr));
545 return;
546 }
547
548 // The object header will never be displaced to this lock,
549 // so it does not matter what the value is, except that it
550 // must be non-zero to avoid looking like a re-entrant lock,
551 // and must not look locked either.
552 lock->set_displaced_header(markWord::unused_mark());
553 }
554 } else if (VerifyHeavyMonitors) {
555 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
556 }
557
558 // An async deflation can race after the inflate() call and before
559 // enter() can make the ObjectMonitor busy. enter() returns false if
560 // we have lost the race to async deflation and we simply try again.
561 while (true) {
562 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
563 if (monitor->enter(current)) {
564 return;
565 }
566 }
567 }
568
569 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
570 current->dec_held_monitor_count();
571
572 if (!useHeavyMonitors()) {
573 markWord mark = object->mark();
574 if (LockingMode == LM_LIGHTWEIGHT) {
575 // Fast-locking does not use the 'lock' argument.
576 while (mark.is_fast_locked()) {
577 // Retry until a lock state change has been observed: cas_set_mark() may collide with modifications of the non-lock bits.
578 const markWord unlocked_mark = mark.set_unlocked();
579 const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
580 if (old_mark == mark) {
581 current->lock_stack().remove(object);
582 return;
583 }
584 mark = old_mark;
585 }
586 } else if (LockingMode == LM_LEGACY) {
587 markWord dhw = lock->displaced_header();
588 if (dhw.value() == 0) {
589 // If the displaced header is null, then this exit matches up with
590 // a recursive enter. No real work to do here except for diagnostics.
591 #ifndef PRODUCT
592 if (mark != markWord::INFLATING()) {
593 // Only do diagnostics if we are not racing an inflation. Simply
594 // exiting a recursive enter of a Java Monitor that is being
595 // inflated is safe; see the has_monitor() comment below.
596 assert(!mark.is_neutral(), "invariant");
597 assert(!mark.has_locker() ||
598 current->is_lock_owned((address)mark.locker()), "invariant");
599 if (mark.has_monitor()) {
600 // The BasicLock's displaced_header is marked as a recursive
601 // enter and we have an inflated Java Monitor (ObjectMonitor).
602 // This is a special case where the Java Monitor was inflated
603 // after this thread entered the stack-lock recursively. When a
604 // Java Monitor is inflated, we cannot safely walk the Java
614 return;
615 }
616
617 if (mark == markWord::from_pointer(lock)) {
618 // If the object is stack-locked by the current thread, try to
619 // swing the displaced header from the BasicLock back to the mark.
620 assert(dhw.is_neutral(), "invariant");
621 if (object->cas_set_mark(dhw, mark) == mark) {
622 return;
623 }
624 }
625 }
626 } else if (VerifyHeavyMonitors) {
627 guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
628 }
629
630 // We have to take the slow-path of possible inflation and then exit.
631 // The ObjectMonitor* can't be async deflated until ownership is
632 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
633 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
634 if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
635 // It must be owned by us. Pop lock object from lock stack.
636 LockStack& lock_stack = current->lock_stack();
637 oop popped = lock_stack.pop();
638 assert(popped == object, "must be owned by this thread");
639 monitor->set_owner_from_anonymous(current);
640 }
641 monitor->exit(current);
642 }
643
644 // -----------------------------------------------------------------------------
645 // JNI locks on java objects
646 // NOTE: we must use a heavyweight monitor to handle JNI monitor enter
647 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
648 if (obj->klass()->is_value_based()) {
649 handle_sync_on_value_based_class(obj, current);
650 }
651
652 // the current locking is from JNI instead of Java code
653 current->set_current_pending_monitor_is_from_java(false);
654 // An async deflation can race after the inflate() call and before
655 // enter() can make the ObjectMonitor busy. enter() returns false if
656 // we have lost the race to async deflation and we simply try again.
657 while (true) {
658 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
659 if (monitor->enter(current)) {
660 current->inc_held_monitor_count(1, true);
888 value = 1; // for sensitivity testing
889 } else if (hashCode == 3) {
890 value = ++GVars.hc_sequence;
891 } else if (hashCode == 4) {
892 value = cast_from_oop<intptr_t>(obj);
893 } else {
894 // Marsaglia's xor-shift scheme with thread-specific state
895 // This is probably the best overall implementation -- we'll
896 // likely make this the default in future releases.
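// This is Marsaglia's xor128 generator (shift triple 11/19/8, period 2^128 - 1);
// the four state words are thread-local, so no synchronization is needed.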
897 unsigned t = current->_hashStateX;
898 t ^= (t << 11);
899 current->_hashStateX = current->_hashStateY;
900 current->_hashStateY = current->_hashStateZ;
901 current->_hashStateZ = current->_hashStateW;
902 unsigned v = current->_hashStateW;
903 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
904 current->_hashStateW = v;
905 value = v;
906 }
907
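// Truncate to the mark word's hash field. Zero is reserved as markWord::no_hash
// ("no hash installed yet"), so a zero result is remapped to a non-zero value.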
908 value &= markWord::hash_mask;
909 if (value == 0) value = 0xBAD;
910 assert(value != markWord::no_hash, "invariant");
911 return value;
912 }
913
914 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
915
916 while (true) {
917 ObjectMonitor* monitor = nullptr;
918 markWord temp, test;
919 intptr_t hash;
920 markWord mark = read_stable_mark(obj);
921 if (VerifyHeavyMonitors) {
922 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
923 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
924 }
925 if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
926 hash = mark.hash();
927 if (hash != 0) { // if it has a hash, just return it
928 return hash;
1283 ObjectSynchronizer::InflateCause cause) {
1284 assert(event != nullptr, "invariant");
1285 event->set_monitorClass(obj->klass());
1286 event->set_address((uintptr_t)(void*)obj);
1287 event->set_cause((u1)cause);
1288 event->commit();
1289 }
1290
1291 // Fast path code shared by multiple functions
1292 void ObjectSynchronizer::inflate_helper(oop obj) {
1293 markWord mark = obj->mark_acquire();
1294 if (mark.has_monitor()) {
1295 ObjectMonitor* monitor = mark.monitor();
1296 markWord dmw = monitor->header();
1297 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1298 return;
1299 }
1300 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1301 }
1302
1303 // Can be called from non-JavaThreads (e.g., the VMThread) for FastHashCode
1304 // calculations as part of JVM/TI tagging.
1305 static bool is_lock_owned(Thread* thread, oop obj) {
1306 assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
1307 return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
1308 }
1309
1310 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1311 const InflateCause cause) {
1312 EventJavaMonitorInflate event;
1313
1314 for (;;) {
1315 const markWord mark = object->mark_acquire();
1316
1317 // The mark can be in one of the following states:
1318 // * inflated - Just return if using stack-locking.
1319 // If using fast-locking and the ObjectMonitor owner
1320 // is anonymous and the current thread owns the
1321 // object lock, then we make the current thread the
1322 // ObjectMonitor owner and remove the lock from the
1323 // current thread's lock stack.
1324 // * fast-locked - Coerce it to inflated from fast-locked.
1325 // * stack-locked - Coerce it to inflated from stack-locked.
1326 // * INFLATING - Busy wait for conversion from stack-locked to
1327 // inflated.
1328 // * neutral - Aggressively inflate the object.
1329
1330 // CASE: inflated
1331 if (mark.has_monitor()) {
1332 ObjectMonitor* inf = mark.monitor();
1333 markWord dmw = inf->header();
1334 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1335 if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
1336 inf->set_owner_from_anonymous(current);
1337 JavaThread::cast(current)->lock_stack().remove(object);
1338 }
1339 return inf;
1340 }
1341
1342 if (LockingMode != LM_LIGHTWEIGHT) {
1343 // New lightweight locking does not use INFLATING.
1344 // CASE: inflation in progress - inflating over a stack-lock.
1345 // Some other thread is converting from stack-locked to inflated.
1346 // Only that thread can complete inflation -- other threads must wait.
1347 // The INFLATING value is transient.
1348 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1349 // We could always eliminate polling by parking the thread on some auxiliary list.
1350 if (mark == markWord::INFLATING()) {
1351 read_stable_mark(object);
1352 continue;
1353 }
1354 }
1355
1356 // CASE: fast-locked
1357 // Could be fast-locked either by current or by some other thread.
1358 //
1359 // Note that we allocate the ObjectMonitor speculatively, _before_
1360 // attempting to set the object's mark to the new ObjectMonitor. If
1361 // this thread owns the monitor, then we set the ObjectMonitor's
1362 // owner to this thread. Otherwise, we set the ObjectMonitor's owner
1363 // to anonymous. If we lose the race to set the object's mark to the
1364 // new ObjectMonitor, then we just delete it and loop around again.
1365 //
1366 LogStreamHandle(Trace, monitorinflation) lsh;
1367 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1368 ObjectMonitor* monitor = new ObjectMonitor(object);
1369 monitor->set_header(mark.set_unlocked());
1370 bool own = is_lock_owned(current, object);
1371 if (own) {
1372 // Owned by us.
1373 monitor->set_owner_from(nullptr, current);
1374 } else {
1375 // Owned by somebody else.
1376 monitor->set_owner_anonymous();
1377 }
1378 markWord monitor_mark = markWord::encode(monitor);
1379 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
1380 if (old_mark == mark) {
1381 // Success! Return inflated monitor.
1382 if (own) {
1383 JavaThread::cast(current)->lock_stack().remove(object);
1384 }
1385 // Once the ObjectMonitor is configured and object is associated
1386 // with the ObjectMonitor, it is safe to allow async deflation:
1387 _in_use_list.add(monitor);
1388
1389 // Hopefully the performance counters are allocated on distinct
1390 // cache lines to avoid false sharing on MP systems ...
1391 OM_PERFDATA_OP(Inflations, inc());
1392 if (log_is_enabled(Trace, monitorinflation)) {
1393 ResourceMark rm(current);
1394 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1395 INTPTR_FORMAT ", type='%s'", p2i(object),
1396 object->mark().value(), object->klass()->external_name());
1397 }
1398 if (event.should_commit()) {
1399 post_monitor_inflate_event(&event, object, cause);
1400 }
1401 return monitor;
1402 } else {
1403 delete monitor;
1404 continue; // Interference -- just retry
1405 }
1406 }
1407
1408 // CASE: stack-locked
1409 // Could be stack-locked either by current or by some other thread.
1410 //
1411 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1412 // to install INFLATING into the mark word. We originally installed INFLATING,
1413 // allocated the ObjectMonitor, and then finally STed the address of the
1472 // Note that a thread can inflate an object
1473 // that it has stack-locked -- as might happen in wait() -- directly
1474 // with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
1475 m->set_owner_from(nullptr, mark.locker());
1476 // TODO-FIXME: assert BasicLock->dhw != 0.
1477
1478 // Must preserve store ordering. The monitor state must
1479 // be stable at the time of publishing the monitor address.
1480 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1481 // Release semantics so that above set_object() is seen first.
1482 object->release_set_mark(markWord::encode(m));
1483
1484 // Once ObjectMonitor is configured and the object is associated
1485 // with the ObjectMonitor, it is safe to allow async deflation:
1486 _in_use_list.add(m);
1487
1488 // Hopefully the performance counters are allocated on distinct cache lines
1489 // to avoid false sharing on MP systems ...
1490 OM_PERFDATA_OP(Inflations, inc());
1491 if (log_is_enabled(Trace, monitorinflation)) {
1492 ResourceMark rm(current);
1493 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1494 INTPTR_FORMAT ", type='%s'", p2i(object),
1495 object->mark().value(), object->klass()->external_name());
1496 }
1497 if (event.should_commit()) {
1498 post_monitor_inflate_event(&event, object, cause);
1499 }
1500 return m;
1501 }
1502
1503 // CASE: neutral
1504 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1505 // If we know we're inflating for entry it's better to inflate by swinging a
1506 // pre-locked ObjectMonitor pointer into the object header. A successful
1507 // CAS inflates the object *and* confers ownership to the inflating thread.
1508 // In the current implementation we use a 2-step mechanism where we CAS()
1509 // to inflate and then CAS() again to try to swing _owner from null to current.
1510 // An inflateTry() method that we could call from enter() would be useful.
1511
1512 // Catch if the object's header is not neutral (not locked and
1516 // prepare m for installation - set monitor to initial state
1517 m->set_header(mark);
1518
1519 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1520 delete m;
1521 m = nullptr;
1522 continue;
1523 // interference - the markword changed - just retry.
1524 // The state-transitions are one-way, so there's no chance of
1525 // live-lock -- "Inflated" is an absorbing state.
1526 }
1527
1528 // Once the ObjectMonitor is configured and object is associated
1529 // with the ObjectMonitor, it is safe to allow async deflation:
1530 _in_use_list.add(m);
1531
1532 // Hopefully the performance counters are allocated on distinct
1533 // cache lines to avoid false sharing on MP systems ...
1534 OM_PERFDATA_OP(Inflations, inc());
1535 if (log_is_enabled(Trace, monitorinflation)) {
1536 ResourceMark rm(current);
1537 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1538 INTPTR_FORMAT ", type='%s'", p2i(object),
1539 object->mark().value(), object->klass()->external_name());
1540 }
1541 if (event.should_commit()) {
1542 post_monitor_inflate_event(&event, object, cause);
1543 }
1544 return m;
1545 }
1546 }
1547
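// Helper for long-running monitor operations: does nothing unless a
// safepoint/handshake is pending, in which case the operation's progress
// (op_name, cnt) is logged and the request is then honored.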
1548 void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1549 const char* cnt_name, size_t cnt,
1550 LogStream* ls, elapsedTimer* timer_p) {
1551 if (!SafepointMechanism::should_process(current)) {
1552 return;
1553 }
1554
1555 // A safepoint/handshake has started.
1556 if (ls != nullptr) {
1 /*
2 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "jfr/jfrEvents.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/globals.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/handshake.hpp"
42 #include "runtime/interfaceSupport.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/lockStack.inline.hpp"
45 #include "runtime/mutexLocker.hpp"
46 #include "runtime/objectMonitor.hpp"
47 #include "runtime/objectMonitor.inline.hpp"
48 #include "runtime/os.inline.hpp"
49 #include "runtime/osThread.hpp"
50 #include "runtime/perfData.hpp"
51 #include "runtime/safepointMechanism.inline.hpp"
52 #include "runtime/safepointVerifiers.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "runtime/stubRoutines.hpp"
55 #include "runtime/synchronizer.hpp"
56 #include "runtime/threads.hpp"
57 #include "runtime/timer.hpp"
58 #include "runtime/trimNativeHeap.hpp"
59 #include "runtime/vframe.hpp"
60 #include "runtime/vmThread.hpp"
61 #include "utilities/align.hpp"
62 #include "utilities/dtrace.hpp"
63 #include "utilities/events.hpp"
64 #include "utilities/globalDefinitions.hpp"
65 #include "utilities/linkedlist.hpp"
66 #include "utilities/preserveException.hpp"
67
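// Lock-free prepend to the global list of in-use ObjectMonitors. _count is
// updated with atomic arithmetic; _max is a statistics-only high-water mark
// whose check-then-increment below can race, so it is approximate.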
68 void MonitorList::add(ObjectMonitor* m) {
69 ObjectMonitor* head;
70 do {
71 head = Atomic::load(&_head);
72 m->set_next_om(head);
73 } while (Atomic::cmpxchg(&_head, head, m) != head);
74
75 size_t count = Atomic::add(&_count, 1u);
76 if (count > max()) {
77 Atomic::inc(&_max);
78 }
79 }
80
81 size_t MonitorList::count() const {
82 return Atomic::load(&_count);
83 }
84
369 return false;
370 }
371
372
373 // The LockNode emitted directly at the synchronization site would have
374 // been too big if it were to have included support for the cases of inflated
375 // recursive enter and exit, so they go here instead.
376 // Note that we can't safely call AsyncPrintJavaStack() from within
377 // quick_enter() as our thread state remains _in_Java.
378
379 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
380 BasicLock * lock) {
381 assert(current->thread_state() == _thread_in_Java, "invariant");
382 NoSafepointVerifier nsv;
383 if (obj == nullptr) return false; // Need to throw NPE
384
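// Value-based classes always take the slow path, where enter() will run the
// DiagnoseSyncOnValueBasedClasses handling before locking.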
385 if (obj->klass()->is_value_based()) {
386 return false;
387 }
388
389 if (LockingMode == LM_LIGHTWEIGHT) {
390 LockStack& lock_stack = current->lock_stack();
391 if (lock_stack.is_full()) {
392 // Always go into runtime if the lock stack is full.
393 return false;
394 }
395 if (lock_stack.try_recursive_enter(obj)) {
396 // Recursive lock successful.
397 current->inc_held_monitor_count();
398 return true;
399 }
400 }
401
402 const markWord mark = obj->mark();
403
404 if (mark.has_monitor()) {
405 ObjectMonitor* const m = mark.monitor();
406 // An async deflation or GC can race us before we manage to make
407 // the ObjectMonitor busy by setting the owner below. If we detect
408 // that race we just bail out to the slow-path here.
409 if (m->object_peek() == nullptr) {
410 return false;
411 }
412 JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
413
414 // Lock contention and Transactional Lock Elision (TLE) diagnostics
415 // and observability
416 // Case: light contention possibly amenable to TLE
417 // Case: TLE-inimical operations such as nested/recursive synchronization
418
419 if (owner == current) {
420 m->_recursions++;
421 current->inc_held_monitor_count();
435 lock->set_displaced_header(markWord::unused_mark());
436 }
437
438 if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
439 assert(m->_recursions == 0, "invariant");
440 current->inc_held_monitor_count();
441 return true;
442 }
443 }
444
445 // Note that we could inflate in quick_enter.
446 // This is likely a useful optimization.
447 // Critically, in quick_enter() we must not:
448 // -- block indefinitely, or
449 // -- reach a safepoint
450
451 return false; // revert to slow-path
452 }
453
454 // Handle notifications when synchronizing on value-based classes
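// The DiagnoseSyncOnValueBasedClasses flag selects the behavior: LOG_WARNING
// logs the synchronization (with stack trace, if available) and posts a JFR
// event; FATAL_EXIT aborts the VM with the offending frame in the message.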
455 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
456 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
457 frame last_frame = locking_thread->last_frame();
458 bool bcp_was_adjusted = false;
459 // Don't decrement bcp if it points to the frame's first instruction. This happens when
460 // handle_sync_on_value_based_class() is called because of a synchronized method. There
461 // is no actual monitorenter instruction in the bytecode in this case.
462 if (last_frame.is_interpreted_frame() &&
463 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
464 // adjust bcp to point back to monitorenter so that we print the correct line numbers
465 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
466 bcp_was_adjusted = true;
467 }
468
469 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
470 ResourceMark rm;
471 stringStream ss;
472 locking_thread->print_active_stack_on(&ss);
473 char* base = (char*)strstr(ss.base(), "at");
474 char* newline = (char*)strchr(ss.base(), '\n');
475 if (newline != nullptr) {
476 *newline = '\0';
477 }
478 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
479 } else {
480 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
481 ResourceMark rm;
482 Log(valuebasedclasses) vblog;
483
484 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
485 if (locking_thread->has_last_Java_frame()) {
486 LogStream info_stream(vblog.info());
487 locking_thread->print_active_stack_on(&info_stream);
488 } else {
489 vblog.info("Cannot find the last Java frame");
490 }
491
492 EventSyncOnValueBasedClass event;
493 if (event.should_commit()) {
494 event.set_valueBasedClass(obj->klass());
495 event.commit();
496 }
497 }
498
499 if (bcp_was_adjusted) {
500 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
501 }
502 }
503
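// LM_MONITOR (always use inflated monitors) takes effect only on the platforms
// listed below; on other platforms this always answers false, presumably
// because their generated fast-lock paths do not implement the mode.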
504 static bool useHeavyMonitors() {
505 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
506 return LockingMode == LM_MONITOR;
507 #else
508 return false;
509 #endif
510 }
511
512 // -----------------------------------------------------------------------------
513 // Monitor Enter/Exit
514
515 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
516 // When called with locking_thread != Thread::current() some mechanism must synchronize
517 // the locking_thread with respect to the current thread. Currently only used when
518 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
519 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
520 if (!enter_fast_impl(obj, lock, locking_thread)) {
521 // Inflated ObjectMonitor::enter_for is required
522
523 // An async deflation can race after the inflate_for() call and before
524 // enter_for() can make the ObjectMonitor busy. enter_for() returns false
525 // if we have lost the race to async deflation and we simply try again.
526 while (true) {
527 ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
528 if (monitor->enter_for(locking_thread)) {
529 return;
530 }
531 assert(monitor->is_being_async_deflated(), "must be");
532 }
533 }
534 }
535
536 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
537 assert(current == Thread::current(), "must be");
538 if (!enter_fast_impl(obj, lock, current)) {
539 // Inflated ObjectMonitor::enter is required
540
541 // An async deflation can race after the inflate() call and before
542 // enter() can make the ObjectMonitor busy. enter() returns false if
543 // we have lost the race to async deflation and we simply try again.
544 while (true) {
545 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
546 if (monitor->enter(current)) {
547 return;
548 }
549 }
550 }
551 }
552
553 // The interpreter and compiler assembly code tries to lock using the fast path
554 // of this algorithm. Make sure to update that code if the following function is
555 // changed. The implementation is extremely sensitive to race conditions. Be careful.
556 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
557
558 if (obj->klass()->is_value_based()) {
559 handle_sync_on_value_based_class(obj, locking_thread);
560 }
561
562 locking_thread->inc_held_monitor_count();
563
564 if (!useHeavyMonitors()) {
565 if (LockingMode == LM_LIGHTWEIGHT) {
566 // Fast-locking does not use the 'lock' argument.
567 LockStack& lock_stack = locking_thread->lock_stack();
568 if (lock_stack.is_full()) {
569 // We unconditionally make room on the lock stack by inflating
570 // the least recently locked object on the lock stack.
571
572 // About the choice to inflate the least recently locked object.
573 // First, we must choose to inflate a lock, either some lock on
574 // the lock-stack or the lock that is currently being entered
575 // (which may or may not be on the lock-stack).
576 // Second, the best lock to inflate is a lock which is entered
577 // in a control flow where there are only a very few locks being
578 // used, as the costly part of inflated locking is inflation,
579 // not locking. But this property is entirely program dependent.
580 // Third, inflating the lock currently being entered, when it is
581 // not present on the lock-stack, will result in a still-full
582 // lock-stack. This creates a scenario where every deeper nested
583 // monitorenter must call into the runtime.
584 // The rationale is therefore as follows:
585 // Because we cannot (currently) figure out the second, and want
586 // to avoid the third, we inflate a lock on the lock-stack.
587 // The least recently locked lock is chosen as it is the lock
588 // with the longest critical section.
589
590 log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
591 ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
592 assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
593 p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
594 assert(!lock_stack.is_full(), "must have made room here");
595 }
596
597 markWord mark = obj()->mark_acquire();
598 while (mark.is_neutral()) {
599 // Retry until a lock state change has been observed: cas_set_mark() may collide with modifications of the non-lock bits.
600 // Try to swing into 'fast-locked' state.
601 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
602 const markWord locked_mark = mark.set_fast_locked();
603 const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
604 if (old_mark == mark) {
605 // Successfully fast-locked, push object to lock-stack and return.
606 lock_stack.push(obj());
607 return true;
608 }
609 mark = old_mark;
610 }
611
612 if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
613 // Recursive lock successful.
614 return true;
615 }
616
617 // Failed to fast lock.
618 return false;
619 } else if (LockingMode == LM_LEGACY) {
620 markWord mark = obj->mark();
621 if (mark.is_neutral()) {
622 // Anticipate successful CAS -- the ST of the displaced mark must
623 // be visible <= the ST performed by the CAS.
624 lock->set_displaced_header(mark);
625 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
626 return true;
627 }
628 } else if (mark.has_locker() &&
629 locking_thread->is_lock_owned((address) mark.locker())) {
630 assert(lock != mark.locker(), "must not re-lock the same lock");
631 assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
632 lock->set_displaced_header(markWord::from_pointer(nullptr));
633 return true;
634 }
635
636 // The object header will never be displaced to this lock,
637 // so it does not matter what the value is, except that it
638 // must be non-zero to avoid looking like a re-entrant lock,
639 // and must not look locked either.
640 lock->set_displaced_header(markWord::unused_mark());
641
642 // Failed to fast lock.
643 return false;
644 }
645 } else if (VerifyHeavyMonitors) {
646 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
647 }
648
649 return false;
650 }
651
652 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
653 current->dec_held_monitor_count();
654
655 if (!useHeavyMonitors()) {
656 markWord mark = object->mark();
657 if (LockingMode == LM_LIGHTWEIGHT) {
658 // Fast-locking does not use the 'lock' argument.
659 LockStack& lock_stack = current->lock_stack();
660 if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
661 // Recursively unlocked.
662 return;
663 }
664
665 if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
666 // This lock is recursive but is not at the top of the lock stack so we're
667 // doing an unbalanced exit. We have to fall thru to inflation below and
668 // let ObjectMonitor::exit() do the unlock.
669 } else {
670 while (mark.is_fast_locked()) {
671 // Retry until a lock state change has been observed: cas_set_mark() may collide with modifications of the non-lock bits.
672 const markWord unlocked_mark = mark.set_unlocked();
673 const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
674 if (old_mark == mark) {
675 size_t recursions = lock_stack.remove(object) - 1;
676 assert(recursions == 0, "must not be recursive here");
677 return;
678 }
679 mark = old_mark;
680 }
681 }
682 } else if (LockingMode == LM_LEGACY) {
683 markWord dhw = lock->displaced_header();
684 if (dhw.value() == 0) {
685 // If the displaced header is null, then this exit matches up with
686 // a recursive enter. No real work to do here except for diagnostics.
687 #ifndef PRODUCT
688 if (mark != markWord::INFLATING()) {
689 // Only do diagnostics if we are not racing an inflation. Simply
690 // exiting a recursive enter of a Java Monitor that is being
691 // inflated is safe; see the has_monitor() comment below.
692 assert(!mark.is_neutral(), "invariant");
693 assert(!mark.has_locker() ||
694 current->is_lock_owned((address)mark.locker()), "invariant");
695 if (mark.has_monitor()) {
696 // The BasicLock's displaced_header is marked as a recursive
697 // enter and we have an inflated Java Monitor (ObjectMonitor).
698 // This is a special case where the Java Monitor was inflated
699 // after this thread entered the stack-lock recursively. When a
700 // Java Monitor is inflated, we cannot safely walk the Java
710 return;
711 }
712
713 if (mark == markWord::from_pointer(lock)) {
714 // If the object is stack-locked by the current thread, try to
715 // swing the displaced header from the BasicLock back to the mark.
716 assert(dhw.is_neutral(), "invariant");
717 if (object->cas_set_mark(dhw, mark) == mark) {
718 return;
719 }
720 }
721 }
722 } else if (VerifyHeavyMonitors) {
723 guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
724 }
725
726 // We have to take the slow-path of possible inflation and then exit.
727 // The ObjectMonitor* can't be async deflated until ownership is
728 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
729 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
730 assert(!monitor->is_owner_anonymous(), "must not be");
731 monitor->exit(current);
732 }
733
734 // -----------------------------------------------------------------------------
735 // JNI locks on java objects
736 // NOTE: we must use a heavyweight monitor to handle JNI monitor enter
737 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
738 if (obj->klass()->is_value_based()) {
739 handle_sync_on_value_based_class(obj, current);
740 }
741
742 // the current locking is from JNI instead of Java code
743 current->set_current_pending_monitor_is_from_java(false);
744 // An async deflation can race after the inflate() call and before
745 // enter() can make the ObjectMonitor busy. enter() returns false if
746 // we have lost the race to async deflation and we simply try again.
747 while (true) {
748 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
749 if (monitor->enter(current)) {
750 current->inc_held_monitor_count(1, true);
978 value = 1; // for sensitivity testing
979 } else if (hashCode == 3) {
980 value = ++GVars.hc_sequence;
981 } else if (hashCode == 4) {
982 value = cast_from_oop<intptr_t>(obj);
983 } else {
984 // Marsaglia's xor-shift scheme with thread-specific state
985 // This is probably the best overall implementation -- we'll
986 // likely make this the default in future releases.
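// Marsaglia's xor128: t = x ^ (x << 11); w = (w ^ (w >> 19)) ^ (t ^ (t >> 8)),
// with the x/y/z/w state kept per-thread, giving a period of 2^128 - 1 without
// any synchronization.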
987 unsigned t = current->_hashStateX;
988 t ^= (t << 11);
989 current->_hashStateX = current->_hashStateY;
990 current->_hashStateY = current->_hashStateZ;
991 current->_hashStateZ = current->_hashStateW;
992 unsigned v = current->_hashStateW;
993 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
994 current->_hashStateW = v;
995 value = v;
996 }
997
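// Truncate to the mark word's hash field (narrower when UseCompactObjectHeaders
// is on). Zero means markWord::no_hash ("no hash installed"), so a zero result
// is remapped to an arbitrary non-zero value.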
998 value &= UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask;
999 if (value == 0) value = 0xBAD;
1000 assert(value != markWord::no_hash, "invariant");
1001 return value;
1002 }
1003
1004 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
1005
1006 while (true) {
1007 ObjectMonitor* monitor = nullptr;
1008 markWord temp, test;
1009 intptr_t hash;
1010 markWord mark = read_stable_mark(obj);
1011 if (VerifyHeavyMonitors) {
1012 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1013 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1014 }
1015 if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1016 hash = mark.hash();
1017 if (hash != 0) { // if it has a hash, just return it
1018 return hash;
1373 ObjectSynchronizer::InflateCause cause) {
1374 assert(event != nullptr, "invariant");
1375 event->set_monitorClass(obj->klass());
1376 event->set_address((uintptr_t)(void*)obj);
1377 event->set_cause((u1)cause);
1378 event->commit();
1379 }
1380
1381 // Fast path code shared by multiple functions
1382 void ObjectSynchronizer::inflate_helper(oop obj) {
1383 markWord mark = obj->mark_acquire();
1384 if (mark.has_monitor()) {
1385 ObjectMonitor* monitor = mark.monitor();
1386 markWord dmw = monitor->header();
1387 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1388 return;
1389 }
1390 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1391 }
1392
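// Two fronts for inflate_impl(): inflate() is called by the current thread and
// only passes it along under LM_LIGHTWEIGHT (the only mode that needs the
// inflating thread's lock stack), while inflate_for() inflates on behalf of a
// JavaThread that is either the current thread or suspended (see enter_for()).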
1393 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1394 assert(current == Thread::current(), "must be");
1395 if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1396 return inflate_impl(JavaThread::cast(current), obj, cause);
1397 }
1398 return inflate_impl(nullptr, obj, cause);
1399 }
1400
1401 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1402 assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1403 return inflate_impl(thread, obj, cause);
1404 }
1405
1406 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1407 // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
1408 // that the inflating_thread == Thread::current() or is suspended throughout the call by
1409 // some other mechanism.
1410 // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
1411 // non-JavaThread (as may still be the case from FastHashCode). However, it is
1412 // only important for the correctness of the LM_LIGHTWEIGHT algorithm that the
1413 // thread is set when called from ObjectSynchronizer::enter from the owning
1414 // thread, ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1415 EventJavaMonitorInflate event;
1416
1417 for (;;) {
1418 const markWord mark = object->mark_acquire();
1419
1420 // The mark can be in one of the following states:
1421 // * inflated - Just return if using stack-locking.
1422 // If using fast-locking and the ObjectMonitor owner
1423 // is anonymous and the inflating_thread owns the
1424 // object lock, then we make the inflating_thread
1425 // the ObjectMonitor owner and remove the lock from
1426 // the inflating_thread's lock stack.
1427 // * fast-locked - Coerce it to inflated from fast-locked.
1428 // * stack-locked - Coerce it to inflated from stack-locked.
1429 // * INFLATING - Busy wait for conversion from stack-locked to
1430 // inflated.
1431 // * neutral - Aggressively inflate the object.
1432
1433 // CASE: inflated
1434 if (mark.has_monitor()) {
1435 ObjectMonitor* inf = mark.monitor();
1436 markWord dmw = inf->header();
1437 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1438 if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
1439 inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
1440 inf->set_owner_from_anonymous(inflating_thread);
1441 size_t removed = inflating_thread->lock_stack().remove(object);
1442 inf->set_recursions(removed - 1);
1443 }
1444 return inf;
1445 }
1446
1447 if (LockingMode != LM_LIGHTWEIGHT) {
1448 // New lightweight locking does not use INFLATING.
1449 // CASE: inflation in progress - inflating over a stack-lock.
1450 // Some other thread is converting from stack-locked to inflated.
1451 // Only that thread can complete inflation -- other threads must wait.
1452 // The INFLATING value is transient.
1453 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1454 // We could always eliminate polling by parking the thread on some auxiliary list.
1455 if (mark == markWord::INFLATING()) {
1456 read_stable_mark(object);
1457 continue;
1458 }
1459 }
1460
1461 // CASE: fast-locked
1462 // Could be fast-locked either by the inflating_thread or by some other thread.
1463 //
1464 // Note that we allocate the ObjectMonitor speculatively, _before_
1465 // attempting to set the object's mark to the new ObjectMonitor. If
1466 // the inflating_thread owns the monitor, then we set the ObjectMonitor's
1467 // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
1468 // to anonymous. If we lose the race to set the object's mark to the
1469 // new ObjectMonitor, then we just delete it and loop around again.
1470 //
1471 LogStreamHandle(Trace, monitorinflation) lsh;
1472 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1473 ObjectMonitor* monitor = new ObjectMonitor(object);
1474 monitor->set_header(mark.set_unlocked());
1475 bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
1476 if (own) {
1477 // Owned by inflating_thread.
1478 monitor->set_owner_from(nullptr, inflating_thread);
1479 } else {
1480 // Owned by somebody else.
1481 monitor->set_owner_anonymous();
1482 }
1483 markWord monitor_mark = markWord::encode(monitor);
1484 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
1485 if (old_mark == mark) {
1486 // Success! Return inflated monitor.
1487 if (own) {
1488 size_t removed = inflating_thread->lock_stack().remove(object);
1489 monitor->set_recursions(removed - 1);
1490 }
1491 // Once the ObjectMonitor is configured and object is associated
1492 // with the ObjectMonitor, it is safe to allow async deflation:
1493 _in_use_list.add(monitor);
1494
1495 // Hopefully the performance counters are allocated on distinct
1496 // cache lines to avoid false sharing on MP systems ...
1497 OM_PERFDATA_OP(Inflations, inc());
1498 if (log_is_enabled(Trace, monitorinflation)) {
1499 ResourceMark rm;
1500 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1501 INTPTR_FORMAT ", type='%s'", p2i(object),
1502 object->mark().value(), object->klass()->external_name());
1503 }
1504 if (event.should_commit()) {
1505 post_monitor_inflate_event(&event, object, cause);
1506 }
1507 return monitor;
1508 } else {
1509 delete monitor;
1510 continue; // Interference -- just retry
1511 }
1512 }
1513
1514 // CASE: stack-locked
1515 // Could be stack-locked either by current or by some other thread.
1516 //
1517 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1518 // to install INFLATING into the mark word. We originally installed INFLATING,
1519 // allocated the ObjectMonitor, and then finally STed the address of the
1578 // Note that a thread can inflate an object
1579 // that it has stack-locked -- as might happen in wait() -- directly
1580 // with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
1581 m->set_owner_from(nullptr, mark.locker());
1582 // TODO-FIXME: assert BasicLock->dhw != 0.
1583
1584 // Must preserve store ordering. The monitor state must
1585 // be stable at the time of publishing the monitor address.
1586 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1587 // Release semantics so that above set_object() is seen first.
1588 object->release_set_mark(markWord::encode(m));
1589
1590 // Once ObjectMonitor is configured and the object is associated
1591 // with the ObjectMonitor, it is safe to allow async deflation:
1592 _in_use_list.add(m);
1593
1594 // Hopefully the performance counters are allocated on distinct cache lines
1595 // to avoid false sharing on MP systems ...
1596 OM_PERFDATA_OP(Inflations, inc());
1597 if (log_is_enabled(Trace, monitorinflation)) {
1598 ResourceMark rm;
1599 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1600 INTPTR_FORMAT ", type='%s'", p2i(object),
1601 object->mark().value(), object->klass()->external_name());
1602 }
1603 if (event.should_commit()) {
1604 post_monitor_inflate_event(&event, object, cause);
1605 }
1606 return m;
1607 }
1608
1609 // CASE: neutral
1610 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1611 // If we know we're inflating for entry it's better to inflate by swinging a
1612 // pre-locked ObjectMonitor pointer into the object header. A successful
1613 // CAS inflates the object *and* confers ownership to the inflating thread.
1614 // In the current implementation we use a 2-step mechanism where we CAS()
1615 // to inflate and then CAS() again to try to swing _owner from null to current.
1616 // An inflateTry() method that we could call from enter() would be useful.
1617
1618 // Catch if the object's header is not neutral (not locked and
1622 // prepare m for installation - set monitor to initial state
1623 m->set_header(mark);
1624
1625 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1626 delete m;
1627 m = nullptr;
1628 continue;
1629 // interference - the markword changed - just retry.
1630 // The state-transitions are one-way, so there's no chance of
1631 // live-lock -- "Inflated" is an absorbing state.
1632 }
1633
1634 // Once the ObjectMonitor is configured and object is associated
1635 // with the ObjectMonitor, it is safe to allow async deflation:
1636 _in_use_list.add(m);
1637
1638 // Hopefully the performance counters are allocated on distinct
1639 // cache lines to avoid false sharing on MP systems ...
1640 OM_PERFDATA_OP(Inflations, inc());
1641 if (log_is_enabled(Trace, monitorinflation)) {
1642 ResourceMark rm;
1643 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1644 INTPTR_FORMAT ", type='%s'", p2i(object),
1645 object->mark().value(), object->klass()->external_name());
1646 }
1647 if (event.should_commit()) {
1648 post_monitor_inflate_event(&event, object, cause);
1649 }
1650 return m;
1651 }
1652 }
1653
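// Used by long-running monitor operations such as the deflation loop: a no-op
// unless a safepoint/handshake has been requested, otherwise the operation's
// name and progress count are logged before the request is honored.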
1654 void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1655 const char* cnt_name, size_t cnt,
1656 LogStream* ls, elapsedTimer* timer_p) {
1657 if (!SafepointMechanism::should_process(current)) {
1658 return;
1659 }
1660
1661 // A safepoint/handshake has started.
1662 if (ls != nullptr) {