1 /*
2 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "jfr/jfrEvents.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/handshake.hpp"
41 #include "runtime/interfaceSupport.inline.hpp"
42 #include "runtime/javaThread.hpp"
43 #include "runtime/lockStack.inline.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/objectMonitor.hpp"
46 #include "runtime/objectMonitor.inline.hpp"
47 #include "runtime/os.inline.hpp"
48 #include "runtime/osThread.hpp"
49 #include "runtime/perfData.hpp"
50 #include "runtime/safepointMechanism.inline.hpp"
51 #include "runtime/safepointVerifiers.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "runtime/synchronizer.hpp"
55 #include "runtime/threads.hpp"
56 #include "runtime/timer.hpp"
57 #include "runtime/trimNativeHeap.hpp"
58 #include "runtime/vframe.hpp"
59 #include "runtime/vmThread.hpp"
60 #include "utilities/align.hpp"
61 #include "utilities/dtrace.hpp"
62 #include "utilities/events.hpp"
63 #include "utilities/linkedlist.hpp"
64 #include "utilities/preserveException.hpp"
65
// Value type stored in the ObjectMonitorsHashtable: a C-heap (mtThread)
// allocated linked list of ObjectMonitor*. Allocation failures return
// null (AllocFailStrategy::RETURN_NULL) instead of aborting the VM, so
// callers must check the result of operations that allocate nodes.
class ObjectMonitorsHashtable::PtrList :
  public LinkedListImpl<ObjectMonitor*,
                        AnyObj::C_HEAP, mtThread,
                        AllocFailStrategy::RETURN_NULL> {};
70
// Closure handed to _ptrs->unlink() by ~ObjectMonitorsHashtable: frees the
// PtrList value of every entry. Returning true presumably tells unlink()
// to also remove the entry from the table -- confirm against the
// hashtable's unlink() contract.
class CleanupObjectMonitorsHashtable: StackObj {
 public:
  bool do_entry(void*& key, ObjectMonitorsHashtable::PtrList*& list) {
    list->clear();  // clear the LinkListNodes
    delete list;    // then delete the LinkedList
    return true;
  }
};
79
80 ObjectMonitorsHashtable::~ObjectMonitorsHashtable() {
81 CleanupObjectMonitorsHashtable cleanup;
82 _ptrs->unlink(&cleanup); // cleanup the LinkedLists
368 return false;
369 }
370
371
372 // The LockNode emitted directly at the synchronization site would have
373 // been too big if it were to have included support for the cases of inflated
374 // recursive enter and exit, so they go here instead.
375 // Note that we can't safely call AsyncPrintJavaStack() from within
376 // quick_enter() as our thread state remains _in_Java.
377
// Returns true only if the lock was acquired on this fast path; a false
// return sends the caller to the slow path, which also handles the
// null-receiver (NPE) and value-based-class cases.
bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
                                     BasicLock * lock) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;  // this path must not reach a safepoint (state stays _in_Java)
  if (obj == nullptr) return false;       // Need to throw NPE

  // Value-based classes must take the slow path so that
  // handle_sync_on_value_based_class() can run its diagnostics (see enter()).
  if (obj->klass()->is_value_based()) {
    return false;
  }

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      return false;
    }
    JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == current) {
      m->_recursions++;
      current->inc_held_monitor_count();
      lock->set_displaced_header(markWord::unused_mark());
    }

    // Monitor currently unowned: a single CAS on the owner field acquires it.
    if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
      assert(m->_recursions == 0, "invariant");
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}
439
440 // Handle notifications when synchronizing on value based classes
441 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
442 frame last_frame = current->last_frame();
443 bool bcp_was_adjusted = false;
444 // Don't decrement bcp if it points to the frame's first instruction. This happens when
445 // handle_sync_on_value_based_class() is called because of a synchronized method. There
446 // is no actual monitorenter instruction in the byte code in this case.
447 if (last_frame.is_interpreted_frame() &&
448 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
449 // adjust bcp to point back to monitorenter so that we print the correct line numbers
450 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
451 bcp_was_adjusted = true;
452 }
453
454 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
455 ResourceMark rm(current);
456 stringStream ss;
457 current->print_active_stack_on(&ss);
458 char* base = (char*)strstr(ss.base(), "at");
459 char* newline = (char*)strchr(ss.base(), '\n');
460 if (newline != nullptr) {
461 *newline = '\0';
462 }
463 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
464 } else {
465 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
466 ResourceMark rm(current);
467 Log(valuebasedclasses) vblog;
468
469 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
470 if (current->has_last_Java_frame()) {
471 LogStream info_stream(vblog.info());
472 current->print_active_stack_on(&info_stream);
473 } else {
474 vblog.info("Cannot find the last Java frame");
475 }
476
477 EventSyncOnValueBasedClass event;
478 if (event.should_commit()) {
479 event.set_valueBasedClass(obj->klass());
480 event.commit();
481 }
482 }
483
484 if (bcp_was_adjusted) {
485 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
486 }
487 }
488
// True when all monitor operations must take the heavyweight (inflated
// ObjectMonitor) path. On the platforms listed below this is driven by
// LockingMode == LM_MONITOR; on all other platforms it is always false.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  const bool heavy_only = (LockingMode == LM_MONITOR);
  return heavy_only;
#else
  return false;
#endif
}
496
497 // -----------------------------------------------------------------------------
498 // Monitor Enter/Exit
499 // The interpreter and compiler assembly code tries to lock using the fast path
500 // of this algorithm. Make sure to update that code if the following function is
501 // changed. The implementation is extremely sensitive to race condition. Be careful.
502
// Slow-path monitor enter. Tries the fast-lock (LM_LIGHTWEIGHT) or
// stack-lock (LM_LEGACY) protocol first; any path that cannot acquire the
// lock that way falls through to inflate() + ObjectMonitor::enter().
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  current->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = current->lock_stack();
      if (lock_stack.can_push()) {
        markWord mark = obj()->mark_acquire();
        if (mark.is_neutral()) {
          assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
          // Try to swing into 'fast-locked' state.
          markWord locked_mark = mark.set_fast_locked();
          markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
          if (old_mark == mark) {
            // Successfully fast-locked, push object to lock-stack and return.
            lock_stack.push(obj());
            return;
          }
        }
      }
      // All other paths fall-through to inflate-enter.
    } else if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_neutral()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return;
        }
        // Fall through to inflate() ...
      } else if (mark.has_locker() &&
                 current->is_lock_owned((address) mark.locker())) {
        // Recursive stack-lock: a null displaced header marks this
        // BasicLock as a recursive enter (exit() treats dhw == 0 that way).
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(current)) {
      return;
    }
  }
}
567
// Slow-path monitor exit, the counterpart of enter() above. Undoes a
// fast-lock (LM_LIGHTWEIGHT) or stack-lock (LM_LEGACY) when possible;
// otherwise inflates and delegates to ObjectMonitor::exit().
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  current->dec_held_monitor_count();

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      if (mark.is_fast_locked()) {
        markWord unlocked_mark = mark.set_unlocked();
        markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
        if (old_mark != mark) {
          // Another thread won the CAS, it must have inflated the monitor.
          // It can only have installed an anonymously locked monitor at this point.
          // Fetch that monitor, set owner correctly to this thread, and
          // exit it (allowing waiting threads to enter).
          assert(old_mark.has_monitor(), "must have monitor");
          ObjectMonitor* monitor = old_mark.monitor();
          assert(monitor->is_owner_anonymous(), "must be anonymous owner");
          monitor->set_owner_from_anonymous(current);
          monitor->exit(current);
        }
        LockStack& lock_stack = current->lock_stack();
        lock_stack.remove(object);
        return;
      }
    } else if (LockingMode == LM_LEGACY) {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_neutral(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          return;
        }
      }
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
    // It must be owned by us. Pop lock object from lock stack.
    LockStack& lock_stack = current->lock_stack();
    oop popped = lock_stack.pop();
    assert(popped == object, "must be owned by this thread");
    monitor->set_owner_from_anonymous(current);
  }
  monitor->exit(current);
}
650
651 // -----------------------------------------------------------------------------
652 // JNI locks on java objects
653 // NOTE: must use heavy weight monitor to handle jni monitor enter
654 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
655 if (obj->klass()->is_value_based()) {
656 handle_sync_on_value_based_class(obj, current);
657 }
658
659 // the current locking is from JNI instead of Java code
660 current->set_current_pending_monitor_is_from_java(false);
661 // An async deflation can race after the inflate() call and before
662 // enter() can make the ObjectMonitor busy. enter() returns false if
663 // we have lost the race to async deflation and we simply try again.
664 while (true) {
665 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
666 if (monitor->enter(current)) {
667 current->inc_held_monitor_count(1, true);
885 value = 1; // for sensitivity testing
886 } else if (hashCode == 3) {
887 value = ++GVars.hc_sequence;
888 } else if (hashCode == 4) {
889 value = cast_from_oop<intptr_t>(obj);
890 } else {
891 // Marsaglia's xor-shift scheme with thread-specific state
892 // This is probably the best overall implementation -- we'll
893 // likely make this the default in future releases.
894 unsigned t = current->_hashStateX;
895 t ^= (t << 11);
896 current->_hashStateX = current->_hashStateY;
897 current->_hashStateY = current->_hashStateZ;
898 current->_hashStateZ = current->_hashStateW;
899 unsigned v = current->_hashStateW;
900 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
901 current->_hashStateW = v;
902 value = v;
903 }
904
905 value &= markWord::hash_mask;
906 if (value == 0) value = 0xBAD;
907 assert(value != markWord::no_hash, "invariant");
908 return value;
909 }
910
911 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
912 // calculations as part of JVM/TI tagging.
913 static bool is_lock_owned(Thread* thread, oop obj) {
914 assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
915 return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
916 }
917
918 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
919
920 while (true) {
921 ObjectMonitor* monitor = nullptr;
922 markWord temp, test;
923 intptr_t hash;
924 markWord mark = read_stable_mark(obj);
925 if (VerifyHeavyMonitors) {
926 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
927 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
928 }
929 if (mark.is_neutral()) { // if this is a normal header
930 hash = mark.hash();
931 if (hash != 0) { // if it has a hash, just return it
932 return hash;
933 }
934 hash = get_next_hash(current, obj); // get a new hash
935 temp = mark.copy_set_hash(hash); // merge the hash into header
936 // try to install the hash
937 test = obj->cas_set_mark(temp, mark);
938 if (test == mark) { // if the hash was installed, return it
939 return hash;
940 }
941 // Failed to install the hash. It could be that another thread
942 // installed the hash just before our attempt or inflation has
943 // occurred or... so we fall thru to inflate the monitor for
944 // stability and then install the hash.
945 } else if (mark.has_monitor()) {
946 monitor = mark.monitor();
947 temp = monitor->header();
948 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
949 hash = temp.hash();
950 if (hash != 0) {
951 // It has a hash.
952
953 // Separate load of dmw/header above from the loads in
954 // is_being_async_deflated().
955
956 // dmw/header and _contentions may get written by different threads.
957 // Make sure to observe them in the same order when having several observers.
958 OrderAccess::loadload_for_IRIW();
959
960 if (monitor->is_being_async_deflated()) {
961 // But we can't safely use the hash if we detect that async
962 // deflation has occurred. So we attempt to restore the
963 // header/dmw to the object's header so that we only retry
964 // once if the deflater thread happens to be slow.
965 monitor->install_displaced_markword_in_object(obj);
966 continue;
967 }
968 return hash;
969 }
970 // Fall thru so we only have one place that installs the hash in
971 // the ObjectMonitor.
972 } else if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked() && is_lock_owned(current, obj)) {
973 // This is a fast-lock owned by the calling thread so use the
974 // markWord from the object.
975 hash = mark.hash();
976 if (hash != 0) { // if it has a hash, just return it
977 return hash;
978 }
979 } else if (LockingMode == LM_LEGACY && mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
980 // This is a stack-lock owned by the calling thread so fetch the
981 // displaced markWord from the BasicLock on the stack.
982 temp = mark.displaced_mark_helper();
983 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
984 hash = temp.hash();
985 if (hash != 0) { // if it has a hash, just return it
986 return hash;
987 }
988 // WARNING:
989 // The displaced header in the BasicLock on a thread's stack
990 // is strictly immutable. It CANNOT be changed in ANY cases.
991 // So we have to inflate the stack-lock into an ObjectMonitor
992 // even if the current thread owns the lock. The BasicLock on
993 // a thread's stack can be asynchronously read by other threads
994 // during an inflate() call so any change to that stack memory
995 // may not propagate to other threads correctly.
996 }
997
998 // Inflate the monitor to set the hash.
1295 ObjectSynchronizer::InflateCause cause) {
1296 assert(event != nullptr, "invariant");
1297 event->set_monitorClass(obj->klass());
1298 event->set_address((uintptr_t)(void*)obj);
1299 event->set_cause((u1)cause);
1300 event->commit();
1301 }
1302
1303 // Fast path code shared by multiple functions
1304 void ObjectSynchronizer::inflate_helper(oop obj) {
1305 markWord mark = obj->mark_acquire();
1306 if (mark.has_monitor()) {
1307 ObjectMonitor* monitor = mark.monitor();
1308 markWord dmw = monitor->header();
1309 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1310 return;
1311 }
1312 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1313 }
1314
// Inflate the lock on 'object' into a heavyweight ObjectMonitor and return
// it. Loops until it either finds an already-installed monitor or wins the
// CAS race to publish a freshly allocated one; losers delete their
// speculative monitor and retry. 'cause' is recorded in the JFR
// JavaMonitorInflate event and diagnostics.
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
                                           const InflateCause cause) {
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // * inflated     - Just return if using stack-locking.
    //                  If using fast-locking and the ObjectMonitor owner
    //                  is anonymous and the current thread owns the
    //                  object lock, then we make the current thread the
    //                  ObjectMonitor owner and remove the lock from the
    //                  current thread's lock stack.
    // * fast-locked  - Coerce it to inflated from fast-locked.
    // * stack-locked - Coerce it to inflated from stack-locked.
    // * INFLATING    - Busy wait for conversion from stack-locked to
    //                  inflated.
    // * neutral      - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
        // The monitor was inflated by another thread while we fast-held the
        // lock: claim ownership and drop the now-redundant lock-stack entry.
        inf->set_owner_from_anonymous(current);
        JavaThread::cast(current)->lock_stack().remove(object);
      }
      return inf;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // New lightweight locking does not use INFLATING.
      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markWord::INFLATING()) {
        read_stable_mark(object);
        continue;
      }
    }

    // CASE: fast-locked
    // Could be fast-locked either by current or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // this thread owns the monitor, then we set the ObjectMonitor's
    // owner to this thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    LogStreamHandle(Trace, monitorinflation) lsh;
    if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = is_lock_owned(current, object);
      if (own) {
        // Owned by us.
        monitor->set_owner_from(nullptr, current);
      } else {
        // Owned by somebody else.
        monitor->set_owner_anonymous();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          JavaThread::cast(current)->lock_stack().remove(object);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        _in_use_list.add(monitor);

        // Hopefully the performance counters are allocated on distinct
        // cache lines to avoid false sharing on MP systems ...
        OM_PERFDATA_OP(Inflations, inc());
        if (log_is_enabled(Trace, monitorinflation)) {
          ResourceMark rm(current);
          lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                       INTPTR_FORMAT ", type='%s'", p2i(object),
                       object->mark().value(), object->klass()->external_name());
        }
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: stack-locked
    // Could be stack-locked either by current or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the ObjectMonitor, and then finally STed the address of the
    // Note that a thread can inflate an object
    // that it has stack-locked -- as might happen in wait() -- directly
    // with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
    m->set_owner_from(nullptr, mark.locker());
    // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      // Release semantics so that above set_object() is seen first.
      object->release_set_mark(markWord::encode(m));

      // Once ObjectMonitor is configured and the object is associated
      // with the ObjectMonitor, it is safe to allow async deflation:
      _in_use_list.add(m);

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(current);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    // Catch if the object's header is not neutral (not locked and
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    _in_use_list.add(m);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(current);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}
1552
1553 void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1554 const char* cnt_name, size_t cnt,
1555 LogStream* ls, elapsedTimer* timer_p) {
1556 if (!SafepointMechanism::should_process(current)) {
1557 return;
1558 }
1559
1560 // A safepoint/handshake has started.
1561 if (ls != nullptr) {
|
1 /*
2 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "jfr/jfrEvents.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/globals.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/handshake.hpp"
42 #include "runtime/interfaceSupport.inline.hpp"
43 #include "runtime/javaThread.hpp"
44 #include "runtime/lockStack.inline.hpp"
45 #include "runtime/mutexLocker.hpp"
46 #include "runtime/objectMonitor.hpp"
47 #include "runtime/objectMonitor.inline.hpp"
48 #include "runtime/os.inline.hpp"
49 #include "runtime/osThread.hpp"
50 #include "runtime/perfData.hpp"
51 #include "runtime/safepointMechanism.inline.hpp"
52 #include "runtime/safepointVerifiers.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "runtime/stubRoutines.hpp"
55 #include "runtime/synchronizer.hpp"
56 #include "runtime/threads.hpp"
57 #include "runtime/timer.hpp"
58 #include "runtime/trimNativeHeap.hpp"
59 #include "runtime/vframe.hpp"
60 #include "runtime/vmThread.hpp"
61 #include "utilities/align.hpp"
62 #include "utilities/dtrace.hpp"
63 #include "utilities/events.hpp"
64 #include "utilities/globalDefinitions.hpp"
65 #include "utilities/linkedlist.hpp"
66 #include "utilities/preserveException.hpp"
67
// Value type stored in the ObjectMonitorsHashtable: a C-heap (mtThread)
// allocated linked list of ObjectMonitor*. Allocation failures return
// null (AllocFailStrategy::RETURN_NULL) instead of aborting the VM, so
// callers must check the result of operations that allocate nodes.
class ObjectMonitorsHashtable::PtrList :
  public LinkedListImpl<ObjectMonitor*,
                        AnyObj::C_HEAP, mtThread,
                        AllocFailStrategy::RETURN_NULL> {};
72
// Closure handed to _ptrs->unlink() by ~ObjectMonitorsHashtable: frees the
// PtrList value of every entry. Returning true presumably tells unlink()
// to also remove the entry from the table -- confirm against the
// hashtable's unlink() contract.
class CleanupObjectMonitorsHashtable: StackObj {
 public:
  bool do_entry(void*& key, ObjectMonitorsHashtable::PtrList*& list) {
    list->clear();  // clear the LinkListNodes
    delete list;    // then delete the LinkedList
    return true;
  }
};
81
82 ObjectMonitorsHashtable::~ObjectMonitorsHashtable() {
83 CleanupObjectMonitorsHashtable cleanup;
84 _ptrs->unlink(&cleanup); // cleanup the LinkedLists
370 return false;
371 }
372
373
374 // The LockNode emitted directly at the synchronization site would have
375 // been too big if it were to have included support for the cases of inflated
376 // recursive enter and exit, so they go here instead.
377 // Note that we can't safely call AsyncPrintJavaStack() from within
378 // quick_enter() as our thread state remains _in_Java.
379
// Returns true only if the lock was acquired on this fast path; a false
// return sends the caller to the slow path, which also handles the
// null-receiver (NPE) and value-based-class cases.
bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
                                     BasicLock * lock) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;  // this path must not reach a safepoint (state stays _in_Java)
  if (obj == nullptr) return false;       // Need to throw NPE

  // Value-based classes must take the slow path so that
  // handle_sync_on_value_based_class() can run its diagnostics.
  if (obj->klass()->is_value_based()) {
    return false;
  }

  if (LockingMode == LM_LIGHTWEIGHT) {
    LockStack& lock_stack = current->lock_stack();
    if (lock_stack.is_full()) {
      // Always go into runtime if the lock stack is full.
      return false;
    }
    if (lock_stack.try_recursive_enter(obj)) {
      // Recursive lock successful.
      current->inc_held_monitor_count();
      return true;
    }
  }

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    // An async deflation or GC can race us before we manage to make
    // the ObjectMonitor busy by setting the owner below. If we detect
    // that race we just bail out to the slow-path here.
    if (m->object_peek() == nullptr) {
      return false;
    }
    JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == current) {
      m->_recursions++;
      current->inc_held_monitor_count();
      lock->set_displaced_header(markWord::unused_mark());
    }

    // Monitor currently unowned: a single CAS on the owner field acquires it.
    if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
      assert(m->_recursions == 0, "invariant");
      current->inc_held_monitor_count();
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}
454
455 // Handle notifications when synchronizing on value based classes
456 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
457 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
458 frame last_frame = locking_thread->last_frame();
459 bool bcp_was_adjusted = false;
460 // Don't decrement bcp if it points to the frame's first instruction. This happens when
461 // handle_sync_on_value_based_class() is called because of a synchronized method. There
462 // is no actual monitorenter instruction in the byte code in this case.
463 if (last_frame.is_interpreted_frame() &&
464 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
465 // adjust bcp to point back to monitorenter so that we print the correct line numbers
466 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
467 bcp_was_adjusted = true;
468 }
469
470 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
471 ResourceMark rm;
472 stringStream ss;
473 locking_thread->print_active_stack_on(&ss);
474 char* base = (char*)strstr(ss.base(), "at");
475 char* newline = (char*)strchr(ss.base(), '\n');
476 if (newline != nullptr) {
477 *newline = '\0';
478 }
479 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
480 } else {
481 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
482 ResourceMark rm;
483 Log(valuebasedclasses) vblog;
484
485 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
486 if (locking_thread->has_last_Java_frame()) {
487 LogStream info_stream(vblog.info());
488 locking_thread->print_active_stack_on(&info_stream);
489 } else {
490 vblog.info("Cannot find the last Java frame");
491 }
492
493 EventSyncOnValueBasedClass event;
494 if (event.should_commit()) {
495 event.set_valueBasedClass(obj->klass());
496 event.commit();
497 }
498 }
499
500 if (bcp_was_adjusted) {
501 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
502 }
503 }
504
// Returns true when the user forced heavyweight monitors (LM_MONITOR) on a
// platform that supports that mode; on all other platforms this always
// returns false regardless of LockingMode.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390)
  return LockingMode == LM_MONITOR;
#else
  return false;
#endif
}
512
513 // -----------------------------------------------------------------------------
514 // Monitor Enter/Exit
515
516 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
517 // When called with locking_thread != Thread::current() some mechanism must synchronize
518 // the locking_thread with respect to the current thread. Currently only used when
519 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
520 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
521 if (!enter_fast_impl(obj, lock, locking_thread)) {
522 // Inflated ObjectMonitor::enter_for is required
523
524 // An async deflation can race after the inflate_for() call and before
525 // enter_for() can make the ObjectMonitor busy. enter_for() returns false
526 // if we have lost the race to async deflation and we simply try again.
527 while (true) {
528 ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
529 if (monitor->enter_for(locking_thread)) {
530 return;
531 }
532 assert(monitor->is_being_async_deflated(), "must be");
533 }
534 }
535 }
536
537 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
538 assert(current == Thread::current(), "must be");
539 if (!enter_fast_impl(obj, lock, current)) {
540 // Inflated ObjectMonitor::enter is required
541
542 // An async deflation can race after the inflate() call and before
543 // enter() can make the ObjectMonitor busy. enter() returns false if
544 // we have lost the race to async deflation and we simply try again.
545 while (true) {
546 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
547 if (monitor->enter(current)) {
548 return;
549 }
550 }
551 }
552 }
553
554 // The interpreter and compiler assembly code tries to lock using the fast path
555 // of this algorithm. Make sure to update that code if the following function is
556 // changed. The implementation is extremely sensitive to race condition. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, locking_thread);
  }

  // Count is bumped unconditionally; on a false return the caller completes
  // the enter through the inflated slow path (see enter()/enter_for()).
  locking_thread->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.is_full()) {
        // We unconditionally make room on the lock stack by inflating
        // the least recently locked object on the lock stack.

        // About the choice to inflate least recently locked object.
        // First we must chose to inflate a lock, either some lock on
        // the lock-stack or the lock that is currently being entered
        // (which may or may not be on the lock-stack).
        // Second the best lock to inflate is a lock which is entered
        // in a control flow where there are only a very few locks being
        // used, as the costly part of inflated locking is inflation,
        // not locking. But this property is entirely program dependent.
        // Third inflating the lock currently being entered on when it
        // is not present on the lock-stack will result in a still full
        // lock-stack. This creates a scenario where every deeper nested
        // monitorenter must call into the runtime.
        // The rational here is as follows:
        // Because we cannot (currently) figure out the second, and want
        // to avoid the third, we inflate a lock on the lock-stack.
        // The least recently locked lock is chosen as it is the lock
        // with the longest critical section.

        log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
        ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
        assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
               p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
        assert(!lock_stack.is_full(), "must have made room here");
      }

      markWord mark = obj()->mark_acquire();
      while (mark.is_neutral()) {
        // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
        // Try to swing into 'fast-locked' state.
        assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
        const markWord locked_mark = mark.set_fast_locked();
        const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
        if (old_mark == mark) {
          // Successfully fast-locked, push object to lock-stack and return.
          lock_stack.push(obj());
          return true;
        }
        mark = old_mark;
      }

      if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
        // Recursive lock successful.
        return true;
      }

      // Failed to fast lock.
      return false;
    } else if (LockingMode == LM_LEGACY) {
      markWord mark = obj->mark();
      if (mark.is_neutral()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return true;
        }
      } else if (mark.has_locker() &&
                 locking_thread->is_lock_owned((address) mark.locker())) {
        // Recursive stack-lock: a null displaced header marks this
        // BasicLock as a re-entrant enter.
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return true;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());

      // Failed to fast lock.
      return false;
    }
  } else if (VerifyHeavyMonitors) {
    guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
  }

  return false;
}
652
// Exit the monitor for 'object': undo the LM_LIGHTWEIGHT pop/CAS or the
// LM_LEGACY displaced-header restore, and otherwise fall through to
// inflation and ObjectMonitor::exit().
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  current->dec_held_monitor_count();

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = current->lock_stack();
      if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
        // Recursively unlocked.
        return;
      }

      if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
        // This lock is recursive but is not at the top of the lock stack so we're
        // doing an unbalanced exit. We have to fall thru to inflation below and
        // let ObjectMonitor::exit() do the unlock.
      } else {
        while (mark.is_fast_locked()) {
          // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
          const markWord unlocked_mark = mark.set_unlocked();
          const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
          if (old_mark == mark) {
            size_t recursions = lock_stack.remove(object) - 1;
            assert(recursions == 0, "must not be recursive here");
            return;
          }
          mark = old_mark;
        }
      }
    } else if (LockingMode == LM_LEGACY) {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_neutral(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          return;
        }
      }
    } else if (VerifyHeavyMonitors) {
      guarantee((object->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  assert(!monitor->is_owner_anonymous(), "must not be");
  monitor->exit(current);
}
734
735 // -----------------------------------------------------------------------------
736 // JNI locks on java objects
737 // NOTE: must use heavy weight monitor to handle jni monitor enter
738 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
739 if (obj->klass()->is_value_based()) {
740 handle_sync_on_value_based_class(obj, current);
741 }
742
743 // the current locking is from JNI instead of Java code
744 current->set_current_pending_monitor_is_from_java(false);
745 // An async deflation can race after the inflate() call and before
746 // enter() can make the ObjectMonitor busy. enter() returns false if
747 // we have lost the race to async deflation and we simply try again.
748 while (true) {
749 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
750 if (monitor->enter(current)) {
751 current->inc_held_monitor_count(1, true);
969 value = 1; // for sensitivity testing
970 } else if (hashCode == 3) {
971 value = ++GVars.hc_sequence;
972 } else if (hashCode == 4) {
973 value = cast_from_oop<intptr_t>(obj);
974 } else {
975 // Marsaglia's xor-shift scheme with thread-specific state
976 // This is probably the best overall implementation -- we'll
977 // likely make this the default in future releases.
978 unsigned t = current->_hashStateX;
979 t ^= (t << 11);
980 current->_hashStateX = current->_hashStateY;
981 current->_hashStateY = current->_hashStateZ;
982 current->_hashStateZ = current->_hashStateW;
983 unsigned v = current->_hashStateW;
984 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
985 current->_hashStateW = v;
986 value = v;
987 }
988
989 value &= UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask;
990 if (value == 0) value = 0xBAD;
991 assert(value != markWord::no_hash, "invariant");
992 return value;
993 }
994
995 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
996
997 while (true) {
998 ObjectMonitor* monitor = nullptr;
999 markWord temp, test;
1000 intptr_t hash;
1001 markWord mark = read_stable_mark(obj);
1002 if (VerifyHeavyMonitors) {
1003 assert(LockingMode == LM_MONITOR, "+VerifyHeavyMonitors requires LockingMode == 0 (LM_MONITOR)");
1004 guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
1005 }
1006 if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1007 hash = mark.hash();
1008 if (hash != 0) { // if it has a hash, just return it
1009 return hash;
1010 }
1011 hash = get_next_hash(current, obj); // get a new hash
1012 temp = mark.copy_set_hash(hash); // merge the hash into header
1013 // try to install the hash
1014 test = obj->cas_set_mark(temp, mark);
1015 if (test == mark) { // if the hash was installed, return it
1016 return hash;
1017 }
1018 if (LockingMode == LM_LIGHTWEIGHT) {
1019 // CAS failed, retry
1020 continue;
1021 }
1022 // Failed to install the hash. It could be that another thread
1023 // installed the hash just before our attempt or inflation has
1024 // occurred or... so we fall thru to inflate the monitor for
1025 // stability and then install the hash.
1026 } else if (mark.has_monitor()) {
1027 monitor = mark.monitor();
1028 temp = monitor->header();
1029 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1030 hash = temp.hash();
1031 if (hash != 0) {
1032 // It has a hash.
1033
1034 // Separate load of dmw/header above from the loads in
1035 // is_being_async_deflated().
1036
1037 // dmw/header and _contentions may get written by different threads.
1038 // Make sure to observe them in the same order when having several observers.
1039 OrderAccess::loadload_for_IRIW();
1040
1041 if (monitor->is_being_async_deflated()) {
1042 // But we can't safely use the hash if we detect that async
1043 // deflation has occurred. So we attempt to restore the
1044 // header/dmw to the object's header so that we only retry
1045 // once if the deflater thread happens to be slow.
1046 monitor->install_displaced_markword_in_object(obj);
1047 continue;
1048 }
1049 return hash;
1050 }
1051 // Fall thru so we only have one place that installs the hash in
1052 // the ObjectMonitor.
1053 } else if (LockingMode == LM_LEGACY && mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
1054 // This is a stack-lock owned by the calling thread so fetch the
1055 // displaced markWord from the BasicLock on the stack.
1056 temp = mark.displaced_mark_helper();
1057 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1058 hash = temp.hash();
1059 if (hash != 0) { // if it has a hash, just return it
1060 return hash;
1061 }
1062 // WARNING:
1063 // The displaced header in the BasicLock on a thread's stack
1064 // is strictly immutable. It CANNOT be changed in ANY cases.
1065 // So we have to inflate the stack-lock into an ObjectMonitor
1066 // even if the current thread owns the lock. The BasicLock on
1067 // a thread's stack can be asynchronously read by other threads
1068 // during an inflate() call so any change to that stack memory
1069 // may not propagate to other threads correctly.
1070 }
1071
1072 // Inflate the monitor to set the hash.
1369 ObjectSynchronizer::InflateCause cause) {
1370 assert(event != nullptr, "invariant");
1371 event->set_monitorClass(obj->klass());
1372 event->set_address((uintptr_t)(void*)obj);
1373 event->set_cause((u1)cause);
1374 event->commit();
1375 }
1376
1377 // Fast path code shared by multiple functions
1378 void ObjectSynchronizer::inflate_helper(oop obj) {
1379 markWord mark = obj->mark_acquire();
1380 if (mark.has_monitor()) {
1381 ObjectMonitor* monitor = mark.monitor();
1382 markWord dmw = monitor->header();
1383 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1384 return;
1385 }
1386 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1387 }
1388
1389 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1390 assert(current == Thread::current(), "must be");
1391 if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1392 return inflate_impl(JavaThread::cast(current), obj, cause);
1393 }
1394 return inflate_impl(nullptr, obj, cause);
1395 }
1396
// Inflate obj's monitor on behalf of 'thread', which may be a thread other
// than the caller (in that case it must be kept suspended, per the assert).
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
  assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
  return inflate_impl(thread, obj, cause);
}
1401
ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
  // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
  // that the inflating_thread == Thread::current() or is suspended throughout the call by
  // some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
  // JavaThread. (As may still be the case from FastHashCode). However it is only
  // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  // JFR event; committed below each time an inflation actually succeeds.
  EventJavaMonitorInflate event;

  // Loop until we either observe an installed ObjectMonitor or win the race
  // to install one ourselves; every lost race just retries.
  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the inflating_thread owns the
    //                   object lock, then we make the inflating_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the inflating_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  stack-locked - Coerce it to inflated from stack-locked.
    // *  INFLATING    - Busy wait for conversion from stack-locked to
    //                   inflated.
    // *  neutral      - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
          inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(inflating_thread);
        size_t removed = inflating_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    if (LockingMode != LM_LIGHTWEIGHT) {
      // New lightweight locking does not use INFLATING.
      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markWord::INFLATING()) {
        read_stable_mark(object);
        continue;
      }
    }

    // CASE: fast-locked
    // Could be fast-locked either by the inflating_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the inflating_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    LogStreamHandle(Trace, monitorinflation) lsh;
    if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
      if (own) {
        // Owned by inflating_thread.
        monitor->set_owner_from(nullptr, inflating_thread);
      } else {
        // Owned by somebody else.
        monitor->set_owner_anonymous();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = inflating_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        _in_use_list.add(monitor);

        // Hopefully the performance counters are allocated on distinct
        // cache lines to avoid false sharing on MP systems ...
        OM_PERFDATA_OP(Inflations, inc());
        if (log_is_enabled(Trace, monitorinflation)) {
          ResourceMark rm;
          lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                       INTPTR_FORMAT ", type='%s'", p2i(object),
                       object->mark().value(), object->klass()->external_name());
        }
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: stack-locked
    // Could be stack-locked either by current or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the ObjectMonitor, and then finally STed the address of the
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-nullptr .... ST idiom.
      m->set_owner_from(nullptr, mark.locker());
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      // Release semantics so that above set_object() is seen first.
      object->release_set_mark(markWord::encode(m));

      // Once ObjectMonitor is configured and the object is associated
      // with the ObjectMonitor, it is safe to allow async deflation:
      _in_use_list.add(m);

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm;
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header.   A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    // Catch if the object's header is not neutral (not locked and
    // prepare m for installation - set monitor to initial state
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    _in_use_list.add(m);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}
1649
1650 void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1651 const char* cnt_name, size_t cnt,
1652 LogStream* ls, elapsedTimer* timer_p) {
1653 if (!SafepointMechanism::should_process(current)) {
1654 return;
1655 }
1656
1657 // A safepoint/handshake has started.
1658 if (ls != nullptr) {
|