8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/padded.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/markWord.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "runtime/atomic.hpp"
37 #include "runtime/frame.inline.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/handshake.hpp"
40 #include "runtime/interfaceSupport.inline.hpp"
41 #include "runtime/javaThread.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/objectMonitor.hpp"
44 #include "runtime/objectMonitor.inline.hpp"
45 #include "runtime/os.inline.hpp"
46 #include "runtime/osThread.hpp"
47 #include "runtime/perfData.hpp"
48 #include "runtime/safepointMechanism.inline.hpp"
49 #include "runtime/safepointVerifiers.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "runtime/stubRoutines.hpp"
52 #include "runtime/synchronizer.hpp"
53 #include "runtime/threads.hpp"
54 #include "runtime/timer.hpp"
55 #include "runtime/vframe.hpp"
56 #include "runtime/vmThread.hpp"
57 #include "utilities/align.hpp"
58 #include "utilities/dtrace.hpp"
59 #include "utilities/events.hpp"
60 #include "utilities/linkedlist.hpp"
61 #include "utilities/preserveException.hpp"
294 // returns true -- to indicate the call was satisfied.
295 // returns false -- to indicate the call needs the services of the slow-path.
296 // A no-loitering ordinance is in effect for code in the quick_* family
297 // operators: safepoints or indefinite blocking (blocking that might span a
298 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
299 // entry.
300 //
301 // Consider: An interesting optimization is to have the JIT recognize the
302 // following common idiom:
303 // synchronized (someobj) { .... ; notify(); }
304 // That is, we find a notify() or notifyAll() call that immediately precedes
305 // the monitorexit operation. In that case the JIT could fuse the operations
306 // into a single notifyAndExit() runtime primitive.
307
308 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
309 assert(current->thread_state() == _thread_in_Java, "invariant");
310 NoSafepointVerifier nsv;
311 if (obj == nullptr) return false; // slow-path for invalid obj
312 const markWord mark = obj->mark();
313
314 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
315 // Degenerate notify
316 // stack-locked by caller so by definition the implied waitset is empty.
317 return true;
318 }
319
320 if (mark.has_monitor()) {
321 ObjectMonitor* const mon = mark.monitor();
322 assert(mon->object() == oop(obj), "invariant");
323 if (mon->owner() != current) return false; // slow-path for IMS exception
324
325 if (mon->first_waiter() != nullptr) {
326 // We have one or more waiters. Since this is an inflated monitor
327 // that we own, we can transfer one or more threads from the waitset
328 // to the entrylist here and now, avoiding the slow-path.
329 if (all) {
330 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
331 } else {
332 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
333 }
334 int free_count = 0;
377 // Lock contention and Transactional Lock Elision (TLE) diagnostics
378 // and observability
379 // Case: light contention possibly amenable to TLE
380 // Case: TLE inimical operations such as nested/recursive synchronization
381
382 if (owner == current) {
383 m->_recursions++;
384 current->inc_held_monitor_count();
385 return true;
386 }
387
388 // This Java Monitor is inflated so obj's header will never be
389 // displaced to this thread's BasicLock. Make the displaced header
390 // non-null so this BasicLock is not seen as recursive nor as
391 // being locked. We do this unconditionally so that this thread's
392 // BasicLock cannot be mis-interpreted by any stack walkers. For
393 // performance reasons, stack walkers generally first check for
394 // stack-locking in the object's header, the second check is for
395 // recursive stack-locking in the displaced header in the BasicLock,
396 // and last are the inflated Java Monitor (ObjectMonitor) checks.
397 lock->set_displaced_header(markWord::unused_mark());
398
399 if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
400 assert(m->_recursions == 0, "invariant");
401 current->inc_held_monitor_count();
402 return true;
403 }
404 }
405
406 // Note that we could inflate in quick_enter.
407 // This is likely a useful optimization
408 // Critically, in quick_enter() we must not:
409 // -- block indefinitely, or
410 // -- reach a safepoint
411
412 return false; // revert to slow-path
413 }
414
415 // Handle notifications when synchronizing on value based classes
416 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
417 frame last_frame = current->last_frame();
466 return UseHeavyMonitors;
467 #else
468 return false;
469 #endif
470 }
471
472 // -----------------------------------------------------------------------------
473 // Monitor Enter/Exit
474 // The interpreter and compiler assembly code tries to lock using the fast path
475 // of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
477
// Fast-path monitor enter.
// First tries to stack-lock the object by CASing a pointer to the caller's
// BasicLock into the object's mark word; on any interference or if the
// object is already inflated, falls through to entering the inflated
// ObjectMonitor. Must be kept in sync with the interpreter/compiler
// assembly fast paths (see the section comment above).
//
// obj     - the object being locked
// lock    - the caller's on-stack BasicLock used for stack-locking
// current - the locking thread (must be the current JavaThread)
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  if (obj->klass()->is_value_based()) {
    // Synchronizing on a value-based class is discouraged; emit the
    // configured diagnostics before proceeding.
    handle_sync_on_value_based_class(obj, current);
  }

  // Bookkeeping: count is incremented up-front and must be balanced by
  // the dec in exit().
  current->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    markWord mark = obj->mark();
    if (mark.is_neutral()) {
      // Anticipate successful CAS -- the ST of the displaced mark must
      // be visible <= the ST performed by the CAS.
      lock->set_displaced_header(mark);
      if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
        // Stack-lock acquired: the mark now points at our BasicLock.
        return;
      }
      // CAS lost to a racing locker/hasher -- fall through to inflate() ...
    } else if (mark.has_locker() &&
               current->is_lock_owned((address)mark.locker())) {
      // Recursive stack-lock by this thread: record the recursion by
      // storing a null displaced header (see the matching test in exit()).
      assert(lock != mark.locker(), "must not re-lock the same lock");
      assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
      lock->set_displaced_header(markWord::from_pointer(nullptr));
      return;
    }

    // The object header will never be displaced to this lock,
    // so it does not matter what the value is, except that it
    // must be non-zero to avoid looking like a re-entrant lock,
    // and must not look locked either.
    lock->set_displaced_header(markWord::unused_mark());
  } else if (VerifyHeavyMonitors) {
    guarantee(!obj->mark().has_locker(), "must not be stack-locked");
  }

  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(current)) {
      return;
    }
  }
}
522
// Fast-path monitor exit, the inverse of enter() above.
// Handles (a) a recursive stack-lock exit (null displaced header),
// (b) restoring the displaced mark of a genuine stack-lock, and
// (c) falling through to the inflated ObjectMonitor::exit() slow path.
//
// object  - the object being unlocked
// lock    - the caller's on-stack BasicLock used at enter() time
// current - the unlocking thread
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  // Balance the inc performed in enter().
  current->dec_held_monitor_count();

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();

    markWord dhw = lock->displaced_header();
    if (dhw.value() == 0) {
      // If the displaced header is null, then this exit matches up with
      // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
      if (mark != markWord::INFLATING()) {
        // Only do diagnostics if we are not racing an inflation. Simply
        // exiting a recursive enter of a Java Monitor that is being
        // inflated is safe; see the has_monitor() comment below.
        assert(!mark.is_neutral(), "invariant");
        assert(!mark.has_locker() ||
               current->is_lock_owned((address)mark.locker()), "invariant");
        if (mark.has_monitor()) {
          // The BasicLock's displaced_header is marked as a recursive
          // enter and we have an inflated Java Monitor (ObjectMonitor).
          // This is a special case where the Java Monitor was inflated
          // after this thread entered the stack-lock recursively. When a
          // Java Monitor is inflated, we cannot safely walk the Java
          // Monitor owner's stack and update the BasicLocks because a
          // Java Monitor can be asynchronously inflated by a thread that
          // does not own the Java Monitor.
          ObjectMonitor* m = mark.monitor();
          assert(m->object()->mark() == mark, "invariant");
          assert(m->is_entered(current), "invariant");
        }
      }
#endif
      return;
    }

    if (mark == markWord::from_pointer(lock)) {
      // If the object is stack-locked by the current thread, try to
      // swing the displaced header from the BasicLock back to the mark.
      assert(dhw.is_neutral(), "invariant");
      if (object->cas_set_mark(dhw, mark) == mark) {
        // Mark word restored; the stack-lock is released.
        return;
      }
      // CAS failed (e.g. a racing inflation changed the mark) -- take
      // the slow path below.
    }
  } else if (VerifyHeavyMonitors) {
    guarantee(!object->mark().has_locker(), "must not be stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  monitor->exit(current);
}
577
578 // -----------------------------------------------------------------------------
579 // Class Loader support to workaround deadlocks on the class loader lock objects
580 // Also used by GC
581 // complete_exit()/reenter() are used to wait on a nested lock
582 // i.e. to give up an outer lock completely and then re-enter
583 // Used when holding nested locks - lock acquisition order: lock1 then lock2
584 // 1) complete_exit lock1 - saving recursion count
585 // 2) wait on lock2
586 // 3) when notified on lock2, unlock lock2
587 // 4) reenter lock1 with original recursion count
588 // 5) lock lock2
589 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
590 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
591 // The ObjectMonitor* can't be async deflated until ownership is
592 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
593 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
594 intx recur_count = monitor->complete_exit(current);
681 // The ObjectMonitor* can't be async deflated because the _waiters
682 // field is incremented before ownership is dropped and decremented
683 // after ownership is regained.
684 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
685
686 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
687 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
688
689 // This dummy call is in place to get around dtrace bug 6254741. Once
690 // that's fixed we can uncomment the following line, remove the call
691 // and change this function back into a "void" func.
692 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
693 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
694 return ret_code;
695 }
696
697 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
698 JavaThread* current = THREAD;
699
700 markWord mark = obj->mark();
701 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
702 // Not inflated so there can't be any waiters to notify.
703 return;
704 }
705 // The ObjectMonitor* can't be async deflated until ownership is
706 // dropped by the calling thread.
707 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
708 monitor->notify(CHECK);
709 }
710
711 // NOTE: see comment of notify()
712 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
713 JavaThread* current = THREAD;
714
715 markWord mark = obj->mark();
716 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
717 // Not inflated so there can't be any waiters to notify.
718 return;
719 }
720 // The ObjectMonitor* can't be async deflated until ownership is
721 // dropped by the calling thread.
722 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
723 monitor->notifyAll(CHECK);
724 }
725
726 // -----------------------------------------------------------------------------
727 // Hash Code handling
728
// Process-global state shared by the hash code machinery below.
// Each hot field is padded to occupy its own cache line
// (OM_CACHE_LINE_SIZE) so concurrent readers/writers of different
// fields do not false-share.
struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

// The single file-local instance; accessed by the hashing helpers.
static SharedGlobals GVars;
741
742 static markWord read_stable_mark(oop obj) {
743 markWord mark = obj->mark_acquire();
744 if (!mark.is_being_inflated()) {
745 return mark; // normal fast-path return
746 }
747
748 int its = 0;
749 for (;;) {
750 markWord mark = obj->mark_acquire();
751 if (!mark.is_being_inflated()) {
752 return mark; // normal fast-path return
753 }
754
755 // The object is being inflated by some other thread.
756 // The caller of read_stable_mark() must wait for inflation to complete.
757 // Avoid live-lock.
758
759 ++its;
760 if (its > 10000 || !os::is_MP()) {
761 if (its & 1) {
762 os::naked_yield();
763 } else {
764 // Note that the following code attenuates the livelock problem but is not
839 // Marsaglia's xor-shift scheme with thread-specific state
840 // This is probably the best overall implementation -- we'll
841 // likely make this the default in future releases.
842 unsigned t = current->_hashStateX;
843 t ^= (t << 11);
844 current->_hashStateX = current->_hashStateY;
845 current->_hashStateY = current->_hashStateZ;
846 current->_hashStateZ = current->_hashStateW;
847 unsigned v = current->_hashStateW;
848 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
849 current->_hashStateW = v;
850 value = v;
851 }
852
853 value &= markWord::hash_mask;
854 if (value == 0) value = 0xBAD;
855 assert(value != markWord::no_hash, "invariant");
856 return value;
857 }
858
859 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
860
861 while (true) {
862 ObjectMonitor* monitor = nullptr;
863 markWord temp, test;
864 intptr_t hash;
865 markWord mark = read_stable_mark(obj);
866 if (VerifyHeavyMonitors) {
867 assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
868 guarantee(!mark.has_locker(), "must not be stack locked");
869 }
870 if (mark.is_neutral()) { // if this is a normal header
871 hash = mark.hash();
872 if (hash != 0) { // if it has a hash, just return it
873 return hash;
874 }
875 hash = get_next_hash(current, obj); // get a new hash
876 temp = mark.copy_set_hash(hash); // merge the hash into header
877 // try to install the hash
878 test = obj->cas_set_mark(temp, mark);
893
894 // Separate load of dmw/header above from the loads in
895 // is_being_async_deflated().
896
897 // dmw/header and _contentions may get written by different threads.
898 // Make sure to observe them in the same order when having several observers.
899 OrderAccess::loadload_for_IRIW();
900
901 if (monitor->is_being_async_deflated()) {
902 // But we can't safely use the hash if we detect that async
903 // deflation has occurred. So we attempt to restore the
904 // header/dmw to the object's header so that we only retry
905 // once if the deflater thread happens to be slow.
906 monitor->install_displaced_markword_in_object(obj);
907 continue;
908 }
909 return hash;
910 }
911 // Fall thru so we only have one place that installs the hash in
912 // the ObjectMonitor.
913 } else if (current->is_lock_owned((address)mark.locker())) {
914 // This is a stack lock owned by the calling thread so fetch the
915 // displaced markWord from the BasicLock on the stack.
916 temp = mark.displaced_mark_helper();
917 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
918 hash = temp.hash();
919 if (hash != 0) { // if it has a hash, just return it
920 return hash;
921 }
922 // WARNING:
923 // The displaced header in the BasicLock on a thread's stack
924 // is strictly immutable. It CANNOT be changed in ANY cases.
925 // So we have to inflate the stack lock into an ObjectMonitor
926 // even if the current thread owns the lock. The BasicLock on
927 // a thread's stack can be asynchronously read by other threads
928 // during an inflate() call so any change to that stack memory
929 // may not propagate to other threads correctly.
930 }
931
932 // Inflate the monitor to set the hash.
933
962 monitor->install_displaced_markword_in_object(obj);
963 continue;
964 }
965 }
966 // We finally get the hash.
967 return hash;
968 }
969 }
970
971 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
972 Handle h_obj) {
973 assert(current == JavaThread::current(), "Can only be called on current thread");
974 oop obj = h_obj();
975
976 markWord mark = read_stable_mark(obj);
977
978 // Uncontended case, header points to stack
979 if (mark.has_locker()) {
980 return current->is_lock_owned((address)mark.locker());
981 }
982 // Contended case, header points to ObjectMonitor (tagged pointer)
983 if (mark.has_monitor()) {
984 // The first stage of async deflation does not affect any field
985 // used by this comparison so the ObjectMonitor* is usable here.
986 ObjectMonitor* monitor = mark.monitor();
987 return monitor->is_entered(current) != 0;
988 }
989 // Unlocked case, header in place
990 assert(mark.is_neutral(), "sanity check");
991 return false;
992 }
993
994 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
995 oop obj = h_obj();
996 address owner = nullptr;
997
998 markWord mark = read_stable_mark(obj);
999
1000 // Uncontended case, header points to stack
1001 if (mark.has_locker()) {
1002 owner = (address) mark.locker();
1003 }
1004
1005 // Contended case, header points to ObjectMonitor (tagged pointer)
1006 else if (mark.has_monitor()) {
1007 // The first stage of async deflation does not affect any field
1008 // used by this comparison so the ObjectMonitor* is usable here.
1009 ObjectMonitor* monitor = mark.monitor();
1010 assert(monitor != nullptr, "monitor should be non-null");
1011 owner = (address) monitor->owner();
1012 }
1013
1014 if (owner != nullptr) {
1015 // owning_thread_from_monitor_owner() may also return null here
1016 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1017 }
1018
1019 // Unlocked case, header in place
1020 // Cannot have assertion since this object may have been
1021 // locked by another thread when reaching here.
1022 // assert(mark.is_neutral(), "sanity check");
1023
1024 return nullptr;
1025 }
1026
1027 // Visitors ...
1028
1029 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1030 // ObjectMonitors where owner is set to a stack lock address in thread.
1031 //
1032 // This version of monitors_iterate() works with the in-use monitor list.
1033 //
1034 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1035 MonitorList::Iterator iter = _in_use_list.iterator();
1036 while (iter.has_next()) {
1037 ObjectMonitor* mid = iter.next();
1038 if (mid->owner() != thread) {
1039 // Not owned by the target thread and intentionally skips when owner
1040 // is set to a stack lock address in the target thread.
1041 continue;
1042 }
1043 if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
1201 }
1202
1203 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1204 const InflateCause cause) {
1205 EventJavaMonitorInflate event;
1206
1207 for (;;) {
1208 const markWord mark = object->mark_acquire();
1209
1210 // The mark can be in one of the following states:
1211 // * Inflated - just return
1212 // * Stack-locked - coerce it to inflated
1213 // * INFLATING - busy wait for conversion to complete
1214 // * Neutral - aggressively inflate the object.
1215
1216 // CASE: inflated
1217 if (mark.has_monitor()) {
1218 ObjectMonitor* inf = mark.monitor();
1219 markWord dmw = inf->header();
1220 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1221 return inf;
1222 }
1223
1224 // CASE: inflation in progress - inflating over a stack-lock.
1225 // Some other thread is converting from stack-locked to inflated.
1226 // Only that thread can complete inflation -- other threads must wait.
1227 // The INFLATING value is transient.
1228 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1229 // We could always eliminate polling by parking the thread on some auxiliary list.
1230 if (mark == markWord::INFLATING()) {
1231 read_stable_mark(object);
1232 continue;
1233 }
1234
1235 // CASE: stack-locked
1236 // Could be stack-locked either by this thread or by some other thread.
1237 //
1238 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1239 // to install INFLATING into the mark word. We originally installed INFLATING,
1240 // allocated the ObjectMonitor, and then finally STed the address of the
1241 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1242 // the interval in which INFLATING appeared in the mark, thus increasing
1243 // the odds of inflation contention.
1244
1245 LogStreamHandle(Trace, monitorinflation) lsh;
1246
1247 if (mark.has_locker()) {
1248 ObjectMonitor* m = new ObjectMonitor(object);
1249 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1250 // We do this before the CAS in order to minimize the length of time
1251 // in which INFLATING appears in the mark.
1252
1253 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1254 if (cmp != mark) {
1255 delete m;
1256 continue; // Interference -- just retry
1257 }
1258
1259 // We've successfully installed INFLATING (0) into the mark-word.
1260 // This is the only case where 0 will appear in a mark-word.
1261 // Only the singular thread that successfully swings the mark-word
1262 // to 0 can perform (or more precisely, complete) inflation.
1263 //
1264 // Why do we CAS a 0 into the mark-word instead of just CASing the
1265 // mark-word from the stack-locked value directly to the new inflated state?
1266 // Consider what happens when a thread unlocks a stack-locked object.
1267 // It attempts to use CAS to swing the displaced header value from the
1441 if (current->is_Java_thread()) {
1442 // A JavaThread must check for a safepoint/handshake and honor it.
1443 chk_for_block_req(JavaThread::cast(current), "deflation", "deflated_count",
1444 deflated_count, ls, timer_p);
1445 }
1446 }
1447
1448 return deflated_count;
1449 }
1450
// A no-op (trace-only) handshake closure. Executing a handshake with all
// JavaThreads is what matters: per the deflation code below, the
// handshake is what makes it safe to free the ObjectMonitors deflated in
// the current cycle (every JavaThread has passed a state where it cannot
// hold a stale reference to them).
class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  // Per-thread callback: intentionally does nothing except trace logging.
  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};
1460
1461 // This function is called by the MonitorDeflationThread to deflate
1462 // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
1463 // and VM_ThreadDump::doit() by the VMThread.
1464 size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) {
1465 Thread* current = Thread::current();
1466 if (current->is_Java_thread()) {
1467 // The async deflation request has been processed.
1468 _last_async_deflation_time_ns = os::javaTimeNanos();
1469 set_is_async_deflation_requested(false);
1470 }
1471
1472 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1473 LogStreamHandle(Info, monitorinflation) lsh_info;
1474 LogStream* ls = nullptr;
1475 if (log_is_enabled(Debug, monitorinflation)) {
1476 ls = &lsh_debug;
1477 } else if (log_is_enabled(Info, monitorinflation)) {
1478 ls = &lsh_info;
1479 }
1480
1494 // final audit and all the remaining ObjectMonitors have been
1495 // deflated, BUT the MonitorDeflationThread blocked for the final
1496 // safepoint during unlinking.
1497
1498 // Unlink deflated ObjectMonitors from the in-use list.
1499 ResourceMark rm;
1500 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1501 unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer, &delete_list);
1502 if (current->is_Java_thread()) {
1503 if (ls != nullptr) {
1504 timer.stop();
1505 ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
1506 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1507 SIZE_FORMAT ", max=" SIZE_FORMAT,
1508 unlinked_count, in_use_list_ceiling(),
1509 _in_use_list.count(), _in_use_list.max());
1510 }
1511
1512 // A JavaThread needs to handshake in order to safely free the
1513 // ObjectMonitors that were deflated in this cycle.
1514 HandshakeForDeflation hfd_hc;
1515 Handshake::execute(&hfd_hc);
1516
1517 if (ls != nullptr) {
1518 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1519 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1520 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1521 timer.start();
1522 }
1523 }
1524
1525 // After the handshake, safely free the ObjectMonitors that were
1526 // deflated in this cycle.
1527 for (ObjectMonitor* monitor: delete_list) {
1528 delete monitor;
1529 deleted_count++;
1530
1531 if (current->is_Java_thread()) {
1532 // A JavaThread must check for a safepoint/handshake and honor it.
1533 chk_for_block_req(JavaThread::cast(current), "deletion", "deleted_count",
1534 deleted_count, ls, &timer);
1535 }
// ---------------------------------------------------------------------------
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "gc/shared/suspendibleThreadSet.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/frame.inline.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/handshake.hpp"
41 #include "runtime/interfaceSupport.inline.hpp"
42 #include "runtime/javaThread.hpp"
43 #include "runtime/lockStack.inline.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/objectMonitor.hpp"
46 #include "runtime/objectMonitor.inline.hpp"
47 #include "runtime/os.inline.hpp"
48 #include "runtime/osThread.hpp"
49 #include "runtime/perfData.hpp"
50 #include "runtime/safepointMechanism.inline.hpp"
51 #include "runtime/safepointVerifiers.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "runtime/synchronizer.hpp"
55 #include "runtime/threads.hpp"
56 #include "runtime/timer.hpp"
57 #include "runtime/vframe.hpp"
58 #include "runtime/vmThread.hpp"
59 #include "utilities/align.hpp"
60 #include "utilities/dtrace.hpp"
61 #include "utilities/events.hpp"
62 #include "utilities/linkedlist.hpp"
63 #include "utilities/preserveException.hpp"
296 // returns true -- to indicate the call was satisfied.
297 // returns false -- to indicate the call needs the services of the slow-path.
298 // A no-loitering ordinance is in effect for code in the quick_* family
299 // operators: safepoints or indefinite blocking (blocking that might span a
300 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
301 // entry.
302 //
303 // Consider: An interesting optimization is to have the JIT recognize the
304 // following common idiom:
305 // synchronized (someobj) { .... ; notify(); }
306 // That is, we find a notify() or notifyAll() call that immediately precedes
307 // the monitorexit operation. In that case the JIT could fuse the operations
308 // into a single notifyAndExit() runtime primitive.
309
310 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
311 assert(current->thread_state() == _thread_in_Java, "invariant");
312 NoSafepointVerifier nsv;
313 if (obj == nullptr) return false; // slow-path for invalid obj
314 const markWord mark = obj->mark();
315
316 if ((mark.is_fast_locked() && current->lock_stack().contains(oop(obj))) ||
317 (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
318 // Degenerate notify
319 // stack-locked by caller so by definition the implied waitset is empty.
320 return true;
321 }
322
323 if (mark.has_monitor()) {
324 ObjectMonitor* const mon = mark.monitor();
325 assert(mon->object() == oop(obj), "invariant");
326 if (mon->owner() != current) return false; // slow-path for IMS exception
327
328 if (mon->first_waiter() != nullptr) {
329 // We have one or more waiters. Since this is an inflated monitor
330 // that we own, we can transfer one or more threads from the waitset
331 // to the entrylist here and now, avoiding the slow-path.
332 if (all) {
333 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
334 } else {
335 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
336 }
337 int free_count = 0;
380 // Lock contention and Transactional Lock Elision (TLE) diagnostics
381 // and observability
382 // Case: light contention possibly amenable to TLE
383 // Case: TLE inimical operations such as nested/recursive synchronization
384
385 if (owner == current) {
386 m->_recursions++;
387 current->inc_held_monitor_count();
388 return true;
389 }
390
391 // This Java Monitor is inflated so obj's header will never be
392 // displaced to this thread's BasicLock. Make the displaced header
393 // non-null so this BasicLock is not seen as recursive nor as
394 // being locked. We do this unconditionally so that this thread's
395 // BasicLock cannot be mis-interpreted by any stack walkers. For
396 // performance reasons, stack walkers generally first check for
397 // stack-locking in the object's header, the second check is for
398 // recursive stack-locking in the displaced header in the BasicLock,
399 // and last are the inflated Java Monitor (ObjectMonitor) checks.
400 if (!UseFastLocking) {
401 lock->set_displaced_header(markWord::unused_mark());
402 }
403
404 if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
405 assert(m->_recursions == 0, "invariant");
406 current->inc_held_monitor_count();
407 return true;
408 }
409 }
410
411 // Note that we could inflate in quick_enter.
412 // This is likely a useful optimization
413 // Critically, in quick_enter() we must not:
414 // -- block indefinitely, or
415 // -- reach a safepoint
416
417 return false; // revert to slow-path
418 }
419
420 // Handle notifications when synchronizing on value based classes
421 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
422 frame last_frame = current->last_frame();
471 return UseHeavyMonitors;
472 #else
473 return false;
474 #endif
475 }
476
477 // -----------------------------------------------------------------------------
478 // Monitor Enter/Exit
479 // The interpreter and compiler assembly code tries to lock using the fast path
480 // of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
482
// Acquire the lock on obj for 'current'. This is the slow-path entry
// called when the interpreter/compiler assembly fast-path fails (see
// the comment above). Tries stack-locking or fast-locking first (unless
// heavy monitors are forced), then falls back to inflating and entering
// an ObjectMonitor.
//   obj     - the object being locked
//   lock    - caller-supplied BasicLock; used for stack-locking and for
//             publishing the displaced header (legacy locking mode only)
//   current - the locking thread
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  if (obj->klass()->is_value_based()) {
    // Synchronizing on value-based classes is discouraged; log/raise per flags.
    handle_sync_on_value_based_class(obj, current);
  }

  current->inc_held_monitor_count();

  if (!useHeavyMonitors()) {
    if (UseFastLocking) {
      // Fast-locking mode: lock ownership is recorded on the thread's
      // lock-stack instead of via a displaced header.
      LockStack& lock_stack = current->lock_stack();

      markWord header = obj()->mark_acquire();
      while (true) {
        if (header.is_neutral()) {
          assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
          // Try to swing into 'fast-locked' state without inflating.
          markWord locked_header = header.set_fast_locked();
          markWord witness = obj()->cas_set_mark(locked_header, header);
          if (witness == header) {
            // Successfully fast-locked, push object to lock-stack and return.
            lock_stack.push(obj());
            return;
          }
          // Otherwise retry.
          header = witness;
        } else {
          // Fall-through to inflate-enter.
          break;
        }
      }
    } else {
      // Legacy stack-locking mode.
      markWord mark = obj->mark();
      if (mark.is_neutral()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return;
        }
        // Fall through to inflate() ...
      } else if (mark.has_locker() &&
                 current->is_lock_owned((address)mark.locker())) {
        // Recursive stack-lock by this thread: mark the BasicLock with a
        // null displaced header so stack walkers recognize the recursion.
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(nullptr));
        return;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());
    }
  } else if (VerifyHeavyMonitors) {
    guarantee(!obj->mark().has_locker() && !obj->mark().is_fast_locked(), "must not be stack-locked");
  }

  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(current)) {
      return;
    }
  }
}
551
// Release the lock on 'object' held by 'current'. Mirrors enter():
// undoes fast-locking or stack-locking where possible, otherwise
// inflates (if needed) and exits the ObjectMonitor.
//   object  - the locked object
//   lock    - the BasicLock used at enter time (legacy mode only)
//   current - the unlocking thread
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  current->dec_held_monitor_count();

  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (UseFastLocking) {
      if (mark.is_fast_locked()) {
        // Try to swing the mark back to unlocked in one CAS.
        markWord unlocked_header = mark.set_unlocked();
        markWord witness = object->cas_set_mark(unlocked_header, mark);
        if (witness != mark) {
          // Another thread beat us, it can only have installed an anonymously locked monitor at this point.
          // Fetch that monitor, set owner correctly to this thread, and exit it (allowing waiting threads to enter).
          assert(witness.has_monitor(), "must have monitor");
          ObjectMonitor* monitor = witness.monitor();
          assert(monitor->is_owner_anonymous(), "must be anonymous owner");
          monitor->set_owner_from_anonymous(current);
          monitor->exit(current);
        }
        // Either way, this thread no longer fast-holds the object.
        LockStack& lock_stack = current->lock_stack();
        lock_stack.remove(object);
        return;
      }
    } else {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is null, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_neutral(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
            // Java Monitor can be asynchronously inflated by a thread that
            // does not own the Java Monitor.
            ObjectMonitor* m = mark.monitor();
            assert(m->object()->mark() == mark, "invariant");
            assert(m->is_entered(current), "invariant");
          }
        }
#endif
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          return;
        }
      }
    }
  } else if (VerifyHeavyMonitors) {
    guarantee(!object->mark().has_locker(), "must not be stack-locked");
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  if (UseFastLocking && monitor->is_owner_anonymous()) {
    // It must be us. Pop lock object from lock stack.
    LockStack& lock_stack = current->lock_stack();
    oop popped = lock_stack.pop();
    assert(popped == object, "must be owned by this thread");
    monitor->set_owner_from_anonymous(current);
  }
  monitor->exit(current);
}
631
632 // -----------------------------------------------------------------------------
633 // Class Loader support to workaround deadlocks on the class loader lock objects
634 // Also used by GC
635 // complete_exit()/reenter() are used to wait on a nested lock
636 // i.e. to give up an outer lock completely and then re-enter
637 // Used when holding nested locks - lock acquisition order: lock1 then lock2
638 // 1) complete_exit lock1 - saving recursion count
639 // 2) wait on lock2
640 // 3) when notified on lock2, unlock lock2
641 // 4) reenter lock1 with original recursion count
642 // 5) lock lock2
643 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
644 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
645 // The ObjectMonitor* can't be async deflated until ownership is
646 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
647 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
648 intx recur_count = monitor->complete_exit(current);
735 // The ObjectMonitor* can't be async deflated because the _waiters
736 // field is incremented before ownership is dropped and decremented
737 // after ownership is regained.
738 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
739
740 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
741 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
742
743 // This dummy call is in place to get around dtrace bug 6254741. Once
744 // that's fixed we can uncomment the following line, remove the call
745 // and change this function back into a "void" func.
746 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
747 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
748 return ret_code;
749 }
750
751 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
752 JavaThread* current = THREAD;
753
754 markWord mark = obj->mark();
755 if ((mark.is_fast_locked() && current->lock_stack().contains(obj())) ||
756 (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
757 // Not inflated so there can't be any waiters to notify.
758 return;
759 }
760 // The ObjectMonitor* can't be async deflated until ownership is
761 // dropped by the calling thread.
762 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
763 monitor->notify(CHECK);
764 }
765
766 // NOTE: see comment of notify()
767 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
768 JavaThread* current = THREAD;
769
770 markWord mark = obj->mark();
771 if ((mark.is_fast_locked() && current->lock_stack().contains(obj())) ||
772 (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
773 // Not inflated so there can't be any waiters to notify.
774 return;
775 }
776 // The ObjectMonitor* can't be async deflated until ownership is
777 // dropped by the calling thread.
778 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
779 monitor->notifyAll(CHECK);
780 }
781
782 // -----------------------------------------------------------------------------
783 // Hash Code handling
784
// Process-wide state used by hash-code generation (see get_next_hash()
// users below). Each field is padded out to its own cache line
// (OM_CACHE_LINE_SIZE) so the mostly-read stw_random and the hot
// read-write hc_sequence do not false-share.
struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

// The single shared instance.
static SharedGlobals GVars;
797
798 static markWord read_stable_mark(oop obj) {
799 markWord mark = obj->mark_acquire();
800 if (!mark.is_being_inflated() || UseFastLocking) {
801 return mark; // normal fast-path return
802 }
803
804 int its = 0;
805 for (;;) {
806 markWord mark = obj->mark_acquire();
807 if (!mark.is_being_inflated()) {
808 return mark; // normal fast-path return
809 }
810
811 // The object is being inflated by some other thread.
812 // The caller of read_stable_mark() must wait for inflation to complete.
813 // Avoid live-lock.
814
815 ++its;
816 if (its > 10000 || !os::is_MP()) {
817 if (its & 1) {
818 os::naked_yield();
819 } else {
820 // Note that the following code attenuates the livelock problem but is not
895 // Marsaglia's xor-shift scheme with thread-specific state
896 // This is probably the best overall implementation -- we'll
897 // likely make this the default in future releases.
898 unsigned t = current->_hashStateX;
899 t ^= (t << 11);
900 current->_hashStateX = current->_hashStateY;
901 current->_hashStateY = current->_hashStateZ;
902 current->_hashStateZ = current->_hashStateW;
903 unsigned v = current->_hashStateW;
904 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
905 current->_hashStateW = v;
906 value = v;
907 }
908
909 value &= markWord::hash_mask;
910 if (value == 0) value = 0xBAD;
911 assert(value != markWord::no_hash, "invariant");
912 return value;
913 }
914
915 static bool is_lock_owned(Thread* thread, oop obj) {
916 assert(UseFastLocking, "only call this with fast-locking enabled");
917 return thread->is_Java_thread() ? reinterpret_cast<JavaThread*>(thread)->lock_stack().contains(obj) : false;
918 }
919
920 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
921
922 while (true) {
923 ObjectMonitor* monitor = nullptr;
924 markWord temp, test;
925 intptr_t hash;
926 markWord mark = read_stable_mark(obj);
927 if (VerifyHeavyMonitors) {
928 assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
929 guarantee(!mark.has_locker(), "must not be stack locked");
930 }
931 if (mark.is_neutral()) { // if this is a normal header
932 hash = mark.hash();
933 if (hash != 0) { // if it has a hash, just return it
934 return hash;
935 }
936 hash = get_next_hash(current, obj); // get a new hash
937 temp = mark.copy_set_hash(hash); // merge the hash into header
938 // try to install the hash
939 test = obj->cas_set_mark(temp, mark);
954
955 // Separate load of dmw/header above from the loads in
956 // is_being_async_deflated().
957
958 // dmw/header and _contentions may get written by different threads.
959 // Make sure to observe them in the same order when having several observers.
960 OrderAccess::loadload_for_IRIW();
961
962 if (monitor->is_being_async_deflated()) {
963 // But we can't safely use the hash if we detect that async
964 // deflation has occurred. So we attempt to restore the
965 // header/dmw to the object's header so that we only retry
966 // once if the deflater thread happens to be slow.
967 monitor->install_displaced_markword_in_object(obj);
968 continue;
969 }
970 return hash;
971 }
972 // Fall thru so we only have one place that installs the hash in
973 // the ObjectMonitor.
974 } else if (mark.is_fast_locked() && is_lock_owned(current, obj)) {
975 // This is a fast lock owned by the calling thread so use the
976 // markWord from the object.
977 hash = mark.hash();
978 if (hash != 0) { // if it has a hash, just return it
979 return hash;
980 }
981 } else if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
982 // This is a stack lock owned by the calling thread so fetch the
983 // displaced markWord from the BasicLock on the stack.
984 temp = mark.displaced_mark_helper();
985 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
986 hash = temp.hash();
987 if (hash != 0) { // if it has a hash, just return it
988 return hash;
989 }
990 // WARNING:
991 // The displaced header in the BasicLock on a thread's stack
992 // is strictly immutable. It CANNOT be changed in ANY cases.
993 // So we have to inflate the stack lock into an ObjectMonitor
994 // even if the current thread owns the lock. The BasicLock on
995 // a thread's stack can be asynchronously read by other threads
996 // during an inflate() call so any change to that stack memory
997 // may not propagate to other threads correctly.
998 }
999
1000 // Inflate the monitor to set the hash.
1001
1030 monitor->install_displaced_markword_in_object(obj);
1031 continue;
1032 }
1033 }
1034 // We finally get the hash.
1035 return hash;
1036 }
1037 }
1038
1039 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1040 Handle h_obj) {
1041 assert(current == JavaThread::current(), "Can only be called on current thread");
1042 oop obj = h_obj();
1043
1044 markWord mark = read_stable_mark(obj);
1045
1046 // Uncontended case, header points to stack
1047 if (mark.has_locker()) {
1048 return current->is_lock_owned((address)mark.locker());
1049 }
1050
1051 // Fast-locking case.
1052 if (mark.is_fast_locked()) {
1053 return current->lock_stack().contains(h_obj());
1054 }
1055
1056 // Contended case, header points to ObjectMonitor (tagged pointer)
1057 if (mark.has_monitor()) {
1058 // The first stage of async deflation does not affect any field
1059 // used by this comparison so the ObjectMonitor* is usable here.
1060 ObjectMonitor* monitor = mark.monitor();
1061 return monitor->is_entered(current) != 0;
1062 }
1063 // Unlocked case, header in place
1064 assert(mark.is_neutral(), "sanity check");
1065 return false;
1066 }
1067
1068 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1069 oop obj = h_obj();
1070 markWord mark = read_stable_mark(obj);
1071
1072 // Uncontended case, header points to stack
1073 if (mark.has_locker()) {
1074 return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1075 }
1076
1077 if (mark.is_fast_locked()) {
1078 return Threads::owning_thread_from_object(t_list, h_obj());
1079 }
1080
1081 // Contended case, header points to ObjectMonitor (tagged pointer)
1082 if (mark.has_monitor()) {
1083 // The first stage of async deflation does not affect any field
1084 // used by this comparison so the ObjectMonitor* is usable here.
1085 ObjectMonitor* monitor = mark.monitor();
1086 assert(monitor != nullptr, "monitor should be non-null");
1087 return Threads::owning_thread_from_monitor(t_list, monitor);
1088 }
1089
1090 return nullptr;
1091 }
1092
1093 // Visitors ...
1094
1095 // Iterate ObjectMonitors where the owner == thread; this does NOT include
1096 // ObjectMonitors where owner is set to a stack lock address in thread.
1097 //
1098 // This version of monitors_iterate() works with the in-use monitor list.
1099 //
1100 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1101 MonitorList::Iterator iter = _in_use_list.iterator();
1102 while (iter.has_next()) {
1103 ObjectMonitor* mid = iter.next();
1104 if (mid->owner() != thread) {
1105 // Not owned by the target thread and intentionally skips when owner
1106 // is set to a stack lock address in the target thread.
1107 continue;
1108 }
1109 if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
1267 }
1268
1269 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1270 const InflateCause cause) {
1271 EventJavaMonitorInflate event;
1272
1273 for (;;) {
1274 const markWord mark = object->mark_acquire();
1275
1276 // The mark can be in one of the following states:
1277 // * Inflated - just return
1278 // * Stack-locked - coerce it to inflated
1279 // * INFLATING - busy wait for conversion to complete
1280 // * Neutral - aggressively inflate the object.
1281
1282 // CASE: inflated
1283 if (mark.has_monitor()) {
1284 ObjectMonitor* inf = mark.monitor();
1285 markWord dmw = inf->header();
1286 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1287 if (UseFastLocking && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
1288 inf->set_owner_from_anonymous(current);
1289 assert(current->is_Java_thread(), "must be Java thread");
1290 reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
1291 }
1292 return inf;
1293 }
1294
1295 // CASE: inflation in progress - inflating over a stack-lock.
1296 // Some other thread is converting from stack-locked to inflated.
1297 // Only that thread can complete inflation -- other threads must wait.
1298 // The INFLATING value is transient.
1299 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1300 // We could always eliminate polling by parking the thread on some auxiliary list.
1301 // NOTE: We need to check UseFastLocking here, because with fast-locking, the header
1302 // may legitimately be zero: cleared lock-bits and all upper header bits zero.
1303 // With fast-locking, the INFLATING protocol is not used.
1304 if (mark == markWord::INFLATING() && !UseFastLocking) {
1305 read_stable_mark(object);
1306 continue;
1307 }
1308
1309 // CASE: stack-locked
1310 // Could be stack-locked either by this thread or by some other thread.
1311 //
1312 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1313 // to install INFLATING into the mark word. We originally installed INFLATING,
1314 // allocated the ObjectMonitor, and then finally STed the address of the
1315 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1316 // the interval in which INFLATING appeared in the mark, thus increasing
1317 // the odds of inflation contention.
1318
1319 LogStreamHandle(Trace, monitorinflation) lsh;
1320 if (mark.is_fast_locked()) {
1321 assert(UseFastLocking, "can only happen with fast-locking");
1322 ObjectMonitor* monitor = new ObjectMonitor(object);
1323 monitor->set_header(mark.set_unlocked());
1324 bool own = is_lock_owned(current, object);
1325 if (own) {
1326 // Owned by us.
1327 monitor->set_owner_from(nullptr, current);
1328 } else {
1329 // Owned by somebody else.
1330 monitor->set_owner_anonymous();
1331 }
1332 markWord monitor_mark = markWord::encode(monitor);
1333 markWord witness = object->cas_set_mark(monitor_mark, mark);
1334 if (witness == mark) {
1335 // Success! Return inflated monitor.
1336 if (own) {
1337 assert(current->is_Java_thread(), "must be: checked in is_lock_owned()");
1338 reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
1339 }
1340 // Once the ObjectMonitor is configured and object is associated
1341 // with the ObjectMonitor, it is safe to allow async deflation:
1342 _in_use_list.add(monitor);
1343
1344 // Hopefully the performance counters are allocated on distinct
1345 // cache lines to avoid false sharing on MP systems ...
1346 OM_PERFDATA_OP(Inflations, inc());
1347 if (log_is_enabled(Trace, monitorinflation)) {
1348 ResourceMark rm(current);
1349 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1350 INTPTR_FORMAT ", type='%s'", p2i(object),
1351 object->mark().value(), object->klass()->external_name());
1352 }
1353 if (event.should_commit()) {
1354 post_monitor_inflate_event(&event, object, cause);
1355 }
1356 return monitor;
1357 } else {
1358 delete monitor;
1359 continue;
1360 }
1361 }
1362
1363 if (mark.has_locker()) {
1364 assert(!UseFastLocking, "can not happen with fast-locking");
1365 ObjectMonitor* m = new ObjectMonitor(object);
1366 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1367 // We do this before the CAS in order to minimize the length of time
1368 // in which INFLATING appears in the mark.
1369
1370 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1371 if (cmp != mark) {
1372 delete m;
1373 continue; // Interference -- just retry
1374 }
1375
1376 // We've successfully installed INFLATING (0) into the mark-word.
1377 // This is the only case where 0 will appear in a mark-word.
1378 // Only the singular thread that successfully swings the mark-word
1379 // to 0 can perform (or more precisely, complete) inflation.
1380 //
1381 // Why do we CAS a 0 into the mark-word instead of just CASing the
1382 // mark-word from the stack-locked value directly to the new inflated state?
1383 // Consider what happens when a thread unlocks a stack-locked object.
1384 // It attempts to use CAS to swing the displaced header value from the
1558 if (current->is_Java_thread()) {
1559 // A JavaThread must check for a safepoint/handshake and honor it.
1560 chk_for_block_req(JavaThread::cast(current), "deflation", "deflated_count",
1561 deflated_count, ls, timer_p);
1562 }
1563 }
1564
1565 return deflated_count;
1566 }
1567
// A no-op handshake closure. Executing it forces every JavaThread
// through a handshake; the rendezvous itself (not any per-thread work)
// is what matters to the deflation code that uses it (see
// deflate_idle_monitors), before deflated ObjectMonitors are freed.
class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  void do_thread(Thread* thread) {
    // Nothing to do per thread beyond trace logging.
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};
1577
1578 class VM_RendezvousGCThreads : public VM_Operation {
1579 public:
1580 bool evaluate_at_safepoint() const override { return false; }
1581 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1582 void doit() override {
1583 SuspendibleThreadSet::synchronize();
1584 SuspendibleThreadSet::desynchronize();
1585 };
1586 };
1587
1588 // This function is called by the MonitorDeflationThread to deflate
1589 // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
1590 // and VM_ThreadDump::doit() by the VMThread.
1591 size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) {
1592 Thread* current = Thread::current();
1593 if (current->is_Java_thread()) {
1594 // The async deflation request has been processed.
1595 _last_async_deflation_time_ns = os::javaTimeNanos();
1596 set_is_async_deflation_requested(false);
1597 }
1598
1599 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1600 LogStreamHandle(Info, monitorinflation) lsh_info;
1601 LogStream* ls = nullptr;
1602 if (log_is_enabled(Debug, monitorinflation)) {
1603 ls = &lsh_debug;
1604 } else if (log_is_enabled(Info, monitorinflation)) {
1605 ls = &lsh_info;
1606 }
1607
1621 // final audit and all the remaining ObjectMonitors have been
1622 // deflated, BUT the MonitorDeflationThread blocked for the final
1623 // safepoint during unlinking.
1624
1625 // Unlink deflated ObjectMonitors from the in-use list.
1626 ResourceMark rm;
1627 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1628 unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer, &delete_list);
1629 if (current->is_Java_thread()) {
1630 if (ls != nullptr) {
1631 timer.stop();
1632 ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
1633 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1634 SIZE_FORMAT ", max=" SIZE_FORMAT,
1635 unlinked_count, in_use_list_ceiling(),
1636 _in_use_list.count(), _in_use_list.max());
1637 }
1638
1639 // A JavaThread needs to handshake in order to safely free the
1640 // ObjectMonitors that were deflated in this cycle.
1641 // Also, we sync and desync GC threads around the handshake, so that they can
1642 // safely read the mark-word and look-through to the object-monitor, without
1643 // being afraid that the object-monitor is going away.
1644 HandshakeForDeflation hfd_hc;
1645 Handshake::execute(&hfd_hc);
1646 VM_RendezvousGCThreads sync_gc;
1647 VMThread::execute(&sync_gc);
1648
1649 if (ls != nullptr) {
1650 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1651 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1652 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1653 timer.start();
1654 }
1655 }
1656
1657 // After the handshake, safely free the ObjectMonitors that were
1658 // deflated in this cycle.
1659 for (ObjectMonitor* monitor: delete_list) {
1660 delete monitor;
1661 deleted_count++;
1662
1663 if (current->is_Java_thread()) {
1664 // A JavaThread must check for a safepoint/handshake and honor it.
1665 chk_for_block_req(JavaThread::cast(current), "deletion", "deleted_count",
1666 deleted_count, ls, &timer);
1667 }
|