1 /*
2 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/padded.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/markWord.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "runtime/atomic.hpp"
37 #include "runtime/biasedLocking.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/handshake.hpp"
40 #include "runtime/interfaceSupport.inline.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "runtime/objectMonitor.hpp"
43 #include "runtime/objectMonitor.inline.hpp"
44 #include "runtime/os.inline.hpp"
45 #include "runtime/osThread.hpp"
46 #include "runtime/perfData.hpp"
47 #include "runtime/safepointMechanism.inline.hpp"
48 #include "runtime/safepointVerifiers.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "runtime/stubRoutines.hpp"
51 #include "runtime/synchronizer.hpp"
52 #include "runtime/thread.inline.hpp"
53 #include "runtime/timer.hpp"
54 #include "runtime/trimNativeHeap.hpp"
55 #include "runtime/vframe.hpp"
56 #include "runtime/vmThread.hpp"
57 #include "utilities/align.hpp"
58 #include "utilities/dtrace.hpp"
59 #include "utilities/events.hpp"
60 #include "utilities/preserveException.hpp"
61
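// MonitorList::add() prepends the new monitor to the singly-linked in-use
// list with a lock-free CAS loop. _count is maintained atomically; _max is
// only a best-effort high-water mark, since the compare-then-inc below is
// not atomic as a whole (good enough for a statistic).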
62 void MonitorList::add(ObjectMonitor* m) {
63 ObjectMonitor* head;
64 do {
65 head = Atomic::load(&_head);
66 m->set_next_om(head);
67 } while (Atomic::cmpxchg(&_head, head, m) != head);
68
69 size_t count = Atomic::add(&_count, 1u);
70 if (count > max()) {
71 Atomic::inc(&_max);
72 }
73 }
74
75 size_t MonitorList::count() const {
76 return Atomic::load(&_count);
77 }
78
79 size_t MonitorList::max() const {
258 // returns true -- to indicate the call was satisfied.
259 // returns false -- to indicate the call needs the services of the slow-path.
260 // A no-loitering ordinance is in effect for code in the quick_* family
261 // operators: safepoints or indefinite blocking (blocking that might span a
262 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
263 // entry.
264 //
265 // Consider: An interesting optimization is to have the JIT recognize the
266 // following common idiom:
267 // synchronized (someobj) { .... ; notify(); }
268 // That is, we find a notify() or notifyAll() call that immediately precedes
269 // the monitorexit operation. In that case the JIT could fuse the operations
270 // into a single notifyAndExit() runtime primitive.
271
272 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
273 assert(current->thread_state() == _thread_in_Java, "invariant");
274 NoSafepointVerifier nsv;
275 if (obj == NULL) return false; // slow-path for invalid obj
276 const markWord mark = obj->mark();
277
278 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
279 // Degenerate notify
280 // stack-locked by caller so by definition the implied waitset is empty.
281 return true;
282 }
283
284 if (mark.has_monitor()) {
285 ObjectMonitor* const mon = mark.monitor();
286 assert(mon->object() == oop(obj), "invariant");
287     if (mon->owner() != current) return false;  // slow-path for IllegalMonitorStateException
288
289 if (mon->first_waiter() != NULL) {
290 // We have one or more waiters. Since this is an inflated monitor
291 // that we own, we can transfer one or more threads from the waitset
292 // to the entrylist here and now, avoiding the slow-path.
293 if (all) {
294 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
295 } else {
296 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
297 }
298 int free_count = 0;
299 do {
300 mon->INotify(current);
301 ++free_count;
309 return false;
310 }
311
312
313 // The LockNode emitted directly at the synchronization site would have
314 // been too big if it were to have included support for the cases of inflated
315 // recursive enter and exit, so they go here instead.
316 // Note that we can't safely call AsyncPrintJavaStack() from within
317 // quick_enter() as our thread state remains _in_Java.
318
319 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
320 BasicLock * lock) {
321 assert(current->thread_state() == _thread_in_Java, "invariant");
322 NoSafepointVerifier nsv;
323 if (obj == NULL) return false; // Need to throw NPE
324
325 if (obj->klass()->is_value_based()) {
326 return false;
327 }
328
329 const markWord mark = obj->mark();
330
331 if (mark.has_monitor()) {
332 ObjectMonitor* const m = mark.monitor();
333 // An async deflation or GC can race us before we manage to make
334 // the ObjectMonitor busy by setting the owner below. If we detect
335 // that race we just bail out to the slow-path here.
336 if (m->object_peek() == NULL) {
337 return false;
338 }
339 JavaThread* const owner = (JavaThread*) m->owner_raw();
340
341 // Lock contention and Transactional Lock Elision (TLE) diagnostics
342 // and observability
343 // Case: light contention possibly amenable to TLE
344 // Case: TLE inimical operations such as nested/recursive synchronization
345
346 if (owner == current) {
347 m->_recursions++;
348 return true;
349 }
350
351 // This Java Monitor is inflated so obj's header will never be
352 // displaced to this thread's BasicLock. Make the displaced header
353 // non-NULL so this BasicLock is not seen as recursive nor as
354 // being locked. We do this unconditionally so that this thread's
355 // BasicLock cannot be mis-interpreted by any stack walkers. For
356 // performance reasons, stack walkers generally first check for
357 // Biased Locking in the object's header, the second check is for
358 // stack-locking in the object's header, the third check is for
359 // recursive stack-locking in the displaced header in the BasicLock,
360 // and last are the inflated Java Monitor (ObjectMonitor) checks.
361 lock->set_displaced_header(markWord::unused_mark());
362
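// Try to take ownership: try_set_owner_from(NULL, current) CASes the _owner
// field from NULL to this thread and returns the previous owner, so a NULL
// result means we acquired the monitor without contention.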
363 if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
364 assert(m->_recursions == 0, "invariant");
365 return true;
366 }
367 }
368
369 // Note that we could inflate in quick_enter.
370 // This is likely a useful optimization
371 // Critically, in quick_enter() we must not:
372 // -- perform bias revocation, or
373 // -- block indefinitely, or
374 // -- reach a safepoint
375
376 return false; // revert to slow-path
377 }
378
379 // Handle notifications when synchronizing on value-based classes
380 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
381 frame last_frame = current->last_frame();
382 bool bcp_was_adjusted = false;
383 // Don't decrement bcp if it points to the frame's first instruction. This happens when
384 // handle_sync_on_value_based_class() is called because of a synchronized method. There
385 // is no actual monitorenter instruction in the byte code in this case.
386 if (last_frame.is_interpreted_frame() &&
387 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
388 // adjust bcp to point back to monitorenter so that we print the correct line numbers
389 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
390 bcp_was_adjusted = true;
391 }
392
393 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
394 ResourceMark rm(current);
395 stringStream ss;
396 current->print_stack_on(&ss);
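// Trim the captured stack trace for the fatal() message: point 'base' at the
// first "at" frame and cut the buffer at the first newline so that only the
// topmost frame is printed.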
397 char* base = (char*)strstr(ss.base(), "at");
398 char* newline = (char*)strchr(ss.base(), '\n');
399 if (newline != NULL) {
400 *newline = '\0';
401 }
402 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
403 } else {
404 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
405 ResourceMark rm(current);
406 Log(valuebasedclasses) vblog;
407
408 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
409 if (current->has_last_Java_frame()) {
410 LogStream info_stream(vblog.info());
411 current->print_stack_on(&info_stream);
412 } else {
413 vblog.info("Cannot find the last Java frame");
414 }
415
416 EventSyncOnValueBasedClass event;
417 if (event.should_commit()) {
418 event.set_valueBasedClass(obj->klass());
419 event.commit();
420 }
421 }
422
423 if (bcp_was_adjusted) {
424 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
425 }
426 }
427
428 // -----------------------------------------------------------------------------
429 // Monitor Enter/Exit
430 // The interpreter and compiler assembly code tries to lock using the fast path
431 // of this algorithm. Make sure to update that code if the following function is
432 // changed. The implementation is extremely sensitive to race conditions. Be careful.
433
434 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
435 if (obj->klass()->is_value_based()) {
436 handle_sync_on_value_based_class(obj, current);
437 }
438
439 if (UseBiasedLocking) {
440 BiasedLocking::revoke(current, obj);
441 }
442
443 markWord mark = obj->mark();
444 assert(!mark.has_bias_pattern(), "should not see bias pattern here");
445
446 if (mark.is_neutral()) {
447 // Anticipate successful CAS -- the ST of the displaced mark must
448 // be visible <= the ST performed by the CAS.
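// In other words, the store of the displaced mark into the BasicLock must
// happen before the CAS publishes markWord::from_pointer(lock); otherwise
// another thread observing the stack-lock could read a stale displaced header.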
449 lock->set_displaced_header(mark);
450 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
451 return;
452 }
453 // Fall through to inflate() ...
454 } else if (mark.has_locker() &&
455 current->is_lock_owned((address)mark.locker())) {
456 assert(lock != mark.locker(), "must not re-lock the same lock");
457 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
458 lock->set_displaced_header(markWord::from_pointer(NULL));
459 return;
460 }
461
462 // The object header will never be displaced to this lock,
463 // so it does not matter what the value is, except that it
464 // must be non-zero to avoid looking like a re-entrant lock,
465 // and must not look locked either.
466 lock->set_displaced_header(markWord::unused_mark());
467 // An async deflation can race after the inflate() call and before
468 // enter() can make the ObjectMonitor busy. enter() returns false if
469 // we have lost the race to async deflation and we simply try again.
470 while (true) {
471 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
472 if (monitor->enter(current)) {
473 return;
474 }
475 }
476 }
477
478 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
479 markWord mark = object->mark();
480 // We cannot check for Biased Locking if we are racing an inflation.
481 assert(mark == markWord::INFLATING() ||
482 !mark.has_bias_pattern(), "should not see bias pattern here");
483
484 markWord dhw = lock->displaced_header();
485 if (dhw.value() == 0) {
486 // If the displaced header is NULL, then this exit matches up with
487 // a recursive enter. No real work to do here except for diagnostics.
488 #ifndef PRODUCT
489 if (mark != markWord::INFLATING()) {
490 // Only do diagnostics if we are not racing an inflation. Simply
491 // exiting a recursive enter of a Java Monitor that is being
492 // inflated is safe; see the has_monitor() comment below.
493 assert(!mark.is_neutral(), "invariant");
494 assert(!mark.has_locker() ||
495 current->is_lock_owned((address)mark.locker()), "invariant");
496 if (mark.has_monitor()) {
497 // The BasicLock's displaced_header is marked as a recursive
498 // enter and we have an inflated Java Monitor (ObjectMonitor).
499 // This is a special case where the Java Monitor was inflated
500 // after this thread entered the stack-lock recursively. When a
501 // Java Monitor is inflated, we cannot safely walk the Java
502 // Monitor owner's stack and update the BasicLocks because a
503 // Java Monitor can be asynchronously inflated by a thread that
504 // does not own the Java Monitor.
505 ObjectMonitor* m = mark.monitor();
506 assert(m->object()->mark() == mark, "invariant");
507 assert(m->is_entered(current), "invariant");
508 }
509 }
510 #endif
511 return;
512 }
513
514 if (mark == markWord::from_pointer(lock)) {
515 // If the object is stack-locked by the current thread, try to
516 // swing the displaced header from the BasicLock back to the mark.
517 assert(dhw.is_neutral(), "invariant");
518 if (object->cas_set_mark(dhw, mark) == mark) {
519 return;
520 }
521 }
522
523 // We have to take the slow-path of possible inflation and then exit.
524 // The ObjectMonitor* can't be async deflated until ownership is
525 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
526 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
527 monitor->exit(current);
528 }
529
530 // -----------------------------------------------------------------------------
531 // Class Loader support to workaround deadlocks on the class loader lock objects
532 // Also used by GC
533 // complete_exit()/reenter() are used to wait on a nested lock
534 // i.e. to give up an outer lock completely and then re-enter
535 // Used when holding nested locks - lock acquisition order: lock1 then lock2
536 // 1) complete_exit lock1 - saving recursion count
537 // 2) wait on lock2
538 // 3) when notified on lock2, unlock lock2
539 // 4) reenter lock1 with original recursion count
540 // 5) lock lock2
541 // NOTE: must use a heavyweight monitor to handle complete_exit/reenter()
542 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
543 if (UseBiasedLocking) {
544 BiasedLocking::revoke(current, obj);
545 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
546 }
671 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
672 if (UseBiasedLocking) {
673 BiasedLocking::revoke(current, obj);
674 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
675 }
676 // The ObjectMonitor* can't be async deflated because the _waiters
677 // field is incremented before ownership is dropped and decremented
678 // after ownership is regained.
679 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
680 monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
681 }
682
683 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
684 JavaThread* current = THREAD;
685 if (UseBiasedLocking) {
686 BiasedLocking::revoke(current, obj);
687 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
688 }
689
690 markWord mark = obj->mark();
691 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
692 // Not inflated so there can't be any waiters to notify.
693 return;
694 }
695 // The ObjectMonitor* can't be async deflated until ownership is
696 // dropped by the calling thread.
697 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
698 monitor->notify(CHECK);
699 }
700
701 // NOTE: see the comment for notify()
702 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
703 JavaThread* current = THREAD;
704 if (UseBiasedLocking) {
705 BiasedLocking::revoke(current, obj);
706 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
707 }
708
709 markWord mark = obj->mark();
710 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
711 // Not inflated so there can't be any waiters to notify.
712 return;
713 }
714 // The ObjectMonitor* can't be async deflated until ownership is
715 // dropped by the calling thread.
716 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
717 monitor->notifyAll(CHECK);
718 }
719
720 // -----------------------------------------------------------------------------
721 // Hash Code handling
722
723 struct SharedGlobals {
724 char _pad_prefix[OM_CACHE_LINE_SIZE];
725 // This is a highly shared mostly-read variable.
726 // To avoid false-sharing it needs to be the sole occupant of a cache line.
727 volatile int stw_random;
728 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
729 // Hot RW variable -- Sequester to avoid false-sharing
730 volatile int hc_sequence;
731 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
732 };
733
734 static SharedGlobals GVars;
735
736 static markWord read_stable_mark(oop obj) {
737 markWord mark = obj->mark_acquire();
738 if (!mark.is_being_inflated()) {
739 return mark; // normal fast-path return
740 }
741
742 int its = 0;
743 for (;;) {
744 markWord mark = obj->mark_acquire();
745 if (!mark.is_being_inflated()) {
746 return mark; // normal fast-path return
747 }
748
749 // The object is being inflated by some other thread.
750 // The caller of read_stable_mark() must wait for inflation to complete.
751 // Avoid live-lock.
752
753 ++its;
754 if (its > 10000 || !os::is_MP()) {
755 if (its & 1) {
756 os::naked_yield();
757 } else {
758 // Note that the following code attenuates the livelock problem but is not
827 value = 1; // for sensitivity testing
828 } else if (hashCode == 3) {
829 value = ++GVars.hc_sequence;
830 } else if (hashCode == 4) {
831 value = cast_from_oop<intptr_t>(obj);
832 } else {
833 // Marsaglia's xor-shift scheme with thread-specific state
834 // This is probably the best overall implementation -- we'll
835 // likely make this the default in future releases.
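// (Marsaglia xorshift with a four-word thread-local state: each call shifts
// the (x, y, z, w) words down one slot and mixes the oldest word back in as
// the new w, yielding an independent pseudo-random sequence per thread.)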
836 unsigned t = current->_hashStateX;
837 t ^= (t << 11);
838 current->_hashStateX = current->_hashStateY;
839 current->_hashStateY = current->_hashStateZ;
840 current->_hashStateZ = current->_hashStateW;
841 unsigned v = current->_hashStateW;
842 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
843 current->_hashStateW = v;
844 value = v;
845 }
846
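// Constrain the result to the mark word's hash bits. Zero is reserved to
// mean "no hash installed" (markWord::no_hash), so remap a zero result to a
// non-zero constant.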
847 value &= markWord::hash_mask;
848 if (value == 0) value = 0xBAD;
849 assert(value != markWord::no_hash, "invariant");
850 return value;
851 }
852
853 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
854 if (UseBiasedLocking) {
855 // NOTE: many places throughout the JVM do not expect a safepoint
856 // to be taken here. However, we only ever bias Java instances and all
857 // of the call sites of identity_hash that might revoke biases have
858 // been checked to make sure they can handle a safepoint. The
859 // added check of the bias pattern is to avoid useless calls to
860 // thread-local storage.
861 if (obj->mark().has_bias_pattern()) {
862 // Handle for oop obj in case of STW safepoint
863 Handle hobj(current, obj);
864 if (SafepointSynchronize::is_at_safepoint()) {
865 BiasedLocking::revoke_at_safepoint(hobj);
866 } else {
867 BiasedLocking::revoke(current->as_Java_thread(), hobj);
868 }
869 obj = hobj();
870 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
871 }
872 }
873
874 while (true) {
875 ObjectMonitor* monitor = NULL;
876 markWord temp, test;
877 intptr_t hash;
878 markWord mark = read_stable_mark(obj);
879
880 // object should remain ineligible for biased locking
881 assert(!mark.has_bias_pattern(), "invariant");
882
883 if (mark.is_neutral()) { // if this is a normal header
884 hash = mark.hash();
885 if (hash != 0) { // if it has a hash, just return it
886 return hash;
887 }
888 hash = get_next_hash(current, obj); // get a new hash
889 temp = mark.copy_set_hash(hash); // merge the hash into header
890 // try to install the hash
891 test = obj->cas_set_mark(temp, mark);
892 if (test == mark) { // if the hash was installed, return it
893 return hash;
894 }
895 // Failed to install the hash. It could be that another thread
896 // installed the hash just before our attempt or inflation has
897 // occurred or... so we fall thru to inflate the monitor for
898 // stability and then install the hash.
899 } else if (mark.has_monitor()) {
900 monitor = mark.monitor();
901 temp = monitor->header();
902 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
903 hash = temp.hash();
904 if (hash != 0) {
905 // It has a hash.
906
907 // Separate load of dmw/header above from the loads in
908 // is_being_async_deflated().
909
910 // dmw/header and _contentions may get written by different threads.
911 // Make sure to observe them in the same order when having several observers.
912 OrderAccess::loadload_for_IRIW();
913
914 if (monitor->is_being_async_deflated()) {
915 // But we can't safely use the hash if we detect that async
916 // deflation has occurred. So we attempt to restore the
917 // header/dmw to the object's header so that we only retry
918 // once if the deflater thread happens to be slow.
919 monitor->install_displaced_markword_in_object(obj);
920 continue;
921 }
922 return hash;
923 }
924 // Fall thru so we only have one place that installs the hash in
925 // the ObjectMonitor.
926 } else if (current->is_lock_owned((address)mark.locker())) {
927 // This is a stack lock owned by the calling thread so fetch the
928 // displaced markWord from the BasicLock on the stack.
929 temp = mark.displaced_mark_helper();
930 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
931 hash = temp.hash();
932 if (hash != 0) { // if it has a hash, just return it
933 return hash;
934 }
935 // WARNING:
936 // The displaced header in the BasicLock on a thread's stack
937 // is strictly immutable. It CANNOT be changed in ANY cases.
938 // So we have to inflate the stack lock into an ObjectMonitor
939 // even if the current thread owns the lock. The BasicLock on
940 // a thread's stack can be asynchronously read by other threads
941 // during an inflate() call so any change to that stack memory
942 // may not propagate to other threads correctly.
943 }
944
945 // Inflate the monitor to set the hash.
946
983
984 // Deprecated -- use FastHashCode() instead.
985
986 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
987 return FastHashCode(Thread::current(), obj());
988 }
989
990
991 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
992 Handle h_obj) {
993 if (UseBiasedLocking) {
994 BiasedLocking::revoke(current, h_obj);
995 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
996 }
997
998 assert(current == JavaThread::current(), "Can only be called on current thread");
999 oop obj = h_obj();
1000
1001 markWord mark = read_stable_mark(obj);
1002
1003 // Uncontended case, header points to stack
1004 if (mark.has_locker()) {
1005 return current->is_lock_owned((address)mark.locker());
1006 }
1007 // Contended case, header points to ObjectMonitor (tagged pointer)
1008 if (mark.has_monitor()) {
1009 // The first stage of async deflation does not affect any field
1010 // used by this comparison so the ObjectMonitor* is usable here.
1011 ObjectMonitor* monitor = mark.monitor();
1012 return monitor->is_entered(current) != 0;
1013 }
1014 // Unlocked case, header in place
1015 assert(mark.is_neutral(), "sanity check");
1016 return false;
1017 }
1018
1019 // FIXME: jvmti should call this
1020 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1021 if (UseBiasedLocking) {
1022 if (SafepointSynchronize::is_at_safepoint()) {
1023 BiasedLocking::revoke_at_safepoint(h_obj);
1024 } else {
1025 BiasedLocking::revoke(JavaThread::current(), h_obj);
1026 }
1027 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1028 }
1029
1030 oop obj = h_obj();
1031 address owner = NULL;
1032
1033 markWord mark = read_stable_mark(obj);
1034
1035 // Uncontended case, header points to stack
1036 if (mark.has_locker()) {
1037 owner = (address) mark.locker();
1038 }
1039
1040 // Contended case, header points to ObjectMonitor (tagged pointer)
1041 else if (mark.has_monitor()) {
1042 // The first stage of async deflation does not affect any field
1043 // used by this comparison so the ObjectMonitor* is usable here.
1044 ObjectMonitor* monitor = mark.monitor();
1045 assert(monitor != NULL, "monitor should be non-null");
1046 owner = (address) monitor->owner();
1047 }
1048
1049 if (owner != NULL) {
1050 // owning_thread_from_monitor_owner() may also return NULL here
1051 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1052 }
1053
1054 // Unlocked case, header in place
1055 // Cannot have assertion since this object may have been
1056 // locked by another thread when reaching here.
1057 // assert(mark.is_neutral(), "sanity check");
1058
1059 return NULL;
1060 }
1061
1062 // Visitors ...
1063
1064 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1065 MonitorList::Iterator iter = _in_use_list.iterator();
1066 while (iter.has_next()) {
1067 ObjectMonitor* mid = iter.next();
1068 if (mid->owner() != thread) {
1069 continue;
1070 }
1071 if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
1225 ObjectSynchronizer::InflateCause cause) {
1226 assert(event != NULL, "invariant");
1227 event->set_monitorClass(obj->klass());
1228 event->set_address((uintptr_t)(void*)obj);
1229 event->set_cause((u1)cause);
1230 event->commit();
1231 }
1232
1233 // Fast path code shared by multiple functions
1234 void ObjectSynchronizer::inflate_helper(oop obj) {
1235 markWord mark = obj->mark_acquire();
1236 if (mark.has_monitor()) {
1237 ObjectMonitor* monitor = mark.monitor();
1238 markWord dmw = monitor->header();
1239 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1240 return;
1241 }
1242 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1243 }
1244
1245 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1246 const InflateCause cause) {
1247 EventJavaMonitorInflate event;
1248
1249 for (;;) {
1250 const markWord mark = object->mark_acquire();
1251 assert(!mark.has_bias_pattern(), "invariant");
1252
1253 // The mark can be in one of the following states:
1254 // * Inflated - just return
1255 // * Stack-locked - coerce it to inflated
1256 // * INFLATING - busy wait for conversion to complete
1257 // * Neutral - aggressively inflate the object.
1258 // * BIASED - Illegal. We should never see this
1259
1260 // CASE: inflated
1261 if (mark.has_monitor()) {
1262 ObjectMonitor* inf = mark.monitor();
1263 markWord dmw = inf->header();
1264 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1265 return inf;
1266 }
1267
1268 // CASE: inflation in progress - inflating over a stack-lock.
1269 // Some other thread is converting from stack-locked to inflated.
1270 // Only that thread can complete inflation -- other threads must wait.
1271 // The INFLATING value is transient.
1272 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1273 // We could always eliminate polling by parking the thread on some auxiliary list.
1274 if (mark == markWord::INFLATING()) {
1275 read_stable_mark(object);
1276 continue;
1277 }
1278
1279 // CASE: stack-locked
1280 // Could be stack-locked either by this thread or by some other thread.
1281 //
1282 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1283 // to install INFLATING into the mark word. We originally installed INFLATING,
1284 // allocated the ObjectMonitor, and then finally STed the address of the
1285 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1286 // the interval in which INFLATING appeared in the mark, thus increasing
1287 // the odds of inflation contention.
1288
1289 LogStreamHandle(Trace, monitorinflation) lsh;
1290
1291 if (mark.has_locker()) {
1292 ObjectMonitor* m = new ObjectMonitor(object);
1293 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1294 // We do this before the CAS in order to minimize the length of time
1295 // in which INFLATING appears in the mark.
1296
1297 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1298 if (cmp != mark) {
1299 delete m;
1300 continue; // Interference -- just retry
1301 }
1302
1303 // We've successfully installed INFLATING (0) into the mark-word.
1304 // This is the only case where 0 will appear in a mark-word.
1305 // Only the singular thread that successfully swings the mark-word
1306 // to 0 can perform (or more precisely, complete) inflation.
1307 //
1308 // Why do we CAS a 0 into the mark-word instead of just CASing the
1309 // mark-word from the stack-locked value directly to the new inflated state?
1310 // Consider what happens when a thread unlocks a stack-locked object.
1311 // It attempts to use CAS to swing the displaced header value from the
1343 // Note that a thread can inflate an object
1344 // that it has stack-locked -- as might happen in wait() -- directly
1345 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1346 m->set_owner_from(NULL, mark.locker());
1347 // TODO-FIXME: assert BasicLock->dhw != 0.
1348
1349 // Must preserve store ordering. The monitor state must
1350 // be stable at the time of publishing the monitor address.
1351 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1352 // Release semantics so that above set_object() is seen first.
1353 object->release_set_mark(markWord::encode(m));
1354
1355 // Once ObjectMonitor is configured and the object is associated
1356 // with the ObjectMonitor, it is safe to allow async deflation:
1357 _in_use_list.add(m);
1358
1359 // Hopefully the performance counters are allocated on distinct cache lines
1360 // to avoid false sharing on MP systems ...
1361 OM_PERFDATA_OP(Inflations, inc());
1362 if (log_is_enabled(Trace, monitorinflation)) {
1363 ResourceMark rm(current);
1364 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1365 INTPTR_FORMAT ", type='%s'", p2i(object),
1366 object->mark().value(), object->klass()->external_name());
1367 }
1368 if (event.should_commit()) {
1369 post_monitor_inflate_event(&event, object, cause);
1370 }
1371 return m;
1372 }
1373
1374 // CASE: neutral
1375 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1376 // If we know we're inflating for entry it's better to inflate by swinging a
1377 // pre-locked ObjectMonitor pointer into the object header. A successful
1378 // CAS inflates the object *and* confers ownership to the inflating thread.
1379 // In the current implementation we use a 2-step mechanism where we CAS()
1380 // to inflate and then CAS() again to try to swing _owner from NULL to current.
1381 // An inflateTry() method that we could call from enter() would be useful.
1382
1383 // Catch if the object's header is not neutral (not locked and
1387 // prepare m for installation - set monitor to initial state
1388 m->set_header(mark);
1389
1390 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1391 delete m;
1392 m = NULL;
1393 continue;
1394 // interference - the markword changed - just retry.
1395 // The state-transitions are one-way, so there's no chance of
1396 // live-lock -- "Inflated" is an absorbing state.
1397 }
1398
1399 // Once the ObjectMonitor is configured and object is associated
1400 // with the ObjectMonitor, it is safe to allow async deflation:
1401 _in_use_list.add(m);
1402
1403 // Hopefully the performance counters are allocated on distinct
1404 // cache lines to avoid false sharing on MP systems ...
1405 OM_PERFDATA_OP(Inflations, inc());
1406 if (log_is_enabled(Trace, monitorinflation)) {
1407 ResourceMark rm(current);
1408 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1409 INTPTR_FORMAT ", type='%s'", p2i(object),
1410 object->mark().value(), object->klass()->external_name());
1411 }
1412 if (event.should_commit()) {
1413 post_monitor_inflate_event(&event, object, cause);
1414 }
1415 return m;
1416 }
1417 }
1418
1419 void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1420 const char* cnt_name, size_t cnt,
1421 LogStream* ls, elapsedTimer* timer_p) {
1422 if (!SafepointMechanism::should_process(current)) {
1423 return;
1424 }
1425
1426 // A safepoint/handshake has started.
1427 if (ls != NULL) {
1464 if (current->is_Java_thread()) {
1465 // A JavaThread must check for a safepoint/handshake and honor it.
1466 chk_for_block_req(current->as_Java_thread(), "deflation", "deflated_count",
1467 deflated_count, ls, timer_p);
1468 }
1469 }
1470
1471 return deflated_count;
1472 }
1473
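// The do_thread() body below only logs; the value of the handshake is the
// rendezvous itself. Once every JavaThread has executed it, no thread can
// still hold a reference to an ObjectMonitor that was unlinked beforehand,
// so those monitors can be safely deleted.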
1474 class HandshakeForDeflation : public HandshakeClosure {
1475 public:
1476 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1477
1478 void do_thread(Thread* thread) {
1479 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1480 INTPTR_FORMAT, p2i(thread));
1481 }
1482 };
1483
1484 // This function is called by the MonitorDeflationThread to deflate
1485 // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
1486 // by the VMThread.
1487 size_t ObjectSynchronizer::deflate_idle_monitors() {
1488 Thread* current = Thread::current();
1489 if (current->is_Java_thread()) {
1490 // The async deflation request has been processed.
1491 _last_async_deflation_time_ns = os::javaTimeNanos();
1492 set_is_async_deflation_requested(false);
1493 }
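// Note: only a JavaThread (the MonitorDeflationThread) resets the async
// deflation request flag above; the VMThread path via
// do_final_audit_and_print_stats() leaves it untouched.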
1494
1495 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1496 LogStreamHandle(Info, monitorinflation) lsh_info;
1497 LogStream* ls = NULL;
1498 if (log_is_enabled(Debug, monitorinflation)) {
1499 ls = &lsh_debug;
1500 } else if (log_is_enabled(Info, monitorinflation)) {
1501 ls = &lsh_info;
1502 }
1503
1516 // deflated, BUT the MonitorDeflationThread blocked for the final
1517 // safepoint during unlinking.
1518
1519 // Unlink deflated ObjectMonitors from the in-use list.
1520 ResourceMark rm;
1521 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1522 size_t unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer,
1523 &delete_list);
1524 if (current->is_Java_thread()) {
1525 if (ls != NULL) {
1526 timer.stop();
1527 ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
1528 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1529 SIZE_FORMAT ", max=" SIZE_FORMAT,
1530 unlinked_count, in_use_list_ceiling(),
1531 _in_use_list.count(), _in_use_list.max());
1532 }
1533
1534 // A JavaThread needs to handshake in order to safely free the
1535 // ObjectMonitors that were deflated in this cycle.
1536 HandshakeForDeflation hfd_hc;
1537 Handshake::execute(&hfd_hc);
1538
1539 if (ls != NULL) {
1540 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1541 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1542 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1543 timer.start();
1544 }
1545 }
1546
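// Keep the periodic native heap trimmer suspended for the scope of the bulk
// monitor deletion below.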
1547 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1548
1549 // After the handshake, safely free the ObjectMonitors that were
1550 // deflated in this cycle.
1551 size_t deleted_count = 0;
1552 for (ObjectMonitor* monitor: delete_list) {
1553 delete monitor;
1554 deleted_count++;
1555
1556 if (current->is_Java_thread()) {
1557 // A JavaThread must check for a safepoint/handshake and honor it.
1 /*
2 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "gc/shared/suspendibleThreadSet.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/globals.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/handshake.hpp"
42 #include "runtime/interfaceSupport.inline.hpp"
43 #include "runtime/lockStack.inline.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "runtime/objectMonitor.hpp"
46 #include "runtime/objectMonitor.inline.hpp"
47 #include "runtime/os.inline.hpp"
48 #include "runtime/osThread.hpp"
49 #include "runtime/perfData.hpp"
50 #include "runtime/safepointMechanism.inline.hpp"
51 #include "runtime/safepointVerifiers.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "runtime/stubRoutines.hpp"
54 #include "runtime/synchronizer.hpp"
55 #include "runtime/thread.inline.hpp"
56 #include "runtime/timer.hpp"
57 #include "runtime/trimNativeHeap.hpp"
58 #include "runtime/vframe.hpp"
59 #include "runtime/vmThread.hpp"
60 #include "utilities/align.hpp"
61 #include "utilities/dtrace.hpp"
62 #include "utilities/events.hpp"
63 #include "utilities/globalDefinitions.hpp"
64 #include "utilities/preserveException.hpp"
65
66 void MonitorList::add(ObjectMonitor* m) {
67 ObjectMonitor* head;
68 do {
69 head = Atomic::load(&_head);
70 m->set_next_om(head);
71 } while (Atomic::cmpxchg(&_head, head, m) != head);
72
73 size_t count = Atomic::add(&_count, 1u);
74 if (count > max()) {
75 Atomic::inc(&_max);
76 }
77 }
78
79 size_t MonitorList::count() const {
80 return Atomic::load(&_count);
81 }
82
83 size_t MonitorList::max() const {
262 // returns true -- to indicate the call was satisfied.
263 // returns false -- to indicate the call needs the services of the slow-path.
264 // A no-loitering ordinance is in effect for code in the quick_* family
265 // operators: safepoints or indefinite blocking (blocking that might span a
266 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
267 // entry.
268 //
269 // Consider: An interesting optimization is to have the JIT recognize the
270 // following common idiom:
271 // synchronized (someobj) { .... ; notify(); }
272 // That is, we find a notify() or notifyAll() call that immediately precedes
273 // the monitorexit operation. In that case the JIT could fuse the operations
274 // into a single notifyAndExit() runtime primitive.
275
276 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
277 assert(current->thread_state() == _thread_in_Java, "invariant");
278 NoSafepointVerifier nsv;
279 if (obj == NULL) return false; // slow-path for invalid obj
280 const markWord mark = obj->mark();
281
282 if (LockingMode == LM_LIGHTWEIGHT) {
283 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
284 // Degenerate notify
285 // fast-locked by caller so by definition the implied waitset is empty.
286 return true;
287 }
288 } else if (LockingMode == LM_LEGACY) {
289 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
290 // Degenerate notify
291 // stack-locked by caller so by definition the implied waitset is empty.
292 return true;
293 }
294 }
295
296 if (mark.has_monitor()) {
297 ObjectMonitor* const mon = mark.monitor();
298 assert(mon->object() == oop(obj), "invariant");
299     if (mon->owner() != current) return false;  // slow-path for IllegalMonitorStateException
300
301 if (mon->first_waiter() != NULL) {
302 // We have one or more waiters. Since this is an inflated monitor
303 // that we own, we can transfer one or more threads from the waitset
304 // to the entrylist here and now, avoiding the slow-path.
305 if (all) {
306 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
307 } else {
308 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
309 }
310 int free_count = 0;
311 do {
312 mon->INotify(current);
313 ++free_count;
321 return false;
322 }
323
324
325 // The LockNode emitted directly at the synchronization site would have
326 // been too big if it were to have included support for the cases of inflated
327 // recursive enter and exit, so they go here instead.
328 // Note that we can't safely call AsyncPrintJavaStack() from within
329 // quick_enter() as our thread state remains _in_Java.
330
331 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
332 BasicLock * lock) {
333 assert(current->thread_state() == _thread_in_Java, "invariant");
334 NoSafepointVerifier nsv;
335 if (obj == NULL) return false; // Need to throw NPE
336
337 if (obj->klass()->is_value_based()) {
338 return false;
339 }
340
341 if (LockingMode == LM_LIGHTWEIGHT) {
342 LockStack& lock_stack = current->lock_stack();
343 if (lock_stack.is_full()) {
344 // Always go into runtime if the lock stack is full.
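// (The runtime slow path, enter_fast_impl(), makes room by inflating the
// least recently locked entry on the lock stack; see below.)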
345 return false;
346 }
347 if (lock_stack.try_recursive_enter(obj)) {
348 // Recursive lock successful.
349 return true;
350 }
351 }
352
353 const markWord mark = obj->mark();
354
355 if (mark.has_monitor()) {
356 ObjectMonitor* const m = mark.monitor();
357 // An async deflation or GC can race us before we manage to make
358 // the ObjectMonitor busy by setting the owner below. If we detect
359 // that race we just bail out to the slow-path here.
360 if (m->object_peek() == NULL) {
361 return false;
362 }
363 JavaThread* const owner = (JavaThread*) m->owner_raw();
364
365 // Lock contention and Transactional Lock Elision (TLE) diagnostics
366 // and observability
367 // Case: light contention possibly amenable to TLE
368 // Case: TLE inimical operations such as nested/recursive synchronization
369
370 if (owner == current) {
371 m->_recursions++;
372 return true;
373 }
374
375 if (LockingMode != LM_LIGHTWEIGHT) {
376 // This Java Monitor is inflated so obj's header will never be
377 // displaced to this thread's BasicLock. Make the displaced header
378 // non-NULL so this BasicLock is not seen as recursive nor as
379 // being locked. We do this unconditionally so that this thread's
380 // BasicLock cannot be mis-interpreted by any stack walkers. For
381 // performance reasons, stack walkers generally first check for
382 // Biased Locking in the object's header, the second check is for
383 // stack-locking in the object's header, the third check is for
384 // recursive stack-locking in the displaced header in the BasicLock,
385 // and last are the inflated Java Monitor (ObjectMonitor) checks.
386 lock->set_displaced_header(markWord::unused_mark());
387 }
388
389 if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
390 assert(m->_recursions == 0, "invariant");
391 return true;
392 }
393 }
394
395 // Note that we could inflate in quick_enter.
396 // This is likely a useful optimization
397 // Critically, in quick_enter() we must not:
398 // -- perform bias revocation, or
399 // -- block indefinitely, or
400 // -- reach a safepoint
401
402 return false; // revert to slow-path
403 }
404
405 // Handle notifications when synchronizing on value-based classes
406 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
407 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
408 frame last_frame = locking_thread->last_frame();
409 bool bcp_was_adjusted = false;
410 // Don't decrement bcp if it points to the frame's first instruction. This happens when
411 // handle_sync_on_value_based_class() is called because of a synchronized method. There
412 // is no actual monitorenter instruction in the byte code in this case.
413 if (last_frame.is_interpreted_frame() &&
414 (last_frame.interpreter_frame_method()->code_base() < last_frame.interpreter_frame_bcp())) {
415 // adjust bcp to point back to monitorenter so that we print the correct line numbers
416 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() - 1);
417 bcp_was_adjusted = true;
418 }
419
420 if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
421 ResourceMark rm;
422 stringStream ss;
423 locking_thread->print_stack_on(&ss);
424 char* base = (char*)strstr(ss.base(), "at");
425 char* newline = (char*)strchr(ss.base(), '\n');
426 if (newline != NULL) {
427 *newline = '\0';
428 }
429 fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
430 } else {
431 assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
432 ResourceMark rm;
433 Log(valuebasedclasses) vblog;
434
435 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
436 if (locking_thread->has_last_Java_frame()) {
437 LogStream info_stream(vblog.info());
438 locking_thread->print_stack_on(&info_stream);
439 } else {
440 vblog.info("Cannot find the last Java frame");
441 }
442
443 EventSyncOnValueBasedClass event;
444 if (event.should_commit()) {
445 event.set_valueBasedClass(obj->klass());
446 event.commit();
447 }
448 }
449
450 if (bcp_was_adjusted) {
451 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
452 }
453 }
454
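// With LockingMode == LM_MONITOR the fast paths in enter_fast_impl()/exit()
// are skipped and every lock operation uses an inflated ObjectMonitor. This
// is only honored on the platforms listed below; elsewhere it returns false.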
455 static bool useHeavyMonitors() {
456 #if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64)
457 return LockingMode == LM_MONITOR;
458 #else
459 return false;
460 #endif
461 }
462
463 // -----------------------------------------------------------------------------
464 // Monitor Enter/Exit
465
466 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
467 // When called with locking_thread != Thread::current() some mechanism must synchronize
468 // the locking_thread with respect to the current thread. Currently only used when
469 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
470 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
471 if (!enter_fast_impl(obj, lock, locking_thread)) {
472 // Inflated ObjectMonitor::enter_for is required
473
474 // An async deflation can race after the inflate_for() call and before
475 // enter_for() can make the ObjectMonitor busy. enter_for() returns false
476 // if we have lost the race to async deflation and we simply try again.
477 while (true) {
478 ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
479 if (monitor->enter_for(locking_thread)) {
480 return;
481 }
482 assert(monitor->is_being_async_deflated(), "must be");
483 }
484 }
485 }
486
487 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
488 assert(current == Thread::current(), "must be");
489 if (!enter_fast_impl(obj, lock, current)) {
490 // Inflated ObjectMonitor::enter is required
491
492 // An async deflation can race after the inflate() call and before
493 // enter() can make the ObjectMonitor busy. enter() returns false if
494 // we have lost the race to async deflation and we simply try again.
495 while (true) {
496 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
497 if (monitor->enter(current)) {
498 return;
499 }
500 }
501 }
502 }
503
504 // The interpreter and compiler assembly code tries to lock using the fast path
505 // of this algorithm. Make sure to update that code if the following function is
506 // changed. The implementation is extremely sensitive to race conditions. Be careful.
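// Returns true if the lock was acquired (or recursively re-entered) on the
// fast path; false means the caller must fall back to the inflated
// ObjectMonitor::enter()/enter_for() path.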
507 bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
508
509 if (obj->klass()->is_value_based()) {
510 handle_sync_on_value_based_class(obj, locking_thread);
511 }
512
513 if (!useHeavyMonitors()) {
514 if (LockingMode == LM_LIGHTWEIGHT) {
515 // Fast-locking does not use the 'lock' argument.
516 LockStack& lock_stack = locking_thread->lock_stack();
517 if (lock_stack.is_full()) {
518 // We unconditionally make room on the lock stack by inflating
519 // the least recently locked object on the lock stack.
520
521         // About the choice to inflate the least recently locked object.
522         // First, we must choose to inflate a lock, either some lock on
523         // the lock-stack or the lock that is currently being entered
524         // (which may or may not be on the lock-stack).
525         // Second, the best lock to inflate is a lock which is entered
526         // in a control flow where only a few locks are being
527         // used, as the costly part of inflated locking is inflation,
528         // not locking. But this property is entirely program-dependent.
529         // Third, inflating the lock currently being entered when it
530         // is not present on the lock-stack will result in a still-full
531         // lock-stack. This creates a scenario where every deeper nested
532         // monitorenter must call into the runtime.
533         // The rationale here is as follows:
534         // Because we cannot (currently) figure out the second, and want
535         // to avoid the third, we inflate a lock on the lock-stack.
536         // The least recently locked lock is chosen as it is the lock
537         // with the longest critical section.
538
539 log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
540 ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
541 assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
542 p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
543 assert(!lock_stack.is_full(), "must have made room here");
544 }
545
546 markWord mark = obj()->mark_acquire();
547 while (mark.is_neutral()) {
548         // Retry until a lock state change has been observed: cas_set_mark() may collide with concurrent modifications to non-lock bits.
549 // Try to swing into 'fast-locked' state.
550 assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
551 const markWord locked_mark = mark.set_fast_locked();
552 const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
553 if (old_mark == mark) {
554 // Successfully fast-locked, push object to lock-stack and return.
555 lock_stack.push(obj());
556 return true;
557 }
558 mark = old_mark;
559 }
560
561 if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
562 // Recursive lock successful.
563 return true;
564 }
565
566 // Failed to fast lock.
567 return false;
568 } else if (LockingMode == LM_LEGACY) {
569 if (UseBiasedLocking) {
570 BiasedLocking::revoke(locking_thread, obj);
571 }
572
573 markWord mark = obj->mark();
574 if (mark.is_neutral()) {
575 // Anticipate successful CAS -- the ST of the displaced mark must
576 // be visible <= the ST performed by the CAS.
577 lock->set_displaced_header(mark);
578 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
579 return true;
580 }
581 } else if (mark.has_locker() &&
582 locking_thread->is_lock_owned((address) mark.locker())) {
583 assert(lock != mark.locker(), "must not re-lock the same lock");
584 assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
585 lock->set_displaced_header(markWord::from_pointer(NULL));
586 return true;
587 }
588
589 // The object header will never be displaced to this lock,
590 // so it does not matter what the value is, except that it
591 // must be non-zero to avoid looking like a re-entrant lock,
592 // and must not look locked either.
593 lock->set_displaced_header(markWord::unused_mark());
594
595 // Failed to fast lock.
596 return false;
597 }
598 }
599
600 return false;
601 }
602
603 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
604 if (!useHeavyMonitors()) {
605 markWord mark = object->mark();
606 if (LockingMode == LM_LIGHTWEIGHT) {
607 // Fast-locking does not use the 'lock' argument.
608 LockStack& lock_stack = current->lock_stack();
609 if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
610 // Recursively unlocked.
611 return;
612 }
613
614 if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
615 // This lock is recursive but is not at the top of the lock stack so we're
616 // doing an unbalanced exit. We have to fall thru to inflation below and
617 // let ObjectMonitor::exit() do the unlock.
618 } else {
619 while (mark.is_fast_locked()) {
620           // Retry until a lock state change has been observed: cas_set_mark() may collide with concurrent modifications to non-lock bits.
621 const markWord unlocked_mark = mark.set_unlocked();
622 const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
623 if (old_mark == mark) {
624 size_t recursions = lock_stack.remove(object) - 1;
625 assert(recursions == 0, "must not be recursive here");
626 return;
627 }
628 mark = old_mark;
629 }
630 }
631 } else if (LockingMode == LM_LEGACY) {
632 markWord dhw = lock->displaced_header();
633 if (dhw.value() == 0) {
634 // If the displaced header is NULL, then this exit matches up with
635 // a recursive enter. No real work to do here except for diagnostics.
636 #ifndef PRODUCT
637 if (mark != markWord::INFLATING()) {
638 // Only do diagnostics if we are not racing an inflation. Simply
639 // exiting a recursive enter of a Java Monitor that is being
640 // inflated is safe; see the has_monitor() comment below.
641 assert(!mark.is_neutral(), "invariant");
642 assert(!mark.has_locker() ||
643 current->is_lock_owned((address)mark.locker()), "invariant");
644 if (mark.has_monitor()) {
645 // The BasicLock's displaced_header is marked as a recursive
646 // enter and we have an inflated Java Monitor (ObjectMonitor).
647 // This is a special case where the Java Monitor was inflated
648 // after this thread entered the stack-lock recursively. When a
649 // Java Monitor is inflated, we cannot safely walk the Java
650 // Monitor owner's stack and update the BasicLocks because a
651 // Java Monitor can be asynchronously inflated by a thread that
652 // does not own the Java Monitor.
653 ObjectMonitor* m = mark.monitor();
654 assert(m->object()->mark() == mark, "invariant");
655 assert(m->is_entered(current), "invariant");
656 }
657 }
658 #endif
659 return;
660 }
661
662 if (mark == markWord::from_pointer(lock)) {
663 // If the object is stack-locked by the current thread, try to
664 // swing the displaced header from the BasicLock back to the mark.
665 assert(dhw.is_neutral(), "invariant");
666 if (object->cas_set_mark(dhw, mark) == mark) {
667 return;
668 }
669 }
670 }
671 }
672
673 // We have to take the slow-path of possible inflation and then exit.
674 // The ObjectMonitor* can't be async deflated until ownership is
675 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
676 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
677 assert(!monitor->is_owner_anonymous(), "must not be");
678 monitor->exit(current);
679 }
680
681 // -----------------------------------------------------------------------------
682 // Class Loader support to workaround deadlocks on the class loader lock objects
683 // Also used by GC
684 // complete_exit()/reenter() are used to wait on a nested lock
685 // i.e. to give up an outer lock completely and then re-enter
686 // Used when holding nested locks - lock acquisition order: lock1 then lock2
687 // 1) complete_exit lock1 - saving recursion count
688 // 2) wait on lock2
689 // 3) when notified on lock2, unlock lock2
690 // 4) reenter lock1 with original recursion count
691 // 5) lock lock2
692 // NOTE: must use a heavyweight monitor to handle complete_exit/reenter()
693 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
694 if (UseBiasedLocking) {
695 BiasedLocking::revoke(current, obj);
696 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
697 }
822 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
823 if (UseBiasedLocking) {
824 BiasedLocking::revoke(current, obj);
825 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
826 }
827 // The ObjectMonitor* can't be async deflated because the _waiters
828 // field is incremented before ownership is dropped and decremented
829 // after ownership is regained.
830 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
831 monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
832 }
833
834 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
835 JavaThread* current = THREAD;
836 if (UseBiasedLocking) {
837 BiasedLocking::revoke(current, obj);
838 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
839 }
840
841 markWord mark = obj->mark();
842 if (LockingMode == LM_LIGHTWEIGHT) {
843 if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
844 // Not inflated so there can't be any waiters to notify (wait() always inflates).
845 return;
846 }
847 } else if (LockingMode == LM_LEGACY) {
848 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
849 // Not inflated so there can't be any waiters to notify.
850 return;
851 }
852 }
853 // The ObjectMonitor* can't be async deflated until ownership is
854 // dropped by the calling thread.
855 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
856 monitor->notify(CHECK);
857 }
858
859 // NOTE: see the comment in notify()
860 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
861 JavaThread* current = THREAD;
862 if (UseBiasedLocking) {
863 BiasedLocking::revoke(current, obj);
864 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
865 }
866
867 markWord mark = obj->mark();
868 if (LockingMode == LM_LIGHTWEIGHT) {
869 if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
870 // Not inflated so there can't be any waiters to notify (wait() always inflates).
871 return;
872 }
873 } else if (LockingMode == LM_LEGACY) {
874 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
875 // Not inflated so there can't be any waiters to notify.
876 return;
877 }
878 }
879 // The ObjectMonitor* can't be async deflated until ownership is
880 // dropped by the calling thread.
881 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
882 monitor->notifyAll(CHECK);
883 }
884
885 // -----------------------------------------------------------------------------
886 // Hash Code handling
887
888 struct SharedGlobals {
889 char _pad_prefix[OM_CACHE_LINE_SIZE];
890 // This is a highly shared mostly-read variable.
891 // To avoid false-sharing it needs to be the sole occupant of a cache line.
892 volatile int stw_random;
893 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
894 // Hot RW variable -- Sequester to avoid false-sharing
895 volatile int hc_sequence;
896 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
897 };
898
899 static SharedGlobals GVars;
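// A rough picture of the intended layout (assuming OM_CACHE_LINE_SIZE == 64
// and 4-byte ints):
//   [ 64-byte pad ][ stw_random | 60-byte pad ][ hc_sequence | 60-byte pad ]
// Each hot field gets a cache line to itself, so frequent updates to
// hc_sequence do not invalidate the line holding the mostly-read stw_random.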
900
901 static markWord read_stable_mark(oop obj) {
902 markWord mark = obj->mark_acquire();
903 if (!mark.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT) {
904 // New lightweight locking does not use the markWord::INFLATING() protocol.
905 return mark; // normal fast-path return
906 }
907
908 int its = 0;
909 for (;;) {
910 markWord mark = obj->mark_acquire();
911 if (!mark.is_being_inflated()) {
912 return mark; // normal fast-path return
913 }
914
915 // The object is being inflated by some other thread.
916 // The caller of read_stable_mark() must wait for inflation to complete.
917 // Avoid live-lock.
918
919 ++its;
920 if (its > 10000 || !os::is_MP()) {
921 if (its & 1) {
922 os::naked_yield();
923 } else {
924 // Note that the following code attenuates the livelock problem but is not
993 value = 1; // for sensitivity testing
994 } else if (hashCode == 3) {
995 value = ++GVars.hc_sequence;
996 } else if (hashCode == 4) {
997 value = cast_from_oop<intptr_t>(obj);
998 } else {
999 // Marsaglia's xor-shift scheme with thread-specific state
1000 // This is probably the best overall implementation -- we'll
1001 // likely make this the default in future releases.
1002 unsigned t = current->_hashStateX;
1003 t ^= (t << 11);
1004 current->_hashStateX = current->_hashStateY;
1005 current->_hashStateY = current->_hashStateZ;
1006 current->_hashStateZ = current->_hashStateW;
1007 unsigned v = current->_hashStateW;
1008 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
1009 current->_hashStateW = v;
1010 value = v;
1011 }
1012
1013 value &= UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask;
1014 if (value == 0) value = 0xBAD;
1015 assert(value != markWord::no_hash, "invariant");
1016 return value;
1017 }
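// A minimal sketch of the default (Marsaglia xor-shift) path above, assuming
// 32-bit unsigned state words seeded per thread in _hashStateX.._hashStateW:
//   t = x ^ (x << 11);   x = y;  y = z;  z = w;
//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//   hash = w & hash_mask;   // 0 is then remapped to 0xBAD because
//                           // markWord::no_hash (0) means "no hash installed".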
1018
1019 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
1020 if (UseBiasedLocking) {
1021 // NOTE: many places throughout the JVM do not expect a safepoint
1022 // to be taken here. However, we only ever bias Java instances and all
1023 // of the call sites of identity_hash that might revoke biases have
1024 // been checked to make sure they can handle a safepoint. The
1025 // added check of the bias pattern is to avoid useless calls to
1026 // thread-local storage.
1027 if (obj->mark().has_bias_pattern()) {
1028 // Handle for oop obj in case of STW safepoint
1029 Handle hobj(current, obj);
1030 if (SafepointSynchronize::is_at_safepoint()) {
1031 BiasedLocking::revoke_at_safepoint(hobj);
1032 } else {
1033 BiasedLocking::revoke(current->as_Java_thread(), hobj);
1034 }
1035 obj = hobj();
1036 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
1037 }
1038 }
1039
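// The retry loop below distinguishes three cases:
//   1) neutral or fast-locked mark  - CAS the hash directly into the mark word.
//   2) inflated                     - the hash lives in the ObjectMonitor's
//                                     header/dmw.
//   3) stack-locked by the caller   - the displaced header on the stack must not
//                                     be modified, so inflate first and then
//                                     install the hash in the ObjectMonitor.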
1040 while (true) {
1041 ObjectMonitor* monitor = NULL;
1042 markWord temp, test;
1043 intptr_t hash;
1044 markWord mark = read_stable_mark(obj);
1045
1046 // object should remain ineligible for biased locking
1047 assert(!mark.has_bias_pattern(), "invariant");
1048
1049 if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
1050 hash = mark.hash();
1051 if (hash != 0) { // if it has a hash, just return it
1052 return hash;
1053 }
1054 hash = get_next_hash(current, obj); // get a new hash
1055 temp = mark.copy_set_hash(hash); // merge the hash into header
1056 // try to install the hash
1057 test = obj->cas_set_mark(temp, mark);
1058 if (test == mark) { // if the hash was installed, return it
1059 return hash;
1060 }
1061 if (LockingMode == LM_LIGHTWEIGHT) {
1062 // CAS failed. With fast-locking the hash can be installed even while the object is fast-locked, so simply retry.
1063 continue;
1064 }
1065 // Failed to install the hash. It could be that another thread
1066 // installed the hash just before our attempt or inflation has
1067 // occurred or... so we fall thru to inflate the monitor for
1068 // stability and then install the hash.
1069 } else if (mark.has_monitor()) {
1070 monitor = mark.monitor();
1071 temp = monitor->header();
1072 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1073 hash = temp.hash();
1074 if (hash != 0) {
1075 // It has a hash.
1076
1077 // Separate load of dmw/header above from the loads in
1078 // is_being_async_deflated().
1079
1080 // dmw/header and _contentions may get written by different threads.
1081 // Make sure to observe them in the same order when having several observers.
1082 OrderAccess::loadload_for_IRIW();
1083
1084 if (monitor->is_being_async_deflated()) {
1085 // But we can't safely use the hash if we detect that async
1086 // deflation has occurred. So we attempt to restore the
1087 // header/dmw to the object's header so that we only retry
1088 // once if the deflater thread happens to be slow.
1089 monitor->install_displaced_markword_in_object(obj);
1090 continue;
1091 }
1092 return hash;
1093 }
1094 // Fall thru so we only have one place that installs the hash in
1095 // the ObjectMonitor.
1096 } else if (LockingMode == LM_LEGACY && mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
1097 // This is a stack lock owned by the calling thread so fetch the
1098 // displaced markWord from the BasicLock on the stack.
1099 temp = mark.displaced_mark_helper();
1100 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1101 hash = temp.hash();
1102 if (hash != 0) { // if it has a hash, just return it
1103 return hash;
1104 }
1105 // WARNING:
1106 // The displaced header in the BasicLock on a thread's stack
1107 // is strictly immutable. It CANNOT be changed under ANY circumstances.
1108 // So we have to inflate the stack lock into an ObjectMonitor
1109 // even if the current thread owns the lock. The BasicLock on
1110 // a thread's stack can be asynchronously read by other threads
1111 // during an inflate() call so any change to that stack memory
1112 // may not propagate to other threads correctly.
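// For example (hypothetical interleaving): if we stored the hash into the
// BasicLock's displaced header here, a concurrent inflate() by another thread
// could already have copied the old displaced header into its new
// ObjectMonitor, and the hash would be silently lost.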
1113 }
1114
1115 // Inflate the monitor to set the hash.
1116
1153
1154 // Deprecated -- use FastHashCode() instead.
1155
1156 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1157 return FastHashCode(Thread::current(), obj());
1158 }
1159
1160
1161 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1162 Handle h_obj) {
1163 if (UseBiasedLocking) {
1164 BiasedLocking::revoke(current, h_obj);
1165 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1166 }
1167
1168 assert(current == JavaThread::current(), "Can only be called on current thread");
1169 oop obj = h_obj();
1170
1171 markWord mark = read_stable_mark(obj);
1172
1173 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1174 // stack-locked case, header points into owner's stack
1175 return current->is_lock_owned((address)mark.locker());
1176 }
1177
1178 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1179 // fast-locking case, see if lock is in current's lock stack
1180 return current->lock_stack().contains(h_obj());
1181 }
1182
1183 // Contended case, header points to ObjectMonitor (tagged pointer)
1184 if (mark.has_monitor()) {
1185 // The first stage of async deflation does not affect any field
1186 // used by this comparison so the ObjectMonitor* is usable here.
1187 ObjectMonitor* monitor = mark.monitor();
1188 return monitor->is_entered(current) != 0;
1189 }
1190 // Unlocked case, header in place
1191 assert(mark.is_neutral(), "sanity check");
1192 return false;
1193 }
1194
1195 // FIXME: jvmti should call this
1196 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1197 if (UseBiasedLocking) {
1198 if (SafepointSynchronize::is_at_safepoint()) {
1199 BiasedLocking::revoke_at_safepoint(h_obj);
1200 } else {
1201 BiasedLocking::revoke(JavaThread::current(), h_obj);
1202 }
1203 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1204 }
1205
1206 oop obj = h_obj();
1207 markWord mark = read_stable_mark(obj);
1208
1209 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1210 // stack-locked so header points into owner's stack.
1211 // owning_thread_from_monitor_owner() may also return null here:
1212 return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
1213 }
1214
1215 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1216 // fast-locked so get owner from the object.
1217 // owning_thread_from_object() may also return null here:
1218 return Threads::owning_thread_from_object(t_list, h_obj());
1219 }
1220
1221 // Contended case, header points to ObjectMonitor (tagged pointer)
1222 if (mark.has_monitor()) {
1223 // The first stage of async deflation does not affect any field
1224 // used by this comparison so the ObjectMonitor* is usable here.
1225 ObjectMonitor* monitor = mark.monitor();
1226 assert(monitor != NULL, "monitor should be non-null");
1227 // owning_thread_from_monitor() may also return null here:
1228 return Threads::owning_thread_from_monitor(t_list, monitor);
1229 }
1230
1231 // Unlocked case, header in place
1232 // Cannot have assertion since this object may have been
1233 // locked by another thread when reaching here.
1234 // assert(mark.is_neutral(), "sanity check");
1235
1236 return NULL;
1237 }
1238
1239 // Visitors ...
1240
1241 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1242 MonitorList::Iterator iter = _in_use_list.iterator();
1243 while (iter.has_next()) {
1244 ObjectMonitor* mid = iter.next();
1245 if (mid->owner() != thread) {
1246 continue;
1247 }
1248 if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
1402 ObjectSynchronizer::InflateCause cause) {
1403 assert(event != NULL, "invariant");
1404 event->set_monitorClass(obj->klass());
1405 event->set_address((uintptr_t)(void*)obj);
1406 event->set_cause((u1)cause);
1407 event->commit();
1408 }
1409
1410 // Fast path code shared by multiple functions
1411 void ObjectSynchronizer::inflate_helper(oop obj) {
1412 markWord mark = obj->mark_acquire();
1413 if (mark.has_monitor()) {
1414 ObjectMonitor* monitor = mark.monitor();
1415 markWord dmw = monitor->header();
1416 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1417 return;
1418 }
1419 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1420 }
1421
1422 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
1423 assert(current == Thread::current(), "must be");
1424 if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
1425 return inflate_impl(current->as_Java_thread(), obj, cause);
1426 }
1427 return inflate_impl(nullptr, obj, cause);
1428 }
1429
1430 ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
1431 assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
1432 return inflate_impl(thread, obj, cause);
1433 }
1434
1435 ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
1436 // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
1437 // that the inflating_thread == Thread::current() or is suspended throughout the call by
1438 // some other mechanism.
1439 // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
1440 // non-JavaThread (as may still be the case from FastHashCode). However, it is only
1441 // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
1442 // is set when called from ObjectSynchronizer::enter from the owning thread,
1443 // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
1444 EventJavaMonitorInflate event;
1445
1446 for (;;) {
1447 const markWord mark = object->mark_acquire();
1448 assert(!mark.has_bias_pattern(), "invariant");
1449
1450 // The mark can be in one of the following states:
1451 // * inflated - Just return if using stack-locking.
1452 // If using fast-locking and the ObjectMonitor owner
1453 // is anonymous and the inflating_thread owns the
1454 // object lock, then we make the inflating_thread
1455 // the ObjectMonitor owner and remove the lock from
1456 // the inflating_thread's lock stack.
1457 // * fast-locked - Coerce it to inflated from fast-locked.
1458 // * stack-locked - Coerce it to inflated from stack-locked.
1459 // * INFLATING - busy wait for conversion to complete
1460 // * Neutral - aggressively inflate the object.
1461 // * BIASED - Illegal. We should never see this
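// A rough sketch of the transitions performed below:
//   neutral                      --cas_set_mark(monitor)-->          inflated
//   fast-locked (LM_LIGHTWEIGHT) --cas_set_mark(monitor)-->          inflated
//   stack-locked (LM_LEGACY)     --cas INFLATING--> ... --release--> inflated
// "Inflated" is an absorbing state; only deflation (elsewhere) undoes it.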
1462
1463 // CASE: inflated
1464 if (mark.has_monitor()) {
1465 ObjectMonitor* inf = mark.monitor();
1466 markWord dmw = inf->header();
1467 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1468 if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
1469 inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
1470 inf->set_owner_from_anonymous(inflating_thread);
1471 size_t removed = inflating_thread->lock_stack().remove(object);
1472 inf->set_recursions(removed - 1);
1473 }
1474 return inf;
1475 }
1476
1477 // CASE: inflation in progress - inflating over a stack-lock.
1478 // Some other thread is converting from stack-locked to inflated.
1479 // Only that thread can complete inflation -- other threads must wait.
1480 // The INFLATING value is transient.
1481 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1482 // We could always eliminate polling by parking the thread on some auxiliary list.
1483 if (LockingMode != LM_LIGHTWEIGHT) {
1484 // New lightweight locking does not use INFLATING.
1491 if (mark == markWord::INFLATING()) {
1492 read_stable_mark(object);
1493 continue;
1494 }
1495 }
1496
1497 // CASE: fast-locked
1498 // Could be fast-locked either by the inflating_thread or by some other thread.
1499 //
1500 // Note that we allocate the ObjectMonitor speculatively, _before_
1501 // attempting to set the object's mark to the new ObjectMonitor. If
1502 // the inflating_thread owns the monitor, then we set the ObjectMonitor's
1503 // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
1504 // to anonymous. If we lose the race to set the object's mark to the
1505 // new ObjectMonitor, then we just delete it and loop around again.
1506 //
1507 LogStreamHandle(Trace, monitorinflation) lsh;
1508 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1509 ObjectMonitor* monitor = new ObjectMonitor(object);
1510 monitor->set_header(mark.set_unlocked());
1511 bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
1512 if (own) {
1513 // Owned by us.
1514 monitor->set_owner_from(NULL, inflating_thread);
1515 } else {
1516 // Owned by somebody else.
1517 monitor->set_owner_anonymous();
1518 }
1519 markWord monitor_mark = markWord::encode(monitor);
1520 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
1521 if (old_mark == mark) {
1522 // Success! Return inflated monitor.
1523 if (own) {
1524 size_t removed = inflating_thread->lock_stack().remove(object);
1525 monitor->set_recursions(removed - 1);
1526 }
1527 // Once the ObjectMonitor is configured and object is associated
1528 // with the ObjectMonitor, it is safe to allow async deflation:
1529 _in_use_list.add(monitor);
1530
1531 // Hopefully the performance counters are allocated on distinct
1532 // cache lines to avoid false sharing on MP systems ...
1533 OM_PERFDATA_OP(Inflations, inc());
1534 if (log_is_enabled(Trace, monitorinflation)) {
1535 ResourceMark rm;
1536 lsh.print_cr("inflate(fast_locked): object=" INTPTR_FORMAT ", mark="
1537 INTPTR_FORMAT ", type='%s'", p2i(object),
1538 object->mark().value(), object->klass()->external_name());
1539 }
1540 if (event.should_commit()) {
1541 post_monitor_inflate_event(&event, object, cause);
1542 }
1543 return monitor;
1544 } else {
1545 delete monitor;
1546 continue; // Interference -- just retry
1547 }
1548 }
1549
1550 // CASE: stack-locked
1551 // Could be stack-locked either by this thread or by some other thread.
1552 //
1553 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1554 // to install INFLATING into the mark word. We originally installed INFLATING,
1555 // allocated the ObjectMonitor, and then finally STed the address of the
1556 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1557 // the interval in which INFLATING appeared in the mark, thus increasing
1558 // the odds of inflation contention.
1559
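// A rough sketch of the protocol used below (ignoring interference):
//   1) cas_set_mark(INFLATING(), mark)          -- claim the right to inflate
//   2) copy the displaced header from the owner's BasicLock into m's header
//   3) m->set_owner_from(NULL, mark.locker())   -- owner is the stack locker
//   4) object->release_set_mark(encode(m))      -- publish the ObjectMonitor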
1560 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1561 assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking");
1562 ObjectMonitor* m = new ObjectMonitor(object);
1563 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1564 // We do this before the CAS in order to minimize the length of time
1565 // in which INFLATING appears in the mark.
1566
1567 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1568 if (cmp != mark) {
1569 delete m;
1570 continue; // Interference -- just retry
1571 }
1572
1573 // We've successfully installed INFLATING (0) into the mark-word.
1574 // This is the only case where 0 will appear in a mark-word.
1575 // Only the singular thread that successfully swings the mark-word
1576 // to 0 can perform (or more precisely, complete) inflation.
1577 //
1578 // Why do we CAS a 0 into the mark-word instead of just CASing the
1579 // mark-word from the stack-locked value directly to the new inflated state?
1580 // Consider what happens when a thread unlocks a stack-locked object.
1581 // It attempts to use CAS to swing the displaced header value from the
1613 // Note that a thread can inflate an object
1614 // that it has stack-locked -- as might happen in wait() -- directly
1615 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1616 m->set_owner_from(NULL, mark.locker());
1617 // TODO-FIXME: assert BasicLock->dhw != 0.
1618
1619 // Must preserve store ordering. The monitor state must
1620 // be stable at the time of publishing the monitor address.
1621 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1622 // Release semantics so that above set_object() is seen first.
1623 object->release_set_mark(markWord::encode(m));
1624
1625 // Once ObjectMonitor is configured and the object is associated
1626 // with the ObjectMonitor, it is safe to allow async deflation:
1627 _in_use_list.add(m);
1628
1629 // Hopefully the performance counters are allocated on distinct cache lines
1630 // to avoid false sharing on MP systems ...
1631 OM_PERFDATA_OP(Inflations, inc());
1632 if (log_is_enabled(Trace, monitorinflation)) {
1633 ResourceMark rm;
1634 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1635 INTPTR_FORMAT ", type='%s'", p2i(object),
1636 object->mark().value(), object->klass()->external_name());
1637 }
1638 if (event.should_commit()) {
1639 post_monitor_inflate_event(&event, object, cause);
1640 }
1641 return m;
1642 }
1643
1644 // CASE: neutral
1645 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1646 // If we know we're inflating for entry it's better to inflate by swinging a
1647 // pre-locked ObjectMonitor pointer into the object header. A successful
1648 // CAS inflates the object *and* confers ownership to the inflating thread.
1649 // In the current implementation we use a 2-step mechanism where we CAS()
1650 // to inflate and then CAS() again to try to swing _owner from NULL to current.
1651 // An inflateTry() method that we could call from enter() would be useful.
1652
1653 // Catch if the object's header is not neutral (not locked and
1657 // prepare m for installation - set monitor to initial state
1658 m->set_header(mark);
1659
1660 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1661 delete m;
1662 m = NULL;
1663 continue;
1664 // interference - the markword changed - just retry.
1665 // The state-transitions are one-way, so there's no chance of
1666 // live-lock -- "Inflated" is an absorbing state.
1667 }
1668
1669 // Once the ObjectMonitor is configured and object is associated
1670 // with the ObjectMonitor, it is safe to allow async deflation:
1671 _in_use_list.add(m);
1672
1673 // Hopefully the performance counters are allocated on distinct
1674 // cache lines to avoid false sharing on MP systems ...
1675 OM_PERFDATA_OP(Inflations, inc());
1676 if (log_is_enabled(Trace, monitorinflation)) {
1677 ResourceMark rm;
1678 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1679 INTPTR_FORMAT ", type='%s'", p2i(object),
1680 object->mark().value(), object->klass()->external_name());
1681 }
1682 if (event.should_commit()) {
1683 post_monitor_inflate_event(&event, object, cause);
1684 }
1685 return m;
1686 }
1687 }
1688
1689 void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
1690 const char* cnt_name, size_t cnt,
1691 LogStream* ls, elapsedTimer* timer_p) {
1692 if (!SafepointMechanism::should_process(current)) {
1693 return;
1694 }
1695
1696 // A safepoint/handshake has started.
1697 if (ls != NULL) {
1734 if (current->is_Java_thread()) {
1735 // A JavaThread must check for a safepoint/handshake and honor it.
1736 chk_for_block_req(current->as_Java_thread(), "deflation", "deflated_count",
1737 deflated_count, ls, timer_p);
1738 }
1739 }
1740
1741 return deflated_count;
1742 }
1743
1744 class HandshakeForDeflation : public HandshakeClosure {
1745 public:
1746 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
1747
1748 void do_thread(Thread* thread) {
1749 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
1750 INTPTR_FORMAT, p2i(thread));
1751 }
1752 };
1753
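// A VM operation (not evaluated at a safepoint) that simply synchronizes and
// then desynchronizes the SuspendibleThreadSet. Executing it rendezvouses with
// all suspendible GC threads: any such thread that was examining a mark-word or
// ObjectMonitor before the rendezvous has finished doing so by the time the
// operation returns. Used below by deflate_idle_monitors().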
1754 class VM_RendezvousGCThreads : public VM_Operation {
1755 public:
1756 bool evaluate_at_safepoint() const override { return false; }
1757 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1758 void doit() override {
1759 SuspendibleThreadSet::synchronize();
1760 SuspendibleThreadSet::desynchronize();
1761 }
1762 };
1763
1764 // This function is called by the MonitorDeflationThread to deflate
1765 // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
1766 // by the VMThread.
1767 size_t ObjectSynchronizer::deflate_idle_monitors() {
1768 Thread* current = Thread::current();
1769 if (current->is_Java_thread()) {
1770 // The async deflation request has been processed.
1771 _last_async_deflation_time_ns = os::javaTimeNanos();
1772 set_is_async_deflation_requested(false);
1773 }
1774
1775 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1776 LogStreamHandle(Info, monitorinflation) lsh_info;
1777 LogStream* ls = NULL;
1778 if (log_is_enabled(Debug, monitorinflation)) {
1779 ls = &lsh_debug;
1780 } else if (log_is_enabled(Info, monitorinflation)) {
1781 ls = &lsh_info;
1782 }
1783
1796 // deflated, BUT the MonitorDeflationThread blocked for the final
1797 // safepoint during unlinking.
1798
1799 // Unlink deflated ObjectMonitors from the in-use list.
1800 ResourceMark rm;
1801 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1802 size_t unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer,
1803 &delete_list);
1804 if (current->is_Java_thread()) {
1805 if (ls != NULL) {
1806 timer.stop();
1807 ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
1808 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1809 SIZE_FORMAT ", max=" SIZE_FORMAT,
1810 unlinked_count, in_use_list_ceiling(),
1811 _in_use_list.count(), _in_use_list.max());
1812 }
1813
1814 // A JavaThread needs to handshake in order to safely free the
1815 // ObjectMonitors that were deflated in this cycle.
1816 // Also, we synchronize and desynchronize the GC threads around the handshake
1817 // so that they can safely read the mark-word and look through to the
1818 // ObjectMonitor without the ObjectMonitor being freed out from under them.
1819 HandshakeForDeflation hfd_hc;
1820 Handshake::execute(&hfd_hc);
1821 VM_RendezvousGCThreads sync_gc;
1822 VMThread::execute(&sync_gc);
1823
1824 if (ls != NULL) {
1825 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1826 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1827 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1828 timer.start();
1829 }
1830 }
1831
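// Deleting the ObjectMonitors below frees a potentially large amount of C-heap
// in a short burst; suspend periodic native heap trimming for the duration,
// since trimming concurrently with the bulk free would likely be wasted work.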
1832 NativeHeapTrimmer::SuspendMark sm("monitor deletion");
1833
1834 // After the handshake, safely free the ObjectMonitors that were
1835 // deflated in this cycle.
1836 size_t deleted_count = 0;
1837 for (ObjectMonitor* monitor: delete_list) {
1838 delete monitor;
1839 deleted_count++;
1840
1841 if (current->is_Java_thread()) {
1842 // A JavaThread must check for a safepoint/handshake and honor it.
|