8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "logging/log.hpp"
29 #include "logging/logStream.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/padded.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "oops/markWord.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "runtime/atomic.hpp"
37 #include "runtime/biasedLocking.hpp"
38 #include "runtime/handles.inline.hpp"
39 #include "runtime/handshake.hpp"
40 #include "runtime/interfaceSupport.inline.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "runtime/objectMonitor.hpp"
43 #include "runtime/objectMonitor.inline.hpp"
44 #include "runtime/os.inline.hpp"
45 #include "runtime/osThread.hpp"
46 #include "runtime/perfData.hpp"
47 #include "runtime/safepointMechanism.inline.hpp"
48 #include "runtime/safepointVerifiers.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "runtime/stubRoutines.hpp"
51 #include "runtime/synchronizer.hpp"
52 #include "runtime/thread.inline.hpp"
53 #include "runtime/timer.hpp"
54 #include "runtime/vframe.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "utilities/align.hpp"
57 #include "utilities/dtrace.hpp"
58 #include "utilities/events.hpp"
59 #include "utilities/preserveException.hpp"
60
257 // returns true -- to indicate the call was satisfied.
258 // returns false -- to indicate the call needs the services of the slow-path.
259 // A no-loitering ordinance is in effect for code in the quick_* family
260 // operators: safepoints or indefinite blocking (blocking that might span a
261 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
262 // entry.
263 //
264 // Consider: An interesting optimization is to have the JIT recognize the
265 // following common idiom:
266 // synchronized (someobj) { .... ; notify(); }
267 // That is, we find a notify() or notifyAll() call that immediately precedes
268 // the monitorexit operation. In that case the JIT could fuse the operations
269 // into a single notifyAndExit() runtime primitive.
270
271 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
272 assert(current->thread_state() == _thread_in_Java, "invariant");
273 NoSafepointVerifier nsv;
274 if (obj == NULL) return false; // slow-path for invalid obj
275 const markWord mark = obj->mark();
276
277 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
278 // Degenerate notify
279 // stack-locked by caller so by definition the implied waitset is empty.
280 return true;
281 }
282
283 if (mark.has_monitor()) {
284 ObjectMonitor* const mon = mark.monitor();
285 assert(mon->object() == oop(obj), "invariant");
286 if (mon->owner() != current) return false; // slow-path for IMS exception
287
288 if (mon->first_waiter() != NULL) {
289 // We have one or more waiters. Since this is an inflated monitor
290 // that we own, we can transfer one or more threads from the waitset
291 // to the entrylist here and now, avoiding the slow-path.
292 if (all) {
293 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
294 } else {
295 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
296 }
297 int free_count = 0;
298 do {
299 mon->INotify(current);
300 ++free_count;
330 if (mark.has_monitor()) {
331 ObjectMonitor* const m = mark.monitor();
332 // An async deflation or GC can race us before we manage to make
333 // the ObjectMonitor busy by setting the owner below. If we detect
334 // that race we just bail out to the slow-path here.
335 if (m->object_peek() == NULL) {
336 return false;
337 }
338 JavaThread* const owner = (JavaThread*) m->owner_raw();
339
340 // Lock contention and Transactional Lock Elision (TLE) diagnostics
341 // and observability
342 // Case: light contention possibly amenable to TLE
343 // Case: TLE inimical operations such as nested/recursive synchronization
344
345 if (owner == current) {
346 m->_recursions++;
347 return true;
348 }
349
350 // This Java Monitor is inflated so obj's header will never be
351 // displaced to this thread's BasicLock. Make the displaced header
352 // non-NULL so this BasicLock is not seen as recursive nor as
353 // being locked. We do this unconditionally so that this thread's
354 // BasicLock cannot be mis-interpreted by any stack walkers. For
355 // performance reasons, stack walkers generally first check for
356 // Biased Locking in the object's header, the second check is for
357 // stack-locking in the object's header, the third check is for
358 // recursive stack-locking in the displaced header in the BasicLock,
359 // and last are the inflated Java Monitor (ObjectMonitor) checks.
360 lock->set_displaced_header(markWord::unused_mark());
361
362 if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
363 assert(m->_recursions == 0, "invariant");
364 return true;
365 }
366 }
367
368 // Note that we could inflate in quick_enter.
369 // This is likely a useful optimization
370 // Critically, in quick_enter() we must not:
371 // -- perform bias revocation, or
372 // -- block indefinitely, or
373 // -- reach a safepoint
374
375 return false; // revert to slow-path
376 }
377
378 // Handle notifications when synchronizing on value based classes
379 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
380 frame last_frame = current->last_frame();
407 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
408 if (current->has_last_Java_frame()) {
409 LogStream info_stream(vblog.info());
410 current->print_stack_on(&info_stream);
411 } else {
412 vblog.info("Cannot find the last Java frame");
413 }
414
415 EventSyncOnValueBasedClass event;
416 if (event.should_commit()) {
417 event.set_valueBasedClass(obj->klass());
418 event.commit();
419 }
420 }
421
422 if (bcp_was_adjusted) {
423 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
424 }
425 }
426
427 // -----------------------------------------------------------------------------
428 // Monitor Enter/Exit
429 // The interpreter and compiler assembly code tries to lock using the fast path
430 // of this algorithm. Make sure to update that code if the following function is
431 // changed. The implementation is extremely sensitive to race condition. Be careful.
432
// Acquire the monitor of obj on behalf of 'current', recording the lock
// state in the caller-supplied BasicLock. Fast paths handled here:
// stack-locking a neutral (unlocked) object via CAS, and recognizing a
// recursive stack-lock by the same thread. Anything else falls through
// to inflation and the heavyweight ObjectMonitor::enter().
// NOTE: this code path is extremely sensitive to races (see the comment
// above); interpreter/compiler fast paths mirror this logic.
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  if (obj->klass()->is_value_based()) {
    // Synchronizing on a value-based class: log/event/exception per flags.
    handle_sync_on_value_based_class(obj, current);
  }

  if (UseBiasedLocking) {
    BiasedLocking::revoke(current, obj);
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      // CAS succeeded: obj is now stack-locked by this BasicLock.
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             current->is_lock_owned((address)mark.locker())) {
    // Recursive stack-lock by this thread: a NULL displaced header marks
    // this BasicLock as a recursive (no-op on exit) enter.
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(current)) {
      return;
    }
  }
}
476
// Release the monitor of 'object' that was acquired with the given
// BasicLock. Mirrors enter(): a NULL displaced header means a recursive
// stack-lock exit (nothing to do), a mark pointing at 'lock' means a
// plain stack-lock exit (CAS the displaced header back), and everything
// else goes through inflation and ObjectMonitor::exit().
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             current->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(m->object()->mark() == mark, "invariant");
        assert(m->is_entered(current), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      // CAS succeeded: the object is unlocked again.
      return;
    }
    // CAS failed (e.g. a racing inflation): fall through to slow path.
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  monitor->exit(current);
}
528
529 // -----------------------------------------------------------------------------
530 // Class Loader support to workaround deadlocks on the class loader lock objects
531 // Also used by GC
532 // complete_exit()/reenter() are used to wait on a nested lock
533 // i.e. to give up an outer lock completely and then re-enter
534 // Used when holding nested locks - lock acquisition order: lock1 then lock2
535 // 1) complete_exit lock1 - saving recursion count
536 // 2) wait on lock2
537 // 3) when notified on lock2, unlock lock2
538 // 4) reenter lock1 with original recursion count
539 // 5) lock lock2
540 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
541 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
542 if (UseBiasedLocking) {
543 BiasedLocking::revoke(current, obj);
544 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
545 }
670 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
671 if (UseBiasedLocking) {
672 BiasedLocking::revoke(current, obj);
673 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
674 }
675 // The ObjectMonitor* can't be async deflated because the _waiters
676 // field is incremented before ownership is dropped and decremented
677 // after ownership is regained.
678 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
679 monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
680 }
681
682 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
683 JavaThread* current = THREAD;
684 if (UseBiasedLocking) {
685 BiasedLocking::revoke(current, obj);
686 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
687 }
688
689 markWord mark = obj->mark();
690 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
691 // Not inflated so there can't be any waiters to notify.
692 return;
693 }
694 // The ObjectMonitor* can't be async deflated until ownership is
695 // dropped by the calling thread.
696 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
697 monitor->notify(CHECK);
698 }
699
700 // NOTE: see comment of notify()
701 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
702 JavaThread* current = THREAD;
703 if (UseBiasedLocking) {
704 BiasedLocking::revoke(current, obj);
705 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
706 }
707
708 markWord mark = obj->mark();
709 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
710 // Not inflated so there can't be any waiters to notify.
711 return;
712 }
713 // The ObjectMonitor* can't be async deflated until ownership is
714 // dropped by the calling thread.
715 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
716 monitor->notifyAll(CHECK);
717 }
718
719 // -----------------------------------------------------------------------------
720 // Hash Code handling
721
// Process-wide shared state for the hash code machinery. Each hot field
// is padded so it is the sole occupant of its cache line, preventing
// false sharing between the fields and with neighboring data.
struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

// The single shared instance. hc_sequence feeds the hashCode==3 scheme
// in get_next_hash(); stw_random presumably seeds another hashing
// scheme -- its use is outside this chunk, confirm before relying on it.
static SharedGlobals GVars;
734
735 static markWord read_stable_mark(oop obj) {
736 markWord mark = obj->mark();
737 if (!mark.is_being_inflated()) {
738 return mark; // normal fast-path return
739 }
740
741 int its = 0;
742 for (;;) {
743 markWord mark = obj->mark();
744 if (!mark.is_being_inflated()) {
745 return mark; // normal fast-path return
746 }
747
748 // The object is being inflated by some other thread.
749 // The caller of read_stable_mark() must wait for inflation to complete.
750 // Avoid live-lock.
751
752 ++its;
753 if (its > 10000 || !os::is_MP()) {
754 if (its & 1) {
755 os::naked_yield();
756 } else {
757 // Note that the following code attenuates the livelock problem but is not
826 value = 1; // for sensitivity testing
827 } else if (hashCode == 3) {
828 value = ++GVars.hc_sequence;
829 } else if (hashCode == 4) {
830 value = cast_from_oop<intptr_t>(obj);
831 } else {
832 // Marsaglia's xor-shift scheme with thread-specific state
833 // This is probably the best overall implementation -- we'll
834 // likely make this the default in future releases.
835 unsigned t = current->_hashStateX;
836 t ^= (t << 11);
837 current->_hashStateX = current->_hashStateY;
838 current->_hashStateY = current->_hashStateZ;
839 current->_hashStateZ = current->_hashStateW;
840 unsigned v = current->_hashStateW;
841 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
842 current->_hashStateW = v;
843 value = v;
844 }
845
846 value &= markWord::hash_mask;
847 if (value == 0) value = 0xBAD;
848 assert(value != markWord::no_hash, "invariant");
849 return value;
850 }
851
852 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
853 if (UseBiasedLocking) {
854 // NOTE: many places throughout the JVM do not expect a safepoint
855 // to be taken here. However, we only ever bias Java instances and all
856 // of the call sites of identity_hash that might revoke biases have
857 // been checked to make sure they can handle a safepoint. The
858 // added check of the bias pattern is to avoid useless calls to
859 // thread-local storage.
860 if (obj->mark().has_bias_pattern()) {
861 // Handle for oop obj in case of STW safepoint
862 Handle hobj(current, obj);
863 if (SafepointSynchronize::is_at_safepoint()) {
864 BiasedLocking::revoke_at_safepoint(hobj);
865 } else {
866 BiasedLocking::revoke(current->as_Java_thread(), hobj);
867 }
868 obj = hobj();
869 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
870 }
871 }
905
906 // Separate load of dmw/header above from the loads in
907 // is_being_async_deflated().
908
909 // dmw/header and _contentions may get written by different threads.
910 // Make sure to observe them in the same order when having several observers.
911 OrderAccess::loadload_for_IRIW();
912
913 if (monitor->is_being_async_deflated()) {
914 // But we can't safely use the hash if we detect that async
915 // deflation has occurred. So we attempt to restore the
916 // header/dmw to the object's header so that we only retry
917 // once if the deflater thread happens to be slow.
918 monitor->install_displaced_markword_in_object(obj);
919 continue;
920 }
921 return hash;
922 }
923 // Fall thru so we only have one place that installs the hash in
924 // the ObjectMonitor.
925 } else if (current->is_lock_owned((address)mark.locker())) {
926 // This is a stack lock owned by the calling thread so fetch the
927 // displaced markWord from the BasicLock on the stack.
928 temp = mark.displaced_mark_helper();
929 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
930 hash = temp.hash();
931 if (hash != 0) { // if it has a hash, just return it
932 return hash;
933 }
934 // WARNING:
935 // The displaced header in the BasicLock on a thread's stack
936 // is strictly immutable. It CANNOT be changed in ANY cases.
937 // So we have to inflate the stack lock into an ObjectMonitor
938 // even if the current thread owns the lock. The BasicLock on
939 // a thread's stack can be asynchronously read by other threads
940 // during an inflate() call so any change to that stack memory
941 // may not propagate to other threads correctly.
942 }
943
944 // Inflate the monitor to set the hash.
945
982
983 // Deprecated -- use FastHashCode() instead.
984
985 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
986 return FastHashCode(Thread::current(), obj());
987 }
988
989
990 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
991 Handle h_obj) {
992 if (UseBiasedLocking) {
993 BiasedLocking::revoke(current, h_obj);
994 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
995 }
996
997 assert(current == JavaThread::current(), "Can only be called on current thread");
998 oop obj = h_obj();
999
1000 markWord mark = read_stable_mark(obj);
1001
1002 // Uncontended case, header points to stack
1003 if (mark.has_locker()) {
1004 return current->is_lock_owned((address)mark.locker());
1005 }
1006 // Contended case, header points to ObjectMonitor (tagged pointer)
1007 if (mark.has_monitor()) {
1008 // The first stage of async deflation does not affect any field
1009 // used by this comparison so the ObjectMonitor* is usable here.
1010 ObjectMonitor* monitor = mark.monitor();
1011 return monitor->is_entered(current) != 0;
1012 }
1013 // Unlocked case, header in place
1014 assert(mark.is_neutral(), "sanity check");
1015 return false;
1016 }
1017
1018 // FIXME: jvmti should call this
1019 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1020 if (UseBiasedLocking) {
1021 if (SafepointSynchronize::is_at_safepoint()) {
1022 BiasedLocking::revoke_at_safepoint(h_obj);
1023 } else {
1024 BiasedLocking::revoke(JavaThread::current(), h_obj);
1025 }
1026 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1027 }
1028
1029 oop obj = h_obj();
1030 address owner = NULL;
1031
1032 markWord mark = read_stable_mark(obj);
1033
1034 // Uncontended case, header points to stack
1035 if (mark.has_locker()) {
1036 owner = (address) mark.locker();
1037 }
1038
1039 // Contended case, header points to ObjectMonitor (tagged pointer)
1040 else if (mark.has_monitor()) {
1041 // The first stage of async deflation does not affect any field
1042 // used by this comparison so the ObjectMonitor* is usable here.
1043 ObjectMonitor* monitor = mark.monitor();
1044 assert(monitor != NULL, "monitor should be non-null");
1045 owner = (address) monitor->owner();
1046 }
1047
1048 if (owner != NULL) {
1049 // owning_thread_from_monitor_owner() may also return NULL here
1050 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1051 }
1052
1053 // Unlocked case, header in place
1054 // Cannot have assertion since this object may have been
1055 // locked by another thread when reaching here.
1056 // assert(mark.is_neutral(), "sanity check");
1057
1058 return NULL;
1059 }
1060
1061 // Visitors ...
1062
1063 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1064 MonitorList::Iterator iter = _in_use_list.iterator();
1065 while (iter.has_next()) {
1066 ObjectMonitor* mid = iter.next();
1067 if (mid->owner() != thread) {
1068 continue;
1069 }
1070 if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
1234 void ObjectSynchronizer::inflate_helper(oop obj) {
1235 markWord mark = obj->mark();
1236 if (mark.has_monitor()) {
1237 ObjectMonitor* monitor = mark.monitor();
1238 markWord dmw = monitor->header();
1239 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1240 return;
1241 }
1242 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1243 }
1244
1245 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1246 const InflateCause cause) {
1247 EventJavaMonitorInflate event;
1248
1249 for (;;) {
1250 const markWord mark = object->mark();
1251 assert(!mark.has_bias_pattern(), "invariant");
1252
1253 // The mark can be in one of the following states:
1254 // * Inflated - just return
1255 // * Stack-locked - coerce it to inflated
1256 // * INFLATING - busy wait for conversion to complete
1257 // * Neutral - aggressively inflate the object.
1258 // * BIASED - Illegal. We should never see this
1259
1260 // CASE: inflated
1261 if (mark.has_monitor()) {
1262 ObjectMonitor* inf = mark.monitor();
1263 markWord dmw = inf->header();
1264 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1265 return inf;
1266 }
1267
1268 // CASE: inflation in progress - inflating over a stack-lock.
1269 // Some other thread is converting from stack-locked to inflated.
1270 // Only that thread can complete inflation -- other threads must wait.
1271 // The INFLATING value is transient.
1272 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1273 // We could always eliminate polling by parking the thread on some auxiliary list.
1274 if (mark == markWord::INFLATING()) {
1275 read_stable_mark(object);
1276 continue;
1277 }
1278
1279 // CASE: stack-locked
1280 // Could be stack-locked either by this thread or by some other thread.
1281 //
1282 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1283 // to install INFLATING into the mark word. We originally installed INFLATING,
1284 // allocated the ObjectMonitor, and then finally STed the address of the
1285 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1286 // the interval in which INFLATING appeared in the mark, thus increasing
1287 // the odds of inflation contention.
1288
1289 LogStreamHandle(Trace, monitorinflation) lsh;
1290
1291 if (mark.has_locker()) {
1292 ObjectMonitor* m = new ObjectMonitor(object);
1293 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1294 // We do this before the CAS in order to minimize the length of time
1295 // in which INFLATING appears in the mark.
1296
1297 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1298 if (cmp != mark) {
1299 delete m;
1300 continue; // Interference -- just retry
1301 }
1302
1303 // We've successfully installed INFLATING (0) into the mark-word.
1304 // This is the only case where 0 will appear in a mark-word.
1305 // Only the singular thread that successfully swings the mark-word
1306 // to 0 can perform (or more precisely, complete) inflation.
1307 //
1308 // Why do we CAS a 0 into the mark-word instead of just CASing the
1309 // mark-word from the stack-locked value directly to the new inflated state?
1310 // Consider what happens when a thread unlocks a stack-locked object.
1311 // It attempts to use CAS to swing the displaced header value from the
1464 if (current->is_Java_thread()) {
1465 // A JavaThread must check for a safepoint/handshake and honor it.
1466 chk_for_block_req(current->as_Java_thread(), "deflation", "deflated_count",
1467 deflated_count, ls, timer_p);
1468 }
1469 }
1470
1471 return deflated_count;
1472 }
1473
// Closure executed via Handshake::execute() on every JavaThread during
// monitor deflation. The do_thread() body only logs; the value of the
// handshake is the rendezvous itself, which ensures every JavaThread
// has reached a handshake point before deflated ObjectMonitors are
// freed (see the call site in deflate_idle_monitors()).
class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};
1483
1484 // This function is called by the MonitorDeflationThread to deflate
1485 // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
1486 // by the VMThread.
1487 size_t ObjectSynchronizer::deflate_idle_monitors() {
1488 Thread* current = Thread::current();
1489 if (current->is_Java_thread()) {
1490 // The async deflation request has been processed.
1491 _last_async_deflation_time_ns = os::javaTimeNanos();
1492 set_is_async_deflation_requested(false);
1493 }
1494
1495 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1496 LogStreamHandle(Info, monitorinflation) lsh_info;
1497 LogStream* ls = NULL;
1498 if (log_is_enabled(Debug, monitorinflation)) {
1499 ls = &lsh_debug;
1500 } else if (log_is_enabled(Info, monitorinflation)) {
1501 ls = &lsh_info;
1502 }
1503
1516 // deflated, BUT the MonitorDeflationThread blocked for the final
1517 // safepoint during unlinking.
1518
1519 // Unlink deflated ObjectMonitors from the in-use list.
1520 ResourceMark rm;
1521 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1522 size_t unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer,
1523 &delete_list);
1524 if (current->is_Java_thread()) {
1525 if (ls != NULL) {
1526 timer.stop();
1527 ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
1528 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1529 SIZE_FORMAT ", max=" SIZE_FORMAT,
1530 unlinked_count, in_use_list_ceiling(),
1531 _in_use_list.count(), _in_use_list.max());
1532 }
1533
1534 // A JavaThread needs to handshake in order to safely free the
1535 // ObjectMonitors that were deflated in this cycle.
1536 HandshakeForDeflation hfd_hc;
1537 Handshake::execute(&hfd_hc);
1538
1539 if (ls != NULL) {
1540 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1541 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1542 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1543 timer.start();
1544 }
1545 }
1546
1547 // After the handshake, safely free the ObjectMonitors that were
1548 // deflated in this cycle.
1549 size_t deleted_count = 0;
1550 for (ObjectMonitor* monitor: delete_list) {
1551 delete monitor;
1552 deleted_count++;
1553
1554 if (current->is_Java_thread()) {
1555 // A JavaThread must check for a safepoint/handshake and honor it.
1556 chk_for_block_req(current->as_Java_thread(), "deletion", "deleted_count",
1557 deleted_count, ls, &timer);
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "jfr/jfrEvents.hpp"
28 #include "gc/shared/suspendibleThreadSet.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/allocation.inline.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/handshake.hpp"
41 #include "runtime/interfaceSupport.inline.hpp"
42 #include "runtime/lockStack.inline.hpp"
43 #include "runtime/mutexLocker.hpp"
44 #include "runtime/objectMonitor.hpp"
45 #include "runtime/objectMonitor.inline.hpp"
46 #include "runtime/os.inline.hpp"
47 #include "runtime/osThread.hpp"
48 #include "runtime/perfData.hpp"
49 #include "runtime/safepointMechanism.inline.hpp"
50 #include "runtime/safepointVerifiers.hpp"
51 #include "runtime/sharedRuntime.hpp"
52 #include "runtime/stubRoutines.hpp"
53 #include "runtime/synchronizer.hpp"
54 #include "runtime/thread.inline.hpp"
55 #include "runtime/timer.hpp"
56 #include "runtime/vframe.hpp"
57 #include "runtime/vmThread.hpp"
58 #include "utilities/align.hpp"
59 #include "utilities/dtrace.hpp"
60 #include "utilities/events.hpp"
61 #include "utilities/preserveException.hpp"
62
259 // returns true -- to indicate the call was satisfied.
260 // returns false -- to indicate the call needs the services of the slow-path.
261 // A no-loitering ordinance is in effect for code in the quick_* family
262 // operators: safepoints or indefinite blocking (blocking that might span a
263 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
264 // entry.
265 //
266 // Consider: An interesting optimization is to have the JIT recognize the
267 // following common idiom:
268 // synchronized (someobj) { .... ; notify(); }
269 // That is, we find a notify() or notifyAll() call that immediately precedes
270 // the monitorexit operation. In that case the JIT could fuse the operations
271 // into a single notifyAndExit() runtime primitive.
272
273 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
274 assert(current->thread_state() == _thread_in_Java, "invariant");
275 NoSafepointVerifier nsv;
276 if (obj == NULL) return false; // slow-path for invalid obj
277 const markWord mark = obj->mark();
278
279 if (LockingMode == LM_LIGHTWEIGHT) {
280 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
281 // Degenerate notify
282 // fast-locked by caller so by definition the implied waitset is empty.
283 return true;
284 }
285 } else if (LockingMode == LM_LEGACY) {
286 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
287 // Degenerate notify
288 // stack-locked by caller so by definition the implied waitset is empty.
289 return true;
290 }
291 }
292
293 if (mark.has_monitor()) {
294 ObjectMonitor* const mon = mark.monitor();
295 assert(mon->object() == oop(obj), "invariant");
296 if (mon->owner() != current) return false; // slow-path for IMS exception
297
298 if (mon->first_waiter() != NULL) {
299 // We have one or more waiters. Since this is an inflated monitor
300 // that we own, we can transfer one or more threads from the waitset
301 // to the entrylist here and now, avoiding the slow-path.
302 if (all) {
303 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
304 } else {
305 DTRACE_MONITOR_PROBE(notify, mon, obj, current);
306 }
307 int free_count = 0;
308 do {
309 mon->INotify(current);
310 ++free_count;
340 if (mark.has_monitor()) {
341 ObjectMonitor* const m = mark.monitor();
342 // An async deflation or GC can race us before we manage to make
343 // the ObjectMonitor busy by setting the owner below. If we detect
344 // that race we just bail out to the slow-path here.
345 if (m->object_peek() == NULL) {
346 return false;
347 }
348 JavaThread* const owner = (JavaThread*) m->owner_raw();
349
350 // Lock contention and Transactional Lock Elision (TLE) diagnostics
351 // and observability
352 // Case: light contention possibly amenable to TLE
353 // Case: TLE inimical operations such as nested/recursive synchronization
354
355 if (owner == current) {
356 m->_recursions++;
357 return true;
358 }
359
360 if (LockingMode != LM_LIGHTWEIGHT) {
361 // This Java Monitor is inflated so obj's header will never be
362 // displaced to this thread's BasicLock. Make the displaced header
363 // non-NULL so this BasicLock is not seen as recursive nor as
364 // being locked. We do this unconditionally so that this thread's
365 // BasicLock cannot be mis-interpreted by any stack walkers. For
366 // performance reasons, stack walkers generally first check for
367 // Biased Locking in the object's header, the second check is for
368 // stack-locking in the object's header, the third check is for
369 // recursive stack-locking in the displaced header in the BasicLock,
370 // and last are the inflated Java Monitor (ObjectMonitor) checks.
371 lock->set_displaced_header(markWord::unused_mark());
372 }
373
374 if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
375 assert(m->_recursions == 0, "invariant");
376 return true;
377 }
378 }
379
380 // Note that we could inflate in quick_enter.
381 // This is likely a useful optimization
382 // Critically, in quick_enter() we must not:
383 // -- perform bias revocation, or
384 // -- block indefinitely, or
385 // -- reach a safepoint
386
387 return false; // revert to slow-path
388 }
389
390 // Handle notifications when synchronizing on value based classes
391 void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
392 frame last_frame = current->last_frame();
419 vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
420 if (current->has_last_Java_frame()) {
421 LogStream info_stream(vblog.info());
422 current->print_stack_on(&info_stream);
423 } else {
424 vblog.info("Cannot find the last Java frame");
425 }
426
427 EventSyncOnValueBasedClass event;
428 if (event.should_commit()) {
429 event.set_valueBasedClass(obj->klass());
430 event.commit();
431 }
432 }
433
434 if (bcp_was_adjusted) {
435 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
436 }
437 }
438
// Answers whether every monitor operation must take the inflated
// (heavy) path. LM_MONITOR is only consulted on the listed
// architectures; everywhere else this is hard-wired to false.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64)
  const bool monitors_only = (LockingMode == LM_MONITOR);
  return monitors_only;
#else
  return false;
#endif
}
446
447 // -----------------------------------------------------------------------------
448 // Monitor Enter/Exit
449 // The interpreter and compiler assembly code tries to lock using the fast path
450 // of this algorithm. Make sure to update that code if the following function is
451 // changed. The implementation is extremely sensitive to race condition. Be careful.
452
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  // Synchronizing on a value-based class is discouraged; emit the
  // configured diagnostics (logging and/or JFR event) before proceeding.
  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  if (!useHeavyMonitors()) {
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      LockStack& lock_stack = current->lock_stack();
      if (lock_stack.can_push()) {
        markWord mark = obj()->mark_acquire();
        if (mark.is_neutral()) {
          assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
          // Try to swing into 'fast-locked' state.
          markWord locked_mark = mark.set_fast_locked();
          markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
          if (old_mark == mark) {
            // Successfully fast-locked, push object to lock-stack and return.
            lock_stack.push(obj());
            return;
          }
        }
      }
      // All other paths fall-through to inflate-enter.
    } else if (LockingMode == LM_LEGACY) {
      if (UseBiasedLocking) {
        BiasedLocking::revoke(current, obj);
      }

      markWord mark = obj->mark();
      if (mark.is_neutral()) {
        // Anticipate successful CAS -- the ST of the displaced mark must
        // be visible <= the ST performed by the CAS.
        lock->set_displaced_header(mark);
        if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
          return;
        }
        // Fall through to inflate() ...
      } else if (mark.has_locker() &&
                 current->is_lock_owned((address) mark.locker())) {
        // Recursive stack-lock: record the recursion by storing a NULL
        // displaced header in this frame's BasicLock.
        assert(lock != mark.locker(), "must not re-lock the same lock");
        assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
        lock->set_displaced_header(markWord::from_pointer(NULL));
        return;
      }

      // The object header will never be displaced to this lock,
      // so it does not matter what the value is, except that it
      // must be non-zero to avoid looking like a re-entrant lock,
      // and must not look locked either.
      lock->set_displaced_header(markWord::unused_mark());
    }
  }

  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(current)) {
      return;
    }
  }
}
517
// Unlock `object` on behalf of `current`. The fast paths undo a
// fast-lock (LM_LIGHTWEIGHT) or a stack-lock (LM_LEGACY); every other
// case falls through to inflating the monitor and doing a heavy exit.
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
  if (!useHeavyMonitors()) {
    markWord mark = object->mark();
    if (LockingMode == LM_LIGHTWEIGHT) {
      // Fast-locking does not use the 'lock' argument.
      if (mark.is_fast_locked()) {
        markWord unlocked_mark = mark.set_unlocked();
        markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
        if (old_mark != mark) {
          // Another thread won the CAS, it must have inflated the monitor.
          // It can only have installed an anonymously locked monitor at this point.
          // Fetch that monitor, set owner correctly to this thread, and
          // exit it (allowing waiting threads to enter).
          assert(old_mark.has_monitor(), "must have monitor");
          ObjectMonitor* monitor = old_mark.monitor();
          assert(monitor->is_owner_anonymous(), "must be anonymous owner");
          monitor->set_owner_from_anonymous(current);
          monitor->exit(current);
        }
        // Whether the CAS succeeded or we exited via the inflated
        // monitor, this thread no longer holds the lock: drop it from
        // the thread-local lock stack.
        LockStack& lock_stack = current->lock_stack();
        lock_stack.remove(object);
        return;
      }
    } else if (LockingMode == LM_LEGACY) {
      markWord dhw = lock->displaced_header();
      if (dhw.value() == 0) {
        // If the displaced header is NULL, then this exit matches up with
        // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
        if (mark != markWord::INFLATING()) {
          // Only do diagnostics if we are not racing an inflation. Simply
          // exiting a recursive enter of a Java Monitor that is being
          // inflated is safe; see the has_monitor() comment below.
          assert(!mark.is_neutral(), "invariant");
          assert(!mark.has_locker() ||
                 current->is_lock_owned((address)mark.locker()), "invariant");
          if (mark.has_monitor()) {
            // The BasicLock's displaced_header is marked as a recursive
            // enter and we have an inflated Java Monitor (ObjectMonitor).
            // This is a special case where the Java Monitor was inflated
            // after this thread entered the stack-lock recursively. When a
            // Java Monitor is inflated, we cannot safely walk the Java
            // Monitor owner's stack and update the BasicLocks because a
            // Java Monitor can be asynchronously inflated by a thread that
            // does not own the Java Monitor.
            ObjectMonitor* m = mark.monitor();
            assert(m->object()->mark() == mark, "invariant");
            assert(m->is_entered(current), "invariant");
          }
        }
#endif
        return;
      }

      if (mark == markWord::from_pointer(lock)) {
        // If the object is stack-locked by the current thread, try to
        // swing the displaced header from the BasicLock back to the mark.
        assert(dhw.is_neutral(), "invariant");
        if (object->cas_set_mark(dhw, mark) == mark) {
          return;
        }
      }
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
  if (LockingMode == LM_LIGHTWEIGHT && monitor->is_owner_anonymous()) {
    // It must be us. Pop lock object from lock stack.
    LockStack& lock_stack = current->lock_stack();
    oop popped = lock_stack.pop();
    assert(popped == object, "must be owned by this thread");
    monitor->set_owner_from_anonymous(current);
  }
  monitor->exit(current);
}
596
597 // -----------------------------------------------------------------------------
598 // Class Loader support to workaround deadlocks on the class loader lock objects
599 // Also used by GC
600 // complete_exit()/reenter() are used to wait on a nested lock
601 // i.e. to give up an outer lock completely and then re-enter
602 // Used when holding nested locks - lock acquisition order: lock1 then lock2
603 // 1) complete_exit lock1 - saving recursion count
604 // 2) wait on lock2
605 // 3) when notified on lock2, unlock lock2
606 // 4) reenter lock1 with original recursion count
607 // 5) lock lock2
608 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
609 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
610 if (UseBiasedLocking) {
611 BiasedLocking::revoke(current, obj);
612 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
613 }
738 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
739 if (UseBiasedLocking) {
740 BiasedLocking::revoke(current, obj);
741 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
742 }
743 // The ObjectMonitor* can't be async deflated because the _waiters
744 // field is incremented before ownership is dropped and decremented
745 // after ownership is regained.
746 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
747 monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
748 }
749
750 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
751 JavaThread* current = THREAD;
752 if (UseBiasedLocking) {
753 BiasedLocking::revoke(current, obj);
754 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
755 }
756
757 markWord mark = obj->mark();
758 if (LockingMode == LM_LIGHTWEIGHT) {
759 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
760 // Not inflated so there can't be any waiters to notify.
761 return;
762 }
763 } else if (LockingMode == LM_LEGACY) {
764 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
765 // Not inflated so there can't be any waiters to notify.
766 return;
767 }
768 }
769 // The ObjectMonitor* can't be async deflated until ownership is
770 // dropped by the calling thread.
771 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
772 monitor->notify(CHECK);
773 }
774
775 // NOTE: see comment of notify()
776 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
777 JavaThread* current = THREAD;
778 if (UseBiasedLocking) {
779 BiasedLocking::revoke(current, obj);
780 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
781 }
782
783 markWord mark = obj->mark();
784 if (LockingMode == LM_LIGHTWEIGHT) {
785 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
786 // Not inflated so there can't be any waiters to notify.
787 return;
788 }
789 } else if (LockingMode == LM_LEGACY) {
790 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
791 // Not inflated so there can't be any waiters to notify.
792 return;
793 }
794 }
795 // The ObjectMonitor* can't be async deflated until ownership is
796 // dropped by the calling thread.
797 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
798 monitor->notifyAll(CHECK);
799 }
800
801 // -----------------------------------------------------------------------------
802 // Hash Code handling
803
// Globals shared by the hash-code machinery below. Each hot field is
// padded out to a full OM_CACHE_LINE_SIZE so it never shares a cache
// line with its neighbor or with adjacent data.
struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
  // Hot RW variable -- Sequester to avoid false-sharing
  // (bumped by the hashCode == 3 sequential hashing scheme).
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

// The single process-wide instance.
static SharedGlobals GVars;
816
817 static markWord read_stable_mark(oop obj) {
818 markWord mark = obj->mark();
819 if (!mark.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT) {
820 // New lightweight locking does not use the markWord::INFLATING() protocol.
821 return mark; // normal fast-path return
822 }
823
824 int its = 0;
825 for (;;) {
826 markWord mark = obj->mark();
827 if (!mark.is_being_inflated()) {
828 return mark; // normal fast-path return
829 }
830
831 // The object is being inflated by some other thread.
832 // The caller of read_stable_mark() must wait for inflation to complete.
833 // Avoid live-lock.
834
835 ++its;
836 if (its > 10000 || !os::is_MP()) {
837 if (its & 1) {
838 os::naked_yield();
839 } else {
840 // Note that the following code attenuates the livelock problem but is not
909 value = 1; // for sensitivity testing
910 } else if (hashCode == 3) {
911 value = ++GVars.hc_sequence;
912 } else if (hashCode == 4) {
913 value = cast_from_oop<intptr_t>(obj);
914 } else {
915 // Marsaglia's xor-shift scheme with thread-specific state
916 // This is probably the best overall implementation -- we'll
917 // likely make this the default in future releases.
918 unsigned t = current->_hashStateX;
919 t ^= (t << 11);
920 current->_hashStateX = current->_hashStateY;
921 current->_hashStateY = current->_hashStateZ;
922 current->_hashStateZ = current->_hashStateW;
923 unsigned v = current->_hashStateW;
924 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
925 current->_hashStateW = v;
926 value = v;
927 }
928
929 value &= UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask;
930 if (value == 0) value = 0xBAD;
931 assert(value != markWord::no_hash, "invariant");
932 return value;
933 }
934
935 // Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
936 // calculations as part of JVM/TI tagging.
937 static bool is_lock_owned(Thread* thread, oop obj) {
938 assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
939 return thread->is_Java_thread() ? reinterpret_cast<JavaThread*>(thread)->lock_stack().contains(obj) : false;
940 }
941
942 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
943 if (UseBiasedLocking) {
944 // NOTE: many places throughout the JVM do not expect a safepoint
945 // to be taken here. However, we only ever bias Java instances and all
946 // of the call sites of identity_hash that might revoke biases have
947 // been checked to make sure they can handle a safepoint. The
948 // added check of the bias pattern is to avoid useless calls to
949 // thread-local storage.
950 if (obj->mark().has_bias_pattern()) {
951 // Handle for oop obj in case of STW safepoint
952 Handle hobj(current, obj);
953 if (SafepointSynchronize::is_at_safepoint()) {
954 BiasedLocking::revoke_at_safepoint(hobj);
955 } else {
956 BiasedLocking::revoke(current->as_Java_thread(), hobj);
957 }
958 obj = hobj();
959 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
960 }
961 }
995
996 // Separate load of dmw/header above from the loads in
997 // is_being_async_deflated().
998
999 // dmw/header and _contentions may get written by different threads.
1000 // Make sure to observe them in the same order when having several observers.
1001 OrderAccess::loadload_for_IRIW();
1002
1003 if (monitor->is_being_async_deflated()) {
1004 // But we can't safely use the hash if we detect that async
1005 // deflation has occurred. So we attempt to restore the
1006 // header/dmw to the object's header so that we only retry
1007 // once if the deflater thread happens to be slow.
1008 monitor->install_displaced_markword_in_object(obj);
1009 continue;
1010 }
1011 return hash;
1012 }
1013 // Fall thru so we only have one place that installs the hash in
1014 // the ObjectMonitor.
1015 } else if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked() && is_lock_owned(current, obj)) {
1016 // This is a fast lock owned by the calling thread so use the
1017 // markWord from the object.
1018 hash = mark.hash();
1019 if (hash != 0) { // if it has a hash, just return it
1020 return hash;
1021 }
1022 } else if (LockingMode == LM_LEGACY && mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
1023 // This is a stack lock owned by the calling thread so fetch the
1024 // displaced markWord from the BasicLock on the stack.
1025 temp = mark.displaced_mark_helper();
1026 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1027 hash = temp.hash();
1028 if (hash != 0) { // if it has a hash, just return it
1029 return hash;
1030 }
1031 // WARNING:
1032 // The displaced header in the BasicLock on a thread's stack
1033 // is strictly immutable. It CANNOT be changed in ANY cases.
1034 // So we have to inflate the stack lock into an ObjectMonitor
1035 // even if the current thread owns the lock. The BasicLock on
1036 // a thread's stack can be asynchronously read by other threads
1037 // during an inflate() call so any change to that stack memory
1038 // may not propagate to other threads correctly.
1039 }
1040
1041 // Inflate the monitor to set the hash.
1042
1079
1080 // Deprecated -- use FastHashCode() instead.
1081
1082 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1083 return FastHashCode(Thread::current(), obj());
1084 }
1085
1086
1087 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1088 Handle h_obj) {
1089 if (UseBiasedLocking) {
1090 BiasedLocking::revoke(current, h_obj);
1091 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1092 }
1093
1094 assert(current == JavaThread::current(), "Can only be called on current thread");
1095 oop obj = h_obj();
1096
1097 markWord mark = read_stable_mark(obj);
1098
1099 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1100 // stack-locked case, header points into owner's stack
1101 return current->is_lock_owned((address)mark.locker());
1102 }
1103
1104 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1105 // fast-locking case, see if lock is in current's lock stack
1106 return current->lock_stack().contains(h_obj());
1107 }
1108
1109 // Contended case, header points to ObjectMonitor (tagged pointer)
1110 if (mark.has_monitor()) {
1111 // The first stage of async deflation does not affect any field
1112 // used by this comparison so the ObjectMonitor* is usable here.
1113 ObjectMonitor* monitor = mark.monitor();
1114 return monitor->is_entered(current) != 0;
1115 }
1116 // Unlocked case, header in place
1117 assert(mark.is_neutral(), "sanity check");
1118 return false;
1119 }
1120
// FIXME: jvmti should call this
// Returns the JavaThread (looked up via t_list) that currently owns
// the lock on h_obj, or NULL when the object is unlocked or the owner
// cannot be resolved. Unlike current_thread_holds_lock(), this can be
// asked about locks held by other threads.
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    // Revoking at a safepoint vs. from a JavaThread requires different entry points.
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(JavaThread::current(), h_obj);
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  if (LockingMode == LM_LEGACY && mark.has_locker()) {
    // stack-locked so header points into owner's stack.
    // owning_thread_from_monitor_owner() may also return null here:
    return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
  }

  if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
    // fast-locked so get owner from the object.
    // owning_thread_from_object() may also return null here:
    return Threads::owning_thread_from_object(t_list, h_obj());
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    // owning_thread_from_monitor() may also return null here:
    return Threads::owning_thread_from_monitor(t_list, monitor);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}
1164
1165 // Visitors ...
1166
1167 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
1168 MonitorList::Iterator iter = _in_use_list.iterator();
1169 while (iter.has_next()) {
1170 ObjectMonitor* mid = iter.next();
1171 if (mid->owner() != thread) {
1172 continue;
1173 }
1174 if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
1338 void ObjectSynchronizer::inflate_helper(oop obj) {
1339 markWord mark = obj->mark();
1340 if (mark.has_monitor()) {
1341 ObjectMonitor* monitor = mark.monitor();
1342 markWord dmw = monitor->header();
1343 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1344 return;
1345 }
1346 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1347 }
1348
1349 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1350 const InflateCause cause) {
1351 EventJavaMonitorInflate event;
1352
1353 for (;;) {
1354 const markWord mark = object->mark();
1355 assert(!mark.has_bias_pattern(), "invariant");
1356
1357 // The mark can be in one of the following states:
1358 // * inflated - Just return if using stack-locking.
1359 // If using fast-locking and the ObjectMonitor owner
1360 // is anonymous and the current thread owns the
1361 // object lock, then we make the current thread the
1362 // ObjectMonitor owner and remove the lock from the
1363 // current thread's lock stack.
1364 // * fast-locked - Coerce it to inflated from fast-locked.
1365 // * stack-locked - Coerce it to inflated from stack-locked.
1366 // * INFLATING - busy wait for conversion to complete
1367 // * Neutral - aggressively inflate the object.
1368 // * BIASED - Illegal. We should never see this
1369
1370 // CASE: inflated
1371 if (mark.has_monitor()) {
1372 ObjectMonitor* inf = mark.monitor();
1373 markWord dmw = inf->header();
1374 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1375 if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
1376 inf->set_owner_from_anonymous(current);
1377 assert(current->is_Java_thread(), "must be Java thread");
1378 reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
1379 }
1380 return inf;
1381 }
1382
1383 // CASE: inflation in progress - inflating over a stack-lock.
1384 // Some other thread is converting from stack-locked to inflated.
1385 // Only that thread can complete inflation -- other threads must wait.
1386 // The INFLATING value is transient.
1387 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1388 // We could always eliminate polling by parking the thread on some auxiliary list.
1389 if (LockingMode != LM_LIGHTWEIGHT) {
1390 // New lightweight locking does not use INFLATING.
1391 // CASE: inflation in progress - inflating over a stack-lock.
1392 // Some other thread is converting from stack-locked to inflated.
1393 // Only that thread can complete inflation -- other threads must wait.
1394 // The INFLATING value is transient.
1395 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1396 // We could always eliminate polling by parking the thread on some auxiliary list.
1397 if (mark == markWord::INFLATING()) {
1398 read_stable_mark(object);
1399 continue;
1400 }
1401 }
1402
1403 // CASE: fast-locked
1404 // Could be fast-locked either by current or by some other thread.
1405 //
1406 // Note that we allocate the ObjectMonitor speculatively, _before_
1407 // attempting to set the object's mark to the new ObjectMonitor. If
1408 // this thread owns the monitor, then we set the ObjectMonitor's
1409 // owner to this thread. Otherwise, we set the ObjectMonitor's owner
1410 // to anonymous. If we lose the race to set the object's mark to the
1411 // new ObjectMonitor, then we just delete it and loop around again.
1412 //
1413 LogStreamHandle(Trace, monitorinflation) lsh;
1414 if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
1415 ObjectMonitor* monitor = new ObjectMonitor(object);
1416 monitor->set_header(mark.set_unlocked());
1417 bool own = is_lock_owned(current, object);
1418 if (own) {
1419 // Owned by us.
1420 monitor->set_owner_from(NULL, current);
1421 } else {
1422 // Owned by somebody else.
1423 monitor->set_owner_anonymous();
1424 }
1425 markWord monitor_mark = markWord::encode(monitor);
1426 markWord old_mark = object->cas_set_mark(monitor_mark, mark);
1427 if (old_mark == mark) {
1428 // Success! Return inflated monitor.
1429 if (own) {
1430 assert(current->is_Java_thread(), "must be Java thread");
1431 reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
1432 }
1433 // Once the ObjectMonitor is configured and object is associated
1434 // with the ObjectMonitor, it is safe to allow async deflation:
1435 _in_use_list.add(monitor);
1436
1437 // Hopefully the performance counters are allocated on distinct
1438 // cache lines to avoid false sharing on MP systems ...
1439 OM_PERFDATA_OP(Inflations, inc());
1440 if (log_is_enabled(Trace, monitorinflation)) {
1441 ResourceMark rm(current);
1442 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1443 INTPTR_FORMAT ", type='%s'", p2i(object),
1444 object->mark().value(), object->klass()->external_name());
1445 }
1446 if (event.should_commit()) {
1447 post_monitor_inflate_event(&event, object, cause);
1448 }
1449 return monitor;
1450 } else {
1451 delete monitor;
1452 continue; // Interference -- just retry
1453 }
1454 }
1455
1456 // CASE: stack-locked
1457 // Could be stack-locked either by this thread or by some other thread.
1458 //
1459 // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
1460 // to install INFLATING into the mark word. We originally installed INFLATING,
1461 // allocated the ObjectMonitor, and then finally STed the address of the
1462 // ObjectMonitor into the mark. This was correct, but artificially lengthened
1463 // the interval in which INFLATING appeared in the mark, thus increasing
1464 // the odds of inflation contention.
1465
1466 if (LockingMode == LM_LEGACY && mark.has_locker()) {
1467 assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking");
1468 ObjectMonitor* m = new ObjectMonitor(object);
1469 // Optimistically prepare the ObjectMonitor - anticipate successful CAS
1470 // We do this before the CAS in order to minimize the length of time
1471 // in which INFLATING appears in the mark.
1472
1473 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1474 if (cmp != mark) {
1475 delete m;
1476 continue; // Interference -- just retry
1477 }
1478
1479 // We've successfully installed INFLATING (0) into the mark-word.
1480 // This is the only case where 0 will appear in a mark-word.
1481 // Only the singular thread that successfully swings the mark-word
1482 // to 0 can perform (or more precisely, complete) inflation.
1483 //
1484 // Why do we CAS a 0 into the mark-word instead of just CASing the
1485 // mark-word from the stack-locked value directly to the new inflated state?
1486 // Consider what happens when a thread unlocks a stack-locked object.
1487 // It attempts to use CAS to swing the displaced header value from the
1640 if (current->is_Java_thread()) {
1641 // A JavaThread must check for a safepoint/handshake and honor it.
1642 chk_for_block_req(current->as_Java_thread(), "deflation", "deflated_count",
1643 deflated_count, ls, timer_p);
1644 }
1645 }
1646
1647 return deflated_count;
1648 }
1649
// Handshake closure whose per-thread work is (almost) empty: forcing
// every JavaThread through the handshake is itself the point. After it
// completes, no JavaThread can still hold a reference to an
// ObjectMonitor that was unlinked from the in-use list beforehand, so
// those monitors can be freed safely (see deflate_idle_monitors()).
class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  void do_thread(Thread* thread) {
    // Tracing only; the handshake rendezvous is the synchronization.
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};
1659
1660 class VM_RendezvousGCThreads : public VM_Operation {
1661 public:
1662 bool evaluate_at_safepoint() const override { return false; }
1663 VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
1664 void doit() override {
1665 SuspendibleThreadSet::synchronize();
1666 SuspendibleThreadSet::desynchronize();
1667 };
1668 };
1669
1670 // This function is called by the MonitorDeflationThread to deflate
1671 // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
1672 // by the VMThread.
1673 size_t ObjectSynchronizer::deflate_idle_monitors() {
1674 Thread* current = Thread::current();
1675 if (current->is_Java_thread()) {
1676 // The async deflation request has been processed.
1677 _last_async_deflation_time_ns = os::javaTimeNanos();
1678 set_is_async_deflation_requested(false);
1679 }
1680
1681 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1682 LogStreamHandle(Info, monitorinflation) lsh_info;
1683 LogStream* ls = NULL;
1684 if (log_is_enabled(Debug, monitorinflation)) {
1685 ls = &lsh_debug;
1686 } else if (log_is_enabled(Info, monitorinflation)) {
1687 ls = &lsh_info;
1688 }
1689
1702 // deflated, BUT the MonitorDeflationThread blocked for the final
1703 // safepoint during unlinking.
1704
1705 // Unlink deflated ObjectMonitors from the in-use list.
1706 ResourceMark rm;
1707 GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
1708 size_t unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer,
1709 &delete_list);
1710 if (current->is_Java_thread()) {
1711 if (ls != NULL) {
1712 timer.stop();
1713 ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
1714 ", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
1715 SIZE_FORMAT ", max=" SIZE_FORMAT,
1716 unlinked_count, in_use_list_ceiling(),
1717 _in_use_list.count(), _in_use_list.max());
1718 }
1719
1720 // A JavaThread needs to handshake in order to safely free the
1721 // ObjectMonitors that were deflated in this cycle.
1722 // Also, we sync and desync GC threads around the handshake, so that they can
1723 // safely read the mark-word and look-through to the object-monitor, without
1724 // being afraid that the object-monitor is going away.
1725 HandshakeForDeflation hfd_hc;
1726 Handshake::execute(&hfd_hc);
1727 VM_RendezvousGCThreads sync_gc;
1728 VMThread::execute(&sync_gc);
1729
1730 if (ls != NULL) {
1731 ls->print_cr("after handshaking: in_use_list stats: ceiling="
1732 SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
1733 in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
1734 timer.start();
1735 }
1736 }
1737
1738 // After the handshake, safely free the ObjectMonitors that were
1739 // deflated in this cycle.
1740 size_t deleted_count = 0;
1741 for (ObjectMonitor* monitor: delete_list) {
1742 delete monitor;
1743 deleted_count++;
1744
1745 if (current->is_Java_thread()) {
1746 // A JavaThread must check for a safepoint/handshake and honor it.
1747 chk_for_block_req(current->as_Java_thread(), "deletion", "deleted_count",
1748 deleted_count, ls, &timer);
|