266 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
267 // removed from the system.
268 //
269 // Note: If the _in_use_list max exceeds the ceiling, then
270 // monitors_used_above_threshold() will use the in_use_list max instead
271 // of the thread count derived ceiling because we have used more
272 // ObjectMonitors than the estimated average.
273 //
274 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
275 // no-progress async monitor deflation cycles in a row, then the ceiling
276 // is adjusted upwards by monitors_used_above_threshold().
277 //
278 // Start the ceiling with the estimate for one thread in initialize()
279 // which is called after cmd line options are processed.
// Ceiling on the in-use ObjectMonitor list; seeded in initialize() and
// adjusted as threads come and go (see the comment block above).
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the most recent async deflation cycle.
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Consecutive no-progress async deflation cycles; per the comment above,
// reaching NoAsyncDeflationProgressMax causes the ceiling to be raised.
static uintx _no_progress_cnt = 0;
285
286 // =====================> Quick functions
287
288 // The quick_* forms are special fast-path variants used to improve
289 // performance. In the simplest case, a "quick_*" implementation could
290 // simply return false, in which case the caller will perform the necessary
291 // state transitions and call the slow-path form.
292 // The fast-path is designed to handle frequently arising cases in an efficient
293 // manner and is just a degenerate "optimistic" variant of the slow-path.
294 // returns true -- to indicate the call was satisfied.
295 // returns false -- to indicate the call needs the services of the slow-path.
296 // A no-loitering ordinance is in effect for code in the quick_* family
297 // operators: safepoints or indefinite blocking (blocking that might span a
298 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
299 // entry.
300 //
301 // Consider: An interesting optimization is to have the JIT recognize the
302 // following common idiom:
303 // synchronized (someobj) { .... ; notify(); }
304 // That is, we find a notify() or notifyAll() call that immediately precedes
305 // the monitorexit operation. In that case the JIT could fuse the operations
306 // into a single notifyAndExit() runtime primitive.
307
308 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
309 assert(current->thread_state() == _thread_in_Java, "invariant");
310 NoSafepointVerifier nsv;
311 if (obj == NULL) return false; // slow-path for invalid obj
312 const markWord mark = obj->mark();
313
314 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
315 // Degenerate notify
316 // stack-locked by caller so by definition the implied waitset is empty.
317 return true;
318 }
319
320 if (mark.has_monitor()) {
321 ObjectMonitor* const mon = mark.monitor();
322 assert(mon->object() == oop(obj), "invariant");
323 if (mon->owner() != current) return false; // slow-path for IMS exception
324
325 if (mon->first_waiter() != NULL) {
326 // We have one or more waiters. Since this is an inflated monitor
327 // that we own, we can transfer one or more threads from the waitset
328 // to the entrylist here and now, avoiding the slow-path.
329 if (all) {
330 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
331 } else {
340 }
341 return true;
342 }
343
344 // other IMS exception states take the slow-path
345 return false;
346 }
347
348
349 // The LockNode emitted directly at the synchronization site would have
350 // been too big if it were to have included support for the cases of inflated
351 // recursive enter and exit, so they go here instead.
352 // Note that we can't safely call AsyncPrintJavaStack() from within
353 // quick_enter() as our thread state remains _in_Java.
354
355 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
356 BasicLock * lock) {
357 assert(current->thread_state() == _thread_in_Java, "invariant");
358 NoSafepointVerifier nsv;
359 if (obj == NULL) return false; // Need to throw NPE
360
361 if (obj->klass()->is_value_based()) {
362 return false;
363 }
364
365 const markWord mark = obj->mark();
366
367 if (mark.has_monitor()) {
368 ObjectMonitor* const m = mark.monitor();
369 // An async deflation or GC can race us before we manage to make
370 // the ObjectMonitor busy by setting the owner below. If we detect
371 // that race we just bail out to the slow-path here.
372 if (m->object_peek() == NULL) {
373 return false;
374 }
375 JavaThread* const owner = (JavaThread*) m->owner_raw();
376
377 // Lock contention and Transactional Lock Elision (TLE) diagnostics
378 // and observability
379 // Case: light contention possibly amenable to TLE
459 if (bcp_was_adjusted) {
460 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
461 }
462 }
463
// Returns the value of the UseHeavyMonitors flag on the platforms that
// support it; on all other platforms heavy-monitor-only locking is not
// available and this always answers false.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64)
  return UseHeavyMonitors;
#else
  return false;
#endif
}
471
472 // -----------------------------------------------------------------------------
473 // Monitor Enter/Exit
474 // The interpreter and compiler assembly code tries to lock using the fast path
475 // of this algorithm. Make sure to update that code if the following function is
476 // changed. The implementation is extremely sensitive to race condition. Be careful.
477
478 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
479 if (obj->klass()->is_value_based()) {
480 handle_sync_on_value_based_class(obj, current);
481 }
482
483 current->inc_held_monitor_count();
484
485 if (!useHeavyMonitors()) {
486 markWord mark = obj->mark();
487 if (mark.is_neutral()) {
488 // Anticipate successful CAS -- the ST of the displaced mark must
489 // be visible <= the ST performed by the CAS.
490 lock->set_displaced_header(mark);
491 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
492 return;
493 }
494 // Fall through to inflate() ...
495 } else if (mark.has_locker() &&
496 current->is_lock_owned((address)mark.locker())) {
497 assert(lock != mark.locker(), "must not re-lock the same lock");
498 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
508 } else if (VerifyHeavyMonitors) {
509 guarantee(!obj->mark().has_locker(), "must not be stack-locked");
510 }
511
512 // An async deflation can race after the inflate() call and before
513 // enter() can make the ObjectMonitor busy. enter() returns false if
514 // we have lost the race to async deflation and we simply try again.
515 while (true) {
516 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
517 if (monitor->enter(current)) {
518 return;
519 }
520 }
521 }
522
523 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
524 current->dec_held_monitor_count();
525
526 if (!useHeavyMonitors()) {
527 markWord mark = object->mark();
528
529 markWord dhw = lock->displaced_header();
530 if (dhw.value() == 0) {
531 // If the displaced header is NULL, then this exit matches up with
532 // a recursive enter. No real work to do here except for diagnostics.
533 #ifndef PRODUCT
534 if (mark != markWord::INFLATING()) {
535 // Only do diagnostics if we are not racing an inflation. Simply
536 // exiting a recursive enter of a Java Monitor that is being
537 // inflated is safe; see the has_monitor() comment below.
538 assert(!mark.is_neutral(), "invariant");
539 assert(!mark.has_locker() ||
540 current->is_lock_owned((address)mark.locker()), "invariant");
541 if (mark.has_monitor()) {
542 // The BasicLock's displaced_header is marked as a recursive
543 // enter and we have an inflated Java Monitor (ObjectMonitor).
544 // This is a special case where the Java Monitor was inflated
545 // after this thread entered the stack-lock recursively. When a
546 // Java Monitor is inflated, we cannot safely walk the Java
547 // Monitor owner's stack and update the BasicLocks because a
571 // We have to take the slow-path of possible inflation and then exit.
572 // The ObjectMonitor* can't be async deflated until ownership is
573 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
574 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
575 monitor->exit(current);
576 }
577
578 // -----------------------------------------------------------------------------
579 // Class Loader support to workaround deadlocks on the class loader lock objects
580 // Also used by GC
581 // complete_exit()/reenter() are used to wait on a nested lock
582 // i.e. to give up an outer lock completely and then re-enter
583 // Used when holding nested locks - lock acquisition order: lock1 then lock2
584 // 1) complete_exit lock1 - saving recursion count
585 // 2) wait on lock2
586 // 3) when notified on lock2, unlock lock2
587 // 4) reenter lock1 with original recursion count
588 // 5) lock lock2
589 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
590 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
591 // The ObjectMonitor* can't be async deflated until ownership is
592 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
593 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
594 intx recur_count = monitor->complete_exit(current);
595 current->dec_held_monitor_count(recur_count + 1);
596 return recur_count;
597 }
598
599 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
600 void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {
601 // An async deflation can race after the inflate() call and before
602 // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
603 // enter() returns false if we have lost the race to async deflation
604 // and we simply try again.
605 while (true) {
606 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
607 if (monitor->reenter(recursions, current)) {
608 current->inc_held_monitor_count(recursions + 1);
609 return;
610 }
611 }
612 }
613
614 // -----------------------------------------------------------------------------
615 // JNI locks on java objects
616 // NOTE: must use heavy weight monitor to handle jni monitor enter
617 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
618 if (obj->klass()->is_value_based()) {
619 handle_sync_on_value_based_class(obj, current);
620 }
621
622 // the current locking is from JNI instead of Java code
623 current->set_current_pending_monitor_is_from_java(false);
624 // An async deflation can race after the inflate() call and before
625 // enter() can make the ObjectMonitor busy. enter() returns false if
626 // we have lost the race to async deflation and we simply try again.
627 while (true) {
628 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
629 if (monitor->enter(current)) {
630 current->inc_held_monitor_count(1, true);
631 break;
632 }
633 }
634 current->set_current_pending_monitor_is_from_java(true);
635 }
636
637 // NOTE: must use heavy weight monitor to handle jni monitor exit
638 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
639 JavaThread* current = THREAD;
640
641 // The ObjectMonitor* can't be async deflated until ownership is
642 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
643 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
644 // If this thread has locked the object, exit the monitor. We
645 // intentionally do not use CHECK on check_owner because we must exit the
646 // monitor even if an exception was already pending.
647 if (monitor->check_owner(THREAD)) {
648 monitor->exit(current);
649 current->dec_held_monitor_count(1, true);
650 }
651 }
652
653 // -----------------------------------------------------------------------------
654 // Internal VM locks on java objects
655 // standard constructor, allows locking failures
656 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
657 _thread = thread;
658 _thread->check_for_valid_safepoint_state();
659 _obj = obj;
660
661 if (_obj() != NULL) {
662 ObjectSynchronizer::enter(_obj, &_lock, _thread);
663 }
664 }
665
666 ObjectLocker::~ObjectLocker() {
667 if (_obj() != NULL) {
668 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
669 }
670 }
671
672
673 // -----------------------------------------------------------------------------
674 // Wait/Notify/NotifyAll
675 // NOTE: must use heavy weight monitor to handle wait()
676 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
677 JavaThread* current = THREAD;
678 if (millis < 0) {
679 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
680 }
681 // The ObjectMonitor* can't be async deflated because the _waiters
682 // field is incremented before ownership is dropped and decremented
683 // after ownership is regained.
684 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
685
686 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
687 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
688
689 // This dummy call is in place to get around dtrace bug 6254741. Once
690 // that's fixed we can uncomment the following line, remove the call
691 // and change this function back into a "void" func.
692 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
693 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
694 return ret_code;
695 }
696
697 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
698 JavaThread* current = THREAD;
699
700 markWord mark = obj->mark();
701 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
702 // Not inflated so there can't be any waiters to notify.
703 return;
704 }
705 // The ObjectMonitor* can't be async deflated until ownership is
706 // dropped by the calling thread.
707 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
708 monitor->notify(CHECK);
709 }
710
711 // NOTE: see comment of notify()
712 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
713 JavaThread* current = THREAD;
714
715 markWord mark = obj->mark();
716 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
717 // Not inflated so there can't be any waiters to notify.
718 return;
719 }
720 // The ObjectMonitor* can't be async deflated until ownership is
721 // dropped by the calling thread.
722 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
723 monitor->notifyAll(CHECK);
724 }
725
726 // -----------------------------------------------------------------------------
727 // Hash Code handling
728
729 struct SharedGlobals {
730 char _pad_prefix[OM_CACHE_LINE_SIZE];
731 // This is a highly shared mostly-read variable.
732 // To avoid false-sharing it needs to be the sole occupant of a cache line.
733 volatile int stw_random;
840 // This is probably the best overall implementation -- we'll
841 // likely make this the default in future releases.
842 unsigned t = current->_hashStateX;
843 t ^= (t << 11);
844 current->_hashStateX = current->_hashStateY;
845 current->_hashStateY = current->_hashStateZ;
846 current->_hashStateZ = current->_hashStateW;
847 unsigned v = current->_hashStateW;
848 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
849 current->_hashStateW = v;
850 value = v;
851 }
852
853 value &= markWord::hash_mask;
854 if (value == 0) value = 0xBAD;
855 assert(value != markWord::no_hash, "invariant");
856 return value;
857 }
858
859 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
860
861 while (true) {
862 ObjectMonitor* monitor = NULL;
863 markWord temp, test;
864 intptr_t hash;
865 markWord mark = read_stable_mark(obj);
866 if (VerifyHeavyMonitors) {
867 assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
868 guarantee(!mark.has_locker(), "must not be stack locked");
869 }
870 if (mark.is_neutral()) { // if this is a normal header
871 hash = mark.hash();
872 if (hash != 0) { // if it has a hash, just return it
873 return hash;
874 }
875 hash = get_next_hash(current, obj); // get a new hash
876 temp = mark.copy_set_hash(hash); // merge the hash into header
877 // try to install the hash
878 test = obj->cas_set_mark(temp, mark);
879 if (test == mark) { // if the hash was installed, return it
953 hash = test.hash();
954 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
955 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
956 }
957 if (monitor->is_being_async_deflated()) {
958 // If we detect that async deflation has occurred, then we
959 // attempt to restore the header/dmw to the object's header
960 // so that we only retry once if the deflater thread happens
961 // to be slow.
962 monitor->install_displaced_markword_in_object(obj);
963 continue;
964 }
965 }
966 // We finally get the hash.
967 return hash;
968 }
969 }
970
971 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
972 Handle h_obj) {
973 assert(current == JavaThread::current(), "Can only be called on current thread");
974 oop obj = h_obj();
975
976 markWord mark = read_stable_mark(obj);
977
978 // Uncontended case, header points to stack
979 if (mark.has_locker()) {
980 return current->is_lock_owned((address)mark.locker());
981 }
982 // Contended case, header points to ObjectMonitor (tagged pointer)
983 if (mark.has_monitor()) {
984 // The first stage of async deflation does not affect any field
985 // used by this comparison so the ObjectMonitor* is usable here.
986 ObjectMonitor* monitor = mark.monitor();
987 return monitor->is_entered(current) != 0;
988 }
989 // Unlocked case, header in place
990 assert(mark.is_neutral(), "sanity check");
991 return false;
992 }
1185 event->set_monitorClass(obj->klass());
1186 event->set_address((uintptr_t)(void*)obj);
1187 event->set_cause((u1)cause);
1188 event->commit();
1189 }
1190
1191 // Fast path code shared by multiple functions
1192 void ObjectSynchronizer::inflate_helper(oop obj) {
1193 markWord mark = obj->mark_acquire();
1194 if (mark.has_monitor()) {
1195 ObjectMonitor* monitor = mark.monitor();
1196 markWord dmw = monitor->header();
1197 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1198 return;
1199 }
1200 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1201 }
1202
1203 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1204 const InflateCause cause) {
1205 EventJavaMonitorInflate event;
1206
1207 for (;;) {
1208 const markWord mark = object->mark_acquire();
1209
1210 // The mark can be in one of the following states:
1211 // * Inflated - just return
1212 // * Stack-locked - coerce it to inflated
1213 // * INFLATING - busy wait for conversion to complete
1214 // * Neutral - aggressively inflate the object.
1215
1216 // CASE: inflated
1217 if (mark.has_monitor()) {
1218 ObjectMonitor* inf = mark.monitor();
1219 markWord dmw = inf->header();
1220 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1221 return inf;
1222 }
1223
1224 // CASE: inflation in progress - inflating over a stack-lock.
|
266 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
267 // removed from the system.
268 //
269 // Note: If the _in_use_list max exceeds the ceiling, then
270 // monitors_used_above_threshold() will use the in_use_list max instead
271 // of the thread count derived ceiling because we have used more
272 // ObjectMonitors than the estimated average.
273 //
274 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
275 // no-progress async monitor deflation cycles in a row, then the ceiling
276 // is adjusted upwards by monitors_used_above_threshold().
277 //
278 // Start the ceiling with the estimate for one thread in initialize()
279 // which is called after cmd line options are processed.
// Ceiling on the in-use ObjectMonitor list; seeded in initialize() and
// adjusted as threads come and go (see the comment block above).
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the most recent async deflation cycle.
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Consecutive no-progress async deflation cycles; per the comment above,
// reaching NoAsyncDeflationProgressMax causes the ceiling to be raised.
static uintx _no_progress_cnt = 0;
285
// Valhalla: inline (value) types have no identity and cannot be
// synchronized on. Throw IllegalMonitorStateException (naming the class)
// and return from the enclosing function when obj is an inline type.
// Use this form in functions returning void.
#define CHECK_THROW_NOSYNC_IMSE(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
    JavaThread* THREAD = current;             \
    ResourceMark rm(THREAD);                  \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

// Same as CHECK_THROW_NOSYNC_IMSE, but for functions that return a value
// (THROW_MSG_0 returns 0 on throw).
#define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
  if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
    JavaThread* THREAD = current;             \
    ResourceMark rm(THREAD);                  \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
299
300 // =====================> Quick functions
301
302 // The quick_* forms are special fast-path variants used to improve
303 // performance. In the simplest case, a "quick_*" implementation could
304 // simply return false, in which case the caller will perform the necessary
305 // state transitions and call the slow-path form.
306 // The fast-path is designed to handle frequently arising cases in an efficient
307 // manner and is just a degenerate "optimistic" variant of the slow-path.
308 // returns true -- to indicate the call was satisfied.
309 // returns false -- to indicate the call needs the services of the slow-path.
310 // A no-loitering ordinance is in effect for code in the quick_* family
311 // operators: safepoints or indefinite blocking (blocking that might span a
312 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
313 // entry.
314 //
315 // Consider: An interesting optimization is to have the JIT recognize the
316 // following common idiom:
317 // synchronized (someobj) { .... ; notify(); }
318 // That is, we find a notify() or notifyAll() call that immediately precedes
319 // the monitorexit operation. In that case the JIT could fuse the operations
320 // into a single notifyAndExit() runtime primitive.
321
322 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
323 assert(current->thread_state() == _thread_in_Java, "invariant");
324 NoSafepointVerifier nsv;
325 if (obj == NULL) return false; // slow-path for invalid obj
326 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
327 const markWord mark = obj->mark();
328
329 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
330 // Degenerate notify
331 // stack-locked by caller so by definition the implied waitset is empty.
332 return true;
333 }
334
335 if (mark.has_monitor()) {
336 ObjectMonitor* const mon = mark.monitor();
337 assert(mon->object() == oop(obj), "invariant");
338 if (mon->owner() != current) return false; // slow-path for IMS exception
339
340 if (mon->first_waiter() != NULL) {
341 // We have one or more waiters. Since this is an inflated monitor
342 // that we own, we can transfer one or more threads from the waitset
343 // to the entrylist here and now, avoiding the slow-path.
344 if (all) {
345 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
346 } else {
355 }
356 return true;
357 }
358
359 // other IMS exception states take the slow-path
360 return false;
361 }
362
363
364 // The LockNode emitted directly at the synchronization site would have
365 // been too big if it were to have included support for the cases of inflated
366 // recursive enter and exit, so they go here instead.
367 // Note that we can't safely call AsyncPrintJavaStack() from within
368 // quick_enter() as our thread state remains _in_Java.
369
370 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
371 BasicLock * lock) {
372 assert(current->thread_state() == _thread_in_Java, "invariant");
373 NoSafepointVerifier nsv;
374 if (obj == NULL) return false; // Need to throw NPE
375 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
376
377 if (obj->klass()->is_value_based()) {
378 return false;
379 }
380
381 const markWord mark = obj->mark();
382
383 if (mark.has_monitor()) {
384 ObjectMonitor* const m = mark.monitor();
385 // An async deflation or GC can race us before we manage to make
386 // the ObjectMonitor busy by setting the owner below. If we detect
387 // that race we just bail out to the slow-path here.
388 if (m->object_peek() == NULL) {
389 return false;
390 }
391 JavaThread* const owner = (JavaThread*) m->owner_raw();
392
393 // Lock contention and Transactional Lock Elision (TLE) diagnostics
394 // and observability
395 // Case: light contention possibly amenable to TLE
475 if (bcp_was_adjusted) {
476 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
477 }
478 }
479
// Returns the value of the UseHeavyMonitors flag on the platforms that
// support it; on all other platforms heavy-monitor-only locking is not
// available and this always answers false.
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64)
  return UseHeavyMonitors;
#else
  return false;
#endif
}
487
488 // -----------------------------------------------------------------------------
489 // Monitor Enter/Exit
490 // The interpreter and compiler assembly code tries to lock using the fast path
491 // of this algorithm. Make sure to update that code if the following function is
492 // changed. The implementation is extremely sensitive to race condition. Be careful.
493
494 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
495 CHECK_THROW_NOSYNC_IMSE(obj);
496 if (obj->klass()->is_value_based()) {
497 handle_sync_on_value_based_class(obj, current);
498 }
499
500 current->inc_held_monitor_count();
501
502 if (!useHeavyMonitors()) {
503 markWord mark = obj->mark();
504 if (mark.is_neutral()) {
505 // Anticipate successful CAS -- the ST of the displaced mark must
506 // be visible <= the ST performed by the CAS.
507 lock->set_displaced_header(mark);
508 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
509 return;
510 }
511 // Fall through to inflate() ...
512 } else if (mark.has_locker() &&
513 current->is_lock_owned((address)mark.locker())) {
514 assert(lock != mark.locker(), "must not re-lock the same lock");
515 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
525 } else if (VerifyHeavyMonitors) {
526 guarantee(!obj->mark().has_locker(), "must not be stack-locked");
527 }
528
529 // An async deflation can race after the inflate() call and before
530 // enter() can make the ObjectMonitor busy. enter() returns false if
531 // we have lost the race to async deflation and we simply try again.
532 while (true) {
533 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
534 if (monitor->enter(current)) {
535 return;
536 }
537 }
538 }
539
540 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
541 current->dec_held_monitor_count();
542
543 if (!useHeavyMonitors()) {
544 markWord mark = object->mark();
545 if (EnableValhalla && mark.is_inline_type()) {
546 return;
547 }
548 assert(!EnableValhalla || !object->klass()->is_inline_klass(), "monitor op on inline type");
549
550 markWord dhw = lock->displaced_header();
551 if (dhw.value() == 0) {
552 // If the displaced header is NULL, then this exit matches up with
553 // a recursive enter. No real work to do here except for diagnostics.
554 #ifndef PRODUCT
555 if (mark != markWord::INFLATING()) {
556 // Only do diagnostics if we are not racing an inflation. Simply
557 // exiting a recursive enter of a Java Monitor that is being
558 // inflated is safe; see the has_monitor() comment below.
559 assert(!mark.is_neutral(), "invariant");
560 assert(!mark.has_locker() ||
561 current->is_lock_owned((address)mark.locker()), "invariant");
562 if (mark.has_monitor()) {
563 // The BasicLock's displaced_header is marked as a recursive
564 // enter and we have an inflated Java Monitor (ObjectMonitor).
565 // This is a special case where the Java Monitor was inflated
566 // after this thread entered the stack-lock recursively. When a
567 // Java Monitor is inflated, we cannot safely walk the Java
568 // Monitor owner's stack and update the BasicLocks because a
592 // We have to take the slow-path of possible inflation and then exit.
593 // The ObjectMonitor* can't be async deflated until ownership is
594 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
595 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
596 monitor->exit(current);
597 }
598
599 // -----------------------------------------------------------------------------
600 // Class Loader support to workaround deadlocks on the class loader lock objects
601 // Also used by GC
602 // complete_exit()/reenter() are used to wait on a nested lock
603 // i.e. to give up an outer lock completely and then re-enter
604 // Used when holding nested locks - lock acquisition order: lock1 then lock2
605 // 1) complete_exit lock1 - saving recursion count
606 // 2) wait on lock2
607 // 3) when notified on lock2, unlock lock2
608 // 4) reenter lock1 with original recursion count
609 // 5) lock lock2
610 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
611 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
612 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
613
614 // The ObjectMonitor* can't be async deflated until ownership is
615 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
616 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
617 intx recur_count = monitor->complete_exit(current);
618 current->dec_held_monitor_count(recur_count + 1);
619 return recur_count;
620 }
621
622 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
623 void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {
624 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
625
626 // An async deflation can race after the inflate() call and before
627 // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
628 // enter() returns false if we have lost the race to async deflation
629 // and we simply try again.
630 while (true) {
631 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
632 if (monitor->reenter(recursions, current)) {
633 current->inc_held_monitor_count(recursions + 1);
634 return;
635 }
636 }
637 }
638
639 // -----------------------------------------------------------------------------
640 // JNI locks on java objects
641 // NOTE: must use heavy weight monitor to handle jni monitor enter
642 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
643 if (obj->klass()->is_value_based()) {
644 handle_sync_on_value_based_class(obj, current);
645 }
646 CHECK_THROW_NOSYNC_IMSE(obj);
647
648 // the current locking is from JNI instead of Java code
649 current->set_current_pending_monitor_is_from_java(false);
650 // An async deflation can race after the inflate() call and before
651 // enter() can make the ObjectMonitor busy. enter() returns false if
652 // we have lost the race to async deflation and we simply try again.
653 while (true) {
654 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
655 if (monitor->enter(current)) {
656 current->inc_held_monitor_count(1, true);
657 break;
658 }
659 }
660 current->set_current_pending_monitor_is_from_java(true);
661 }
662
663 // NOTE: must use heavy weight monitor to handle jni monitor exit
664 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
665 JavaThread* current = THREAD;
666 CHECK_THROW_NOSYNC_IMSE(obj);
667
668 // The ObjectMonitor* can't be async deflated until ownership is
669 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
670 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
671 // If this thread has locked the object, exit the monitor. We
672 // intentionally do not use CHECK on check_owner because we must exit the
673 // monitor even if an exception was already pending.
674 if (monitor->check_owner(THREAD)) {
675 monitor->exit(current);
676 current->dec_held_monitor_count(1, true);
677 }
678 }
679
680 // -----------------------------------------------------------------------------
681 // Internal VM locks on java objects
682 // standard constructor, allows locking failures
683 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
684 _thread = thread;
685 _thread->check_for_valid_safepoint_state();
686 _obj = obj;
687
688 if (_obj() != NULL) {
689 ObjectSynchronizer::enter(_obj, &_lock, _thread);
690 }
691 }
692
693 ObjectLocker::~ObjectLocker() {
694 if (_obj() != NULL) {
695 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
696 }
697 }
698
699
700 // -----------------------------------------------------------------------------
701 // Wait/Notify/NotifyAll
702 // NOTE: must use heavy weight monitor to handle wait()
703 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
704 JavaThread* current = THREAD;
705 CHECK_THROW_NOSYNC_IMSE_0(obj);
706 if (millis < 0) {
707 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
708 }
709 // The ObjectMonitor* can't be async deflated because the _waiters
710 // field is incremented before ownership is dropped and decremented
711 // after ownership is regained.
712 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
713
714 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
715 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
716
717 // This dummy call is in place to get around dtrace bug 6254741. Once
718 // that's fixed we can uncomment the following line, remove the call
719 // and change this function back into a "void" func.
720 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
721 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
722 return ret_code;
723 }
724
725 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
726 JavaThread* current = THREAD;
727 CHECK_THROW_NOSYNC_IMSE(obj);
728
729 markWord mark = obj->mark();
730 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
731 // Not inflated so there can't be any waiters to notify.
732 return;
733 }
734 // The ObjectMonitor* can't be async deflated until ownership is
735 // dropped by the calling thread.
736 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
737 monitor->notify(CHECK);
738 }
739
740 // NOTE: see comment of notify()
741 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
742 JavaThread* current = THREAD;
743 CHECK_THROW_NOSYNC_IMSE(obj);
744
745 markWord mark = obj->mark();
746 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
747 // Not inflated so there can't be any waiters to notify.
748 return;
749 }
750 // The ObjectMonitor* can't be async deflated until ownership is
751 // dropped by the calling thread.
752 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
753 monitor->notifyAll(CHECK);
754 }
755
756 // -----------------------------------------------------------------------------
757 // Hash Code handling
758
759 struct SharedGlobals {
760 char _pad_prefix[OM_CACHE_LINE_SIZE];
761 // This is a highly shared mostly-read variable.
762 // To avoid false-sharing it needs to be the sole occupant of a cache line.
763 volatile int stw_random;
870 // This is probably the best overall implementation -- we'll
871 // likely make this the default in future releases.
872 unsigned t = current->_hashStateX;
873 t ^= (t << 11);
874 current->_hashStateX = current->_hashStateY;
875 current->_hashStateY = current->_hashStateZ;
876 current->_hashStateZ = current->_hashStateW;
877 unsigned v = current->_hashStateW;
878 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
879 current->_hashStateW = v;
880 value = v;
881 }
882
883 value &= markWord::hash_mask;
884 if (value == 0) value = 0xBAD;
885 assert(value != markWord::no_hash, "invariant");
886 return value;
887 }
888
// Computes the identity hash code for obj, generating and installing one
// into the mark word (via CAS) if none is present yet, and retrying until
// a stable hash is obtained.
// NOTE(review): this listing appears truncated mid-function (the text jumps
// from the CAS-success branch into what looks like a lost-race/inflated
// branch, and `monitor` is only ever assigned NULL in the visible text) —
// consult the full file before drawing conclusions from the flow below.
intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  if (EnableValhalla && obj->klass()->is_inline_klass()) {
    // VM should be calling bootstrap method
    ShouldNotReachHere();
  }

  while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    // read_stable_mark() avoids observing a transient INFLATING mark.
    markWord mark = read_stable_mark(obj);
    if (VerifyHeavyMonitors) {
      assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
      guarantee(!mark.has_locker(), "must not be stack locked");
    }
    if (mark.is_neutral()) {  // if this is a normal header
      hash = mark.hash();
      if (hash != 0) {        // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {  // if the hash was installed, return it
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated()) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}
1004
1005 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1006 Handle h_obj) {
1007 if (EnableValhalla && h_obj->mark().is_inline_type()) {
1008 return false;
1009 }
1010 assert(current == JavaThread::current(), "Can only be called on current thread");
1011 oop obj = h_obj();
1012
1013 markWord mark = read_stable_mark(obj);
1014
1015 // Uncontended case, header points to stack
1016 if (mark.has_locker()) {
1017 return current->is_lock_owned((address)mark.locker());
1018 }
1019 // Contended case, header points to ObjectMonitor (tagged pointer)
1020 if (mark.has_monitor()) {
1021 // The first stage of async deflation does not affect any field
1022 // used by this comparison so the ObjectMonitor* is usable here.
1023 ObjectMonitor* monitor = mark.monitor();
1024 return monitor->is_entered(current) != 0;
1025 }
1026 // Unlocked case, header in place
1027 assert(mark.is_neutral(), "sanity check");
1028 return false;
1029 }
1222 event->set_monitorClass(obj->klass());
1223 event->set_address((uintptr_t)(void*)obj);
1224 event->set_cause((u1)cause);
1225 event->commit();
1226 }
1227
1228 // Fast path code shared by multiple functions
1229 void ObjectSynchronizer::inflate_helper(oop obj) {
1230 markWord mark = obj->mark_acquire();
1231 if (mark.has_monitor()) {
1232 ObjectMonitor* monitor = mark.monitor();
1233 markWord dmw = monitor->header();
1234 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1235 return;
1236 }
1237 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1238 }
1239
1240 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1241 const InflateCause cause) {
1242 if (EnableValhalla) {
1243 guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1244 }
1245
1246 EventJavaMonitorInflate event;
1247
1248 for (;;) {
1249 const markWord mark = object->mark_acquire();
1250
1251 // The mark can be in one of the following states:
1252 // * Inflated - just return
1253 // * Stack-locked - coerce it to inflated
1254 // * INFLATING - busy wait for conversion to complete
1255 // * Neutral - aggressively inflate the object.
1256
1257 // CASE: inflated
1258 if (mark.has_monitor()) {
1259 ObjectMonitor* inf = mark.monitor();
1260 markWord dmw = inf->header();
1261 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1262 return inf;
1263 }
1264
1265 // CASE: inflation in progress - inflating over a stack-lock.
|