298 // removed from the system.
299 //
300 // Note: If the _in_use_list max exceeds the ceiling, then
301 // monitors_used_above_threshold() will use the in_use_list max instead
302 // of the thread count derived ceiling because we have used more
303 // ObjectMonitors than the estimated average.
304 //
305 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
306 // no-progress async monitor deflation cycles in a row, then the ceiling
307 // is adjusted upwards by monitors_used_above_threshold().
308 //
309 // Start the ceiling with the estimate for one thread in initialize()
310 // which is called after cmd line options are processed.
311 static size_t _in_use_list_ceiling = 0;
312 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
313 bool volatile ObjectSynchronizer::_is_final_audit = false;
314 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
315 static uintx _no_progress_cnt = 0;
316 static bool _no_progress_skip_increment = false;
317
318 // =====================> Quick functions
319
320 // The quick_* forms are special fast-path variants used to improve
321 // performance. In the simplest case, a "quick_*" implementation could
322 // simply return false, in which case the caller will perform the necessary
323 // state transitions and call the slow-path form.
324 // The fast-path is designed to handle frequently arising cases in an efficient
325 // manner and is just a degenerate "optimistic" variant of the slow-path.
326 // returns true -- to indicate the call was satisfied.
327 // returns false -- to indicate the call needs the services of the slow-path.
328 // A no-loitering ordinance is in effect for code in the quick_* family
329 // operators: safepoints or indefinite blocking (blocking that might span a
330 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
331 // entry.
332 //
333 // Consider: An interesting optimization is to have the JIT recognize the
334 // following common idiom:
335 // synchronized (someobj) { .... ; notify(); }
336 // That is, we find a notify() or notifyAll() call that immediately precedes
337 // the monitorexit operation. In that case the JIT could fuse the operations
338 // into a single notifyAndExit() runtime primitive.
339
340 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
341 assert(current->thread_state() == _thread_in_Java, "invariant");
342 NoSafepointVerifier nsv;
343 if (obj == nullptr) return false; // slow-path for invalid obj
344 const markWord mark = obj->mark();
345
346 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
347 // Degenerate notify
348 // fast-locked by caller so by definition the implied waitset is empty.
349 return true;
350 }
351
352 if (mark.has_monitor()) {
353 ObjectMonitor* const mon = read_monitor(obj, mark);
354 if (mon == nullptr) {
355 // Racing with inflation/deflation go slow path
356 return false;
357 }
358 assert(mon->object() == oop(obj), "invariant");
359 if (!mon->has_owner(current)) return false; // slow-path for IMS exception
360
361 if (mon->first_waiter() != nullptr) {
362 // We have one or more waiters. Since this is an inflated monitor
363 // that we own, we quickly notify them here and now, avoiding the slow-path.
411 } else {
412 vblog.info("Cannot find the last Java frame");
413 }
414
415 EventSyncOnValueBasedClass event;
416 if (event.should_commit()) {
417 event.set_valueBasedClass(obj->klass());
418 event.commit();
419 }
420 }
421
422 if (bcp_was_adjusted) {
423 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
424 }
425 }
426
427 // -----------------------------------------------------------------------------
428 // JNI locks on java objects
429 // NOTE: must use heavy weight monitor to handle jni monitor enter
// JNI MonitorEnter: always goes through the full ObjectMonitor path (see
// NOTE above); never fast-locks.
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  // Top native frames in the stack will not be seen if we attempt
  // preemption, since we start walking from the last Java anchor.
  NoPreemptMark npm(current);

  if (obj->klass()->is_value_based()) {
    // Diagnostics for synchronizing on a value-based class instance.
    handle_sync_on_value_based_class(obj, current);
  }

  // the current locking is from JNI instead of Java code
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    BasicLock lock;
    if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
      break;
    }
  }
  // Restore the default so later pending-monitor blocking is attributed to
  // Java code again.
  current->set_current_pending_monitor_is_from_java(true);
}
452
453 // NOTE: must use heavy weight monitor to handle jni monitor exit
454 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
455 JavaThread* current = THREAD;
456
457 ObjectMonitor* monitor;
458 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
459 // If this thread has locked the object, exit the monitor. We
460 // intentionally do not use CHECK on check_owner because we must exit the
461 // monitor even if an exception was already pending.
462 if (monitor->check_owner(THREAD)) {
463 monitor->exit(current);
464 }
465 }
466
467 // -----------------------------------------------------------------------------
468 // Internal VM locks on java objects
469 // standard constructor, allows locking failures
470 ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
471 _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
472 assert(!_thread->preempting(), "");
473
474 _thread->check_for_valid_safepoint_state();
475
496 if (_obj() != nullptr && !_skip_exit) {
497 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
498 }
499 }
500
// Uninterruptible wait on the locked object. If the wait is preempted
// (virtual-thread preemption), arrange for the ObjectLocker destructor to
// skip the monitor exit and record the preempted exception on the thread.
void ObjectLocker::wait_uninterruptibly(TRAPS) {
  ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
  if (_thread->preempting()) {
    // The destructor checks _skip_exit before calling ObjectSynchronizer::exit.
    _skip_exit = true;
    // NOTE(review): presumably pins the monitor's object reference strongly
    // so it survives until the preempted exit completes — confirm.
    ObjectSynchronizer::read_monitor(_obj())->set_object_strong();
    _thread->set_pending_preempted_exception();
  }
}
509
510 // -----------------------------------------------------------------------------
511 // Wait/Notify/NotifyAll
512 // NOTE: must use heavy weight monitor to handle wait()
513
514 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
515 JavaThread* current = THREAD;
516 if (millis < 0) {
517 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
518 }
519
520 ObjectMonitor* monitor;
521 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
522
523 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
524 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
525
526 // This dummy call is in place to get around dtrace bug 6254741. Once
527 // that's fixed we can uncomment the following line, remove the call
528 // and change this function back into a "void" func.
529 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
530 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
531 return ret_code;
532 }
533
534 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
535 assert(millis >= 0, "timeout value is negative");
536
537 ObjectMonitor* monitor;
538 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
539 monitor->wait(millis, false, THREAD);
540 }
541
542
543 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
544 JavaThread* current = THREAD;
545
546 markWord mark = obj->mark();
547 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
548 // Not inflated so there can't be any waiters to notify.
549 return;
550 }
551 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
552 monitor->notify(CHECK);
553 }
554
555 // NOTE: see comment of notify()
556 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
557 JavaThread* current = THREAD;
558
559 markWord mark = obj->mark();
560 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
561 // Not inflated so there can't be any waiters to notify.
562 return;
563 }
564
565 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
566 monitor->notifyAll(CHECK);
567 }
568
569 // -----------------------------------------------------------------------------
570 // Hash Code handling
571
572 struct SharedGlobals {
573 char _pad_prefix[OM_CACHE_LINE_SIZE];
574 // This is a highly shared mostly-read variable.
575 // To avoid false-sharing it needs to be the sole occupant of a cache line.
576 volatile int stw_random;
577 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
623 // This is probably the best overall implementation -- we'll
624 // likely make this the default in future releases.
625 unsigned t = current->_hashStateX;
626 t ^= (t << 11);
627 current->_hashStateX = current->_hashStateY;
628 current->_hashStateY = current->_hashStateZ;
629 current->_hashStateZ = current->_hashStateW;
630 unsigned v = current->_hashStateW;
631 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
632 current->_hashStateW = v;
633 value = v;
634 }
635
636 value &= markWord::hash_mask;
637 if (value == 0) value = 0xBAD;
638 assert(value != markWord::no_hash, "invariant");
639 return value;
640 }
641
642 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
643 while (true) {
644 ObjectMonitor* monitor = nullptr;
645 markWord temp, test;
646 intptr_t hash;
647 markWord mark = obj->mark_acquire();
648 // If UseObjectMonitorTable is set the hash can simply be installed in the
649 // object header, since the monitor isn't in the object header.
650 if (UseObjectMonitorTable || !mark.has_monitor()) {
651 hash = mark.hash();
652 if (hash != 0) { // if it has a hash, just return it
653 return hash;
654 }
655 hash = get_next_hash(current, obj); // get a new hash
656 temp = mark.copy_set_hash(hash); // merge the hash into header
657 // try to install the hash
658 test = obj->cas_set_mark(temp, mark);
659 if (test == mark) { // if the hash was installed, return it
660 return hash;
661 }
662 // CAS failed, retry
721 hash = test.hash();
722 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
723 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
724 }
725 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
726 // If we detect that async deflation has occurred, then we
727 // attempt to restore the header/dmw to the object's header
728 // so that we only retry once if the deflater thread happens
729 // to be slow.
730 monitor->install_displaced_markword_in_object(obj);
731 continue;
732 }
733 }
734 // We finally get the hash.
735 return hash;
736 }
737 }
738
739 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
740 Handle h_obj) {
741 assert(current == JavaThread::current(), "Can only be called on current thread");
742 oop obj = h_obj();
743
744 markWord mark = obj->mark_acquire();
745
746 if (mark.is_fast_locked()) {
747 // fast-locking case, see if lock is in current's lock stack
748 return current->lock_stack().contains(h_obj());
749 }
750
751 while (mark.has_monitor()) {
752 ObjectMonitor* monitor = read_monitor(obj, mark);
753 if (monitor != nullptr) {
754 return monitor->is_entered(current) != 0;
755 }
756 // Racing with inflation/deflation, retry
757 mark = obj->mark_acquire();
758
759 if (mark.is_fast_locked()) {
760 // Some other thread fast_locked, current could not have held the lock
|
298 // removed from the system.
299 //
300 // Note: If the _in_use_list max exceeds the ceiling, then
301 // monitors_used_above_threshold() will use the in_use_list max instead
302 // of the thread count derived ceiling because we have used more
303 // ObjectMonitors than the estimated average.
304 //
305 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
306 // no-progress async monitor deflation cycles in a row, then the ceiling
307 // is adjusted upwards by monitors_used_above_threshold().
308 //
309 // Start the ceiling with the estimate for one thread in initialize()
310 // which is called after cmd line options are processed.
311 static size_t _in_use_list_ceiling = 0;
312 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
313 bool volatile ObjectSynchronizer::_is_final_audit = false;
314 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
315 static uintx _no_progress_cnt = 0;
316 static bool _no_progress_skip_increment = false;
317
// These checks are required for wait, notify and exit to avoid inflating the monitor to
// find out this inline type object cannot be locked.

// Throws IllegalMonitorStateException (message = the object's class name)
// and returns from the enclosing void function when obj is an inline (value)
// type. Relies on a JavaThread* named `current` being in scope at the
// expansion site.
#define CHECK_THROW_NOSYNC_IMSE(obj) \
  if ((obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current; \
    ResourceMark rm(THREAD); \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

// Variant for functions with a non-void return: THROW_MSG_0 returns 0.
#define CHECK_THROW_NOSYNC_IMSE_0(obj) \
  if ((obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current; \
    ResourceMark rm(THREAD); \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
333
334 // =====================> Quick functions
335
336 // The quick_* forms are special fast-path variants used to improve
337 // performance. In the simplest case, a "quick_*" implementation could
338 // simply return false, in which case the caller will perform the necessary
339 // state transitions and call the slow-path form.
340 // The fast-path is designed to handle frequently arising cases in an efficient
341 // manner and is just a degenerate "optimistic" variant of the slow-path.
342 // returns true -- to indicate the call was satisfied.
343 // returns false -- to indicate the call needs the services of the slow-path.
344 // A no-loitering ordinance is in effect for code in the quick_* family
345 // operators: safepoints or indefinite blocking (blocking that might span a
346 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
347 // entry.
348 //
349 // Consider: An interesting optimization is to have the JIT recognize the
350 // following common idiom:
351 // synchronized (someobj) { .... ; notify(); }
352 // That is, we find a notify() or notifyAll() call that immediately precedes
353 // the monitorexit operation. In that case the JIT could fuse the operations
354 // into a single notifyAndExit() runtime primitive.
355
356 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
357 assert(current->thread_state() == _thread_in_Java, "invariant");
358 NoSafepointVerifier nsv;
359 if (obj == nullptr) return false; // slow-path for invalid obj
360 assert(!obj->klass()->is_inline_klass(), "monitor op on inline type");
361 const markWord mark = obj->mark();
362
363 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
364 // Degenerate notify
365 // fast-locked by caller so by definition the implied waitset is empty.
366 return true;
367 }
368
369 if (mark.has_monitor()) {
370 ObjectMonitor* const mon = read_monitor(obj, mark);
371 if (mon == nullptr) {
372 // Racing with inflation/deflation go slow path
373 return false;
374 }
375 assert(mon->object() == oop(obj), "invariant");
376 if (!mon->has_owner(current)) return false; // slow-path for IMS exception
377
378 if (mon->first_waiter() != nullptr) {
379 // We have one or more waiters. Since this is an inflated monitor
380 // that we own, we quickly notify them here and now, avoiding the slow-path.
428 } else {
429 vblog.info("Cannot find the last Java frame");
430 }
431
432 EventSyncOnValueBasedClass event;
433 if (event.should_commit()) {
434 event.set_valueBasedClass(obj->klass());
435 event.commit();
436 }
437 }
438
439 if (bcp_was_adjusted) {
440 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
441 }
442 }
443
444 // -----------------------------------------------------------------------------
445 // JNI locks on java objects
446 // NOTE: must use heavy weight monitor to handle jni monitor enter
447 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
448 JavaThread* THREAD = current;
449 // Top native frames in the stack will not be seen if we attempt
450 // preemption, since we start walking from the last Java anchor.
451 NoPreemptMark npm(current);
452
453 if (obj->klass()->is_value_based()) {
454 handle_sync_on_value_based_class(obj, current);
455 }
456
457 if (obj->klass()->is_inline_klass()) {
458 ResourceMark rm(THREAD);
459 stringStream ss;
460 ss.print("Cannot synchronize on an instance of value class %s",
461 obj->klass()->external_name());
462 THROW_MSG(vmSymbols::java_lang_IdentityException(), ss.as_string());
463 }
464
465 // the current locking is from JNI instead of Java code
466 current->set_current_pending_monitor_is_from_java(false);
467 // An async deflation can race after the inflate() call and before
468 // enter() can make the ObjectMonitor busy. enter() returns false if
469 // we have lost the race to async deflation and we simply try again.
470 while (true) {
471 BasicLock lock;
472 if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
473 break;
474 }
475 }
476 current->set_current_pending_monitor_is_from_java(true);
477 }
478
479 // NOTE: must use heavy weight monitor to handle jni monitor exit
480 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
481 JavaThread* current = THREAD;
482 CHECK_THROW_NOSYNC_IMSE(obj);
483
484 ObjectMonitor* monitor;
485 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
486 // If this thread has locked the object, exit the monitor. We
487 // intentionally do not use CHECK on check_owner because we must exit the
488 // monitor even if an exception was already pending.
489 if (monitor->check_owner(THREAD)) {
490 monitor->exit(current);
491 }
492 }
493
494 // -----------------------------------------------------------------------------
495 // Internal VM locks on java objects
496 // standard constructor, allows locking failures
497 ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
498 _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
499 assert(!_thread->preempting(), "");
500
501 _thread->check_for_valid_safepoint_state();
502
523 if (_obj() != nullptr && !_skip_exit) {
524 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
525 }
526 }
527
// Uninterruptible wait on the locked object. If the wait is preempted
// (virtual-thread preemption), arrange for the ObjectLocker destructor to
// skip the monitor exit and record the preempted exception on the thread.
void ObjectLocker::wait_uninterruptibly(TRAPS) {
  ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
  if (_thread->preempting()) {
    // The destructor checks _skip_exit before calling ObjectSynchronizer::exit.
    _skip_exit = true;
    // NOTE(review): presumably pins the monitor's object reference strongly
    // so it survives until the preempted exit completes — confirm.
    ObjectSynchronizer::read_monitor(_obj())->set_object_strong();
    _thread->set_pending_preempted_exception();
  }
}
536
537 // -----------------------------------------------------------------------------
538 // Wait/Notify/NotifyAll
539 // NOTE: must use heavy weight monitor to handle wait()
540
541 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
542 JavaThread* current = THREAD;
543 CHECK_THROW_NOSYNC_IMSE_0(obj);
544 if (millis < 0) {
545 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
546 }
547
548 ObjectMonitor* monitor;
549 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
550
551 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
552 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
553
554 // This dummy call is in place to get around dtrace bug 6254741. Once
555 // that's fixed we can uncomment the following line, remove the call
556 // and change this function back into a "void" func.
557 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
558 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
559 return ret_code;
560 }
561
562 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
563 assert(millis >= 0, "timeout value is negative");
564
565 ObjectMonitor* monitor;
566 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
567 monitor->wait(millis, false, THREAD);
568 }
569
570
571 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
572 JavaThread* current = THREAD;
573 CHECK_THROW_NOSYNC_IMSE(obj);
574
575 markWord mark = obj->mark();
576 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
577 // Not inflated so there can't be any waiters to notify.
578 return;
579 }
580 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
581 monitor->notify(CHECK);
582 }
583
584 // NOTE: see comment of notify()
585 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
586 JavaThread* current = THREAD;
587 CHECK_THROW_NOSYNC_IMSE(obj);
588
589 markWord mark = obj->mark();
590 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
591 // Not inflated so there can't be any waiters to notify.
592 return;
593 }
594
595 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
596 monitor->notifyAll(CHECK);
597 }
598
599 // -----------------------------------------------------------------------------
600 // Hash Code handling
601
602 struct SharedGlobals {
603 char _pad_prefix[OM_CACHE_LINE_SIZE];
604 // This is a highly shared mostly-read variable.
605 // To avoid false-sharing it needs to be the sole occupant of a cache line.
606 volatile int stw_random;
607 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
653 // This is probably the best overall implementation -- we'll
654 // likely make this the default in future releases.
655 unsigned t = current->_hashStateX;
656 t ^= (t << 11);
657 current->_hashStateX = current->_hashStateY;
658 current->_hashStateY = current->_hashStateZ;
659 current->_hashStateZ = current->_hashStateW;
660 unsigned v = current->_hashStateW;
661 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
662 current->_hashStateW = v;
663 value = v;
664 }
665
666 value &= markWord::hash_mask;
667 if (value == 0) value = 0xBAD;
668 assert(value != markWord::no_hash, "invariant");
669 return value;
670 }
671
672 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
673 // VM should be calling bootstrap method.
674 assert(!obj->klass()->is_inline_klass(), "FastHashCode should not be called for inline classes");
675
676 while (true) {
677 ObjectMonitor* monitor = nullptr;
678 markWord temp, test;
679 intptr_t hash;
680 markWord mark = obj->mark_acquire();
681 // If UseObjectMonitorTable is set the hash can simply be installed in the
682 // object header, since the monitor isn't in the object header.
683 if (UseObjectMonitorTable || !mark.has_monitor()) {
684 hash = mark.hash();
685 if (hash != 0) { // if it has a hash, just return it
686 return hash;
687 }
688 hash = get_next_hash(current, obj); // get a new hash
689 temp = mark.copy_set_hash(hash); // merge the hash into header
690 // try to install the hash
691 test = obj->cas_set_mark(temp, mark);
692 if (test == mark) { // if the hash was installed, return it
693 return hash;
694 }
695 // CAS failed, retry
754 hash = test.hash();
755 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
756 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
757 }
758 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
759 // If we detect that async deflation has occurred, then we
760 // attempt to restore the header/dmw to the object's header
761 // so that we only retry once if the deflater thread happens
762 // to be slow.
763 monitor->install_displaced_markword_in_object(obj);
764 continue;
765 }
766 }
767 // We finally get the hash.
768 return hash;
769 }
770 }
771
772 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
773 Handle h_obj) {
774 if (h_obj->mark().is_inline_type()) {
775 return false;
776 }
777 assert(current == JavaThread::current(), "Can only be called on current thread");
778 oop obj = h_obj();
779
780 markWord mark = obj->mark_acquire();
781
782 if (mark.is_fast_locked()) {
783 // fast-locking case, see if lock is in current's lock stack
784 return current->lock_stack().contains(h_obj());
785 }
786
787 while (mark.has_monitor()) {
788 ObjectMonitor* monitor = read_monitor(obj, mark);
789 if (monitor != nullptr) {
790 return monitor->is_entered(current) != 0;
791 }
792 // Racing with inflation/deflation, retry
793 mark = obj->mark_acquire();
794
795 if (mark.is_fast_locked()) {
796 // Some other thread fast_locked, current could not have held the lock
|