// removed from the system.
//
// Note: If the _in_use_list max exceeds the ceiling, then
// monitors_used_above_threshold() will use the in_use_list max instead
// of the thread count derived ceiling because we have used more
// ObjectMonitors than the estimated average.
//
// Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
// no-progress async monitor deflation cycles in a row, then the ceiling
// is adjusted upwards by monitors_used_above_threshold().
//
// Start the ceiling with the estimate for one thread in initialize()
// which is called after cmd line options are processed.
static size_t _in_use_list_ceiling = 0;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_final_audit = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;

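// Illustrative sketch (not part of the original synchronizer code; the helper
// name and the threshold parameter are hypothetical): the policy described above
// boils down to a percentage check against max(ceiling, observed in-use max).
static bool used_above_threshold_sketch(size_t in_use_count,  // monitors currently in use
                                        size_t in_use_max,    // historical max of the in-use list
                                        size_t ceiling,       // thread-count-derived estimate
                                        size_t threshold_pct) {
  // If we have ever used more monitors than the estimated ceiling, trust the observed max.
  const size_t limit = (in_use_max > ceiling) ? in_use_max : ceiling;
  // Report "above threshold" once usage crosses threshold_pct percent of that limit.
  return in_use_count * 100 > limit * threshold_pct;
}
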
// These checks are required for wait, notify and exit so that we avoid inflating
// the monitor only to find out that this inline type object cannot be locked.
#define CHECK_THROW_NOSYNC_IMSE(obj)  \
  if ((obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current;       \
    ResourceMark rm(THREAD);            \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

#define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
  if ((obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current;       \
    ResourceMark rm(THREAD);            \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
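
// For illustration (hypothetical use site, not from this file): a guard such as
//
//   void example_exit(oop obj, JavaThread* current) {
//     CHECK_THROW_NOSYNC_IMSE(obj);
//     ...
//   }
//
// expands inline at the call site, declares a local THREAD from the caller's
// `current` (needed by ResourceMark and THROW_MSG), and throws
// IllegalMonitorStateException before any monitor inflation can happen.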

// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

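// Illustrative sketch of the calling convention above (hypothetical caller; the
// real callers are the interpreter/compiler runtime entry points):
//
//   void runtime_notify(oopDesc* obj, JavaThread* current, bool all) {
//     if (ObjectSynchronizer::quick_notify(obj, current, all)) {
//       return;  // fast path fully satisfied the request, no state transition
//     }
//     // Slow path: transition thread state, possibly inflate the monitor,
//     // throw IllegalMonitorStateException, or block.
//   }
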
bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
  assert(current->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == nullptr) return false;  // slow-path for invalid obj
  assert(!obj->klass()->is_inline_klass(), "monitor op on inline type");
  const markWord mark = obj->mark();

  if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
    // Degenerate notify: the object is fast-locked by the caller, so by
    // definition the implied wait set is empty and there is nothing to do.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = read_monitor(current, obj, mark);
    if (mon == nullptr) {
      // Racing with inflation/deflation; go slow path.
      return false;
    }
    assert(mon->object() == oop(obj), "invariant");
    if (!mon->has_owner(current)) return false;  // slow-path to throw IllegalMonitorStateException

    if (mon->first_waiter() != nullptr) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we quickly notify them here and now, avoiding the slow-path.

// ... [original lines 380-426 of this listing are elided here] ...
    } else {
      vblog.info("Cannot find the last Java frame");
    }

    EventSyncOnValueBasedClass event;
    if (event.should_commit()) {
      event.set_valueBasedClass(obj->klass());
      event.commit();
    }
  }

  if (bcp_was_adjusted) {
    last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
  }
}
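
// Note (assumption based on the mainline diagnostics, not visible in this excerpt):
// the logging and JFR event above are only reached when the diagnostic flag
// DiagnoseSyncOnValueBasedClasses is enabled (1 == fatal, 2 == log), e.g.
//
//   java -XX:+UnlockDiagnosticVMOptions -XX:DiagnoseSyncOnValueBasedClasses=2 ...
//
// and the emitted event is jdk.SyncOnValueBasedClass.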

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
  JavaThread* THREAD = current;
  // Top native frames in the stack will not be seen if we attempt
  // preemption, since we start walking from the last Java anchor.
  NoPreemptMark npm(current);

  if (obj->klass()->is_value_based()) {
    handle_sync_on_value_based_class(obj, current);
  }

  if (obj->klass()->is_inline_klass()) {
    ResourceMark rm(THREAD);
    const char* desc = "Cannot synchronize on an instance of value class ";
    const char* className = obj->klass()->external_name();
    size_t msglen = strlen(desc) + strlen(className) + 1;
    char* message = NEW_RESOURCE_ARRAY(char, msglen);
    assert(message != nullptr, "NEW_RESOURCE_ARRAY should have called vm_exit_out_of_memory and not return nullptr");
    jio_snprintf(message, msglen, "%s%s", desc, className);
    THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
  }

  // The current locking is from JNI instead of Java code.
  current->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. inflate_and_enter() returns
  // null if we have lost the race to async deflation, and we simply try again.
  while (true) {
    BasicLock lock;
    if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
      break;
    }
  }
  current->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  ObjectMonitor* monitor;
  monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK on check_owner because we must exit the
  // monitor even if an exception was already pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(current);
  }
}
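
// Illustrative sketch: jni_enter()/jni_exit() back the JNI MonitorEnter/MonitorExit
// functions, so a native caller (hypothetical example, not HotSpot code) pairs them as:
//
//   if (env->MonitorEnter(obj) != JNI_OK) {
//     return;  // failed to enter: out of memory or an exception is pending
//   }
//   // ... touch state guarded by obj's monitor ...
//   if (env->MonitorExit(obj) != JNI_OK) {
//     // not the owner: IllegalMonitorStateException is now pending
//   }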

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
    _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
  assert(!_thread->preempting(), "");

  _thread->check_for_valid_safepoint_state();

// ... [original lines 504-523 of this listing are elided here] ...
  if (_obj() != nullptr && !_skip_exit) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}

void ObjectLocker::wait_uninterruptibly(TRAPS) {
  ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
  if (_thread->preempting()) {
    _skip_exit = true;
    ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
    _thread->set_pending_preempted_exception();
  }
}
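
// Illustrative sketch: ObjectLocker is the RAII form used by VM-internal code
// that must hold a Java object's monitor (hypothetical caller shown):
//
//   {
//     ObjectLocker ol(h_obj, THREAD);  // enters the monitor; may leave a pending exception
//     // ... work that requires h_obj's monitor ...
//   }                                  // destructor exits the monitor unless _skip_exit is set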

// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()

int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE_0(obj);
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }

  ObjectMonitor* monitor;
  monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
  monitor->wait(millis, true, THREAD);  // Not CHECK as we need following code

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}
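
// For orientation, java.lang.Object.wait(millis) reaches this code via the
// JVM_MonitorWait entry point; a simplified sketch (not verbatim jvm.cpp) is:
//
//   JVM_ENTRY(void, JVM_MonitorWait(JNIEnv* env, jobject handle, jlong ms))
//     Handle obj(THREAD, JNIHandles::resolve_non_null(handle));
//     ObjectSynchronizer::wait(obj, ms, CHECK);
//   JVM_END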

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  assert(millis >= 0, "timeout value is negative");

  ObjectMonitor* monitor;
  monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
  monitor->wait(millis, false, THREAD);
}


void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
    // Not inflated so there can't be any waiters to notify.
    return;
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  monitor->notify(CHECK);
}

// NOTE: see the comment at notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  JavaThread* current = THREAD;
  CHECK_THROW_NOSYNC_IMSE(obj);

  markWord mark = obj->mark();
  if (mark.is_fast_locked() && current->lock_stack().contains(obj())) {
    // Not inflated so there can't be any waiters to notify.
    return;
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
  monitor->notifyAll(CHECK);
}

// -----------------------------------------------------------------------------
// Hash Code handling

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // This is a highly shared mostly-read variable.
  // To avoid false-sharing it needs to be the sole occupant of a cache line.
  volatile int stw_random;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));

// ... [original lines 609-653 of this listing are elided here] ...

    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = current->_hashStateX;
    t ^= (t << 11);
    current->_hashStateX = current->_hashStateY;
    current->_hashStateY = current->_hashStateZ;
    current->_hashStateZ = current->_hashStateW;
    unsigned v = current->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    current->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}
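
// Illustrative sketch (helper name is hypothetical; not part of this file): the
// generator above is Marsaglia's xorshift with four 32-bit state words. The same
// update, with the state passed explicitly instead of living in the JavaThread:
static unsigned xorshift128_next_sketch(unsigned& x, unsigned& y, unsigned& z, unsigned& w) {
  unsigned t = x;
  t ^= (t << 11);                          // mix the word being rotated out
  x = y; y = z; z = w;                     // rotate the remaining state words
  w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));    // fold the mixed word into the new output
  return w;                                // corresponds to `value = v` above
}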

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
  // For inline types the VM should be calling the bootstrap method instead;
  // FastHashCode must never see them.
  assert(!obj->klass()->is_inline_klass(), "FastHashCode should not be called for inline classes");

  while (true) {
    ObjectMonitor* monitor = nullptr;
    markWord temp, test;
    intptr_t hash;
    markWord mark = obj->mark_acquire();
    // If UseObjectMonitorTable is set the hash can simply be installed in the
    // object header, since the monitor isn't in the object header.
    if (UseObjectMonitorTable || !mark.has_monitor()) {
      hash = mark.hash();
      if (hash != 0) {                     // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(current, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);     // merge the hash into header
      // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {                  // if the hash was installed, return it
        return hash;
      }
      // CAS failed, retry

// ... [original lines 697-754 of this listing are elided here] ...
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}
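
// Illustrative sketch (hypothetical helper; not part of this file): the loop above
// is the usual "read, compute once, CAS-install, adopt the winner on failure"
// idiom. Reduced to a bare integer slot, using HotSpot's Atomic API:
static intptr_t install_hash_once_sketch(volatile intptr_t* slot, intptr_t candidate) {
  intptr_t observed = Atomic::load(slot);
  while (observed == 0) {                                   // zero means "no hash installed yet"
    intptr_t prior = Atomic::cmpxchg(slot, observed, candidate);
    if (prior == observed) {
      return candidate;                                     // our hash got installed
    }
    observed = prior;                                       // lost the race; re-check the winner's value
  }
  return observed;                                          // return whichever hash is installed
}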

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
                                                   Handle h_obj) {
  if (h_obj->mark().is_inline_type()) {
    return false;
  }
  assert(current == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = obj->mark_acquire();

  if (mark.is_fast_locked()) {
    // fast-locking case, see if lock is in current's lock stack
    return current->lock_stack().contains(h_obj());
  }

  while (mark.has_monitor()) {
    ObjectMonitor* monitor = read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      return monitor->is_entered(current) != 0;
    }
    // Racing with inflation/deflation, retry
    mark = obj->mark_acquire();

    if (mark.is_fast_locked()) {
      // Some other thread fast-locked it; current could not have held the lock.