298 // removed from the system.
299 //
300 // Note: If the _in_use_list max exceeds the ceiling, then
301 // monitors_used_above_threshold() will use the _in_use_list max instead
302 // of the thread-count-derived ceiling because we have used more
303 // ObjectMonitors than the estimated average.
304 //
305 // Note: If deflate_idle_monitors() has made no progress for
306 // NoAsyncDeflationProgressMax async monitor deflation cycles in a row,
307 // then the ceiling is adjusted upwards by monitors_used_above_threshold().
308 //
309 // Start the ceiling with the estimate for one thread in initialize(),
310 // which is called after command line options are processed.
311 static size_t _in_use_list_ceiling = 0;
312 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
313 bool volatile ObjectSynchronizer::_is_final_audit = false;
314 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
315 static uintx _no_progress_cnt = 0;
316 static bool _no_progress_skip_increment = false;
317
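// Illustrative only (not part of this file): a simplified sketch of how the ceiling
// described above might feed a usage-threshold check. The function and parameter
// names below are assumptions for the sketch; the real logic lives in
// monitors_used_above_threshold() and reads the _in_use_list statistics directly.
static bool example_monitors_used_above_threshold(size_t in_use_count,
                                                  size_t ceiling,
                                                  size_t threshold_percent) {
  if (ceiling == 0) {
    return false;  // start-up: initialize() has not set the ceiling yet
  }
  // Request async deflation once the in-use count crosses the configured
  // percentage of the ceiling (cf. -XX:MonitorUsedDeflationThreshold).
  return in_use_count * 100 >= ceiling * threshold_percent;
}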
318 // These checks are required for wait, notify and exit so that we avoid inflating
319 // the monitor only to find out that this inline type object cannot be locked.
320 #define CHECK_THROW_NOSYNC_IMSE(obj) \
321 if ((obj)->mark().is_inline_type()) { \
322 JavaThread* THREAD = current; \
323 ResourceMark rm(THREAD); \
324 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
325 }
326
327 #define CHECK_THROW_NOSYNC_IMSE_0(obj) \
328 if ((obj)->mark().is_inline_type()) { \
329 JavaThread* THREAD = current; \
330 ResourceMark rm(THREAD); \
331 THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
332 }
333
334 // =====================> Quick functions
335
336 // The quick_* forms are special fast-path variants used to improve
337 // performance. In the simplest case, a "quick_*" implementation could
338 // simply return false, in which case the caller will perform the necessary
339 // state transitions and call the slow-path form.
340 // The fast-path is designed to handle frequently arising cases in an efficient
341 // manner and is just a degenerate "optimistic" variant of the slow-path.
342 // Returns true  -- to indicate the call was satisfied.
343 // Returns false -- to indicate the call needs the services of the slow-path.
344 // A no-loitering ordinance is in effect for code in the quick_* family of
345 // operators: safepoints and indefinite blocking (blocking that might span a
346 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
347 // entry.
348 //
349 // Consider: An interesting optimization is to have the JIT recognize the
350 // following common idiom:
351 // synchronized (someobj) { .... ; notify(); }
352 // That is, we find a notify() or notifyAll() call that immediately precedes
353 // the monitorexit operation. In that case the JIT could fuse the operations
354 // into a single notifyAndExit() runtime primitive.
355
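// Illustrative only: how a caller is expected to use the quick_* contract described
// above (hypothetical glue code -- the real callers are the runtime entry points,
// which also perform the required thread-state transition):
//
//   if (!ObjectSynchronizer::quick_notify(obj, current, all)) {
//     // Fast path declined: transition out of _thread_in_Java and call the
//     // slow path, i.e. ObjectSynchronizer::notify()/notifyall().
//   }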
356 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
357 assert(current->thread_state() == _thread_in_Java, "invariant");
358 NoSafepointVerifier nsv;
359 if (obj == nullptr) return false; // slow-path for invalid obj
360 assert(!obj->klass()->is_inline_klass(), "monitor op on inline type");
361 const markWord mark = obj->mark();
362
363 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
364 // Degenerate notify:
365 // the object is fast-locked by the caller, so by definition the implied wait set is empty.
366 return true;
367 }
368
369 if (mark.has_monitor()) {
370 ObjectMonitor* const mon = read_monitor(current, obj, mark);
371 if (mon == nullptr) {
372 // Racing with inflation/deflation; go slow path
373 return false;
374 }
375 assert(mon->object() == oop(obj), "invariant");
376 if (!mon->has_owner(current)) return false; // slow-path for IMS exception
377
378 if (mon->first_waiter() != nullptr) {
379 // We have one or more waiters. Since this is an inflated monitor
380 // that we own, we quickly notify them here and now, avoiding the slow-path.
428 } else {
429 vblog.info("Cannot find the last Java frame");
430 }
431
432 EventSyncOnValueBasedClass event;
433 if (event.should_commit()) {
434 event.set_valueBasedClass(obj->klass());
435 event.commit();
436 }
437 }
438
439 if (bcp_was_adjusted) {
440 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
441 }
442 }
443
444 // -----------------------------------------------------------------------------
445 // JNI locks on java objects
446 // NOTE: must use heavy weight monitor to handle jni monitor enter
447 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
448 JavaThread* THREAD = current;
449 // Top native frames in the stack will not be seen if we attempt
450 // preemption, since we start walking from the last Java anchor.
451 NoPreemptMark npm(current);
452
453 if (obj->klass()->is_value_based()) {
454 handle_sync_on_value_based_class(obj, current);
455 }
456
457 if (obj->klass()->is_inline_klass()) {
458 ResourceMark rm(THREAD);
459 const char* desc = "Cannot synchronize on an instance of value class ";
460 const char* className = obj->klass()->external_name();
461 size_t msglen = strlen(desc) + strlen(className) + 1;
462 char* message = NEW_RESOURCE_ARRAY(char, msglen);
463 jio_snprintf(message, msglen, "%s%s", desc, className);
464 THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
465 }
466
467 // the current locking is from JNI instead of Java code
468 current->set_current_pending_monitor_is_from_java(false);
469 // An async deflation can race after inflation and before the enter can make
470 // the ObjectMonitor busy. inflate_and_enter() returns null if we have lost the
471 // race to async deflation, in which case we simply try again.
472 while (true) {
473 BasicLock lock;
474 if (ObjectSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
475 break;
476 }
477 }
478 current->set_current_pending_monitor_is_from_java(true);
479 }
480
481 // NOTE: must use heavy weight monitor to handle jni monitor exit
482 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
483 JavaThread* current = THREAD;
484 CHECK_THROW_NOSYNC_IMSE(obj);
485
486 ObjectMonitor* monitor;
487 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
488 // If this thread has locked the object, exit the monitor. We
489 // intentionally do not use CHECK on check_owner because we must exit the
490 // monitor even if an exception was already pending.
491 if (monitor->check_owner(THREAD)) {
492 monitor->exit(current);
493 }
494 }
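// Illustrative only: why CHECK is avoided on check_owner() above. A TRAPS call
// written as foo(CHECK) behaves roughly like the following (simplified; the real
// macros live in utilities/exceptions.hpp):
//
//   foo(THREAD);
//   if (HAS_PENDING_EXCEPTION) {
//     return;   // would skip monitor->exit(current), leaving the monitor locked
//   }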
495
496 // -----------------------------------------------------------------------------
497 // Internal VM locks on java objects
498 // standard constructor, allows locking failures
499 ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
500 _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
501 assert(!_thread->preempting(), "");
502
503 _thread->check_for_valid_safepoint_state();
504
525 if (_obj() != nullptr && !_skip_exit) {
526 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
527 }
528 }
529
530 void ObjectLocker::wait_uninterruptibly(TRAPS) {
531 ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
532 if (_thread->preempting()) {
533 _skip_exit = true;
534 ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
535 _thread->set_pending_preempted_exception();
536 }
537 }
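// Illustrative only: typical scoped use of ObjectLocker inside the VM (hypothetical
// caller). Locking is RAII-style, so the destructor above exits the monitor on every
// path unless _skip_exit was set by a preempted wait:
//
//   {
//     ObjectLocker ol(h_obj, THREAD);   // enters the monitor, may block
//     // ... operate on the object while it is locked ...
//   }                                   // destructor calls ObjectSynchronizer::exit()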
538
539 // -----------------------------------------------------------------------------
540 // Wait/Notify/NotifyAll
541 // NOTE: must use heavy weight monitor to handle wait()
542
543 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
544 JavaThread* current = THREAD;
545 CHECK_THROW_NOSYNC_IMSE_0(obj);
546 if (millis < 0) {
547 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
548 }
549
550 ObjectMonitor* monitor;
551 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
552
553 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
554 monitor->wait(millis, true, THREAD); // Not CHECK as we need the following code to run even if an exception is pending
555
556 // This dummy call is in place to get around dtrace bug 6254741. Once
557 // that's fixed we can uncomment the following line, remove the call
558 // and change this function back into a "void" func.
559 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
560 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
561 return ret_code;
562 }
563
564 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
565 assert(millis >= 0, "timeout value is negative");
566
567 ObjectMonitor* monitor;
568 monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
569 monitor->wait(millis, false, THREAD);
570 }
571
572
573 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
574 JavaThread* current = THREAD;
575 CHECK_THROW_NOSYNC_IMSE(obj);
576
577 markWord mark = obj->mark();
578 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
579 // Not inflated so there can't be any waiters to notify.
580 return;
581 }
582 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
583 monitor->notify(CHECK);
584 }
585
586 // NOTE: see comment of notify()
587 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
588 JavaThread* current = THREAD;
589 CHECK_THROW_NOSYNC_IMSE(obj);
590
591 markWord mark = obj->mark();
592 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
593 // Not inflated so there can't be any waiters to notify.
594 return;
595 }
596
597 ObjectMonitor* monitor = ObjectSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
598 monitor->notifyAll(CHECK);
599 }
600
601 // -----------------------------------------------------------------------------
602 // Hash Code handling
603
604 struct SharedGlobals {
605 char _pad_prefix[OM_CACHE_LINE_SIZE];
606 // This is a highly shared mostly-read variable.
607 // To avoid false-sharing it needs to be the sole occupant of a cache line.
608 volatile int stw_random;
609 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
655 // This is probably the best overall implementation -- we'll
656 // likely make this the default in future releases.
657 unsigned t = current->_hashStateX;
658 t ^= (t << 11);
659 current->_hashStateX = current->_hashStateY;
660 current->_hashStateY = current->_hashStateZ;
661 current->_hashStateZ = current->_hashStateW;
662 unsigned v = current->_hashStateW;
663 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
664 current->_hashStateW = v;
665 value = v;
666 }
667
668 value &= markWord::hash_mask;
669 if (value == 0) value = 0xBAD;
670 assert(value != markWord::no_hash, "invariant");
671 return value;
672 }
673
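// Illustrative only: the Marsaglia xor-shift scheme used above, as a self-contained
// sketch. In the real code the four words of state live in the JavaThread fields
// _hashStateX .. _hashStateW, seeded per thread.
static unsigned example_xorshift128(unsigned& x, unsigned& y, unsigned& z, unsigned& w) {
  unsigned t = x;
  t ^= (t << 11);
  x = y; y = z; z = w;                   // rotate the 128-bit state
  w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));  // mix the shifted-out word back in
  return w;                              // raw value, before the hash_mask/0xBAD fixup
}
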
674 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
675 // Inline types should never reach FastHashCode; their hash codes are computed via the value-class bootstrap method.
676 assert(!obj->klass()->is_inline_klass(), "FastHashCode should not be called for inline classes");
677
678 while (true) {
679 ObjectMonitor* monitor = nullptr;
680 markWord temp, test;
681 intptr_t hash;
682 markWord mark = obj->mark_acquire();
683 // If UseObjectMonitorTable is set the hash can simply be installed in the
684 // object header, since the monitor isn't in the object header.
685 if (UseObjectMonitorTable || !mark.has_monitor()) {
686 hash = mark.hash();
687 if (hash != 0) { // if it has a hash, just return it
688 return hash;
689 }
690 hash = get_next_hash(current, obj); // get a new hash
691 temp = mark.copy_set_hash(hash); // merge the hash into header
692 // try to install the hash
693 test = obj->cas_set_mark(temp, mark);
694 if (test == mark) { // if the hash was installed, return it
695 return hash;
696 }
697 // CAS failed, retry
756 hash = test.hash();
757 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
758 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
759 }
760 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
761 // If we detect that async deflation has occurred, then we
762 // attempt to restore the header/dmw to the object's header
763 // so that we only retry once if the deflater thread happens
764 // to be slow.
765 monitor->install_displaced_markword_in_object(obj);
766 continue;
767 }
768 }
769 // We finally get the hash.
770 return hash;
771 }
772 }
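
// Illustrative only: the install-or-adopt pattern used above for the header hash,
// shown with std::atomic on a plain integer slot instead of the markWord CAS
// (a simplification; the real code races on the whole mark word via cas_set_mark()).
#include <atomic>  // needed only for this illustrative sketch
static intptr_t example_install_hash(std::atomic<intptr_t>& slot, intptr_t computed) {
  intptr_t expected = 0;               // 0 means "no hash installed yet"
  if (slot.compare_exchange_strong(expected, computed)) {
    return computed;                   // we won the race and installed our hash
  }
  return expected;                     // lost the race: adopt the winner's hash
}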
773
774 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
775 Handle h_obj) {
776 if (h_obj->mark().is_inline_type()) {
777 return false;
778 }
779 assert(current == JavaThread::current(), "Can only be called on current thread");
780 oop obj = h_obj();
781
782 markWord mark = obj->mark_acquire();
783
784 if (mark.is_fast_locked()) {
785 // fast-locking case, see if lock is in current's lock stack
786 return current->lock_stack().contains(h_obj());
787 }
788
789 while (mark.has_monitor()) {
790 ObjectMonitor* monitor = read_monitor(current, obj, mark);
791 if (monitor != nullptr) {
792 return monitor->is_entered(current) != 0;
793 }
794 // Racing with inflation/deflation, retry
795 mark = obj->mark_acquire();
796
797 if (mark.is_fast_locked()) {
798 // Some other thread fast_locked, current could not have held the lock