295 // removed from the system.
296 //
297 // Note: If the _in_use_list max exceeds the ceiling, then
298 // monitors_used_above_threshold() will use the in_use_list max instead
299 // of the thread count derived ceiling because we have used more
300 // ObjectMonitors than the estimated average.
301 //
302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
303 // no-progress async monitor deflation cycles in a row, then the ceiling
304 // is adjusted upwards by monitors_used_above_threshold().
305 //
306 // Start the ceiling with the estimate for one thread in initialize()
307 // which is called after cmd line options are processed.
// The ceiling described in the comment above: seeded for one thread in
// initialize() and adjusted upwards by monitors_used_above_threshold().
static size_t _in_use_list_ceiling = 0;
// Set when an async monitor deflation cycle has been requested.
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
// Set once the final monitor audit has started (at VM shutdown, presumably --
// confirm against the audit code elsewhere in this file).
bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the last async deflation; 0 until the first cycle runs.
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Count of consecutive no-progress async deflation cycles; compared against
// NoAsyncDeflationProgressMax as described in the note above.
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;
314
315 // =====================> Quick functions
316
317 // The quick_* forms are special fast-path variants used to improve
318 // performance. In the simplest case, a "quick_*" implementation could
319 // simply return false, in which case the caller will perform the necessary
320 // state transitions and call the slow-path form.
321 // The fast-path is designed to handle frequently arising cases in an efficient
322 // manner and is just a degenerate "optimistic" variant of the slow-path.
323 // returns true -- to indicate the call was satisfied.
324 // returns false -- to indicate the call needs the services of the slow-path.
325 // A no-loitering ordinance is in effect for code in the quick_* family
326 // operators: safepoints or indefinite blocking (blocking that might span a
327 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
328 // entry.
329 //
330 // Consider: An interesting optimization is to have the JIT recognize the
331 // following common idiom:
332 // synchronized (someobj) { .... ; notify(); }
333 // That is, we find a notify() or notifyAll() call that immediately precedes
334 // the monitorexit operation. In that case the JIT could fuse the operations
335 // into a single notifyAndExit() runtime primitive.
336
337 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
338 assert(current->thread_state() == _thread_in_Java, "invariant");
339 NoSafepointVerifier nsv;
340 if (obj == nullptr) return false; // slow-path for invalid obj
341 const markWord mark = obj->mark();
342
343 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
344 // Degenerate notify
345 // fast-locked by caller so by definition the implied waitset is empty.
346 return true;
347 }
348
349 if (mark.has_monitor()) {
350 ObjectMonitor* const mon = read_monitor(current, obj, mark);
351 if (mon == nullptr) {
352 // Racing with inflation/deflation go slow path
353 return false;
354 }
355 assert(mon->object() == oop(obj), "invariant");
356 if (!mon->has_owner(current)) return false; // slow-path for IMS exception
357
358 if (mon->first_waiter() != nullptr) {
359 // We have one or more waiters. Since this is an inflated monitor
360 // that we own, we quickly notify them here and now, avoiding the slow-path.
412 EventSyncOnValueBasedClass event;
413 if (event.should_commit()) {
414 event.set_valueBasedClass(obj->klass());
415 event.commit();
416 }
417 }
418
419 if (bcp_was_adjusted) {
420 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
421 }
422 }
423
424 // -----------------------------------------------------------------------------
425 // Monitor Enter/Exit
426
427 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
428 // When called with locking_thread != Thread::current() some mechanism must synchronize
429 // the locking_thread with respect to the current thread. Currently only used when
430 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
431 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
432 return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
433 }
434
435 // -----------------------------------------------------------------------------
436 // JNI locks on java objects
437 // NOTE: must use heavy weight monitor to handle jni monitor enter
438 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
439 // Top native frames in the stack will not be seen if we attempt
440 // preemption, since we start walking from the last Java anchor.
441 NoPreemptMark npm(current);
442
443 if (obj->klass()->is_value_based()) {
444 handle_sync_on_value_based_class(obj, current);
445 }
446
447 // the current locking is from JNI instead of Java code
448 current->set_current_pending_monitor_is_from_java(false);
449 // An async deflation can race after the inflate() call and before
450 // enter() can make the ObjectMonitor busy. enter() returns false if
451 // we have lost the race to async deflation and we simply try again.
452 while (true) {
453 BasicLock lock;
454 if (LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
455 break;
456 }
457 }
458 current->set_current_pending_monitor_is_from_java(true);
459 }
460
461 // NOTE: must use heavy weight monitor to handle jni monitor exit
462 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
463 JavaThread* current = THREAD;
464
465 ObjectMonitor* monitor;
466 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
467 // If this thread has locked the object, exit the monitor. We
468 // intentionally do not use CHECK on check_owner because we must exit the
469 // monitor even if an exception was already pending.
470 if (monitor->check_owner(THREAD)) {
471 monitor->exit(current);
472 }
473 }
474
475 // -----------------------------------------------------------------------------
476 // Internal VM locks on java objects
477 // standard constructor, allows locking failures
478 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) : _npm(thread) {
479 _thread = thread;
480 _thread->check_for_valid_safepoint_state();
481 _obj = obj;
482
483 if (_obj() != nullptr) {
484 ObjectSynchronizer::enter(_obj, &_lock, _thread);
485 }
486 }
487
488 ObjectLocker::~ObjectLocker() {
489 if (_obj() != nullptr) {
490 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
491 }
492 }
493
494
495 // -----------------------------------------------------------------------------
496 // Wait/Notify/NotifyAll
497 // NOTE: must use heavy weight monitor to handle wait()
498
499 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
500 JavaThread* current = THREAD;
501 if (millis < 0) {
502 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
503 }
504
505 ObjectMonitor* monitor;
506 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
507
508 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
509 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
510
511 // This dummy call is in place to get around dtrace bug 6254741. Once
512 // that's fixed we can uncomment the following line, remove the call
513 // and change this function back into a "void" func.
514 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
515 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
516 return ret_code;
517 }
518
519 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
520 if (millis < 0) {
521 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
522 }
523
524 ObjectMonitor* monitor;
525 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
526 monitor->wait(millis, false, THREAD);
527 }
528
529
530 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
531 JavaThread* current = THREAD;
532
533 markWord mark = obj->mark();
534 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
535 // Not inflated so there can't be any waiters to notify.
536 return;
537 }
538 ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
539 monitor->notify(CHECK);
540 }
541
542 // NOTE: see comment of notify()
543 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
544 JavaThread* current = THREAD;
545
546 markWord mark = obj->mark();
547 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
548 // Not inflated so there can't be any waiters to notify.
549 return;
550 }
551
552 ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
553 monitor->notifyAll(CHECK);
554 }
555
556 // -----------------------------------------------------------------------------
557 // Hash Code handling
558
559 struct SharedGlobals {
560 char _pad_prefix[OM_CACHE_LINE_SIZE];
561 // This is a highly shared mostly-read variable.
562 // To avoid false-sharing it needs to be the sole occupant of a cache line.
563 volatile int stw_random;
564 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
631
632 markWord mark = obj->mark_acquire();
633 for (;;) {
634 intptr_t hash = mark.hash();
635 if (hash != 0) {
636 return hash;
637 }
638
639 hash = get_next_hash(current, obj);
640 const markWord old_mark = mark;
641 const markWord new_mark = old_mark.copy_set_hash(hash);
642
643 mark = obj->cas_set_mark(new_mark, old_mark);
644 if (old_mark == mark) {
645 return hash;
646 }
647 }
648 }
649
650 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
651 if (UseObjectMonitorTable) {
652 // Since the monitor isn't in the object header, the hash can simply be
653 // installed in the object header.
654 return install_hash_code(current, obj);
655 }
656
657 while (true) {
658 ObjectMonitor* monitor = nullptr;
659 markWord temp, test;
660 intptr_t hash;
661 markWord mark = obj->mark_acquire();
662 if (mark.is_unlocked() || mark.is_fast_locked()) {
663 hash = mark.hash();
664 if (hash != 0) { // if it has a hash, just return it
665 return hash;
666 }
667 hash = get_next_hash(current, obj); // get a new hash
668 temp = mark.copy_set_hash(hash); // merge the hash into header
669 // try to install the hash
670 test = obj->cas_set_mark(temp, mark);
732 hash = test.hash();
733 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
734 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
735 }
736 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
737 // If we detect that async deflation has occurred, then we
738 // attempt to restore the header/dmw to the object's header
739 // so that we only retry once if the deflater thread happens
740 // to be slow.
741 monitor->install_displaced_markword_in_object(obj);
742 continue;
743 }
744 }
745 // We finally get the hash.
746 return hash;
747 }
748 }
749
750 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
751 Handle h_obj) {
752 assert(current == JavaThread::current(), "Can only be called on current thread");
753 oop obj = h_obj();
754
755 markWord mark = obj->mark_acquire();
756
757 if (mark.is_fast_locked()) {
758 // fast-locking case, see if lock is in current's lock stack
759 return current->lock_stack().contains(h_obj());
760 }
761
762 while (mark.has_monitor()) {
763 ObjectMonitor* monitor = read_monitor(current, obj, mark);
764 if (monitor != nullptr) {
765 return monitor->is_entered(current) != 0;
766 }
767 // Racing with inflation/deflation, retry
768 mark = obj->mark_acquire();
769
770 if (mark.is_fast_locked()) {
771 // Some other thread fast_locked, current could not have held the lock
|
295 // removed from the system.
296 //
297 // Note: If the _in_use_list max exceeds the ceiling, then
298 // monitors_used_above_threshold() will use the in_use_list max instead
299 // of the thread count derived ceiling because we have used more
300 // ObjectMonitors than the estimated average.
301 //
302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
303 // no-progress async monitor deflation cycles in a row, then the ceiling
304 // is adjusted upwards by monitors_used_above_threshold().
305 //
306 // Start the ceiling with the estimate for one thread in initialize()
307 // which is called after cmd line options are processed.
// The ceiling described in the comment above: seeded for one thread in
// initialize() and adjusted upwards by monitors_used_above_threshold().
static size_t _in_use_list_ceiling = 0;
// Set when an async monitor deflation cycle has been requested.
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
// Set once the final monitor audit has started (at VM shutdown, presumably --
// confirm against the audit code elsewhere in this file).
bool volatile ObjectSynchronizer::_is_final_audit = false;
// Timestamp (ns) of the last async deflation; 0 until the first cycle runs.
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
// Count of consecutive no-progress async deflation cycles; compared against
// NoAsyncDeflationProgressMax as described in the note above.
static uintx _no_progress_cnt = 0;
static bool _no_progress_skip_increment = false;
314
// These checks are required for wait, notify and exit to avoid inflating the monitor to
// find out this inline type object cannot be locked.
// Throws IllegalMonitorStateException (naming the class) if obj is an inline
// type instance. Relies on a variable named `current` being in scope at the
// expansion site.
#define CHECK_THROW_NOSYNC_IMSE(obj) \
  if ((obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current; \
    ResourceMark rm(THREAD); \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

// Variant of the check above for TRAPS functions that return a value:
// THROW_MSG_0 returns 0 instead of a plain return.
#define CHECK_THROW_NOSYNC_IMSE_0(obj) \
  if ((obj)->mark().is_inline_type()) { \
    JavaThread* THREAD = current; \
    ResourceMark rm(THREAD); \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
330
331 // =====================> Quick functions
332
333 // The quick_* forms are special fast-path variants used to improve
334 // performance. In the simplest case, a "quick_*" implementation could
335 // simply return false, in which case the caller will perform the necessary
336 // state transitions and call the slow-path form.
337 // The fast-path is designed to handle frequently arising cases in an efficient
338 // manner and is just a degenerate "optimistic" variant of the slow-path.
339 // returns true -- to indicate the call was satisfied.
340 // returns false -- to indicate the call needs the services of the slow-path.
341 // A no-loitering ordinance is in effect for code in the quick_* family
342 // operators: safepoints or indefinite blocking (blocking that might span a
343 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
344 // entry.
345 //
346 // Consider: An interesting optimization is to have the JIT recognize the
347 // following common idiom:
348 // synchronized (someobj) { .... ; notify(); }
349 // That is, we find a notify() or notifyAll() call that immediately precedes
350 // the monitorexit operation. In that case the JIT could fuse the operations
351 // into a single notifyAndExit() runtime primitive.
352
353 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
354 assert(current->thread_state() == _thread_in_Java, "invariant");
355 NoSafepointVerifier nsv;
356 if (obj == nullptr) return false; // slow-path for invalid obj
357 assert(!obj->klass()->is_inline_klass(), "monitor op on inline type");
358 const markWord mark = obj->mark();
359
360 if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
361 // Degenerate notify
362 // fast-locked by caller so by definition the implied waitset is empty.
363 return true;
364 }
365
366 if (mark.has_monitor()) {
367 ObjectMonitor* const mon = read_monitor(current, obj, mark);
368 if (mon == nullptr) {
369 // Racing with inflation/deflation go slow path
370 return false;
371 }
372 assert(mon->object() == oop(obj), "invariant");
373 if (!mon->has_owner(current)) return false; // slow-path for IMS exception
374
375 if (mon->first_waiter() != nullptr) {
376 // We have one or more waiters. Since this is an inflated monitor
377 // that we own, we quickly notify them here and now, avoiding the slow-path.
429 EventSyncOnValueBasedClass event;
430 if (event.should_commit()) {
431 event.set_valueBasedClass(obj->klass());
432 event.commit();
433 }
434 }
435
436 if (bcp_was_adjusted) {
437 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
438 }
439 }
440
441 // -----------------------------------------------------------------------------
442 // Monitor Enter/Exit
443
444 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
445 // When called with locking_thread != Thread::current() some mechanism must synchronize
446 // the locking_thread with respect to the current thread. Currently only used when
447 // deoptimizing and re-locking locks. See Deoptimization::relock_objects
448 assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
449 assert(!obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");
450 return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
451 }
452
453 // -----------------------------------------------------------------------------
454 // JNI locks on java objects
455 // NOTE: must use heavy weight monitor to handle jni monitor enter
456 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
457 JavaThread* THREAD = current;
458 // Top native frames in the stack will not be seen if we attempt
459 // preemption, since we start walking from the last Java anchor.
460 NoPreemptMark npm(current);
461
462 if (obj->klass()->is_value_based()) {
463 handle_sync_on_value_based_class(obj, current);
464 }
465
466 if (obj->klass()->is_inline_klass()) {
467 ResourceMark rm(THREAD);
468 const char* desc = "Cannot synchronize on an instance of value class ";
469 const char* className = obj->klass()->external_name();
470 size_t msglen = strlen(desc) + strlen(className) + 1;
471 char* message = NEW_RESOURCE_ARRAY(char, msglen);
472 assert(message != nullptr, "NEW_RESOURCE_ARRAY should have called vm_exit_out_of_memory and not return nullptr");
473 THROW_MSG(vmSymbols::java_lang_IdentityException(), className);
474 }
475
476 // the current locking is from JNI instead of Java code
477 current->set_current_pending_monitor_is_from_java(false);
478 // An async deflation can race after the inflate() call and before
479 // enter() can make the ObjectMonitor busy. enter() returns false if
480 // we have lost the race to async deflation and we simply try again.
481 while (true) {
482 BasicLock lock;
483 if (LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
484 break;
485 }
486 }
487 current->set_current_pending_monitor_is_from_java(true);
488 }
489
490 // NOTE: must use heavy weight monitor to handle jni monitor exit
491 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
492 JavaThread* current = THREAD;
493 CHECK_THROW_NOSYNC_IMSE(obj);
494
495 ObjectMonitor* monitor;
496 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
497 // If this thread has locked the object, exit the monitor. We
498 // intentionally do not use CHECK on check_owner because we must exit the
499 // monitor even if an exception was already pending.
500 if (monitor->check_owner(THREAD)) {
501 monitor->exit(current);
502 }
503 }
504
505 // -----------------------------------------------------------------------------
506 // Internal VM locks on java objects
507 // standard constructor, allows locking failures
508 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) : _npm(thread) {
509 _thread = thread;
510 _thread->check_for_valid_safepoint_state();
511 _obj = obj;
512
513 if (_obj() != nullptr) {
514 ObjectSynchronizer::enter(_obj, &_lock, _thread);
515 }
516 }
517
518 ObjectLocker::~ObjectLocker() {
519 if (_obj() != nullptr) {
520 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
521 }
522 }
523
524
525 // -----------------------------------------------------------------------------
526 // Wait/Notify/NotifyAll
527 // NOTE: must use heavy weight monitor to handle wait()
528
529 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
530 JavaThread* current = THREAD;
531 CHECK_THROW_NOSYNC_IMSE_0(obj);
532 if (millis < 0) {
533 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
534 }
535
536 ObjectMonitor* monitor;
537 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
538
539 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
540 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
541
542 // This dummy call is in place to get around dtrace bug 6254741. Once
543 // that's fixed we can uncomment the following line, remove the call
544 // and change this function back into a "void" func.
545 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
546 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
547 return ret_code;
548 }
549
550 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
551 if (millis < 0) {
552 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
553 }
554
555 ObjectMonitor* monitor;
556 monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
557 monitor->wait(millis, false, THREAD);
558 }
559
560
561 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
562 JavaThread* current = THREAD;
563 CHECK_THROW_NOSYNC_IMSE(obj);
564
565 markWord mark = obj->mark();
566 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
567 // Not inflated so there can't be any waiters to notify.
568 return;
569 }
570 ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
571 monitor->notify(CHECK);
572 }
573
574 // NOTE: see comment of notify()
575 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
576 JavaThread* current = THREAD;
577 CHECK_THROW_NOSYNC_IMSE(obj);
578
579 markWord mark = obj->mark();
580 if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
581 // Not inflated so there can't be any waiters to notify.
582 return;
583 }
584
585 ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
586 monitor->notifyAll(CHECK);
587 }
588
589 // -----------------------------------------------------------------------------
590 // Hash Code handling
591
592 struct SharedGlobals {
593 char _pad_prefix[OM_CACHE_LINE_SIZE];
594 // This is a highly shared mostly-read variable.
595 // To avoid false-sharing it needs to be the sole occupant of a cache line.
596 volatile int stw_random;
597 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));
664
665 markWord mark = obj->mark_acquire();
666 for (;;) {
667 intptr_t hash = mark.hash();
668 if (hash != 0) {
669 return hash;
670 }
671
672 hash = get_next_hash(current, obj);
673 const markWord old_mark = mark;
674 const markWord new_mark = old_mark.copy_set_hash(hash);
675
676 mark = obj->cas_set_mark(new_mark, old_mark);
677 if (old_mark == mark) {
678 return hash;
679 }
680 }
681 }
682
683 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
684 // VM should be calling bootstrap method.
685 assert(!obj->klass()->is_inline_klass(), "FastHashCode should not be called for inline classes");
686
687 if (UseObjectMonitorTable) {
688 // Since the monitor isn't in the object header, the hash can simply be
689 // installed in the object header.
690 return install_hash_code(current, obj);
691 }
692
693 while (true) {
694 ObjectMonitor* monitor = nullptr;
695 markWord temp, test;
696 intptr_t hash;
697 markWord mark = obj->mark_acquire();
698 if (mark.is_unlocked() || mark.is_fast_locked()) {
699 hash = mark.hash();
700 if (hash != 0) { // if it has a hash, just return it
701 return hash;
702 }
703 hash = get_next_hash(current, obj); // get a new hash
704 temp = mark.copy_set_hash(hash); // merge the hash into header
705 // try to install the hash
706 test = obj->cas_set_mark(temp, mark);
768 hash = test.hash();
769 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
770 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
771 }
772 if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
773 // If we detect that async deflation has occurred, then we
774 // attempt to restore the header/dmw to the object's header
775 // so that we only retry once if the deflater thread happens
776 // to be slow.
777 monitor->install_displaced_markword_in_object(obj);
778 continue;
779 }
780 }
781 // We finally get the hash.
782 return hash;
783 }
784 }
785
786 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
787 Handle h_obj) {
788 if (h_obj->mark().is_inline_type()) {
789 return false;
790 }
791 assert(current == JavaThread::current(), "Can only be called on current thread");
792 oop obj = h_obj();
793
794 markWord mark = obj->mark_acquire();
795
796 if (mark.is_fast_locked()) {
797 // fast-locking case, see if lock is in current's lock stack
798 return current->lock_stack().contains(h_obj());
799 }
800
801 while (mark.has_monitor()) {
802 ObjectMonitor* monitor = read_monitor(current, obj, mark);
803 if (monitor != nullptr) {
804 return monitor->is_entered(current) != 0;
805 }
806 // Racing with inflation/deflation, retry
807 mark = obj->mark_acquire();
808
809 if (mark.is_fast_locked()) {
810 // Some other thread fast_locked, current could not have held the lock
|