src/hotspot/share/runtime/synchronizer.cpp

 295 // removed from the system.
 296 //
 297 // Note: If the _in_use_list max exceeds the ceiling, then
 298 // monitors_used_above_threshold() will use the in_use_list max instead
 299 // of the thread count derived ceiling because we have used more
 300 // ObjectMonitors than the estimated average.
 301 //
 302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 303 // no-progress async monitor deflation cycles in a row, then the ceiling
 304 // is adjusted upwards by monitors_used_above_threshold().
 305 //
 306 // Start the ceiling with the estimate for one thread in initialize()
 307 // which is called after cmd line options are processed.
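//
// Illustrative sketch (not code from this file) of the check the notes above
// describe; the exact accessor names are assumptions:
//
//   size_t ceiling = ObjectSynchronizer::in_use_list_ceiling();
//   if (_in_use_list.max() > ceiling) {
//     ceiling = _in_use_list.max();  // used more ObjectMonitors than estimated
//   }
//   // Percentage of the ceiling in use, compared against MonitorUsedDeflationThreshold.
//   return _in_use_list.count() * 100 > ceiling * MonitorUsedDeflationThreshold;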
 308 static size_t _in_use_list_ceiling = 0;
 309 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 310 bool volatile ObjectSynchronizer::_is_final_audit = false;
 311 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 312 static uintx _no_progress_cnt = 0;
 313 static bool _no_progress_skip_increment = false;
 314 
 315 // These checks are required for wait, notify and exit so that we avoid inflating
 316 // the monitor only to find out that this inline type object cannot be locked.
 317 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 318   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 319     JavaThread* THREAD = current;           \
 320     ResourceMark rm(THREAD);                \
 321     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 322   }
 323 
 324 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 325   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 326     JavaThread* THREAD = current;             \
 327     ResourceMark rm(THREAD);                  \
 328     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 329   }
 330 
 331 // =====================> Quick functions
 332 
 333 // The quick_* forms are special fast-path variants used to improve
 334 // performance.  In the simplest case, a "quick_*" implementation could
 335 // simply return false, in which case the caller will perform the necessary
 336 // state transitions and call the slow-path form.
 337 // The fast-path is designed to handle frequently arising cases in an efficient
 338 // manner and is just a degenerate "optimistic" variant of the slow-path.
 339 // returns true  -- to indicate the call was satisfied.
 340 // returns false -- to indicate the call needs the services of the slow-path.
 341 // A no-loitering ordinance is in effect for code in the quick_* family
 342 // operators: safepoints or indefinite blocking (blocking that might span a
 343 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 344 // entry.
 345 //
 346 // Consider: An interesting optimization is to have the JIT recognize the
 347 // following common idiom:
 348 //   synchronized (someobj) { .... ; notify(); }
 349 // That is, we find a notify() or notifyAll() call that immediately precedes
 350 // the monitorexit operation.  In that case the JIT could fuse the operations
 351 // into a single notifyAndExit() runtime primitive.
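//
// For reference, the quick_* calling convention looks roughly like this at a
// call site (illustrative sketch only; the real callers live in the compiler
// and interpreter runtime entries):
//
//   if (ObjectSynchronizer::quick_notify(obj, current, /*all=*/false)) {
//     return;  // fast-path satisfied the notify
//   }
//   // Fast-path declined: wrap obj in a Handle and take the slow path.
//   Handle h_obj(current, obj);
//   ObjectSynchronizer::notify(h_obj, CHECK);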
 352 
 353 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 354   assert(current->thread_state() == _thread_in_Java, "invariant");
 355   NoSafepointVerifier nsv;
 356   if (obj == nullptr) return false;  // slow-path for invalid obj
 357   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 358   const markWord mark = obj->mark();
 359 
 360   if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 361     // Degenerate notify:
 362     // fast-locked by the caller, so by definition the implied waitset is empty.
 363     return true;
 364   }
 365 
 366   if (mark.has_monitor()) {
 367     ObjectMonitor* const mon = read_monitor(current, obj, mark);
 368     if (mon == nullptr) {
 369       // Racing with inflation/deflation; take the slow path
 370       return false;
 371     }
 372     assert(mon->object() == oop(obj), "invariant");
 373     if (!mon->has_owner(current)) return false;  // slow-path for IMS exception
 374 
 375     if (mon->first_waiter() != nullptr) {
 376       // We have one or more waiters. Since this is an inflated monitor
 377       // that we own, we quickly notify them here and now, avoiding the slow-path.

 429     EventSyncOnValueBasedClass event;
 430     if (event.should_commit()) {
 431       event.set_valueBasedClass(obj->klass());
 432       event.commit();
 433     }
 434   }
 435 
 436   if (bcp_was_adjusted) {
 437     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 438   }
 439 }
 440 
 441 // -----------------------------------------------------------------------------
 442 // Monitor Enter/Exit
 443 
 444 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 445   // When called with locking_thread != Thread::current(), some mechanism must synchronize
 446   // the locking_thread with respect to the current thread. Currently this is only used when
 447   // deoptimizing and re-locking locks. See Deoptimization::relock_objects.
 448   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 449   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");
 450   return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
 451 }
 452 
 453 // -----------------------------------------------------------------------------
 454 // JNI locks on java objects
 455 // NOTE: must use heavy weight monitor to handle jni monitor enter
 456 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 457   JavaThread* THREAD = current;
 458   // Top native frames in the stack will not be seen if we attempt
 459   // preemption, since we start walking from the last Java anchor.
 460   NoPreemptMark npm(current);
 461 
 462   if (obj->klass()->is_value_based()) {
 463     handle_sync_on_value_based_class(obj, current);
 464   }
 465 
 466   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 467     ResourceMark rm(THREAD);
 468     const char* desc = "Cannot synchronize on an instance of value class ";
 469     const char* className = obj->klass()->external_name();
 470     size_t msglen = strlen(desc) + strlen(className) + 1;
 471     char* message = NEW_RESOURCE_ARRAY(char, msglen);  // exits the VM on allocation failure, never returns nullptr
 472     jio_snprintf(message, msglen, "%s%s", desc, className);
 473     THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
 474   }
 475 
 476   // the current locking is from JNI instead of Java code
 477   current->set_current_pending_monitor_is_from_java(false);
 478   // An async deflation can race after the monitor has been inflated and before
 479   // the enter can make the ObjectMonitor busy. inflate_and_enter() returns null
 480   // if we have lost the race to async deflation, in which case we simply try again.
 481   while (true) {
 482     BasicLock lock;
 483     if (LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
 484       current->inc_held_monitor_count(1, true);
 485       break;
 486     }
 487   }
 488   current->set_current_pending_monitor_is_from_java(true);
 489 }
 490 
 491 // NOTE: must use heavy weight monitor to handle jni monitor exit
 492 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 493   JavaThread* current = THREAD;
 494   CHECK_THROW_NOSYNC_IMSE(obj);
 495 
 496   ObjectMonitor* monitor;
 497   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
 498   // If this thread has locked the object, exit the monitor. We
 499   // intentionally do not use CHECK on check_owner because we must exit the
 500   // monitor even if an exception was already pending.
 501   if (monitor->check_owner(THREAD)) {
 502     monitor->exit(current);
 503     current->dec_held_monitor_count(1, true);
 504   }
 505 }
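// For context (sketch, not part of this file): jni_enter()/jni_exit() back the
// JNI MonitorEnter/MonitorExit functions, which a native caller uses roughly as:
//
//   if (env->MonitorEnter(obj) == JNI_OK) {
//     // ... critical section ...
//     env->MonitorExit(obj);
//   }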
 506 
 507 // -----------------------------------------------------------------------------
 508 // Internal VM locks on java objects
 509 // standard constructor, allows locking failures
 510 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) : _npm(thread) {
 511   _thread = thread;
 512   _thread->check_for_valid_safepoint_state();
 513   _obj = obj;
 514 
 515   if (_obj() != nullptr) {
 516     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 517   }
 518 }
 519 
 520 ObjectLocker::~ObjectLocker() {
 521   if (_obj() != nullptr) {
 522     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 523   }
 524 }
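// ObjectLocker is a stack-allocated RAII helper; typical VM-internal usage is
// roughly (sketch):
//
//   {
//     ObjectLocker ol(h_obj, THREAD);  // enters the monitor on construction
//     // ... operate on the object while its monitor is held ...
//   }                                  // destructor exits the monitor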
 525 
 526 
 527 // -----------------------------------------------------------------------------
 528 //  Wait/Notify/NotifyAll
 529 // NOTE: must use heavy weight monitor to handle wait()
 530 
 531 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 532   JavaThread* current = THREAD;
 533   CHECK_THROW_NOSYNC_IMSE_0(obj);
 534   if (millis < 0) {
 535     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 536   }
 537 
 538   ObjectMonitor* monitor;
 539   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
 540 
 541   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 542   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 543 
 544   // This dummy call is in place to get around dtrace bug 6254741.  Once
 545   // that's fixed we can uncomment the following line, remove the call
 546   // and change this function back into a "void" func.
 547   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 548   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 549   return ret_code;
 550 }
 551 
 552 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 553   if (millis < 0) {
 554     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 555   }
 556 
 557   ObjectMonitor* monitor;
 558   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
 559   monitor->wait(millis, false, THREAD);
 560 }
 561 
 562 
 563 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 564   JavaThread* current = THREAD;
 565   CHECK_THROW_NOSYNC_IMSE(obj);
 566 
 567   markWord mark = obj->mark();
 568   if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 569     // Not inflated so there can't be any waiters to notify.
 570     return;
 571   }
 572   ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 573   monitor->notify(CHECK);
 574 }
 575 
 576 // NOTE: see comment of notify()
 577 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 578   JavaThread* current = THREAD;
 579   CHECK_THROW_NOSYNC_IMSE(obj);
 580 
 581   markWord mark = obj->mark();
 582   if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 583     // Not inflated so there can't be any waiters to notify.
 584     return;
 585   }
 586 
 587   ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 588   monitor->notifyAll(CHECK);
 589 }
 590 
 591 // -----------------------------------------------------------------------------
 592 // Hash Code handling
 593 
 594 struct SharedGlobals {
 595   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 596   // This is a highly shared mostly-read variable.
 597   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 598   volatile int stw_random;
 599   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));

 666 
 667   markWord mark = obj->mark_acquire();
 668   for (;;) {
 669     intptr_t hash = mark.hash();
 670     if (hash != 0) {
 671       return hash;
 672     }
 673 
 674     hash = get_next_hash(current, obj);
 675     const markWord old_mark = mark;
 676     const markWord new_mark = old_mark.copy_set_hash(hash);
 677 
 678     mark = obj->cas_set_mark(new_mark, old_mark);
 679     if (old_mark == mark) {
 680       return hash;
 681     }
 682   }
 683 }
 684 
 685 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 686   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 687     // VM should be calling bootstrap method
 688     ShouldNotReachHere();
 689   }
 690   if (UseObjectMonitorTable) {
 691     // With UseObjectMonitorTable the monitor is not stored in the object header,
 692     // so the hash can simply be installed in the object header.
 693     return install_hash_code(current, obj);
 694   }
 695 
 696   while (true) {
 697     ObjectMonitor* monitor = nullptr;
 698     markWord temp, test;
 699     intptr_t hash;
 700     markWord mark = obj->mark_acquire();
 701     if (mark.is_unlocked() || mark.is_fast_locked()) {
 702       hash = mark.hash();
 703       if (hash != 0) {                     // if it has a hash, just return it
 704         return hash;
 705       }
 706       hash = get_next_hash(current, obj);  // get a new hash
 707       temp = mark.copy_set_hash(hash);     // merge the hash into header
 708                                            // try to install the hash
 709       test = obj->cas_set_mark(temp, mark);

 771         hash = test.hash();
 772         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
 773         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
 774       }
 775       if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
 776         // If we detect that async deflation has occurred, then we
 777         // attempt to restore the header/dmw to the object's header
 778         // so that we only retry once if the deflater thread happens
 779         // to be slow.
 780         monitor->install_displaced_markword_in_object(obj);
 781         continue;
 782       }
 783     }
 784     // We finally get the hash.
 785     return hash;
 786   }
 787 }
 788 
 789 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
 790                                                    Handle h_obj) {
 791   if (EnableValhalla && h_obj->mark().is_inline_type()) {
 792     return false;
 793   }
 794   assert(current == JavaThread::current(), "Can only be called on current thread");
 795   oop obj = h_obj();
 796 
 797   markWord mark = obj->mark_acquire();
 798 
 799   if (mark.is_fast_locked()) {
 800     // fast-locking case, see if lock is in current's lock stack
 801     return current->lock_stack().contains(h_obj());
 802   }
 803 
 804   while (mark.has_monitor()) {
 805     ObjectMonitor* monitor = read_monitor(current, obj, mark);
 806     if (monitor != nullptr) {
 807       return monitor->is_entered(current) != 0;
 808     }
 809     // Racing with inflation/deflation, retry
 810     mark = obj->mark_acquire();
 811 
 812     if (mark.is_fast_locked()) {
 813       // Some other thread fast-locked the object, so current could not have held the lock