src/hotspot/share/runtime/synchronizer.cpp

 295 // removed from the system.
 296 //
 297 // Note: If the _in_use_list max exceeds the ceiling, then
 298 // monitors_used_above_threshold() will use the in_use_list max instead
 299 // of the thread count derived ceiling because we have used more
 300 // ObjectMonitors than the estimated average.
 301 //
 302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 303 // no-progress async monitor deflation cycles in a row, then the ceiling
 304 // is adjusted upwards by monitors_used_above_threshold().
 305 //
 306 // Start the ceiling with the estimate for one thread in initialize()
 307 // which is called after cmd line options are processed.
 308 static size_t _in_use_list_ceiling = 0;
 309 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 310 bool volatile ObjectSynchronizer::_is_final_audit = false;
 311 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 312 static uintx _no_progress_cnt = 0;
 313 static bool _no_progress_skip_increment = false;
 314 
 315 // =====================> Quick functions
 316 
 317 // The quick_* forms are special fast-path variants used to improve
 318 // performance.  In the simplest case, a "quick_*" implementation could
 319 // simply return false, in which case the caller will perform the necessary
 320 // state transitions and call the slow-path form.
 321 // The fast-path is designed to handle frequently arising cases in an efficient
 322 // manner and is just a degenerate "optimistic" variant of the slow-path.
 323 // returns true  -- to indicate the call was satisfied.
 324 // returns false -- to indicate the call needs the services of the slow-path.
 325 // A no-loitering ordinance is in effect for code in the quick_* family
 326 // operators: safepoints or indefinite blocking (blocking that might span a
 327 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 328 // entry.
 329 //
 330 // Consider: An interesting optimization is to have the JIT recognize the
 331 // following common idiom:
 332 //   synchronized (someobj) { .... ; notify(); }
 333 // That is, we find a notify() or notifyAll() call that immediately precedes
 334 // the monitorexit operation.  In that case the JIT could fuse the operations
 335 // into a single notifyAndExit() runtime primitive.
 336 
 337 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 338   assert(current->thread_state() == _thread_in_Java, "invariant");
 339   NoSafepointVerifier nsv;
 340   if (obj == nullptr) return false;  // slow-path for invalid obj

 341   const markWord mark = obj->mark();
 342 
 343   if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 344     // Degenerate notify
 345     // fast-locked by caller so by definition the implied waitset is empty.
 346     return true;
 347   }
 348 
 349   if (mark.has_monitor()) {
 350     ObjectMonitor* const mon = read_monitor(current, obj, mark);
 351     if (mon == nullptr) {
 352       // Racing with inflation/deflation; go slow path
 353       return false;
 354     }
 355     assert(mon->object() == oop(obj), "invariant");
 356     if (!mon->has_owner(current)) return false;  // slow-path for IMS exception
 357 
 358     if (mon->first_waiter() != nullptr) {
 359       // We have one or more waiters. Since this is an inflated monitor
 360       // that we own, we quickly notify them here and now, avoiding the slow-path.

 412     EventSyncOnValueBasedClass event;
 413     if (event.should_commit()) {
 414       event.set_valueBasedClass(obj->klass());
 415       event.commit();
 416     }
 417   }
 418 
 419   if (bcp_was_adjusted) {
 420     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 421   }
 422 }
 423 
 424 // -----------------------------------------------------------------------------
 425 // Monitor Enter/Exit
 426 
 427 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 428   // When called with locking_thread != Thread::current() some mechanism must synchronize
 429   // the locking_thread with respect to the current thread. Currently only used when
 430   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 431   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");

 432   return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
 433 }
 434 
 435 // -----------------------------------------------------------------------------
 436 // JNI locks on java objects
 437 // NOTE: must use heavy weight monitor to handle jni monitor enter
 438 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {

 439   // Top native frames in the stack will not be seen if we attempt
 440   // preemption, since we start walking from the last Java anchor.
 441   NoPreemptMark npm(current);
 442 
 443   if (obj->klass()->is_value_based()) {
 444     handle_sync_on_value_based_class(obj, current);
 445   }
 446 
 447   // the current locking is from JNI instead of Java code
 448   current->set_current_pending_monitor_is_from_java(false);
 449   // An async deflation can race with inflate_and_enter() before it can
 450   // make the ObjectMonitor busy. inflate_and_enter() returns nullptr if
 451   // we have lost the race to async deflation, and we simply try again.
 452   while (true) {
 453     BasicLock lock;
 454     if (LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
 455       break;
 456     }
 457   }
 458   current->set_current_pending_monitor_is_from_java(true);
 459 }
 460 
 461 // NOTE: must use heavy weight monitor to handle jni monitor exit
 462 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 463   JavaThread* current = THREAD;

 464 
 465   ObjectMonitor* monitor;
 466   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
 467   // If this thread has locked the object, exit the monitor. We
 468   // intentionally do not use CHECK on check_owner because we must exit the
 469   // monitor even if an exception was already pending.
 470   if (monitor->check_owner(THREAD)) {
 471     monitor->exit(current);
 472   }
 473 }
 474 
 475 // -----------------------------------------------------------------------------
 476 // Internal VM locks on java objects
 477 // standard constructor, allows locking failures
 478 ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
 479   _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
 480   assert(!_thread->preempting(), "");
 481 
 482   _thread->check_for_valid_safepoint_state();
 483 

 504   if (_obj() != nullptr && !_skip_exit) {
 505     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 506   }
 507 }
 508 
 509 void ObjectLocker::wait_uninterruptibly(TRAPS) {
 510   ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
 511   if (_thread->preempting()) {
 512     _skip_exit = true;
 513     ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
 514     _thread->set_pending_preempted_exception();
 515   }
 516 }
 517 
 518 // -----------------------------------------------------------------------------
 519 //  Wait/Notify/NotifyAll
 520 // NOTE: must use heavy weight monitor to handle wait()
 521 
 522 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 523   JavaThread* current = THREAD;

 524   if (millis < 0) {
 525     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 526   }
 527 
 528   ObjectMonitor* monitor;
 529   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
 530 
 531   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 532   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 533 
 534   // This dummy call is in place to get around dtrace bug 6254741.  Once
 535   // that's fixed we can uncomment the following line, remove the call
 536   // and change this function back into a "void" func.
 537   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 538   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 539   return ret_code;
 540 }
 541 
 542 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 543   assert(millis >= 0, "timeout value is negative");
 544 
 545   ObjectMonitor* monitor;
 546   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
 547   monitor->wait(millis, false, THREAD);
 548 }
 549 
 550 
 551 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 552   JavaThread* current = THREAD;

 553 
 554   markWord mark = obj->mark();
 555   if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 556     // Not inflated so there can't be any waiters to notify.
 557     return;
 558   }
 559   ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 560   monitor->notify(CHECK);
 561 }
 562 
 563 // NOTE: see comment of notify()
 564 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 565   JavaThread* current = THREAD;

 566 
 567   markWord mark = obj->mark();
 568   if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 569     // Not inflated so there can't be any waiters to notify.
 570     return;
 571   }
 572 
 573   ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 574   monitor->notifyAll(CHECK);
 575 }
 576 
 577 // -----------------------------------------------------------------------------
 578 // Hash Code handling
 579 
 580 struct SharedGlobals {
 581   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 582   // This is a highly shared mostly-read variable.
 583   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 584   volatile int stw_random;
 585   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));

 652 
 653   markWord mark = obj->mark_acquire();
 654   for (;;) {
 655     intptr_t hash = mark.hash();
 656     if (hash != 0) {
 657       return hash;
 658     }
 659 
 660     hash = get_next_hash(current, obj);
 661     const markWord old_mark = mark;
 662     const markWord new_mark = old_mark.copy_set_hash(hash);
 663 
 664     mark = obj->cas_set_mark(new_mark, old_mark);
 665     if (old_mark == mark) {
 666       return hash;
 667     }
 668   }
 669 }
 670 
 671 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {



 672   if (UseObjectMonitorTable) {
 673     // Since the monitor isn't in the object header, the hash can simply be
 674     // installed in the object header.
 675     return install_hash_code(current, obj);
 676   }
 677 
 678   while (true) {
 679     ObjectMonitor* monitor = nullptr;
 680     markWord temp, test;
 681     intptr_t hash;
 682     markWord mark = obj->mark_acquire();
 683     if (mark.is_unlocked() || mark.is_fast_locked()) {
 684       hash = mark.hash();
 685       if (hash != 0) {                     // if it has a hash, just return it
 686         return hash;
 687       }
 688       hash = get_next_hash(current, obj);  // get a new hash
 689       temp = mark.copy_set_hash(hash);     // merge the hash into header
 690                                            // try to install the hash
 691       test = obj->cas_set_mark(temp, mark);

 753         hash = test.hash();
 754         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
 755         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
 756       }
 757       if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
 758         // If we detect that async deflation has occurred, then we
 759         // attempt to restore the header/dmw to the object's header
 760         // so that we only retry once if the deflater thread happens
 761         // to be slow.
 762         monitor->install_displaced_markword_in_object(obj);
 763         continue;
 764       }
 765     }
 766     // We finally get the hash.
 767     return hash;
 768   }
 769 }
 770 
 771 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
 772                                                    Handle h_obj) {



 773   assert(current == JavaThread::current(), "Can only be called on current thread");
 774   oop obj = h_obj();
 775 
 776   markWord mark = obj->mark_acquire();
 777 
 778   if (mark.is_fast_locked()) {
 779     // fast-locking case, see if lock is in current's lock stack
 780     return current->lock_stack().contains(h_obj());
 781   }
 782 
 783   while (mark.has_monitor()) {
 784     ObjectMonitor* monitor = read_monitor(current, obj, mark);
 785     if (monitor != nullptr) {
 786       return monitor->is_entered(current) != 0;
 787     }
 788     // Racing with inflation/deflation, retry
 789     mark = obj->mark_acquire();
 790 
 791     if (mark.is_fast_locked()) {
 792       // Some other thread fast_locked, current could not have held the lock

 295 // removed from the system.
 296 //
 297 // Note: If the _in_use_list max exceeds the ceiling, then
 298 // monitors_used_above_threshold() will use the in_use_list max instead
 299 // of the thread count derived ceiling because we have used more
 300 // ObjectMonitors than the estimated average.
 301 //
 302 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 303 // no-progress async monitor deflation cycles in a row, then the ceiling
 304 // is adjusted upwards by monitors_used_above_threshold().
 305 //
 306 // Start the ceiling with the estimate for one thread in initialize()
 307 // which is called after cmd line options are processed.
 308 static size_t _in_use_list_ceiling = 0;
 309 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
 310 bool volatile ObjectSynchronizer::_is_final_audit = false;
 311 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
 312 static uintx _no_progress_cnt = 0;
 313 static bool _no_progress_skip_increment = false;
 314 
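A simplified sketch of the ceiling heuristic described above. This is illustrative only, not the real monitors_used_above_threshold(): the in_use_count/in_use_max parameters stand in for the MonitorList queries, and the growth step applied after NoAsyncDeflationProgressMax no-progress cycles indicates the adjustment, not its exact factor.

// Sketch only: approximate shape of the "used above threshold" decision.
static bool used_above_threshold_sketch(size_t in_use_count, size_t in_use_max) {
  // Start from the thread-count derived estimate...
  size_t ceiling = _in_use_list_ceiling;
  if (in_use_max > ceiling) {
    // ...but if we have already used more ObjectMonitors than estimated,
    // the observed maximum becomes the ceiling.
    ceiling = in_use_max;
  }
  if (_no_progress_cnt >= NoAsyncDeflationProgressMax) {
    // Repeated no-progress deflation cycles: grow the ceiling so we stop
    // requesting async deflation that cannot make progress.
    ceiling += ceiling / 10;   // indicative adjustment only
  }
  // "Used above threshold" when the in-use count exceeds the configured
  // percentage (MonitorUsedDeflationThreshold) of the ceiling.
  return in_use_count * 100 >= ceiling * (size_t)MonitorUsedDeflationThreshold;
}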
 315 // These checks are required for wait, notify and exit to avoid inflating the monitor
 316 // only to find out that this inline type object cannot be locked.
 317 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 318   if ((obj)->mark().is_inline_type()) {  \
 319     JavaThread* THREAD = current;           \
 320     ResourceMark rm(THREAD);                \
 321     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 322   }
 323 
 324 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 325   if ((obj)->mark().is_inline_type()) {  \
 326     JavaThread* THREAD = current;             \
 327     ResourceMark rm(THREAD);                  \
 328     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 329   }
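Hand-expanded for illustration, the second macro behaves as follows inside a caller such as ObjectSynchronizer::wait(), which defines `current`:

// Roughly what CHECK_THROW_NOSYNC_IMSE_0(obj) expands to at a wait() call site.
if (obj->mark().is_inline_type()) {
  JavaThread* THREAD = current;   // THROW_MSG_0 needs a THREAD in scope
  ResourceMark rm(THREAD);        // external_name() allocates in the resource area
  THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(),
              obj->klass()->external_name());
}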
 330 
 331 // =====================> Quick functions
 332 
 333 // The quick_* forms are special fast-path variants used to improve
 334 // performance.  In the simplest case, a "quick_*" implementation could
 335 // simply return false, in which case the caller will perform the necessary
 336 // state transitions and call the slow-path form.
 337 // The fast-path is designed to handle frequently arising cases in an efficient
 338 // manner and is just a degenerate "optimistic" variant of the slow-path.
 339 // returns true  -- to indicate the call was satisfied.
 340 // returns false -- to indicate the call needs the services of the slow-path.
 341 // A no-loitering ordinance is in effect for code in the quick_* family
 342 // operators: safepoints or indefinite blocking (blocking that might span a
 343 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 344 // entry.
 345 //
 346 // Consider: An interesting optimization is to have the JIT recognize the
 347 // following common idiom:
 348 //   synchronized (someobj) { .... ; notify(); }
 349 // That is, we find a notify() or notifyAll() call that immediately precedes
 350 // the monitorexit operation.  In that case the JIT could fuse the operations
 351 // into a single notifyAndExit() runtime primitive.
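A hedged sketch of the caller-side contract (illustrative only; the real fallbacks live in the interpreter and JIT runtime stubs, and no fused notifyAndExit() entry point currently exists):

// Illustrative caller pattern for a quick_* function.
static void runtime_notify_sketch(oopDesc* obj, JavaThread* current, bool all) {
  if (ObjectSynchronizer::quick_notify(obj, current, all)) {
    return;  // satisfied entirely on the fast path, no state transition needed
  }
  // Slow path: the real stubs perform the thread-state transition here and
  // then call the full ObjectSynchronizer::notify()/notifyall() machinery.
}

// The notifyAndExit() idea above would pair the fast-path notify with the
// ordinary monitor exit in a single runtime call, e.g. (hypothetical):
//   runtime_notify_sketch(obj, current, all);
//   ObjectSynchronizer::exit(obj, lock, current);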
 352 
 353 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 354   assert(current->thread_state() == _thread_in_Java, "invariant");
 355   NoSafepointVerifier nsv;
 356   if (obj == nullptr) return false;  // slow-path for invalid obj
 357   assert(!obj->klass()->is_inline_klass(), "monitor op on inline type");
 358   const markWord mark = obj->mark();
 359 
 360   if (mark.is_fast_locked() && current->lock_stack().contains(cast_to_oop(obj))) {
 361     // Degenerate notify
 362     // fast-locked by caller so by definition the implied waitset is empty.
 363     return true;
 364   }
 365 
 366   if (mark.has_monitor()) {
 367     ObjectMonitor* const mon = read_monitor(current, obj, mark);
 368     if (mon == nullptr) {
 369       // Racing with inflation/deflation; go slow path
 370       return false;
 371     }
 372     assert(mon->object() == oop(obj), "invariant");
 373     if (!mon->has_owner(current)) return false;  // slow-path for IMS exception
 374 
 375     if (mon->first_waiter() != nullptr) {
 376       // We have one or more waiters. Since this is an inflated monitor
 377       // that we own, we quickly notify them here and now, avoiding the slow-path.

 429     EventSyncOnValueBasedClass event;
 430     if (event.should_commit()) {
 431       event.set_valueBasedClass(obj->klass());
 432       event.commit();
 433     }
 434   }
 435 
 436   if (bcp_was_adjusted) {
 437     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 438   }
 439 }
 440 
 441 // -----------------------------------------------------------------------------
 442 // Monitor Enter/Exit
 443 
 444 void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
 445   // When called with locking_thread != Thread::current() some mechanism must synchronize
 446   // the locking_thread with respect to the current thread. Currently only used when
 447   // deoptimizing and re-locking locks. See Deoptimization::relock_objects
 448   assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
 449   assert(!obj->klass()->is_inline_klass(), "JITed code should never have locked an instance of a value class");
 450   return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
 451 }
 452 
 453 // -----------------------------------------------------------------------------
 454 // JNI locks on java objects
 455 // NOTE: must use heavy weight monitor to handle jni monitor enter
 456 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 457   JavaThread* THREAD = current;
 458   // Top native frames in the stack will not be seen if we attempt
 459   // preemption, since we start walking from the last Java anchor.
 460   NoPreemptMark npm(current);
 461 
 462   if (obj->klass()->is_value_based()) {
 463     handle_sync_on_value_based_class(obj, current);
 464   }
 465 
 466   if (obj->klass()->is_inline_klass()) {
 467     ResourceMark rm(THREAD);
 468     const char* desc = "Cannot synchronize on an instance of value class ";
 469     const char* className = obj->klass()->external_name();
 470     size_t msglen = strlen(desc) + strlen(className) + 1;
 471     char* message = NEW_RESOURCE_ARRAY(char, msglen);
 472     jio_snprintf(message, msglen, "%s%s", desc, className);
 473     THROW_MSG(vmSymbols::java_lang_IdentityException(), message);
 474   }
 475 
 476   // the current locking is from JNI instead of Java code
 477   current->set_current_pending_monitor_is_from_java(false);
 478   // An async deflation can race with inflate_and_enter() before it can
 479   // make the ObjectMonitor busy. inflate_and_enter() returns nullptr if
 480   // we have lost the race to async deflation, and we simply try again.
 481   while (true) {
 482     BasicLock lock;
 483     if (LightweightSynchronizer::inflate_and_enter(obj(), &lock, inflate_cause_jni_enter, current, current) != nullptr) {
 484       break;
 485     }
 486   }
 487   current->set_current_pending_monitor_is_from_java(true);
 488 }
 489 
 490 // NOTE: must use heavy weight monitor to handle jni monitor exit
 491 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 492   JavaThread* current = THREAD;
 493   CHECK_THROW_NOSYNC_IMSE(obj);
 494 
 495   ObjectMonitor* monitor;
 496   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
 497   // If this thread has locked the object, exit the monitor. We
 498   // intentionally do not use CHECK on check_owner because we must exit the
 499   // monitor even if an exception was already pending.
 500   if (monitor->check_owner(THREAD)) {
 501     monitor->exit(current);
 502   }
 503 }
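For context, a minimal sketch of the JNI path that reaches jni_enter()/jni_exit() above (error handling trimmed; the function name is a placeholder):

// JNI MonitorEnter/MonitorExit route through ObjectSynchronizer::jni_enter()
// and ObjectSynchronizer::jni_exit() respectively.
static void jni_locked_section_sketch(JNIEnv* env, jobject obj) {
  if (env->MonitorEnter(obj) != JNI_OK) {
    return;  // exception pending, e.g. IdentityException for a value class
  }
  // ... state guarded by obj's monitor ...
  env->MonitorExit(obj);  // raises IllegalMonitorStateException if not owned
}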
 504 
 505 // -----------------------------------------------------------------------------
 506 // Internal VM locks on java objects
 507 // standard constructor, allows locking failures
 508 ObjectLocker::ObjectLocker(Handle obj, TRAPS) : _thread(THREAD), _obj(obj),
 509   _npm(_thread, _thread->at_preemptable_init() /* ignore_mark */), _skip_exit(false) {
 510   assert(!_thread->preempting(), "");
 511 
 512   _thread->check_for_valid_safepoint_state();
 513 

 534   if (_obj() != nullptr && !_skip_exit) {
 535     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 536   }
 537 }
 538 
 539 void ObjectLocker::wait_uninterruptibly(TRAPS) {
 540   ObjectSynchronizer::waitUninterruptibly(_obj, 0, _thread);
 541   if (_thread->preempting()) {
 542     _skip_exit = true;
 543     ObjectSynchronizer::read_monitor(_thread, _obj())->set_object_strong();
 544     _thread->set_pending_preempted_exception();
 545   }
 546 }
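A hedged sketch of how VM-internal code typically uses ObjectLocker as an RAII guard (the enclosing function is a placeholder; illustrative only):

// Lock a Java object for a VM-internal operation; the destructor exits the
// monitor unless a preempted wait set _skip_exit above.
static void vm_internal_op_sketch(Handle h_obj, TRAPS) {
  ObjectLocker ol(h_obj, THREAD);   // enters the monitor, may block
  // ... operate on the object while holding its monitor ...
  ol.wait_uninterruptibly(CHECK);   // optionally wait; a notify elsewhere resumes us
}                                    // ~ObjectLocker() calls ObjectSynchronizer::exit()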
 547 
 548 // -----------------------------------------------------------------------------
 549 //  Wait/Notify/NotifyAll
 550 // NOTE: must use heavy weight monitor to handle wait()
 551 
 552 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 553   JavaThread* current = THREAD;
 554   CHECK_THROW_NOSYNC_IMSE_0(obj);
 555   if (millis < 0) {
 556     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 557   }
 558 
 559   ObjectMonitor* monitor;
 560   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
 561 
 562   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 563   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 564 
 565   // This dummy call is in place to get around dtrace bug 6254741.  Once
 566   // that's fixed we can uncomment the following line, remove the call
 567   // and change this function back into a "void" func.
 568   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 569   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 570   return ret_code;
 571 }
 572 
 573 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 574   assert(millis >= 0, "timeout value is negative");
 575 
 576   ObjectMonitor* monitor;
 577   monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK);
 578   monitor->wait(millis, false, THREAD);
 579 }
 580 
 581 
 582 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 583   JavaThread* current = THREAD;
 584   CHECK_THROW_NOSYNC_IMSE(obj);
 585 
 586   markWord mark = obj->mark();
 587   if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 588     // Not inflated so there can't be any waiters to notify.
 589     return;
 590   }
 591   ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 592   monitor->notify(CHECK);
 593 }
 594 
 595 // NOTE: see comment of notify()
 596 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 597   JavaThread* current = THREAD;
 598   CHECK_THROW_NOSYNC_IMSE(obj);
 599 
 600   markWord mark = obj->mark();
 601   if ((mark.is_fast_locked() && current->lock_stack().contains(obj()))) {
 602     // Not inflated so there can't be any waiters to notify.
 603     return;
 604   }
 605 
 606   ObjectMonitor* monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
 607   monitor->notifyAll(CHECK);
 608 }
 609 
 610 // -----------------------------------------------------------------------------
 611 // Hash Code handling
 612 
 613 struct SharedGlobals {
 614   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 615   // This is a highly shared mostly-read variable.
 616   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 617   volatile int stw_random;
 618   DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int));

 685 
 686   markWord mark = obj->mark_acquire();
 687   for (;;) {
 688     intptr_t hash = mark.hash();
 689     if (hash != 0) {
 690       return hash;
 691     }
 692 
 693     hash = get_next_hash(current, obj);
 694     const markWord old_mark = mark;
 695     const markWord new_mark = old_mark.copy_set_hash(hash);
 696 
 697     mark = obj->cas_set_mark(new_mark, old_mark);
 698     if (old_mark == mark) {
 699       return hash;
 700     }
 701   }
 702 }
 703 
 704 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 705   // For inline types the VM should be using the value-class hashCode bootstrap method instead of FastHashCode.
 706   assert(!obj->klass()->is_inline_klass(), "FastHashCode should not be called for inline classes");
 707 
 708   if (UseObjectMonitorTable) {
 709     // Since the monitor isn't in the object header, the hash can simply be
 710     // installed in the object header.
 711     return install_hash_code(current, obj);
 712   }
 713 
 714   while (true) {
 715     ObjectMonitor* monitor = nullptr;
 716     markWord temp, test;
 717     intptr_t hash;
 718     markWord mark = obj->mark_acquire();
 719     if (mark.is_unlocked() || mark.is_fast_locked()) {
 720       hash = mark.hash();
 721       if (hash != 0) {                     // if it has a hash, just return it
 722         return hash;
 723       }
 724       hash = get_next_hash(current, obj);  // get a new hash
 725       temp = mark.copy_set_hash(hash);     // merge the hash into header
 726                                            // try to install the hash
 727       test = obj->cas_set_mark(temp, mark);

 789         hash = test.hash();
 790         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
 791         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
 792       }
 793       if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) {
 794         // If we detect that async deflation has occurred, then we
 795         // attempt to restore the header/dmw to the object's header
 796         // so that we only retry once if the deflater thread happens
 797         // to be slow.
 798         monitor->install_displaced_markword_in_object(obj);
 799         continue;
 800       }
 801     }
 802     // We finally get the hash.
 803     return hash;
 804   }
 805 }
 806 
 807 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
 808                                                    Handle h_obj) {
 809   if (h_obj->mark().is_inline_type()) {
 810     return false;
 811   }
 812   assert(current == JavaThread::current(), "Can only be called on current thread");
 813   oop obj = h_obj();
 814 
 815   markWord mark = obj->mark_acquire();
 816 
 817   if (mark.is_fast_locked()) {
 818     // fast-locking case, see if lock is in current's lock stack
 819     return current->lock_stack().contains(h_obj());
 820   }
 821 
 822   while (mark.has_monitor()) {
 823     ObjectMonitor* monitor = read_monitor(current, obj, mark);
 824     if (monitor != nullptr) {
 825       return monitor->is_entered(current) != 0;
 826     }
 827     // Racing with inflation/deflation, retry
 828     mark = obj->mark_acquire();
 829 
 830     if (mark.is_fast_locked()) {
 831       // Some other thread fast_locked, current could not have held the lock