< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page

 224 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
 225 // removed from the system.
 226 //
 227 // Note: If the _in_use_list max exceeds the ceiling, then
 228 // monitors_used_above_threshold() will use the in_use_list max instead
 229 // of the thread count derived ceiling because we have used more
 230 // ObjectMonitors than the estimated average.
 231 //
 232 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 233 // no-progress async monitor deflation cycles in a row, then the ceiling
 234 // is adjusted upwards by monitors_used_above_threshold().
 235 //
 236 // Start the ceiling with the estimate for one thread in initialize()
 237 // which is called after cmd line options are processed.
     // Ceiling on the in-use ObjectMonitor list; grown/shrunk per thread
     // as described in the block comment above, seeded in initialize().
 238 static size_t _in_use_list_ceiling = 0;
     // Set when an async monitor deflation cycle has been requested.
 239 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
     // Flag for the one-time final audit (name-based; see its usages).
 240 bool volatile ObjectSynchronizer::_is_final_audit = false;
     // Time (ns) of the last async deflation; 0 until the first cycle.
 241 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
     // Consecutive no-progress deflation cycles; compared against
     // NoAsyncDeflationProgressMax (see note above).
 242 static uintx _no_progress_cnt = 0;
 243 














 244 // =====================> Quick functions
 245 
 246 // The quick_* forms are special fast-path variants used to improve
 247 // performance.  In the simplest case, a "quick_*" implementation could
 248 // simply return false, in which case the caller will perform the necessary
 249 // state transitions and call the slow-path form.
 250 // The fast-path is designed to handle frequently arising cases in an efficient
 251 // manner and is just a degenerate "optimistic" variant of the slow-path.
 252 // returns true  -- to indicate the call was satisfied.
 253 // returns false -- to indicate the call needs the services of the slow-path.
 254 // A no-loitering ordinance is in effect for code in the quick_* family
 255 // operators: safepoints or indefinite blocking (blocking that might span a
 256 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 257 // entry.
 258 //
 259 // Consider: An interesting optimization is to have the JIT recognize the
 260 // following common idiom:
 261 //   synchronized (someobj) { .... ; notify(); }
 262 // That is, we find a notify() or notifyAll() call that immediately precedes
 263 // the monitorexit operation.  In that case the JIT could fuse the operations
 264 // into a single notifyAndExit() runtime primitive.
 265 
 266 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 267   assert(current->thread_state() == _thread_in_Java, "invariant");
 268   NoSafepointVerifier nsv;
 269   if (obj == NULL) return false;  // slow-path for invalid obj

 270   const markWord mark = obj->mark();
 271 
 272   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 273     // Degenerate notify
 274     // stack-locked by caller so by definition the implied waitset is empty.
 275     return true;
 276   }
 277 
 278   if (mark.has_monitor()) {
 279     ObjectMonitor* const mon = mark.monitor();
 280     assert(mon->object() == oop(obj), "invariant");
 281     if (mon->owner() != current) return false;  // slow-path for IMS exception
 282 
 283     if (mon->first_waiter() != NULL) {
 284       // We have one or more waiters. Since this is an inflated monitor
 285       // that we own, we can transfer one or more threads from the waitset
 286       // to the entrylist here and now, avoiding the slow-path.
 287       if (all) {
 288         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 289       } else {

 298     }
 299     return true;
 300   }
 301 
 302   // other IMS exception states take the slow-path
 303   return false;
 304 }
 305 
 306 
 307 // The LockNode emitted directly at the synchronization site would have
 308 // been too big if it were to have included support for the cases of inflated
 309 // recursive enter and exit, so they go here instead.
 310 // Note that we can't safely call AsyncPrintJavaStack() from within
 311 // quick_enter() as our thread state remains _in_Java.
 312 
 313 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 314                                      BasicLock * lock) {
 315   assert(current->thread_state() == _thread_in_Java, "invariant");
 316   NoSafepointVerifier nsv;
 317   if (obj == NULL) return false;       // Need to throw NPE

 318 
 319   if (obj->klass()->is_value_based()) {
 320     return false;
 321   }
 322 
 323   const markWord mark = obj->mark();
 324 
 325   if (mark.has_monitor()) {
 326     ObjectMonitor* const m = mark.monitor();
 327     // An async deflation or GC can race us before we manage to make
 328     // the ObjectMonitor busy by setting the owner below. If we detect
 329     // that race we just bail out to the slow-path here.
 330     if (m->object_peek() == NULL) {
 331       return false;
 332     }
 333     JavaThread* const owner = (JavaThread*) m->owner_raw();
 334 
 335     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 336     // and observability
 337     // Case: light contention possibly amenable to TLE

 407 
 408     EventSyncOnValueBasedClass event;
 409     if (event.should_commit()) {
 410       event.set_valueBasedClass(obj->klass());
 411       event.commit();
 412     }
 413   }
 414 
 415   if (bcp_was_adjusted) {
 416     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 417   }
 418 }
 419 
 420 // -----------------------------------------------------------------------------
 421 // Monitor Enter/Exit
 422 // The interpreter and compiler assembly code tries to lock using the fast path
 423 // of this algorithm. Make sure to update that code if the following function is
 424 // changed. The implementation is extremely sensitive to race condition. Be careful.
 425 
     // Slow-path monitor enter used when the interpreter/compiler assembly
     // fast path fails: try stack-locking first, record recursive
     // stack-locks, otherwise inflate to a full ObjectMonitor and enter it.
 426 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {

     // Diagnose synchronization on value-based classes (JEP 390) before
     // taking the lock.
 427   if (obj->klass()->is_value_based()) {
 428     handle_sync_on_value_based_class(obj, current);
 429   }
 430 
 431   markWord mark = obj->mark();
     // Case 1: neutral (unlocked) header -- try to stack-lock by CASing
     // the mark word to point at this BasicLock.
 432   if (mark.is_neutral()) {
 433     // Anticipate successful CAS -- the ST of the displaced mark must
 434     // be visible <= the ST performed by the CAS.
 435     lock->set_displaced_header(mark);
 436     if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 437       return;
 438     }
 439     // Fall through to inflate() ...
     // Case 2: already stack-locked by this thread -- record the
     // recursive enter with a NULL displaced header.
 440   } else if (mark.has_locker() &&
 441              current->is_lock_owned((address)mark.locker())) {
 442     assert(lock != mark.locker(), "must not re-lock the same lock");
 443     assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
 444     lock->set_displaced_header(markWord::from_pointer(NULL));
 445     return;
 446   }
 447 
 448   // The object header will never be displaced to this lock,
 449   // so it does not matter what the value is, except that it
 450   // must be non-zero to avoid looking like a re-entrant lock,
 451   // and must not look locked either.
 452   lock->set_displaced_header(markWord::unused_mark());
 453   // An async deflation can race after the inflate() call and before
 454   // enter() can make the ObjectMonitor busy. enter() returns false if
 455   // we have lost the race to async deflation and we simply try again.
 456   while (true) {
 457     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 458     if (monitor->enter(current)) {
 459       return;
 460     }
 461   }
 462 }
 463 
 464 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 465   markWord mark = object->mark();




 466 
 467   markWord dhw = lock->displaced_header();
 468   if (dhw.value() == 0) {
 469     // If the displaced header is NULL, then this exit matches up with
 470     // a recursive enter. No real work to do here except for diagnostics.
 471 #ifndef PRODUCT
 472     if (mark != markWord::INFLATING()) {
 473       // Only do diagnostics if we are not racing an inflation. Simply
 474       // exiting a recursive enter of a Java Monitor that is being
 475       // inflated is safe; see the has_monitor() comment below.
 476       assert(!mark.is_neutral(), "invariant");
 477       assert(!mark.has_locker() ||
 478              current->is_lock_owned((address)mark.locker()), "invariant");
 479       if (mark.has_monitor()) {
 480         // The BasicLock's displaced_header is marked as a recursive
 481         // enter and we have an inflated Java Monitor (ObjectMonitor).
 482         // This is a special case where the Java Monitor was inflated
 483         // after this thread entered the stack-lock recursively. When a
 484         // Java Monitor is inflated, we cannot safely walk the Java
 485         // Monitor owner's stack and update the BasicLocks because a

 506   // We have to take the slow-path of possible inflation and then exit.
 507   // The ObjectMonitor* can't be async deflated until ownership is
 508   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 509   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 510   monitor->exit(current);
 511 }
 512 
 513 // -----------------------------------------------------------------------------
 514 // Class Loader  support to workaround deadlocks on the class loader lock objects
 515 // Also used by GC
 516 // complete_exit()/reenter() are used to wait on a nested lock
 517 // i.e. to give up an outer lock completely and then re-enter
 518 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 519 //  1) complete_exit lock1 - saving recursion count
 520 //  2) wait on lock2
 521 //  3) when notified on lock2, unlock lock2
 522 //  4) reenter lock1 with original recursion count
 523 //  5) lock lock2
 524 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
     // Fully exit obj's monitor regardless of recursion depth and return
     // the saved recursion count for a later reenter(); see the numbered
     // nested-lock protocol in the comment above.
 525 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {


 526   // The ObjectMonitor* can't be async deflated until ownership is
 527   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 528   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 529   intptr_t ret_code = monitor->complete_exit(current);
 530   return ret_code;
 531 }
 532 
 533 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
     // Re-acquire obj's monitor and restore the recursion count that was
     // saved by complete_exit().
 534 void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {


 535   // An async deflation can race after the inflate() call and before
 536   // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
 537   // enter() returns false if we have lost the race to async deflation
 538   // and we simply try again.
 539   while (true) {
 540     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 541     if (monitor->reenter(recursions, current)) {
 542       return;
 543     }
 544   }
 545 }
 546 
 547 // -----------------------------------------------------------------------------
 548 // JNI locks on java objects
 549 // NOTE: must use heavy weight monitor to handle jni monitor enter
     // JNI MonitorEnter: always goes through an inflated ObjectMonitor
     // (no stack-lock fast path; see NOTE above).
 550 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 551   if (obj->klass()->is_value_based()) {
 552     handle_sync_on_value_based_class(obj, current);
 553   }

 554 
 555   // the current locking is from JNI instead of Java code
 556   current->set_current_pending_monitor_is_from_java(false);
 557   // An async deflation can race after the inflate() call and before
 558   // enter() can make the ObjectMonitor busy. enter() returns false if
 559   // we have lost the race to async deflation and we simply try again.
 560   while (true) {
 561     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 562     if (monitor->enter(current)) {
 563       break;
 564     }
 565   }
     // Restore the default now that the JNI-originated enter is done.
 566   current->set_current_pending_monitor_is_from_java(true);
 567 }
 568 
 569 // NOTE: must use heavy weight monitor to handle jni monitor exit
     // JNI MonitorExit: exit obj's inflated monitor if the current thread
     // owns it; a pending exception is preserved across the exit.
 570 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 571   JavaThread* current = THREAD;

 572 
 573   // The ObjectMonitor* can't be async deflated until ownership is
 574   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 575   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 576   // If this thread has locked the object, exit the monitor. We
 577   // intentionally do not use CHECK on check_owner because we must exit the
 578   // monitor even if an exception was already pending.
 579   if (monitor->check_owner(THREAD)) {
 580     monitor->exit(current);
 581   }
 582 }
 583 
 584 // -----------------------------------------------------------------------------
 585 // Internal VM locks on java objects
 586 // standard constructor, allows locking failures
     // RAII locker: acquire obj's monitor in the constructor (when obj is
     // non-NULL) and release it in the destructor.
 587 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 588   _thread = thread;
     // Sanity-check the thread's safepoint state before locking.
 589   _thread->check_for_valid_safepoint_state();
 590   _obj = obj;
 591 
 592   if (_obj() != NULL) {
 593     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 594   }
 595 }
 596 
     // Release the lock taken in the constructor, if any was taken.
 597 ObjectLocker::~ObjectLocker() {
 598   if (_obj() != NULL) {
 599     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 600   }
 601 }
 602 
 603 
 604 // -----------------------------------------------------------------------------
 605 //  Wait/Notify/NotifyAll
 606 // NOTE: must use heavy weight monitor to handle wait()
     // Object.wait() support: wait on obj's (inflated) monitor for up to
     // millis ms; a negative timeout throws IllegalArgumentException.
 607 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 608   JavaThread* current = THREAD;

 609   if (millis < 0) {
 610     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 611   }
 612   // The ObjectMonitor* can't be async deflated because the _waiters
 613   // field is incremented before ownership is dropped and decremented
 614   // after ownership is regained.
 615   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 616 
 617   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 618   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 619 
 620   // This dummy call is in place to get around dtrace bug 6254741.  Once
 621   // that's fixed we can uncomment the following line, remove the call
 622   // and change this function back into a "void" func.
 623   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 624   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 625   return ret_code;
 626 }
 627 
 628 // No exceptions are possible in this case as we only use this internally when locking is
 629 // correct and we have to wait until notified - so no interrupts or timeouts.
 630 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {

 631   // The ObjectMonitor* can't be async deflated because the _waiters
 632   // field is incremented before ownership is dropped and decremented
 633   // after ownership is regained.
 634   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
     // Wait forever, non-interruptibly -- caller is only woken by notify.
 635   monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
 636 }
 637 
     // Object.notify() support: wake one waiter on obj's monitor, if any.
 638 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 639   JavaThread* current = THREAD;

 640 
 641   markWord mark = obj->mark();
     // Fast path: still stack-locked by the caller, so the monitor was
     // never inflated and there is nothing to wake.
 642   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 643     // Not inflated so there can't be any waiters to notify.
 644     return;
 645   }
 646   // The ObjectMonitor* can't be async deflated until ownership is
 647   // dropped by the calling thread.
 648   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 649   monitor->notify(CHECK);
 650 }
 651 
 652 // NOTE: see comment of notify()
     // Object.notifyAll() support: wake all waiters on obj's monitor.
 653 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 654   JavaThread* current = THREAD;

 655 
 656   markWord mark = obj->mark();
     // Fast path: still stack-locked by the caller, so the monitor was
     // never inflated and there is nothing to wake.
 657   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 658     // Not inflated so there can't be any waiters to notify.
 659     return;
 660   }
 661   // The ObjectMonitor* can't be async deflated until ownership is
 662   // dropped by the calling thread.
 663   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 664   monitor->notifyAll(CHECK);
 665 }
 666 
 667 // -----------------------------------------------------------------------------
 668 // Hash Code handling
 669 
 670 struct SharedGlobals {
 671   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 672   // This is a highly shared mostly-read variable.
 673   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 674   volatile int stw_random;

 781     // This is probably the best overall implementation -- we'll
 782     // likely make this the default in future releases.
 783     unsigned t = current->_hashStateX;
 784     t ^= (t << 11);
 785     current->_hashStateX = current->_hashStateY;
 786     current->_hashStateY = current->_hashStateZ;
 787     current->_hashStateZ = current->_hashStateW;
 788     unsigned v = current->_hashStateW;
 789     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 790     current->_hashStateW = v;
 791     value = v;
 792   }
 793 
 794   value &= markWord::hash_mask;
 795   if (value == 0) value = 0xBAD;
 796   assert(value != markWord::no_hash, "invariant");
 797   return value;
 798 }
 799 
 800 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {




 801 
 802   while (true) {
 803     ObjectMonitor* monitor = NULL;
 804     markWord temp, test;
 805     intptr_t hash;
 806     markWord mark = read_stable_mark(obj);
 807 
 808     if (mark.is_neutral()) {               // if this is a normal header
 809       hash = mark.hash();
 810       if (hash != 0) {                     // if it has a hash, just return it
 811         return hash;
 812       }
 813       hash = get_next_hash(current, obj);  // get a new hash
 814       temp = mark.copy_set_hash(hash);     // merge the hash into header
 815                                            // try to install the hash
 816       test = obj->cas_set_mark(temp, mark);
 817       if (test == mark) {                  // if the hash was installed, return it
 818         return hash;
 819       }
 820       // Failed to install the hash. It could be that another thread

 889         // If we add any new usages of the header/dmw field, this code
 890         // will need to be updated.
 891         hash = test.hash();
 892         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
 893         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
 894       }
 895       if (monitor->is_being_async_deflated()) {
 896         // If we detect that async deflation has occurred, then we
 897         // attempt to restore the header/dmw to the object's header
 898         // so that we only retry once if the deflater thread happens
 899         // to be slow.
 900         monitor->install_displaced_markword_in_object(obj);
 901         continue;
 902       }
 903     }
 904     // We finally get the hash.
 905     return hash;
 906   }
 907 }
 908 
 909 // Deprecated -- use FastHashCode() instead.
 910 
 911 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
     // Thin wrapper forwarding to FastHashCode() on the current thread.
 912   return FastHashCode(Thread::current(), obj());
 913 }
 914 
 915 
     // Returns true iff the current thread holds the lock on h_obj, either
     // via a stack-lock or as entered owner of the inflated monitor.
 916 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
 917                                                    Handle h_obj) {



 918   assert(current == JavaThread::current(), "Can only be called on current thread");
 919   oop obj = h_obj();
 920 
     // read_stable_mark() presumably waits out a transient INFLATING
     // state -- see the state list in inflate(); confirm at its definition.
 921   markWord mark = read_stable_mark(obj);
 922 
 923   // Uncontended case, header points to stack
 924   if (mark.has_locker()) {
 925     return current->is_lock_owned((address)mark.locker());
 926   }
 927   // Contended case, header points to ObjectMonitor (tagged pointer)
 928   if (mark.has_monitor()) {
 929     // The first stage of async deflation does not affect any field
 930     // used by this comparison so the ObjectMonitor* is usable here.
 931     ObjectMonitor* monitor = mark.monitor();
 932     return monitor->is_entered(current) != 0;
 933   }
 934   // Unlocked case, header in place
 935   assert(mark.is_neutral(), "sanity check");
 936   return false;
 937 }

1100   event->set_monitorClass(obj->klass());
1101   event->set_address((uintptr_t)(void*)obj);
1102   event->set_cause((u1)cause);
1103   event->commit();
1104 }
1105 
1106 // Fast path code shared by multiple functions
     // Ensure obj has an inflated ObjectMonitor, inflating only when the
     // mark word does not already point at one.
1107 void ObjectSynchronizer::inflate_helper(oop obj) {
1108   markWord mark = obj->mark_acquire();
1109   if (mark.has_monitor()) {
     // Already inflated; just sanity-check the displaced mark word.
1110     ObjectMonitor* monitor = mark.monitor();
1111     markWord dmw = monitor->header();
1112     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1113     return;
1114   }
1115   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1116 }
1117 
1118 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1119                                            const InflateCause cause) {




1120   EventJavaMonitorInflate event;
1121 
1122   for (;;) {
1123     const markWord mark = object->mark_acquire();
1124 
1125     // The mark can be in one of the following states:
1126     // *  Inflated     - just return
1127     // *  Stack-locked - coerce it to inflated
1128     // *  INFLATING    - busy wait for conversion to complete
1129     // *  Neutral      - aggressively inflate the object.
1130 
1131     // CASE: inflated
1132     if (mark.has_monitor()) {
1133       ObjectMonitor* inf = mark.monitor();
1134       markWord dmw = inf->header();
1135       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1136       return inf;
1137     }
1138 
1139     // CASE: inflation in progress - inflating over a stack-lock.

 224 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
 225 // removed from the system.
 226 //
 227 // Note: If the _in_use_list max exceeds the ceiling, then
 228 // monitors_used_above_threshold() will use the in_use_list max instead
 229 // of the thread count derived ceiling because we have used more
 230 // ObjectMonitors than the estimated average.
 231 //
 232 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
 233 // no-progress async monitor deflation cycles in a row, then the ceiling
 234 // is adjusted upwards by monitors_used_above_threshold().
 235 //
 236 // Start the ceiling with the estimate for one thread in initialize()
 237 // which is called after cmd line options are processed.
     // Ceiling on the in-use ObjectMonitor list; grown/shrunk per thread
     // as described in the block comment above, seeded in initialize().
 238 static size_t _in_use_list_ceiling = 0;
     // Set when an async monitor deflation cycle has been requested.
 239 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
     // Flag for the one-time final audit (name-based; see its usages).
 240 bool volatile ObjectSynchronizer::_is_final_audit = false;
     // Time (ns) of the last async deflation; 0 until the first cycle.
 241 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
     // Consecutive no-progress deflation cycles; compared against
     // NoAsyncDeflationProgressMax (see note above).
 242 static uintx _no_progress_cnt = 0;
 243 
     // Valhalla: monitor operations on inline types are illegal -- throw
     // IllegalMonitorStateException with the klass name. This variant is
     // for functions that return void (THROW_MSG). Relies on a local
     // `current` JavaThread* being in scope at the expansion site.
 244 #define CHECK_THROW_NOSYNC_IMSE(obj)  \
 245   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 246     JavaThread* THREAD = current;           \
 247     ResourceMark rm(THREAD);                \
 248     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 249   }
 250 
     // Same check for value-returning functions (THROW_MSG_0 returns 0).
 251 #define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
 252   if (EnableValhalla && (obj)->mark().is_inline_type()) {  \
 253     JavaThread* THREAD = current;             \
 254     ResourceMark rm(THREAD);                  \
 255     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 256   }
 257 
 258 // =====================> Quick functions
 259 
 260 // The quick_* forms are special fast-path variants used to improve
 261 // performance.  In the simplest case, a "quick_*" implementation could
 262 // simply return false, in which case the caller will perform the necessary
 263 // state transitions and call the slow-path form.
 264 // The fast-path is designed to handle frequently arising cases in an efficient
 265 // manner and is just a degenerate "optimistic" variant of the slow-path.
 266 // returns true  -- to indicate the call was satisfied.
 267 // returns false -- to indicate the call needs the services of the slow-path.
 268 // A no-loitering ordinance is in effect for code in the quick_* family
 269 // operators: safepoints or indefinite blocking (blocking that might span a
 270 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 271 // entry.
 272 //
 273 // Consider: An interesting optimization is to have the JIT recognize the
 274 // following common idiom:
 275 //   synchronized (someobj) { .... ; notify(); }
 276 // That is, we find a notify() or notifyAll() call that immediately precedes
 277 // the monitorexit operation.  In that case the JIT could fuse the operations
 278 // into a single notifyAndExit() runtime primitive.
 279 
 280 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
 281   assert(current->thread_state() == _thread_in_Java, "invariant");
 282   NoSafepointVerifier nsv;
 283   if (obj == NULL) return false;  // slow-path for invalid obj
 284   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 285   const markWord mark = obj->mark();
 286 
 287   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 288     // Degenerate notify
 289     // stack-locked by caller so by definition the implied waitset is empty.
 290     return true;
 291   }
 292 
 293   if (mark.has_monitor()) {
 294     ObjectMonitor* const mon = mark.monitor();
 295     assert(mon->object() == oop(obj), "invariant");
 296     if (mon->owner() != current) return false;  // slow-path for IMS exception
 297 
 298     if (mon->first_waiter() != NULL) {
 299       // We have one or more waiters. Since this is an inflated monitor
 300       // that we own, we can transfer one or more threads from the waitset
 301       // to the entrylist here and now, avoiding the slow-path.
 302       if (all) {
 303         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
 304       } else {

 313     }
 314     return true;
 315   }
 316 
 317   // other IMS exception states take the slow-path
 318   return false;
 319 }
 320 
 321 
 322 // The LockNode emitted directly at the synchronization site would have
 323 // been too big if it were to have included support for the cases of inflated
 324 // recursive enter and exit, so they go here instead.
 325 // Note that we can't safely call AsyncPrintJavaStack() from within
 326 // quick_enter() as our thread state remains _in_Java.
 327 
 328 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
 329                                      BasicLock * lock) {
 330   assert(current->thread_state() == _thread_in_Java, "invariant");
 331   NoSafepointVerifier nsv;
 332   if (obj == NULL) return false;       // Need to throw NPE
 333   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 334 
 335   if (obj->klass()->is_value_based()) {
 336     return false;
 337   }
 338 
 339   const markWord mark = obj->mark();
 340 
 341   if (mark.has_monitor()) {
 342     ObjectMonitor* const m = mark.monitor();
 343     // An async deflation or GC can race us before we manage to make
 344     // the ObjectMonitor busy by setting the owner below. If we detect
 345     // that race we just bail out to the slow-path here.
 346     if (m->object_peek() == NULL) {
 347       return false;
 348     }
 349     JavaThread* const owner = (JavaThread*) m->owner_raw();
 350 
 351     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 352     // and observability
 353     // Case: light contention possibly amenable to TLE

 423 
 424     EventSyncOnValueBasedClass event;
 425     if (event.should_commit()) {
 426       event.set_valueBasedClass(obj->klass());
 427       event.commit();
 428     }
 429   }
 430 
 431   if (bcp_was_adjusted) {
 432     last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
 433   }
 434 }
 435 
 436 // -----------------------------------------------------------------------------
 437 // Monitor Enter/Exit
 438 // The interpreter and compiler assembly code tries to lock using the fast path
 439 // of this algorithm. Make sure to update that code if the following function is
 440 // changed. The implementation is extremely sensitive to race condition. Be careful.
 441 
     // Slow-path monitor enter used when the interpreter/compiler assembly
     // fast path fails: try stack-locking first, record recursive
     // stack-locks, otherwise inflate to a full ObjectMonitor and enter it.
 442 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
     // Valhalla: monitor ops on inline types throw IMSE (returns on throw).
 443   CHECK_THROW_NOSYNC_IMSE(obj);
     // Diagnose synchronization on value-based classes (JEP 390) before
     // taking the lock.
 444   if (obj->klass()->is_value_based()) {
 445     handle_sync_on_value_based_class(obj, current);
 446   }
 447 
 448   markWord mark = obj->mark();
     // Case 1: neutral (unlocked) header -- try to stack-lock by CASing
     // the mark word to point at this BasicLock.
 449   if (mark.is_neutral()) {
 450     // Anticipate successful CAS -- the ST of the displaced mark must
 451     // be visible <= the ST performed by the CAS.
 452     lock->set_displaced_header(mark);
 453     if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
 454       return;
 455     }
 456     // Fall through to inflate() ...
     // Case 2: already stack-locked by this thread -- record the
     // recursive enter with a NULL displaced header.
 457   } else if (mark.has_locker() &&
 458              current->is_lock_owned((address)mark.locker())) {
 459     assert(lock != mark.locker(), "must not re-lock the same lock");
 460     assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
 461     lock->set_displaced_header(markWord::from_pointer(NULL));
 462     return;
 463   }
 464 
 465   // The object header will never be displaced to this lock,
 466   // so it does not matter what the value is, except that it
 467   // must be non-zero to avoid looking like a re-entrant lock,
 468   // and must not look locked either.
 469   lock->set_displaced_header(markWord::unused_mark());
 470   // An async deflation can race after the inflate() call and before
 471   // enter() can make the ObjectMonitor busy. enter() returns false if
 472   // we have lost the race to async deflation and we simply try again.
 473   while (true) {
 474     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
 475     if (monitor->enter(current)) {
 476       return;
 477     }
 478   }
 479 }
 480 
 481 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
 482   markWord mark = object->mark();
 483   if (EnableValhalla && mark.is_inline_type()) {
 484     return;
 485   }
 486   assert(!EnableValhalla || !object->klass()->is_inline_klass(), "monitor op on inline type");
 487 
 488   markWord dhw = lock->displaced_header();
 489   if (dhw.value() == 0) {
 490     // If the displaced header is NULL, then this exit matches up with
 491     // a recursive enter. No real work to do here except for diagnostics.
 492 #ifndef PRODUCT
 493     if (mark != markWord::INFLATING()) {
 494       // Only do diagnostics if we are not racing an inflation. Simply
 495       // exiting a recursive enter of a Java Monitor that is being
 496       // inflated is safe; see the has_monitor() comment below.
 497       assert(!mark.is_neutral(), "invariant");
 498       assert(!mark.has_locker() ||
 499              current->is_lock_owned((address)mark.locker()), "invariant");
 500       if (mark.has_monitor()) {
 501         // The BasicLock's displaced_header is marked as a recursive
 502         // enter and we have an inflated Java Monitor (ObjectMonitor).
 503         // This is a special case where the Java Monitor was inflated
 504         // after this thread entered the stack-lock recursively. When a
 505         // Java Monitor is inflated, we cannot safely walk the Java
 506         // Monitor owner's stack and update the BasicLocks because a

 527   // We have to take the slow-path of possible inflation and then exit.
 528   // The ObjectMonitor* can't be async deflated until ownership is
 529   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 530   ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
 531   monitor->exit(current);
 532 }
 533 
 534 // -----------------------------------------------------------------------------
 535 // Class Loader  support to workaround deadlocks on the class loader lock objects
 536 // Also used by GC
 537 // complete_exit()/reenter() are used to wait on a nested lock
 538 // i.e. to give up an outer lock completely and then re-enter
 539 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 540 //  1) complete_exit lock1 - saving recursion count
 541 //  2) wait on lock2
 542 //  3) when notified on lock2, unlock lock2
 543 //  4) reenter lock1 with original recursion count
 544 //  5) lock lock2
 545 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 546 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
 547   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 548 
 549   // The ObjectMonitor* can't be async deflated until ownership is
 550   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 551   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 552   intptr_t ret_code = monitor->complete_exit(current);
 553   return ret_code;
 554 }
 555 
 556 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 557 void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {
 558   assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
 559 
 560   // An async deflation can race after the inflate() call and before
 561   // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
 562   // enter() returns false if we have lost the race to async deflation
 563   // and we simply try again.
 564   while (true) {
 565     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
 566     if (monitor->reenter(recursions, current)) {
 567       return;
 568     }
 569   }
 570 }
 571 
 572 // -----------------------------------------------------------------------------
 573 // JNI locks on java objects
 574 // NOTE: must use heavy weight monitor to handle jni monitor enter
 575 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
 576   if (obj->klass()->is_value_based()) {
 577     handle_sync_on_value_based_class(obj, current);
 578   }
 579   CHECK_THROW_NOSYNC_IMSE(obj);
 580 
 581   // the current locking is from JNI instead of Java code
 582   current->set_current_pending_monitor_is_from_java(false);
 583   // An async deflation can race after the inflate() call and before
 584   // enter() can make the ObjectMonitor busy. enter() returns false if
 585   // we have lost the race to async deflation and we simply try again.
 586   while (true) {
 587     ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
 588     if (monitor->enter(current)) {
 589       break;
 590     }
 591   }
 592   current->set_current_pending_monitor_is_from_java(true);
 593 }
 594 
 595 // NOTE: must use heavy weight monitor to handle jni monitor exit
 596 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
 597   JavaThread* current = THREAD;
 598   CHECK_THROW_NOSYNC_IMSE(obj);
 599 
 600   // The ObjectMonitor* can't be async deflated until ownership is
 601   // dropped inside exit() and the ObjectMonitor* must be !is_busy().
 602   ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
 603   // If this thread has locked the object, exit the monitor. We
 604   // intentionally do not use CHECK on check_owner because we must exit the
 605   // monitor even if an exception was already pending.
 606   if (monitor->check_owner(THREAD)) {
 607     monitor->exit(current);
 608   }
 609 }
 610 
 611 // -----------------------------------------------------------------------------
 612 // Internal VM locks on java objects
 613 // standard constructor, allows locking failures
 614 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
 615   _thread = thread;
 616   _thread->check_for_valid_safepoint_state();
 617   _obj = obj;
 618 
 619   if (_obj() != NULL) {
 620     ObjectSynchronizer::enter(_obj, &_lock, _thread);
 621   }
 622 }
 623 
 624 ObjectLocker::~ObjectLocker() {
 625   if (_obj() != NULL) {
 626     ObjectSynchronizer::exit(_obj(), &_lock, _thread);
 627   }
 628 }
 629 
 630 
 631 // -----------------------------------------------------------------------------
 632 //  Wait/Notify/NotifyAll
 633 // NOTE: must use heavy weight monitor to handle wait()
 634 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 635   JavaThread* current = THREAD;
 636   CHECK_THROW_NOSYNC_IMSE_0(obj);
 637   if (millis < 0) {
 638     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 639   }
 640   // The ObjectMonitor* can't be async deflated because the _waiters
 641   // field is incremented before ownership is dropped and decremented
 642   // after ownership is regained.
 643   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 644 
 645   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
 646   monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
 647 
 648   // This dummy call is in place to get around dtrace bug 6254741.  Once
 649   // that's fixed we can uncomment the following line, remove the call
 650   // and change this function back into a "void" func.
 651   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 652   int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
 653   return ret_code;
 654 }
 655 
 656 // No exception are possible in this case as we only use this internally when locking is
 657 // correct and we have to wait until notified - so no interrupts or timeouts.
 658 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
 659   CHECK_THROW_NOSYNC_IMSE(obj);
 660   // The ObjectMonitor* can't be async deflated because the _waiters
 661   // field is incremented before ownership is dropped and decremented
 662   // after ownership is regained.
 663   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
 664   monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
 665 }
 666 
 667 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 668   JavaThread* current = THREAD;
 669   CHECK_THROW_NOSYNC_IMSE(obj);
 670 
 671   markWord mark = obj->mark();
 672   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 673     // Not inflated so there can't be any waiters to notify.
 674     return;
 675   }
 676   // The ObjectMonitor* can't be async deflated until ownership is
 677   // dropped by the calling thread.
 678   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 679   monitor->notify(CHECK);
 680 }
 681 
 682 // NOTE: see comment of notify()
 683 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 684   JavaThread* current = THREAD;
 685   CHECK_THROW_NOSYNC_IMSE(obj);
 686 
 687   markWord mark = obj->mark();
 688   if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
 689     // Not inflated so there can't be any waiters to notify.
 690     return;
 691   }
 692   // The ObjectMonitor* can't be async deflated until ownership is
 693   // dropped by the calling thread.
 694   ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
 695   monitor->notifyAll(CHECK);
 696 }
 697 
 698 // -----------------------------------------------------------------------------
 699 // Hash Code handling
 700 
 701 struct SharedGlobals {
 702   char         _pad_prefix[OM_CACHE_LINE_SIZE];
 703   // This is a highly shared mostly-read variable.
 704   // To avoid false-sharing it needs to be the sole occupant of a cache line.
 705   volatile int stw_random;

 812     // This is probably the best overall implementation -- we'll
 813     // likely make this the default in future releases.
 814     unsigned t = current->_hashStateX;
 815     t ^= (t << 11);
 816     current->_hashStateX = current->_hashStateY;
 817     current->_hashStateY = current->_hashStateZ;
 818     current->_hashStateZ = current->_hashStateW;
 819     unsigned v = current->_hashStateW;
 820     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 821     current->_hashStateW = v;
 822     value = v;
 823   }
 824 
 825   value &= markWord::hash_mask;
 826   if (value == 0) value = 0xBAD;
 827   assert(value != markWord::no_hash, "invariant");
 828   return value;
 829 }
 830 
 831 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
 832   if (EnableValhalla && obj->klass()->is_inline_klass()) {
 833     // VM should be calling bootstrap method
 834     ShouldNotReachHere();
 835   }
 836 
 837   while (true) {
 838     ObjectMonitor* monitor = NULL;
 839     markWord temp, test;
 840     intptr_t hash;
 841     markWord mark = read_stable_mark(obj);
 842 
 843     if (mark.is_neutral()) {               // if this is a normal header
 844       hash = mark.hash();
 845       if (hash != 0) {                     // if it has a hash, just return it
 846         return hash;
 847       }
 848       hash = get_next_hash(current, obj);  // get a new hash
 849       temp = mark.copy_set_hash(hash);     // merge the hash into header
 850                                            // try to install the hash
 851       test = obj->cas_set_mark(temp, mark);
 852       if (test == mark) {                  // if the hash was installed, return it
 853         return hash;
 854       }
 855       // Failed to install the hash. It could be that another thread

 924         // If we add any new usages of the header/dmw field, this code
 925         // will need to be updated.
 926         hash = test.hash();
 927         assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
 928         assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
 929       }
 930       if (monitor->is_being_async_deflated()) {
 931         // If we detect that async deflation has occurred, then we
 932         // attempt to restore the header/dmw to the object's header
 933         // so that we only retry once if the deflater thread happens
 934         // to be slow.
 935         monitor->install_displaced_markword_in_object(obj);
 936         continue;
 937       }
 938     }
 939     // We finally get the hash.
 940     return hash;
 941   }
 942 }
 943 






 944 
 945 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
 946                                                    Handle h_obj) {
 947   if (EnableValhalla && h_obj->mark().is_inline_type()) {
 948     return false;
 949   }
 950   assert(current == JavaThread::current(), "Can only be called on current thread");
 951   oop obj = h_obj();
 952 
 953   markWord mark = read_stable_mark(obj);
 954 
 955   // Uncontended case, header points to stack
 956   if (mark.has_locker()) {
 957     return current->is_lock_owned((address)mark.locker());
 958   }
 959   // Contended case, header points to ObjectMonitor (tagged pointer)
 960   if (mark.has_monitor()) {
 961     // The first stage of async deflation does not affect any field
 962     // used by this comparison so the ObjectMonitor* is usable here.
 963     ObjectMonitor* monitor = mark.monitor();
 964     return monitor->is_entered(current) != 0;
 965   }
 966   // Unlocked case, header in place
 967   assert(mark.is_neutral(), "sanity check");
 968   return false;
 969 }

1132   event->set_monitorClass(obj->klass());
1133   event->set_address((uintptr_t)(void*)obj);
1134   event->set_cause((u1)cause);
1135   event->commit();
1136 }
1137 
1138 // Fast path code shared by multiple functions
1139 void ObjectSynchronizer::inflate_helper(oop obj) {
1140   markWord mark = obj->mark_acquire();
1141   if (mark.has_monitor()) {
1142     ObjectMonitor* monitor = mark.monitor();
1143     markWord dmw = monitor->header();
1144     assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1145     return;
1146   }
1147   (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1148 }
1149 
1150 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1151                                            const InflateCause cause) {
1152   if (EnableValhalla) {
1153     guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1154   }
1155 
1156   EventJavaMonitorInflate event;
1157 
1158   for (;;) {
1159     const markWord mark = object->mark_acquire();
1160 
1161     // The mark can be in one of the following states:
1162     // *  Inflated     - just return
1163     // *  Stack-locked - coerce it to inflated
1164     // *  INFLATING    - busy wait for conversion to complete
1165     // *  Neutral      - aggressively inflate the object.
1166 
1167     // CASE: inflated
1168     if (mark.has_monitor()) {
1169       ObjectMonitor* inf = mark.monitor();
1170       markWord dmw = inf->header();
1171       assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1172       return inf;
1173     }
1174 
1175     // CASE: inflation in progress - inflating over a stack-lock.
< prev index next >