258 // and is decreased by AvgMonitorsPerThreadEstimate when a thread is
259 // removed from the system.
260 //
261 // Note: If the _in_use_list max exceeds the ceiling, then
262 // monitors_used_above_threshold() will use the in_use_list max instead
263 // of the thread count derived ceiling because we have used more
264 // ObjectMonitors than the estimated average.
265 //
266 // Note: If deflate_idle_monitors() has NoAsyncDeflationProgressMax
267 // no-progress async monitor deflation cycles in a row, then the ceiling
268 // is adjusted upwards by monitors_used_above_threshold().
269 //
270 // Start the ceiling with the estimate for one thread in initialize()
271 // which is called after cmd line options are processed.
272 static size_t _in_use_list_ceiling = 0;
273 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
274 bool volatile ObjectSynchronizer::_is_final_audit = false;
275 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
276 static uintx _no_progress_cnt = 0;
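
// Illustrative sketch (not part of this file): how the quantities above can
// combine into the effective ceiling that monitors_used_above_threshold()
// checks against. The function name and the exact bump policy for the
// no-progress case are assumptions for illustration only.
static size_t illustrative_effective_ceiling(size_t thread_count,
                                             size_t in_use_list_max,
                                             uintx no_progress_cnt) {
  // Base ceiling: the per-thread estimate scaled by the live thread count.
  size_t ceiling = AvgMonitorsPerThreadEstimate * thread_count;
  // If we have already used more ObjectMonitors than the estimated average
  // predicts, the observed in_use_list max takes over as the ceiling.
  if (in_use_list_max > ceiling) {
    ceiling = in_use_list_max;
  }
  // After NoAsyncDeflationProgressMax no-progress deflation cycles in a row,
  // the ceiling is adjusted upwards; doubling is a placeholder policy here.
  if (no_progress_cnt >= NoAsyncDeflationProgressMax) {
    ceiling *= 2;
  }
  return ceiling;
}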
277
278 #define CHECK_THROW_NOSYNC_IMSE(obj) \
279 if (EnableValhalla && (obj)->mark().is_inline_type()) { \
280 JavaThread* THREAD = current; \
281 ResourceMark rm(THREAD); \
282 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
283 }
284
285 #define CHECK_THROW_NOSYNC_IMSE_0(obj) \
286 if (EnableValhalla && (obj)->mark().is_inline_type()) { \
287 JavaThread* THREAD = current; \
288 ResourceMark rm(THREAD); \
289 THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
290 }
291
292 // =====================> Quick functions
293
294 // The quick_* forms are special fast-path variants used to improve
295 // performance. In the simplest case, a "quick_*" implementation could
296 // simply return false, in which case the caller will perform the necessary
297 // state transitions and call the slow-path form.
298 // The fast-path is designed to handle frequently arising cases in an efficient
299 // manner and is just a degenerate "optimistic" variant of the slow-path.
300 // returns true -- to indicate the call was satisfied.
301 // returns false -- to indicate the call needs the services of the slow-path.
302 // A no-loitering ordinance is in effect for code in the quick_* family
303 // operators: safepoints or indefinite blocking (blocking that might span a
304 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
305 // entry.
306 //
307 // Consider: An interesting optimization is to have the JIT recognize the
308 // following common idiom:
309 // synchronized (someobj) { .... ; notify(); }
310 // That is, we find a notify() or notifyAll() call that immediately precedes
311 // the monitorexit operation. In that case the JIT could fuse the operations
312 // into a single notifyAndExit() runtime primitive.
313
314 bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
315 assert(current->thread_state() == _thread_in_Java, "invariant");
316 NoSafepointVerifier nsv;
317 if (obj == NULL) return false; // slow-path for invalid obj
318 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
319 const markWord mark = obj->mark();
320
321 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
322 // Degenerate notify
323 // Stack-locked by the caller, so by definition the implied waitset is empty.
324 return true;
325 }
326
327 if (mark.has_monitor()) {
328 ObjectMonitor* const mon = mark.monitor();
329 assert(mon->object() == oop(obj), "invariant");
330 if (mon->owner() != current) return false; // slow-path for IMS exception
331
332 if (mon->first_waiter() != NULL) {
333 // We have one or more waiters. Since this is an inflated monitor
334 // that we own, we can transfer one or more threads from the waitset
335 // to the entrylist here and now, avoiding the slow-path.
336 if (all) {
337 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
338 } else {
347 }
348 return true;
349 }
350
351 // other IMS exception states take the slow-path
352 return false;
353 }
354
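// Illustrative sketch (not part of this file): the quick_*/slow-path contract
// described above, seen from a caller. A false return simply routes the
// request to the heavyweight path; the thread-state transition is elided and
// the exact call shape of the slow path here is an assumption.
static void illustrative_notify_entry(oopDesc* obj, JavaThread* current, bool all) {
  if (ObjectSynchronizer::quick_notify(obj, current, all)) {
    return;  // fast path satisfied the call: no safepoint, no blocking
  }
  // Slow path: transition out of _thread_in_Java (elided), then take the
  // heavyweight route, which may block and may post exceptions.
  Handle h_obj(current, oop(obj));
  if (all) {
    ObjectSynchronizer::notifyall(h_obj, current);
  } else {
    ObjectSynchronizer::notify(h_obj, current);
  }
}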
355
356 // The LockNode emitted directly at the synchronization site would have
357 // been too big if it were to have included support for the cases of inflated
358 // recursive enter and exit, so they go here instead.
359 // Note that we can't safely call AsyncPrintJavaStack() from within
360 // quick_enter() as our thread state remains _in_Java.
361
362 bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
363 BasicLock * lock) {
364 assert(current->thread_state() == _thread_in_Java, "invariant");
365 NoSafepointVerifier nsv;
366 if (obj == NULL) return false; // Need to throw NPE
367 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
368
369 if (obj->klass()->is_value_based()) {
370 return false;
371 }
372
373 const markWord mark = obj->mark();
374
375 if (mark.has_monitor()) {
376 ObjectMonitor* const m = mark.monitor();
377 // An async deflation or GC can race us before we manage to make
378 // the ObjectMonitor busy by setting the owner below. If we detect
379 // that race, we just bail out to the slow-path here.
380 if (m->object_peek() == NULL) {
381 return false;
382 }
383 JavaThread* const owner = (JavaThread*) m->owner_raw();
384
385 // Lock contention and Transactional Lock Elision (TLE) diagnostics
386 // and observability
387 // Case: light contention possibly amenable to TLE
465 if (bcp_was_adjusted) {
466 last_frame.interpreter_frame_set_bcp(last_frame.interpreter_frame_bcp() + 1);
467 }
468 }
469
470 static bool useHeavyMonitors() {
471 #if defined(X86) || defined(AARCH64) || defined(PPC64)
472 return UseHeavyMonitors;
473 #else
474 return false;
475 #endif
476 }
477
478 // -----------------------------------------------------------------------------
479 // Monitor Enter/Exit
480 // The interpreter and compiler assembly code tries to lock using the fast path
481 // of this algorithm. Make sure to update that code if the following function is
482 // changed. The implementation is extremely sensitive to race conditions. Be careful.
483
484 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
485 CHECK_THROW_NOSYNC_IMSE(obj);
486 if (obj->klass()->is_value_based()) {
487 handle_sync_on_value_based_class(obj, current);
488 }
489
490 if (!useHeavyMonitors()) {
491 markWord mark = obj->mark();
492 if (mark.is_neutral()) {
493 // Anticipate successful CAS -- the ST of the displaced mark must
494 // be visible <= the ST performed by the CAS.
495 lock->set_displaced_header(mark);
496 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
497 return;
498 }
499 // Fall through to inflate() ...
500 } else if (mark.has_locker() &&
501 current->is_lock_owned((address)mark.locker())) {
502 assert(lock != mark.locker(), "must not re-lock the same lock");
503 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
504 lock->set_displaced_header(markWord::from_pointer(NULL));
505 return;
511 // and must not look locked either.
512 lock->set_displaced_header(markWord::unused_mark());
513 } else if (VerifyHeavyMonitors) {
514 guarantee(!obj->mark().has_locker(), "must not be stack-locked");
515 }
516
517 // An async deflation can race after the inflate() call and before
518 // enter() can make the ObjectMonitor busy. enter() returns false if
519 // we have lost the race to async deflation and we simply try again.
520 while (true) {
521 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
522 if (monitor->enter(current)) {
523 return;
524 }
525 }
526 }
527
528 void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
529 if (!useHeavyMonitors()) {
530 markWord mark = object->mark();
531 if (EnableValhalla && mark.is_inline_type()) {
532 return;
533 }
534 assert(!EnableValhalla || !object->klass()->is_inline_klass(), "monitor op on inline type");
535
536 markWord dhw = lock->displaced_header();
537 if (dhw.value() == 0) {
538 // If the displaced header is NULL, then this exit matches up with
539 // a recursive enter. No real work to do here except for diagnostics.
540 #ifndef PRODUCT
541 if (mark != markWord::INFLATING()) {
542 // Only do diagnostics if we are not racing an inflation. Simply
543 // exiting a recursive enter of a Java Monitor that is being
544 // inflated is safe; see the has_monitor() comment below.
545 assert(!mark.is_neutral(), "invariant");
546 assert(!mark.has_locker() ||
547 current->is_lock_owned((address)mark.locker()), "invariant");
548 if (mark.has_monitor()) {
549 // The BasicLock's displaced_header is marked as a recursive
550 // enter and we have an inflated Java Monitor (ObjectMonitor).
551 // This is a special case where the Java Monitor was inflated
552 // after this thread entered the stack-lock recursively. When a
553 // Java Monitor is inflated, we cannot safely walk the Java
554 // Monitor owner's stack and update the BasicLocks because a
578 // We have to take the slow-path of possible inflation and then exit.
579 // The ObjectMonitor* can't be async deflated until ownership is
580 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
581 ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
582 monitor->exit(current);
583 }
584
585 // -----------------------------------------------------------------------------
586 // Class Loader support to work around deadlocks on the class loader lock objects
587 // Also used by GC
588 // complete_exit()/reenter() are used to wait on a nested lock
589 // i.e. to give up an outer lock completely and then re-enter
590 // Used when holding nested locks - lock acquisition order: lock1 then lock2
591 // 1) complete_exit lock1 - saving recursion count
592 // 2) wait on lock2
593 // 3) when notified on lock2, unlock lock2
594 // 4) reenter lock1 with original recursion count
595 // 5) lock lock2 (a usage sketch follows reenter() below)
596 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
597 intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
598 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
599
600 // The ObjectMonitor* can't be async deflated until ownership is
601 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
602 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
603 intptr_t ret_code = monitor->complete_exit(current);
604 return ret_code;
605 }
606
607 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
608 void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {
609 assert(!EnableValhalla || !obj->klass()->is_inline_klass(), "monitor op on inline type");
610
611 // An async deflation can race after the inflate() call and before
612 // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
613 // enter() returns false if we have lost the race to async deflation
614 // and we simply try again.
615 while (true) {
616 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
617 if (monitor->reenter(recursions, current)) {
618 return;
619 }
620 }
621 }
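
// Illustrative sketch (not part of this file): the nested-lock protocol from
// the comment above complete_exit(). Steps 3) and 5) -- releasing and
// retaking lock2 -- are elided because they depend on how lock2 was acquired;
// the function name is hypothetical.
static void illustrative_nested_wait(Handle lock1, Handle lock2,
                                     JavaThread* current) {
  // 1) complete_exit lock1, saving its recursion count.
  intx recursions = ObjectSynchronizer::complete_exit(lock1, current);
  // 2) wait on lock2 (assumed to be held) until notified.
  ObjectSynchronizer::wait_uninterruptibly(lock2, current);
  // 4) reenter lock1 with the original recursion count restored.
  ObjectSynchronizer::reenter(lock1, recursions, current);
}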
622
623 // -----------------------------------------------------------------------------
624 // JNI locks on java objects
625 // NOTE: must use heavy weight monitor to handle jni monitor enter
626 void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
627 if (obj->klass()->is_value_based()) {
628 handle_sync_on_value_based_class(obj, current);
629 }
630 CHECK_THROW_NOSYNC_IMSE(obj);
631
632 // the current locking is from JNI instead of Java code
633 current->set_current_pending_monitor_is_from_java(false);
634 // An async deflation can race after the inflate() call and before
635 // enter() can make the ObjectMonitor busy. enter() returns false if
636 // we have lost the race to async deflation and we simply try again.
637 while (true) {
638 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
639 if (monitor->enter(current)) {
640 break;
641 }
642 }
643 current->set_current_pending_monitor_is_from_java(true);
644 }
645
646 // NOTE: must use heavy weight monitor to handle jni monitor exit
647 void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
648 JavaThread* current = THREAD;
649 CHECK_THROW_NOSYNC_IMSE(obj);
650
651 // The ObjectMonitor* can't be async deflated until ownership is
652 // dropped inside exit() and the ObjectMonitor* must be !is_busy().
653 ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
654 // If this thread has locked the object, exit the monitor. We
655 // intentionally do not use CHECK on check_owner because we must exit the
656 // monitor even if an exception was already pending.
657 if (monitor->check_owner(THREAD)) {
658 monitor->exit(current);
659 }
660 }
661
662 // -----------------------------------------------------------------------------
663 // Internal VM locks on java objects
664 // standard constructor, allows locking failures
665 ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
666 _thread = thread;
667 _thread->check_for_valid_safepoint_state();
668 _obj = obj;
669
670 if (_obj() != NULL) {
671 ObjectSynchronizer::enter(_obj, &_lock, _thread);
672 }
673 }
674
675 ObjectLocker::~ObjectLocker() {
676 if (_obj() != NULL) {
677 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
678 }
679 }
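
// Illustrative sketch (not part of this file): ObjectLocker is a scoped
// (RAII) guard, so VM-internal callers hold the lock for exactly one block.
// The surrounding function is hypothetical.
static void illustrative_scoped_lock(Handle h_obj, JavaThread* current) {
  ObjectLocker ol(h_obj, current);  // constructor enters the monitor
  // ... operations that must hold the lock on h_obj ...
}                                   // destructor exits the monitor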
680
681
682 // -----------------------------------------------------------------------------
683 // Wait/Notify/NotifyAll
684 // NOTE: must use heavy weight monitor to handle wait()
685 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
686 JavaThread* current = THREAD;
687 CHECK_THROW_NOSYNC_IMSE_0(obj);
688 if (millis < 0) {
689 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
690 }
691 // The ObjectMonitor* can't be async deflated because the _waiters
692 // field is incremented before ownership is dropped and decremented
693 // after ownership is regained.
694 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
695
696 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
697 monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
698
699 // This dummy call is in place to get around dtrace bug 6254741. Once
700 // that's fixed we can uncomment the following line, remove the call
701 // and change this function back into a "void" func.
702 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
703 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
704 return ret_code;
705 }
706
707 // No exceptions are possible in this case as we only use this internally when locking is
708 // correct and we have to wait until notified - so no interrupts or timeouts.
709 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
710 CHECK_THROW_NOSYNC_IMSE(obj);
711 // The ObjectMonitor* can't be async deflated because the _waiters
712 // field is incremented before ownership is dropped and decremented
713 // after ownership is regained.
714 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
715 monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
716 }
717
718 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
719 JavaThread* current = THREAD;
720 CHECK_THROW_NOSYNC_IMSE(obj);
721
722 markWord mark = obj->mark();
723 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
724 // Not inflated so there can't be any waiters to notify.
725 return;
726 }
727 // The ObjectMonitor* can't be async deflated until ownership is
728 // dropped by the calling thread.
729 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
730 monitor->notify(CHECK);
731 }
732
733 // NOTE: see the comment for notify()
734 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
735 JavaThread* current = THREAD;
736 CHECK_THROW_NOSYNC_IMSE(obj);
737
738 markWord mark = obj->mark();
739 if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
740 // Not inflated so there can't be any waiters to notify.
741 return;
742 }
743 // The ObjectMonitor* can't be async deflated until ownership is
744 // dropped by the calling thread.
745 ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
746 monitor->notifyAll(CHECK);
747 }
748
749 // -----------------------------------------------------------------------------
750 // Hash Code handling
751
752 struct SharedGlobals {
753 char _pad_prefix[OM_CACHE_LINE_SIZE];
754 // This is a highly shared mostly-read variable.
755 // To avoid false-sharing it needs to be the sole occupant of a cache line.
756 volatile int stw_random;
863 // This is probably the best overall implementation -- we'll
864 // likely make this the default in future releases.
865 unsigned t = current->_hashStateX;
866 t ^= (t << 11);
867 current->_hashStateX = current->_hashStateY;
868 current->_hashStateY = current->_hashStateZ;
869 current->_hashStateZ = current->_hashStateW;
870 unsigned v = current->_hashStateW;
871 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
872 current->_hashStateW = v;
873 value = v;
874 }
875
876 value &= markWord::hash_mask;
877 if (value == 0) value = 0xBAD;
878 assert(value != markWord::no_hash, "invariant");
879 return value;
880 }
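
// Illustrative sketch (not part of this file): the scheme above is
// Marsaglia's xorshift generator with four words of thread-local state
// (_hashStateX.._hashStateW). Restated self-contained, with the per-thread
// fields replaced by a plain struct for illustration:
struct IllustrativeXorShiftState { unsigned x, y, z, w; };

static unsigned illustrative_next_hash(IllustrativeXorShiftState* s) {
  unsigned t = s->x;
  t ^= (t << 11);                         // mix the oldest word
  s->x = s->y; s->y = s->z; s->z = s->w;  // slide the state window
  unsigned v = s->w;
  v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));   // combine with the newest word
  s->w = v;
  return v;  // raw value; caller applies hash_mask and the 0xBAD fixup
}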
881
882 intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
883 if (EnableValhalla && obj->klass()->is_inline_klass()) {
884 // VM should be calling bootstrap method
885 ShouldNotReachHere();
886 }
887
888 while (true) {
889 ObjectMonitor* monitor = NULL;
890 markWord temp, test;
891 intptr_t hash;
892 markWord mark = read_stable_mark(obj);
893 if (VerifyHeavyMonitors) {
894 assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
895 guarantee(!mark.has_locker(), "must not be stack locked");
896 }
897 if (mark.is_neutral()) { // if this is a normal header
898 hash = mark.hash();
899 if (hash != 0) { // if it has a hash, just return it
900 return hash;
901 }
902 hash = get_next_hash(current, obj); // get a new hash
903 temp = mark.copy_set_hash(hash); // merge the hash into header
904 // try to install the hash
905 test = obj->cas_set_mark(temp, mark);
906 if (test == mark) { // if the hash was installed, return it
978 // If we add any new usages of the header/dmw field, this code
979 // will need to be updated.
980 hash = test.hash();
981 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
982 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
983 }
984 if (monitor->is_being_async_deflated()) {
985 // If we detect that async deflation has occurred, then we
986 // attempt to restore the header/dmw to the object's header
987 // so that we only retry once if the deflater thread happens
988 // to be slow.
989 monitor->install_displaced_markword_in_object(obj);
990 continue;
991 }
992 }
993 // We finally get the hash.
994 return hash;
995 }
996 }
997
998
999 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
1000 Handle h_obj) {
1001 if (EnableValhalla && h_obj->mark().is_inline_type()) {
1002 return false;
1003 }
1004 assert(current == JavaThread::current(), "Can only be called on current thread");
1005 oop obj = h_obj();
1006
1007 markWord mark = read_stable_mark(obj);
1008
1009 // Uncontended case, header points to stack
1010 if (mark.has_locker()) {
1011 return current->is_lock_owned((address)mark.locker());
1012 }
1013 // Contended case, header points to ObjectMonitor (tagged pointer)
1014 if (mark.has_monitor()) {
1015 // The first stage of async deflation does not affect any field
1016 // used by this comparison so the ObjectMonitor* is usable here.
1017 ObjectMonitor* monitor = mark.monitor();
1018 return monitor->is_entered(current) != 0;
1019 }
1020 // Unlocked case, header in place
1021 assert(mark.is_neutral(), "sanity check");
1022 return false;
1023 }
1218 event->set_monitorClass(obj->klass());
1219 event->set_address((uintptr_t)(void*)obj);
1220 event->set_cause((u1)cause);
1221 event->commit();
1222 }
1223
1224 // Fast path code shared by multiple functions
1225 void ObjectSynchronizer::inflate_helper(oop obj) {
1226 markWord mark = obj->mark_acquire();
1227 if (mark.has_monitor()) {
1228 ObjectMonitor* monitor = mark.monitor();
1229 markWord dmw = monitor->header();
1230 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1231 return;
1232 }
1233 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1234 }
1235
1236 ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
1237 const InflateCause cause) {
1238 if (EnableValhalla) {
1239 guarantee(!object->klass()->is_inline_klass(), "Attempt to inflate inline type");
1240 }
1241
1242 EventJavaMonitorInflate event;
1243
1244 for (;;) {
1245 const markWord mark = object->mark_acquire();
1246
1247 // The mark can be in one of the following states:
1248 // * Inflated - just return
1249 // * Stack-locked - coerce it to inflated
1250 // * INFLATING - busy wait for conversion to complete
1251 // * Neutral - aggressively inflate the object.
1252
1253 // CASE: inflated
1254 if (mark.has_monitor()) {
1255 ObjectMonitor* inf = mark.monitor();
1256 markWord dmw = inf->header();
1257 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1258 return inf;
1259 }
1260
1261 // CASE: inflation in progress - inflating over a stack-lock.