< prev index next > src/hotspot/share/runtime/synchronizer.cpp
Print this page
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
+ #include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.inline.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"
class ObjectMonitorDeflationLogging;
+ // Decodes the ObjectMonitor* that is stored directly in an inflated mark word.
+ // Only valid when LockingMode != LM_LIGHTWEIGHT: with lightweight locking the
+ // monitor association lives in a side table, not in the mark word.
+ ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) {
+ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking uses table");
+ return mark.monitor();
+ }
+
+ // Looks up the ObjectMonitor for obj, dispatching on the locking mode:
+ // for legacy/monitor modes the monitor is decoded from the mark word;
+ // for LM_LIGHTWEIGHT it is fetched from the LightweightSynchronizer table.
+ // NOTE: under LM_LIGHTWEIGHT this can return nullptr when racing with
+ // inflation/deflation, so callers must check for nullptr.
+ ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) {
+ if (LockingMode != LM_LIGHTWEIGHT) {
+ return read_monitor(mark);
+ }
+ return LightweightSynchronizer::read_monitor(current, obj);
+ }
+
void MonitorList::add(ObjectMonitor* m) {
ObjectMonitor* head;
do {
head = Atomic::load(&_head);
m->set_next_om(head);
// Start the ceiling with the estimate for one thread.
set_in_use_list_ceiling(AvgMonitorsPerThreadEstimate);
// Start the timer for deflations, so it does not trigger immediately.
_last_async_deflation_time_ns = os::javaTimeNanos();
+
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ LightweightSynchronizer::initialize();
+ }
}
MonitorList ObjectSynchronizer::_in_use_list;
// monitors_used_above_threshold() policy is as follows:
//
return true;
}
}
if (mark.has_monitor()) {
! ObjectMonitor* const mon = mark.monitor();
assert(mon->object() == oop(obj), "invariant");
if (mon->owner() != current) return false; // slow-path for IMS exception
if (mon->first_waiter() != nullptr) {
// We have one or more waiters. Since this is an inflated monitor
return true;
}
}
if (mark.has_monitor()) {
! ObjectMonitor* const mon = read_monitor(current, obj, mark);
+ if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
+ // Racing with inflation/deflation; go to the slow path.
+ return false;
+ }
assert(mon->object() == oop(obj), "invariant");
if (mon->owner() != current) return false; // slow-path for IMS exception
if (mon->first_waiter() != nullptr) {
// We have one or more waiters. Since this is an inflated monitor
}
const markWord mark = obj->mark();
if (mark.has_monitor()) {
! ObjectMonitor* const m = mark.monitor();
// An async deflation or GC can race us before we manage to make
// the ObjectMonitor busy by setting the owner below. If we detect
// that race we just bail out to the slow-path here.
if (m->object_peek() == nullptr) {
return false;
}
const markWord mark = obj->mark();
if (mark.has_monitor()) {
! ObjectMonitor* m = nullptr;
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ m = current->om_get_from_monitor_cache(obj);
+ if (m == nullptr) {
+ // Take the slow-path on a cache miss.
+ return false;
+ }
+ } else {
+ m = ObjectSynchronizer::read_monitor(mark);
+ }
// An async deflation or GC can race us before we manage to make
// the ObjectMonitor busy by setting the owner below. If we detect
// that race we just bail out to the slow-path here.
if (m->object_peek() == nullptr) {
return false;
void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
// When called with locking_thread != Thread::current() some mechanism must synchronize
// the locking_thread with respect to the current thread. Currently only used when
// deoptimizing and re-locking locks. See Deoptimization::relock_objects
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
+
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ return LightweightSynchronizer::enter_for(obj, lock, locking_thread);
+ }
+
if (!enter_fast_impl(obj, lock, locking_thread)) {
// Inflated ObjectMonitor::enter_for is required
// An async deflation can race after the inflate_for() call and before
// enter_for() can make the ObjectMonitor busy. enter_for() returns false
}
}
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
assert(current == Thread::current(), "must be");
+
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ return LightweightSynchronizer::enter(obj, lock, current);
+ }
+
if (!enter_fast_impl(obj, lock, current)) {
// Inflated ObjectMonitor::enter is required
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race condition. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, locking_thread);
}
locking_thread->inc_held_monitor_count();
if (!useHeavyMonitors()) {
! if (LockingMode == LM_LIGHTWEIGHT) {
- // Fast-locking does not use the 'lock' argument.
- LockStack& lock_stack = locking_thread->lock_stack();
- if (lock_stack.is_full()) {
- // We unconditionally make room on the lock stack by inflating
- // the least recently locked object on the lock stack.
-
- // About the choice to inflate least recently locked object.
- // First we must chose to inflate a lock, either some lock on
- // the lock-stack or the lock that is currently being entered
- // (which may or may not be on the lock-stack).
- // Second the best lock to inflate is a lock which is entered
- // in a control flow where there are only a very few locks being
- // used, as the costly part of inflated locking is inflation,
- // not locking. But this property is entirely program dependent.
- // Third inflating the lock currently being entered on when it
- // is not present on the lock-stack will result in a still full
- // lock-stack. This creates a scenario where every deeper nested
- // monitorenter must call into the runtime.
- // The rational here is as follows:
- // Because we cannot (currently) figure out the second, and want
- // to avoid the third, we inflate a lock on the lock-stack.
- // The least recently locked lock is chosen as it is the lock
- // with the longest critical section.
-
- log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
- ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
- assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
- p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
- assert(!lock_stack.is_full(), "must have made room here");
- }
-
- markWord mark = obj()->mark_acquire();
- while (mark.is_unlocked()) {
- // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
- // Try to swing into 'fast-locked' state.
- assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
- const markWord locked_mark = mark.set_fast_locked();
- const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
- if (old_mark == mark) {
- // Successfully fast-locked, push object to lock-stack and return.
- lock_stack.push(obj());
- return true;
- }
- mark = old_mark;
- }
-
- if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
- // Recursive lock successful.
- return true;
- }
-
- // Failed to fast lock.
- return false;
- } else if (LockingMode == LM_LEGACY) {
markWord mark = obj->mark();
if (mark.is_unlocked()) {
// Anticipate successful CAS -- the ST of the displaced mark must
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race condition. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
+ assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer");
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, locking_thread);
}
locking_thread->inc_held_monitor_count();
if (!useHeavyMonitors()) {
! if (LockingMode == LM_LEGACY) {
markWord mark = obj->mark();
if (mark.is_unlocked()) {
// Anticipate successful CAS -- the ST of the displaced mark must
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
}
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
current->dec_held_monitor_count();
if (!useHeavyMonitors()) {
markWord mark = object->mark();
! if (LockingMode == LM_LIGHTWEIGHT) {
- // Fast-locking does not use the 'lock' argument.
- LockStack& lock_stack = current->lock_stack();
- if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
- // Recursively unlocked.
- return;
- }
-
- if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
- // This lock is recursive but is not at the top of the lock stack so we're
- // doing an unbalanced exit. We have to fall thru to inflation below and
- // let ObjectMonitor::exit() do the unlock.
- } else {
- while (mark.is_fast_locked()) {
- // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
- const markWord unlocked_mark = mark.set_unlocked();
- const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
- if (old_mark == mark) {
- size_t recursions = lock_stack.remove(object) - 1;
- assert(recursions == 0, "must not be recursive here");
- return;
- }
- mark = old_mark;
- }
- }
- } else if (LockingMode == LM_LEGACY) {
markWord dhw = lock->displaced_header();
if (dhw.value() == 0) {
// If the displaced header is null, then this exit matches up with
// a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
}
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
current->dec_held_monitor_count();
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ return LightweightSynchronizer::exit(object, current);
+ }
+
if (!useHeavyMonitors()) {
markWord mark = object->mark();
! if (LockingMode == LM_LEGACY) {
markWord dhw = lock->displaced_header();
if (dhw.value() == 0) {
// If the displaced header is null, then this exit matches up with
// a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
// after this thread entered the stack-lock recursively. When a
// Java Monitor is inflated, we cannot safely walk the Java
// Monitor owner's stack and update the BasicLocks because a
// Java Monitor can be asynchronously inflated by a thread that
// does not own the Java Monitor.
! ObjectMonitor* m = mark.monitor();
assert(m->object()->mark() == mark, "invariant");
assert(m->is_entered(current), "invariant");
}
}
#endif
// after this thread entered the stack-lock recursively. When a
// Java Monitor is inflated, we cannot safely walk the Java
// Monitor owner's stack and update the BasicLocks because a
// Java Monitor can be asynchronously inflated by a thread that
// does not own the Java Monitor.
! ObjectMonitor* m = read_monitor(mark);
assert(m->object()->mark() == mark, "invariant");
assert(m->is_entered(current), "invariant");
}
}
#endif
current->set_current_pending_monitor_is_from_java(false);
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
! ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
! if (monitor->enter(current)) {
current->inc_held_monitor_count(1, true);
break;
}
}
current->set_current_pending_monitor_is_from_java(true);
current->set_current_pending_monitor_is_from_java(false);
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
! ObjectMonitor* monitor;
! bool entered;
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ entered = LightweightSynchronizer::inflate_and_enter(obj(), current, current, inflate_cause_jni_enter) != nullptr;
+ } else {
+ monitor = inflate(current, obj(), inflate_cause_jni_enter);
+ entered = monitor->enter(current);
+ }
+
+ if (entered) {
current->inc_held_monitor_count(1, true);
break;
}
}
current->set_current_pending_monitor_is_from_java(true);
// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
JavaThread* current = THREAD;
! // The ObjectMonitor* can't be async deflated until ownership is
! // dropped inside exit() and the ObjectMonitor* must be !is_busy().
! ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
// If this thread has locked the object, exit the monitor. We
// intentionally do not use CHECK on check_owner because we must exit the
// monitor even if an exception was already pending.
if (monitor->check_owner(THREAD)) {
monitor->exit(current);
// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
JavaThread* current = THREAD;
! ObjectMonitor* monitor;
! if (LockingMode == LM_LIGHTWEIGHT) {
! monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK);
+ } else {
+ // The ObjectMonitor* can't be async deflated until ownership is
+ // dropped inside exit() and the ObjectMonitor* must be !is_busy().
+ monitor = inflate(current, obj, inflate_cause_jni_exit);
+ }
// If this thread has locked the object, exit the monitor. We
// intentionally do not use CHECK on check_owner because we must exit the
// monitor even if an exception was already pending.
if (monitor->check_owner(THREAD)) {
monitor->exit(current);
// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
JavaThread* current = THREAD;
if (millis < 0) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
}
! // The ObjectMonitor* can't be async deflated because the _waiters
! // field is incremented before ownership is dropped and decremented
! // after ownership is regained.
! ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
// This dummy call is in place to get around dtrace bug 6254741. Once
// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
+
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
JavaThread* current = THREAD;
if (millis < 0) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
}
!
! ObjectMonitor* monitor;
! if (LockingMode == LM_LIGHTWEIGHT) {
! monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0);
+ } else {
+ // The ObjectMonitor* can't be async deflated because the _waiters
+ // field is incremented before ownership is dropped and decremented
+ // after ownership is regained.
+ monitor = inflate(current, obj(), inflate_cause_wait);
+ }
DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
// This dummy call is in place to get around dtrace bug 6254741. Once
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
}
! // The ObjectMonitor* can't be async deflated until ownership is
! // dropped by the calling thread.
! ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
monitor->notify(CHECK);
}
// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
}
!
! ObjectMonitor* monitor;
! if (LockingMode == LM_LIGHTWEIGHT) {
+ monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
+ } else {
+ // The ObjectMonitor* can't be async deflated until ownership is
+ // dropped by the calling thread.
+ monitor = inflate(current, obj(), inflate_cause_notify);
+ }
monitor->notify(CHECK);
}
// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
}
! // The ObjectMonitor* can't be async deflated until ownership is
! // dropped by the calling thread.
! ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
monitor->notifyAll(CHECK);
}
// -----------------------------------------------------------------------------
// Hash Code handling
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
}
!
! ObjectMonitor* monitor;
! if (LockingMode == LM_LIGHTWEIGHT) {
+ monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK);
+ } else {
+ // The ObjectMonitor* can't be async deflated until ownership is
+ // dropped by the calling thread.
+ monitor = inflate(current, obj(), inflate_cause_notify);
+ }
monitor->notifyAll(CHECK);
}
// -----------------------------------------------------------------------------
// Hash Code handling
v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
current->_hashStateW = v;
value = v;
}
! value &= markWord::hash_mask;
if (value == 0) value = 0xBAD;
assert(value != markWord::no_hash, "invariant");
return value;
}
intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
while (true) {
ObjectMonitor* monitor = nullptr;
markWord temp, test;
intptr_t hash;
v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
current->_hashStateW = v;
value = v;
}
! value &= UseCompactObjectHeaders ? markWord::hash_mask_compact : markWord::hash_mask;
if (value == 0) value = 0xBAD;
assert(value != markWord::no_hash, "invariant");
return value;
}
+ // Public forwarder to the file-local ::get_next_hash() so that
+ // LightweightSynchronizer (in another translation unit) can generate
+ // hash codes with the same per-thread state and masking as this file.
+ intptr_t ObjectSynchronizer::get_next_hash(Thread* current, oop obj) {
+ // CLEANUP[Axel]: hack for LightweightSynchronizer being in different translation unit
+ return ::get_next_hash(current, obj);
+ }
+
intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ return LightweightSynchronizer::FastHashCode(current, obj);
+ }
while (true) {
ObjectMonitor* monitor = nullptr;
markWord temp, test;
intptr_t hash;
if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
// fast-locking case, see if lock is in current's lock stack
return current->lock_stack().contains(h_obj());
}
! if (mark.has_monitor()) {
// Inflated monitor so header points to ObjectMonitor (tagged pointer).
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
! ObjectMonitor* monitor = mark.monitor();
return monitor->is_entered(current) != 0;
}
// Unlocked case, header in place
assert(mark.is_unlocked(), "sanity check");
return false;
if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
// fast-locking case, see if lock is in current's lock stack
return current->lock_stack().contains(h_obj());
}
! while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
+ ObjectMonitor* monitor = LightweightSynchronizer::read_monitor(current, obj);
+ if (monitor != nullptr) {
+ return monitor->is_entered(current) != 0;
+ }
+ // Racing with inflation/deflation, retry
+ mark = obj->mark_acquire();
+
+ if (mark.is_fast_locked()) {
+ // Some other thread fast_locked, current could not have held the lock
+ return false;
+ }
+ }
+
+ if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
// Inflated monitor so header points to ObjectMonitor (tagged pointer).
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
! ObjectMonitor* monitor = read_monitor(mark);
return monitor->is_entered(current) != 0;
}
// Unlocked case, header in place
assert(mark.is_unlocked(), "sanity check");
return false;
// fast-locked so get owner from the object.
// owning_thread_from_object() may also return null here:
return Threads::owning_thread_from_object(t_list, h_obj());
}
! if (mark.has_monitor()) {
// Inflated monitor so header points to ObjectMonitor (tagged pointer).
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
! ObjectMonitor* monitor = mark.monitor();
assert(monitor != nullptr, "monitor should be non-null");
// owning_thread_from_monitor() may also return null here:
return Threads::owning_thread_from_monitor(t_list, monitor);
}
// fast-locked so get owner from the object.
// owning_thread_from_object() may also return null here:
return Threads::owning_thread_from_object(t_list, h_obj());
}
! while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) {
+ ObjectMonitor* monitor = LightweightSynchronizer::read_monitor(Thread::current(), obj);
+ if (monitor != nullptr) {
+ return Threads::owning_thread_from_monitor(t_list, monitor);
+ }
+ // Racing with inflation/deflation, retry
+ mark = obj->mark_acquire();
+
+ if (mark.is_fast_locked()) {
+ // Some other thread fast_locked
+ return Threads::owning_thread_from_object(t_list, h_obj());
+ }
+ }
+
+ if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) {
// Inflated monitor so header points to ObjectMonitor (tagged pointer).
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
! ObjectMonitor* monitor = read_monitor(mark);
assert(monitor != nullptr, "monitor should be non-null");
// owning_thread_from_monitor() may also return null here:
return Threads::owning_thread_from_monitor(t_list, monitor);
}
return false;
}
if (NoAsyncDeflationProgressMax != 0 &&
_no_progress_cnt >= NoAsyncDeflationProgressMax) {
double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
! size_t new_ceiling = ceiling + (size_t)((double)ceiling * remainder) + 1;
ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
log_info(monitorinflation)("Too many deflations without progress; "
"bumping in_use_list_ceiling from " SIZE_FORMAT
" to " SIZE_FORMAT, old_ceiling, new_ceiling);
_no_progress_cnt = 0;
return false;
}
if (NoAsyncDeflationProgressMax != 0 &&
_no_progress_cnt >= NoAsyncDeflationProgressMax) {
double remainder = (100.0 - MonitorUsedDeflationThreshold) / 100.0;
! size_t new_ceiling = ceiling / remainder + 1;
ObjectSynchronizer::set_in_use_list_ceiling(new_ceiling);
log_info(monitorinflation)("Too many deflations without progress; "
"bumping in_use_list_ceiling from " SIZE_FORMAT
" to " SIZE_FORMAT, old_ceiling, new_ceiling);
_no_progress_cnt = 0;
event->commit();
}
// Fast path code shared by multiple functions
// Inflate obj's lock to a full ObjectMonitor unless it is already inflated.
// Used as a shared fast-path check before unconditional inflation.
void ObjectSynchronizer::inflate_helper(oop obj) {
markWord mark = obj->mark_acquire();
if (mark.has_monitor()) {
// Already inflated; just sanity-check the displaced header.
! ObjectMonitor* monitor = mark.monitor();
markWord dmw = monitor->header();
assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
return;
}
(void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}
// Inflate obj's lock on behalf of the current thread.
// For LM_LIGHTWEIGHT the inflating JavaThread is passed through so that
// inflate_impl can transfer fast-lock ownership; otherwise nullptr is used.
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
assert(current == Thread::current(), "must be");
! if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
! return inflate_impl(JavaThread::cast(current), obj, cause);
- }
- return inflate_impl(nullptr, obj, cause);
}
// Inflate obj's lock on behalf of 'thread', which is either the current
// thread or a thread suspended for deoptimization (see the assert).
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
! return inflate_impl(thread, obj, cause);
! }
!
! ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
! // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
! // that the inflating_thread == Thread::current() or is suspended throughout the call by
- // some other mechanism.
- // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
- // JavaThread. (As may still be the case from FastHashCode). However it is only
- // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
- // is set when called from ObjectSynchronizer::enter from the owning thread,
- // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
EventJavaMonitorInflate event;
for (;;) {
const markWord mark = object->mark_acquire();
// The mark can be in one of the following states:
! // * inflated - Just return if using stack-locking.
- // If using fast-locking and the ObjectMonitor owner
- // is anonymous and the inflating_thread owns the
- // object lock, then we make the inflating_thread
- // the ObjectMonitor owner and remove the lock from
- // the inflating_thread's lock stack.
- // * fast-locked - Coerce it to inflated from fast-locked.
// * stack-locked - Coerce it to inflated from stack-locked.
// * INFLATING - Busy wait for conversion from stack-locked to
// inflated.
// * unlocked - Aggressively inflate the object.
// CASE: inflated
if (mark.has_monitor()) {
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
- if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
- inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
- inf->set_owner_from_anonymous(inflating_thread);
- size_t removed = inflating_thread->lock_stack().remove(object);
- inf->set_recursions(removed - 1);
- }
return inf;
}
! if (LockingMode != LM_LIGHTWEIGHT) {
! // New lightweight locking does not use INFLATING.
! // CASE: inflation in progress - inflating over a stack-lock.
! // Some other thread is converting from stack-locked to inflated.
! // Only that thread can complete inflation -- other threads must wait.
! // The INFLATING value is transient.
! // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
! // We could always eliminate polling by parking the thread on some auxiliary list.
! if (mark == markWord::INFLATING()) {
- read_stable_mark(object);
- continue;
- }
- }
-
- // CASE: fast-locked
- // Could be fast-locked either by the inflating_thread or by some other thread.
- //
- // Note that we allocate the ObjectMonitor speculatively, _before_
- // attempting to set the object's mark to the new ObjectMonitor. If
- // the inflating_thread owns the monitor, then we set the ObjectMonitor's
- // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
- // to anonymous. If we lose the race to set the object's mark to the
- // new ObjectMonitor, then we just delete it and loop around again.
- //
- LogStreamHandle(Trace, monitorinflation) lsh;
- if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
- ObjectMonitor* monitor = new ObjectMonitor(object);
- monitor->set_header(mark.set_unlocked());
- bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
- if (own) {
- // Owned by inflating_thread.
- monitor->set_owner_from(nullptr, inflating_thread);
- } else {
- // Owned by somebody else.
- monitor->set_owner_anonymous();
- }
- markWord monitor_mark = markWord::encode(monitor);
- markWord old_mark = object->cas_set_mark(monitor_mark, mark);
- if (old_mark == mark) {
- // Success! Return inflated monitor.
- if (own) {
- size_t removed = inflating_thread->lock_stack().remove(object);
- monitor->set_recursions(removed - 1);
- }
- // Once the ObjectMonitor is configured and object is associated
- // with the ObjectMonitor, it is safe to allow async deflation:
- _in_use_list.add(monitor);
-
- // Hopefully the performance counters are allocated on distinct
- // cache lines to avoid false sharing on MP systems ...
- OM_PERFDATA_OP(Inflations, inc());
- if (log_is_enabled(Trace, monitorinflation)) {
- ResourceMark rm;
- lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
- INTPTR_FORMAT ", type='%s'", p2i(object),
- object->mark().value(), object->klass()->external_name());
- }
- if (event.should_commit()) {
- post_monitor_inflate_event(&event, object, cause);
- }
- return monitor;
- } else {
- delete monitor;
- continue; // Interference -- just retry
- }
}
// CASE: stack-locked
// Could be stack-locked either by current or by some other thread.
//
event->commit();
}
// Fast path code shared by multiple functions
// Inflate obj's lock to a full ObjectMonitor unless it is already inflated.
// A no-op under LM_LIGHTWEIGHT, which never uses this inflation path.
void ObjectSynchronizer::inflate_helper(oop obj) {
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ return;
+ }
markWord mark = obj->mark_acquire();
if (mark.has_monitor()) {
// Already inflated; just sanity-check the displaced header.
! ObjectMonitor* monitor = read_monitor(mark);
markWord dmw = monitor->header();
assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
return;
}
(void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}
// Inflate obj's lock on behalf of the current thread.
// LM_LIGHTWEIGHT must not come through here; it uses LightweightSynchronizer
// (see the assert below).
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
assert(current == Thread::current(), "must be");
! assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate");
! return inflate_impl(obj, cause);
}
// Inflate obj's lock on behalf of 'thread', which is either the current
// thread or a thread suspended for deoptimization (see the assert).
// LM_LIGHTWEIGHT must not come through here; it uses LightweightSynchronizer.
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
! assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
! return inflate_impl(obj, cause);
! }
!
! ObjectMonitor* ObjectSynchronizer::inflate_impl(oop object, const InflateCause cause) {
! assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
EventJavaMonitorInflate event;
for (;;) {
const markWord mark = object->mark_acquire();
// The mark can be in one of the following states:
! // * inflated - Just return it.
// * stack-locked - Coerce it to inflated from stack-locked.
// * INFLATING - Busy wait for conversion from stack-locked to
// inflated.
// * unlocked - Aggressively inflate the object.
// CASE: inflated
if (mark.has_monitor()) {
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
return inf;
}
! // CASE: inflation in progress - inflating over a stack-lock.
! // Some other thread is converting from stack-locked to inflated.
! // Only that thread can complete inflation -- other threads must wait.
! // The INFLATING value is transient.
! // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
! // We could always eliminate polling by parking the thread on some auxiliary list.
! if (mark == markWord::INFLATING()) {
! read_stable_mark(object);
! continue;
}
// CASE: stack-locked
// Could be stack-locked either by current or by some other thread.
//
// ObjectMonitor into the mark. This was correct, but artificially lengthened
// the interval in which INFLATING appeared in the mark, thus increasing
// the odds of inflation contention. If we lose the race to set INFLATING,
// then we just delete the ObjectMonitor and loop around again.
//
if (LockingMode == LM_LEGACY && mark.has_locker()) {
- assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking");
ObjectMonitor* m = new ObjectMonitor(object);
// Optimistically prepare the ObjectMonitor - anticipate successful CAS
// We do this before the CAS in order to minimize the length of time
// in which INFLATING appears in the mark.
// ObjectMonitor into the mark. This was correct, but artificially lengthened
// the interval in which INFLATING appeared in the mark, thus increasing
// the odds of inflation contention. If we lose the race to set INFLATING,
// then we just delete the ObjectMonitor and loop around again.
//
+ LogStreamHandle(Trace, monitorinflation) lsh;
if (LockingMode == LM_LEGACY && mark.has_locker()) {
ObjectMonitor* m = new ObjectMonitor(object);
// Optimistically prepare the ObjectMonitor - anticipate successful CAS
// We do this before the CAS in order to minimize the length of time
// in which INFLATING appears in the mark.
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
MonitorList::Iterator iter = _in_use_list.iterator();
size_t deflated_count = 0;
while (iter.has_next()) {
if (deflated_count >= (size_t)MonitorDeflationMax) {
break;
}
ObjectMonitor* mid = iter.next();
! if (mid->deflate_monitor()) {
deflated_count++;
}
// Must check for a safepoint/handshake and honor it.
safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
//
size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) {
MonitorList::Iterator iter = _in_use_list.iterator();
size_t deflated_count = 0;
+ Thread* current = Thread::current();
while (iter.has_next()) {
if (deflated_count >= (size_t)MonitorDeflationMax) {
break;
}
ObjectMonitor* mid = iter.next();
! if (mid->deflate_monitor(current)) {
deflated_count++;
}
// Must check for a safepoint/handshake and honor it.
safepointer->block_for_safepoint("deflation", "deflated_count", deflated_count);
HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
// Per-thread callback run during the post-deflation handshake.
// NOTE(review): the '+' prefixes below are webrev diff markers from the
// rendered page, not C++ tokens — they mark the lines added by this change.
void do_thread(Thread* thread) {
log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
INTPTR_FORMAT, p2i(thread));
+ if (thread->is_Java_thread()) {
+ // Clear the JavaThread's ObjectMonitor cache — presumably so no
+ // stale pointers to just-deflated monitors survive the handshake;
+ // confirm against om_clear_monitor_cache()'s definition.
+ JavaThread* jt = JavaThread::cast(thread);
+ jt->om_clear_monitor_cache();
+ }
}
};
class VM_RendezvousGCThreads : public VM_Operation {
public:
if (deflated_count > 0) {
ResourceMark rm(current);
GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer);
+ #ifdef ASSERT
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ for (ObjectMonitor* monitor : delete_list) {
+ assert(!LightweightSynchronizer::contains_monitor(current, monitor), "Should have been removed");
+ }
+ }
+ #endif
+
log.before_handshake(unlinked_count);
// A JavaThread needs to handshake in order to safely free the
// ObjectMonitors that were deflated in this cycle.
HandshakeForDeflation hfd_hc;
if (n->owner_is_DEFLATER_MARKER()) {
// This could happen when monitor deflation blocks for a safepoint.
return;
}
! if (n->header().value() == 0) {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
"have non-null _header field.", p2i(n));
*error_cnt_p = *error_cnt_p + 1;
}
const oop obj = n->object_peek();
! if (obj != nullptr) {
! const markWord mark = obj->mark();
! if (!mark.has_monitor()) {
! out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
! "object does not think it has a monitor: obj="
! INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
! p2i(obj), mark.value());
! *error_cnt_p = *error_cnt_p + 1;
! }
! ObjectMonitor* const obj_mon = mark.monitor();
! if (n != obj_mon) {
! out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
! "object does not refer to the same monitor: obj="
! INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
! INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
! *error_cnt_p = *error_cnt_p + 1;
! }
}
}
// Log details about ObjectMonitors on the in_use_list. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
if (n->owner_is_DEFLATER_MARKER()) {
// This could happen when monitor deflation blocks for a safepoint.
return;
}
!
+ if (n->header_value() == 0) {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
"have non-null _header field.", p2i(n));
*error_cnt_p = *error_cnt_p + 1;
}
+
const oop obj = n->object_peek();
! if (obj == nullptr) {
! return;
! }
!
! const markWord mark = obj->mark();
! if (!mark.has_monitor()) {
! out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
! "object does not think it has a monitor: obj="
! INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
! p2i(obj), mark.value());
! *error_cnt_p = *error_cnt_p + 1;
! return;
! }
!
! ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark);
! if (n != obj_mon) {
! out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
+ "object does not refer to the same monitor: obj="
+ INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
+ INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
+ *error_cnt_p = *error_cnt_p + 1;
}
}
// Log details about ObjectMonitors on the in_use_list. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
};
monitors_iterate([&](ObjectMonitor* monitor) {
if (is_interesting(monitor)) {
const oop obj = monitor->object_peek();
! const markWord mark = monitor->header();
ResourceMark rm;
out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
! monitor->is_busy(), mark.hash() != 0, monitor->owner() != nullptr,
p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
if (monitor->is_busy()) {
out->print(" (%s)", monitor->is_busy_to_string(&ss));
ss.reset();
}
};
monitors_iterate([&](ObjectMonitor* monitor) {
if (is_interesting(monitor)) {
const oop obj = monitor->object_peek();
! const intptr_t hash = LockingMode == LM_LIGHTWEIGHT ? monitor->hash_lightweight_locking() : monitor->header().hash();
ResourceMark rm;
out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
! monitor->is_busy(), hash != 0, monitor->owner() != nullptr,
p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
if (monitor->is_busy()) {
out->print(" (%s)", monitor->is_busy_to_string(&ss));
ss.reset();
}
< prev index next >