src/hotspot/share/runtime/synchronizer.cpp
if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
// Racing with inflation/deflation go slow path
return false;
}
assert(mon->object() == oop(obj), "invariant");
! if (mon->owner() != current) return false; // slow-path for IMS exception
if (mon->first_waiter() != nullptr) {
// We have one or more waiters. Since this is an inflated monitor
// that we own, we can transfer one or more threads from the waitset
// to the entrylist here and now, avoiding the slow-path.
if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) {
// Racing with inflation/deflation go slow path
return false;
}
assert(mon->object() == oop(obj), "invariant");
! if (!mon->has_owner(current)) return false; // slow-path for IMS exception
if (mon->first_waiter() != nullptr) {
// We have one or more waiters. Since this is an inflated monitor
// that we own, we can transfer one or more threads from the waitset
// to the entrylist here and now, avoiding the slow-path.
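Illustrative sketch (not part of the patch): the waitset-to-entrylist transfer described in the comment above, reduced to a toy monitor with invented names. Only the owning thread may perform the transfer, which is why the fast path first verifies ownership.

#include <cassert>
#include <deque>
#include <thread>

struct ToyWaitMonitor {
  std::thread::id owner{};     // default-constructed id == unowned
  std::deque<int> wait_set;    // ids of threads parked in wait()
  std::deque<int> entry_list;  // ids of threads queued to re-acquire the lock

  // notify(): move one waiter to the entry list. Legal only while the caller
  // owns the monitor, so no additional synchronization is needed here.
  void notify_one() {
    assert(owner == std::this_thread::get_id());
    if (!wait_set.empty()) {
      entry_list.push_back(wait_set.front());
      wait_set.pop_front();
    }
  }
};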
// the ObjectMonitor busy by setting the owner below. If we detect
// that race we just bail out to the slow-path here.
if (m->object_peek() == nullptr) {
return false;
}
- JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
// Lock contention and Transactional Lock Elision (TLE) diagnostics
// and observability
// Case: light contention possibly amenable to TLE
// Case: TLE inimical operations such as nested/recursive synchronization
! if (owner == current) {
m->_recursions++;
current->inc_held_monitor_count();
return true;
}
// the ObjectMonitor busy by setting the owner below. If we detect
// that race we just bail out to the slow-path here.
if (m->object_peek() == nullptr) {
return false;
}
// Lock contention and Transactional Lock Elision (TLE) diagnostics
// and observability
// Case: light contention possibly amenable to TLE
// Case: TLE inimical operations such as nested/recursive synchronization
! if (m->has_owner(current)) {
m->_recursions++;
current->inc_held_monitor_count();
return true;
}
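Illustrative sketch (not part of the patch): the recursive re-entry fast path above, in miniature. ToyRecursiveMonitor and its fields are invented; the point is that an owner re-entering its own monitor only needs to bump a recursion counter.

#include <atomic>
#include <cstdint>
#include <thread>

struct ToyRecursiveMonitor {
  std::atomic<std::thread::id> owner{};
  int64_t recursions = 0;

  bool try_reenter() {
    // Only the current owner can take this branch, so a plain increment of
    // recursions is safe; any other thread must fall back to the slow path.
    if (owner.load(std::memory_order_relaxed) == std::this_thread::get_id()) {
      recursions++;
      return true;
    }
    return false;
  }
};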
// stack-locking in the object's header, the second check is for
// recursive stack-locking in the displaced header in the BasicLock,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
lock->set_displaced_header(markWord::unused_mark());
! if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
assert(m->_recursions == 0, "invariant");
current->inc_held_monitor_count();
return true;
}
}
// stack-locking in the object's header, the second check is for
// recursive stack-locking in the displaced header in the BasicLock,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
lock->set_displaced_header(markWord::unused_mark());
! if (!m->has_owner() && m->try_set_owner(current)) {
assert(m->_recursions == 0, "invariant");
current->inc_held_monitor_count();
return true;
}
}
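Illustrative sketch (not part of the patch): the "monitor looks unowned, try to claim it" step, written with std::atomic instead of HotSpot's try_set_owner helper. On CAS failure the caller would continue to the slow path.

#include <atomic>
#include <thread>

struct ToyCasMonitor {
  std::atomic<std::thread::id> owner{};   // default-constructed id == unowned

  bool try_enter() {
    std::thread::id unowned{};
    // Acquire ordering on success so the new owner sees all writes made
    // under the lock by the previous owner.
    return owner.compare_exchange_strong(unowned, std::this_thread::get_id(),
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed);
  }
};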
// We have to take the slow-path of possible inflation and then exit.
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
! assert(!monitor->is_owner_anonymous(), "must not be");
monitor->exit(current);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, current);
}
// the current locking is from JNI instead of Java code
// We have to take the slow-path of possible inflation and then exit.
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
! assert(!monitor->has_anonymous_owner(), "must not be");
monitor->exit(current);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
+ // Top native frames in the stack will not be seen if we attempt
+ // preemption, since we start walking from the last Java anchor.
+ NoPreemptMark npm(current);
+
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, current);
}
// the current locking is from JNI instead of Java code
}
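Illustrative sketch (not part of the patch): the general shape of a scoped "no preemption" guard like the NoPreemptMark added above. The counter-based nesting and the ToyThread type are assumptions; HotSpot's real NoPreemptMark is not reproduced here.

struct ToyThread {
  int no_preempt_depth = 0;    // > 0 means preemption is currently disallowed
};

class ScopedNoPreempt {
  ToyThread* _thread;
 public:
  explicit ScopedNoPreempt(ToyThread* t) : _thread(t) { _thread->no_preempt_depth++; }
  ~ScopedNoPreempt() { _thread->no_preempt_depth--; }
  ScopedNoPreempt(const ScopedNoPreempt&) = delete;
  ScopedNoPreempt& operator=(const ScopedNoPreempt&) = delete;
};

// Usage: declare the guard at the top of the scope that must not be preempted,
// as jni_enter declares NoPreemptMark npm(current) above.
void toy_jni_enter(ToyThread* current) {
  ScopedNoPreempt npm(current);
  // ... perform the enter while preemption stays disabled ...
}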
// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
! ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
_thread = thread;
_thread->check_for_valid_safepoint_state();
_obj = obj;
if (_obj() != nullptr) {
}
// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
! ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) : _npm(thread) {
_thread = thread;
_thread->check_for_valid_safepoint_state();
_obj = obj;
if (_obj() != nullptr) {
markWord mark = read_stable_mark(obj);
if (LockingMode == LM_LEGACY && mark.has_locker()) {
// stack-locked so header points into owner's stack.
// owning_thread_from_monitor_owner() may also return null here:
! return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
}
if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
// fast-locked so get owner from the object.
// owning_thread_from_object() may also return null here:
markWord mark = read_stable_mark(obj);
if (LockingMode == LM_LEGACY && mark.has_locker()) {
// stack-locked so header points into owner's stack.
// owning_thread_from_monitor_owner() may also return null here:
! return Threads::owning_thread_from_stacklock(t_list, (address) mark.locker());
}
if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
// fast-locked so get owner from the object.
// owning_thread_from_object() may also return null here:
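Illustrative sketch (not part of the patch): resolving the owner of a stack lock by asking which thread's stack contains the BasicLock address, which is the idea behind owning_thread_from_stacklock. The ToyStackThread type and the bare range test are invented and ignore details the real lookup handles.

#include <vector>

struct ToyStackThread {
  const char* name;
  const char* stack_base;   // highest stack address
  const char* stack_end;    // lowest stack address
  bool contains(const void* addr) const {
    const char* p = static_cast<const char*>(addr);
    return p >= stack_end && p < stack_base;
  }
};

const ToyStackThread* toy_owning_thread_from_stacklock(
    const std::vector<ToyStackThread>& threads, const void* locker) {
  for (const ToyStackThread& t : threads) {
    if (t.contains(locker)) {
      return &t;            // the displaced header lives on this thread's stack
    }
  }
  return nullptr;           // may legitimately be null, as the comments note
}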
// target thread is suspended or when the target thread is
// operating on itself. The current closures in use today are
// only interested in an owned ObjectMonitor and ownership
// cannot be dropped under the calling contexts so the
// ObjectMonitor cannot be async deflated.
! if (monitor->has_owner() && filter(monitor->owner_raw())) {
assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
closure->do_monitor(monitor);
}
});
}
// Iterate ObjectMonitors where the owner == thread; this does NOT include
// ObjectMonitors where owner is set to a stack-lock address in thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
! auto thread_filter = [&](void* owner) { return owner == thread; };
return owned_monitors_iterate_filtered(closure, thread_filter);
}
// Iterate ObjectMonitors owned by any thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
! auto all_filter = [&](void* owner) { return true; };
return owned_monitors_iterate_filtered(closure, all_filter);
}
static bool monitors_used_above_threshold(MonitorList* list) {
if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
// target thread is suspended or when the target thread is
// operating on itself. The current closures in use today are
// only interested in an owned ObjectMonitor and ownership
// cannot be dropped under the calling contexts so the
// ObjectMonitor cannot be async deflated.
! if (monitor->has_owner() && filter(monitor)) {
assert(!monitor->is_being_async_deflated(), "Owned monitors should not be deflating");
closure->do_monitor(monitor);
}
});
}
// Iterate ObjectMonitors where the owner == thread; this does NOT include
// ObjectMonitors where owner is set to a stack-lock address in thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
! int64_t key = ObjectMonitor::owner_from(thread);
+ auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
+ return owned_monitors_iterate_filtered(closure, thread_filter);
+ }
+
+ void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, oop vthread) {
+ int64_t key = ObjectMonitor::owner_from(vthread);
+ auto thread_filter = [&](ObjectMonitor* monitor) { return monitor->owner() == key; };
return owned_monitors_iterate_filtered(closure, thread_filter);
}
// Iterate ObjectMonitors owned by any thread.
void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
! auto all_filter = [&](ObjectMonitor* monitor) { return true; };
return owned_monitors_iterate_filtered(closure, all_filter);
}
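Illustrative sketch (not part of the patch): the filter-lambda pattern used by the owned_monitors_iterate overloads above, reduced to a vector of toy monitors. The owner key, filter signature, and closure type are assumptions.

#include <cstdint>
#include <functional>
#include <vector>

struct ToyOwnedMonitor {
  int64_t owner_key = 0;                    // 0 == unowned
  bool has_owner() const { return owner_key != 0; }
};

template <typename Filter>
void iterate_owned_filtered(std::vector<ToyOwnedMonitor>& monitors, Filter filter,
                            const std::function<void(ToyOwnedMonitor&)>& do_monitor) {
  for (ToyOwnedMonitor& m : monitors) {
    if (m.has_owner() && filter(m)) {
      do_monitor(m);
    }
  }
}

// Usage mirroring the overloads above: one filter keyed on a particular owner,
// one that accepts every owned monitor.
void toy_iterate_examples(std::vector<ToyOwnedMonitor>& monitors, int64_t key,
                          const std::function<void(ToyOwnedMonitor&)>& closure) {
  iterate_owned_filtered(monitors, [&](ToyOwnedMonitor& m) { return m.owner_key == key; }, closure);
  iterate_owned_filtered(monitors, [](ToyOwnedMonitor&) { return true; }, closure);
}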
static bool monitors_used_above_threshold(MonitorList* list) {
if (MonitorUsedDeflationThreshold == 0) { // disabled case is easy
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
assert(current == Thread::current(), "must be");
assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
! return inflate_impl(obj, cause);
}
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
! return inflate_impl(obj, cause);
}
! ObjectMonitor* ObjectSynchronizer::inflate_impl(oop object, const InflateCause cause) {
assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
EventJavaMonitorInflate event;
for (;;) {
const markWord mark = object->mark_acquire();
// The mark can be in one of the following states:
! // * inflated - Just return it.
// * stack-locked - Coerce it to inflated from stack-locked.
// * INFLATING - Busy wait for conversion from stack-locked to
// inflated.
// * unlocked - Aggressively inflate the object.
// CASE: inflated
if (mark.has_monitor()) {
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
return inf;
}
// CASE: inflation in progress - inflating over a stack-lock.
// Some other thread is converting from stack-locked to inflated.
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
assert(current == Thread::current(), "must be");
assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter");
! return inflate_impl(current->is_Java_thread() ? JavaThread::cast(current) : nullptr, obj, cause);
}
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for");
! return inflate_impl(thread, obj, cause);
}
! ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* locking_thread, oop object, const InflateCause cause) {
+ // The JavaThread* locking_thread requires that the locking_thread == Thread::current() or
+ // is suspended throughout the call by some other mechanism.
+ // The thread might be nullptr when called from a non JavaThread. (As may still be
+ // the case from FastHashCode). However it is only important for correctness that the
+ // thread is set when called from ObjectSynchronizer::enter from the owning thread,
+ // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl");
EventJavaMonitorInflate event;
for (;;) {
const markWord mark = object->mark_acquire();
// The mark can be in one of the following states:
! // * inflated - If the ObjectMonitor owner is anonymous and the
+ // locking_thread owns the object lock, then we
+ // make the locking_thread the ObjectMonitor owner.
// * stack-locked - Coerce it to inflated from stack-locked.
// * INFLATING - Busy wait for conversion from stack-locked to
// inflated.
// * unlocked - Aggressively inflate the object.
// CASE: inflated
if (mark.has_monitor()) {
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
+ if (inf->has_anonymous_owner() && locking_thread != nullptr) {
+ assert(LockingMode == LM_LEGACY, "invariant");
+ if (locking_thread->is_lock_owned((address)inf->stack_locker())) {
+ inf->set_stack_locker(nullptr);
+ inf->set_owner_from_anonymous(locking_thread);
+ }
+ }
return inf;
}
// CASE: inflation in progress - inflating over a stack-lock.
// Some other thread is converting from stack-locked to inflated.
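Illustrative sketch (not part of the patch): the "inflated" case above, where a monitor that is anonymously owned gets rebound to the locking thread that actually holds the stack lock. All types and the sentinel value are invented; the real set_owner_from_anonymous / stack_locker protocol is more involved.

#include <cstdint>

constexpr int64_t kAnonymousOwner = -1;

struct ToyAnonMonitor {
  int64_t owner = 0;              // 0 = unowned, kAnonymousOwner = anonymous
  const void* stack_locker = nullptr;
};

struct ToyLockingThread {
  int64_t id;
  const char* stack_base;         // highest stack address
  const char* stack_end;          // lowest stack address
  bool is_lock_owned(const void* addr) const {
    const char* p = static_cast<const char*>(addr);
    return p >= stack_end && p < stack_base;
  }
};

void toy_claim_if_anonymous(ToyAnonMonitor& m, ToyLockingThread* locking_thread) {
  if (m.owner == kAnonymousOwner && locking_thread != nullptr &&
      locking_thread->is_lock_owned(m.stack_locker)) {
    // The BasicLock on the caller's own stack holds the lock, so the caller
    // may take ownership of the monitor and drop the stack-locker reference.
    m.stack_locker = nullptr;
    m.owner = locking_thread->id;
  }
}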
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
// Setup monitor fields to proper values -- prepare the monitor
m->set_header(dmw);
- // Optimization: if the mark.locker stack address is associated
- // with this thread we could simply set m->_owner = current.
// Note that a thread can inflate an object
// that it has stack-locked -- as might happen in wait() -- directly
// with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
! m->set_owner_from(nullptr, mark.locker());
// TODO-FIXME: assert BasicLock->dhw != 0.
// Must preserve store ordering. The monitor state must
// be stable at the time of publishing the monitor address.
guarantee(object->mark() == markWord::INFLATING(), "invariant");
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
// Setup monitor fields to proper values -- prepare the monitor
m->set_header(dmw);
// Note that a thread can inflate an object
// that it has stack-locked -- as might happen in wait() -- directly
// with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
! if (locking_thread != nullptr && locking_thread->is_lock_owned((address)mark.locker())) {
+ m->set_owner(locking_thread);
+ } else {
+ // Use ANONYMOUS_OWNER to indicate that the owner is the BasicLock on the stack,
+ // and set the stack locker field in the monitor.
+ m->set_stack_locker(mark.locker());
+ m->set_anonymous_owner();
+ }
// TODO-FIXME: assert BasicLock->dhw != 0.
// Must preserve store ordering. The monitor state must
// be stable at the time of publishing the monitor address.
guarantee(object->mark() == markWord::INFLATING(), "invariant");
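Illustrative sketch (not part of the patch): the counterpart of the previous sketch, taken at inflation time. If the inflating thread owns the stack lock it becomes the monitor's owner directly; otherwise the monitor starts anonymously owned, with the stack locker recorded so the real owner can claim it later. Types are invented, mirroring the earlier toy definitions.

#include <cstdint>

constexpr int64_t kToyAnonymousOwner = -1;

struct ToyInflatedState {
  int64_t owner = 0;
  const void* stack_locker = nullptr;
};

struct ToyInflatingThread {
  int64_t id;
  const char* stack_base;
  const char* stack_end;
  bool is_lock_owned(const void* addr) const {
    const char* p = static_cast<const char*>(addr);
    return p >= stack_end && p < stack_base;
  }
};

void toy_install_owner_at_inflation(ToyInflatedState& m, const void* mark_locker,
                                    ToyInflatingThread* locking_thread) {
  if (locking_thread != nullptr && locking_thread->is_lock_owned(mark_locker)) {
    m.owner = locking_thread->id;      // inflating our own stack lock
  } else {
    m.stack_locker = mark_locker;      // someone else's BasicLock holds the lock
    m.owner = kToyAnonymousOwner;      // let that thread claim ownership later
  }
}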
if (is_interesting(monitor)) {
const oop obj = monitor->object_peek();
const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
ResourceMark rm;
out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
! monitor->is_busy(), hash != 0, monitor->owner() != nullptr,
p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
if (monitor->is_busy()) {
out->print(" (%s)", monitor->is_busy_to_string(&ss));
ss.reset();
}
if (is_interesting(monitor)) {
const oop obj = monitor->object_peek();
const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash();
ResourceMark rm;
out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor),
! monitor->is_busy(), hash != 0, monitor->has_owner(),
p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
if (monitor->is_busy()) {
out->print(" (%s)", monitor->is_busy_to_string(&ss));
ss.reset();
}