src/hotspot/share/runtime/objectMonitor.cpp
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
+ #include "runtime/lightweightSynchronizer.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safefetch.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
+ #include "runtime/synchronizer.hpp"
#include "services/threadService.hpp"
+ #include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_JFR
}
#endif // ASSERT
}
ObjectMonitor::ObjectMonitor(oop object) :
- _header(markWord::zero()),
+ _metadata(0),
_object(_oop_storage, object),
_owner(nullptr),
_previous_owner_tid(0),
_next_om(nullptr),
_recursions(0),
oop ObjectMonitor::object() const {
check_object_context();
return _object.resolve();
}
- oop ObjectMonitor::object_peek() const {
- return _object.peek();
- }
-
void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
if (current->is_suspended()) {
_om->_recursions = 0;
_om->_succ = nullptr;
// Don't need a full fence after clearing successor here because of the call to exit().
OrderAccess::fence(); // always do a full fence when successor is cleared
}
}
}
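+ // Checks that the object's mark word encodes this monitor. With
+ // UseObjectMonitorTable the monitor is found through a table rather than the
+ // mark word, so the check only applies when the table is not in use.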
+ #define assert_mark_word_consistency() \
+ assert(UseObjectMonitorTable || object()->mark() == markWord::encode(this), \
+ "object mark must match encoded this: mark=" INTPTR_FORMAT \
+ ", encoded this=" INTPTR_FORMAT, object()->mark().value(), \
+ markWord::encode(this).value());
+
// -----------------------------------------------------------------------------
// Enter support
- bool ObjectMonitor::enter_for(JavaThread* locking_thread) {
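+ // Returns true if this monitor has lost the race with async deflation, in
+ // which case the caller must retry. Without the object monitor table we also
+ // attempt to restore the displaced mark word so that the retry happens at
+ // most once if the deflater thread is slow.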
+ bool ObjectMonitor::enter_is_async_deflating() {
+ if (is_being_async_deflated()) {
+ if (!UseObjectMonitorTable) {
+ const oop l_object = object();
+ if (l_object != nullptr) {
+ // Attempt to restore the header/dmw to the object's header so that
+ // we only retry once if the deflater thread happens to be slow.
+ install_displaced_markword_in_object(l_object);
+ }
+ }
+ return true;
+ }
+
+ return false;
+ }
+
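+ // Enters the monitor on behalf of locking_thread while the caller already
+ // holds a contention reference (contention_mark), so this monitor cannot be
+ // async deflated while we install locking_thread as the owner.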
+ void ObjectMonitor::enter_for_with_contention_mark(JavaThread* locking_thread, ObjectMonitorContentionMark& contention_mark) {
// Used by ObjectSynchronizer::enter_for to enter for another thread.
// The monitor is private to or already owned by locking_thread which must be suspended.
// So this code may only contend with deflation.
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
+ assert(contention_mark._monitor == this, "must be");
+ assert(!is_being_async_deflated(), "must be");
- // Block out deflation as soon as possible.
- add_to_contentions(1);
+
+ void* prev_owner = try_set_owner_from(nullptr, locking_thread);
bool success = false;
- if (!is_being_async_deflated()) {
- void* prev_owner = try_set_owner_from(nullptr, locking_thread);
- if (prev_owner == nullptr) {
- assert(_recursions == 0, "invariant");
- success = true;
- } else if (prev_owner == locking_thread) {
- _recursions++;
- success = true;
- } else if (prev_owner == DEFLATER_MARKER) {
- // Racing with deflation.
- prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
- if (prev_owner == DEFLATER_MARKER) {
- // Cancelled deflation. Increment contentions as part of the deflation protocol.
- add_to_contentions(1);
- success = true;
- } else if (prev_owner == nullptr) {
- // At this point we cannot race with deflation as we have both incremented
- // contentions, seen contention > 0 and seen a DEFLATER_MARKER.
- // success will only be false if this races with something other than
- // deflation.
- prev_owner = try_set_owner_from(nullptr, locking_thread);
- success = prev_owner == nullptr;
- }
- } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) {
- assert(_recursions == 0, "must be");
- _recursions = 1;
- set_owner_from_BasicLock(prev_owner, locking_thread);
+ if (prev_owner == nullptr) {
+ assert(_recursions == 0, "invariant");
+ success = true;
+ } else if (prev_owner == locking_thread) {
+ _recursions++;
+ success = true;
+ } else if (prev_owner == DEFLATER_MARKER) {
+ // Racing with deflation.
+ prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
+ if (prev_owner == DEFLATER_MARKER) {
+ // Cancelled deflation. Increment contentions as part of the deflation protocol.
+ add_to_contentions(1);
success = true;
+ } else if (prev_owner == nullptr) {
+ // At this point we cannot race with deflation as we have both incremented
+ // contentions, seen contention > 0 and seen a DEFLATER_MARKER.
+ // success will only be false if this races with something other than
+ // deflation.
+ prev_owner = try_set_owner_from(nullptr, locking_thread);
+ success = prev_owner == nullptr;
}
- assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT
- ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT,
- p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner));
- } else {
- // Async deflation is in progress and our contentions increment
- // above lost the race to async deflation. Undo the work and
- // force the caller to retry.
- const oop l_object = object();
- if (l_object != nullptr) {
- // Attempt to restore the header/dmw to the object's header so that
- // we only retry once if the deflater thread happens to be slow.
- install_displaced_markword_in_object(l_object);
- }
+ } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) {
+ assert(_recursions == 0, "must be");
+ _recursions = 1;
+ set_owner_from_BasicLock(prev_owner, locking_thread);
+ success = true;
}
+ assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT
+ ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT,
+ p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner));
+ }
+
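+ // Enters the monitor on behalf of locking_thread: block deflation with a
+ // contention mark, return false if async deflation has already won the race,
+ // otherwise take ownership for the thread.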
+ bool ObjectMonitor::enter_for(JavaThread* locking_thread) {
- add_to_contentions(-1);
+ // Block out deflation as soon as possible.
+ ObjectMonitorContentionMark contention_mark(this);
- assert(!success || owner_raw() == locking_thread, "must be");
+ // Check for deflation.
+ if (enter_is_async_deflating()) {
+ return false;
+ }
- return success;
+ enter_for_with_contention_mark(locking_thread, contention_mark);
+ assert(owner_raw() == locking_thread, "must be");
+ return true;
}
- bool ObjectMonitor::enter(JavaThread* current) {
- assert(current == JavaThread::current(), "must be");
- // The following code is ordered to check the most common cases first
- // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
-
- void* cur = try_set_owner_from(nullptr, current);
- if (cur == nullptr) {
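+ // Non-blocking enter attempt: succeeds for an uncontended monitor, a
+ // recursive enter by the current owner, or (LM_LEGACY) a BasicLock already
+ // owned by current; otherwise returns false without parking.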
+ bool ObjectMonitor::try_enter(JavaThread* current) {
+ // TryLock avoids the CAS
+ TryLockResult r = TryLock(current);
+ if (r == TryLockResult::Success) {
assert(_recursions == 0, "invariant");
return true;
}
- if (cur == current) {
- // TODO-FIXME: check for integer overflow! BUGID 6557169.
+ if (r == TryLockResult::HasOwner && owner() == current) {
_recursions++;
return true;
}
- if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) {
+ void* cur = owner_raw();
+ if (LockingMode == LM_LEGACY && current->is_lock_owned((address)cur)) {
assert(_recursions == 0, "internal state error");
_recursions = 1;
set_owner_from_BasicLock(cur, current); // Convert from BasicLock* to Thread*.
return true;
}
+ return false;
+ }
+
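+ // Fast-path enter: try_enter first, then a deflation check, then one round
+ // of spinning via TrySpin. Returns true only if the lock was acquired
+ // without blocking.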
+ bool ObjectMonitor::spin_enter(JavaThread* current) {
+ assert(current == JavaThread::current(), "must be");
+
+ // Check for recursion.
+ if (try_enter(current)) {
+ return true;
+ }
+
+ // Check for deflation.
+ if (enter_is_async_deflating()) {
+ return false;
+ }
+
// We've encountered genuine contention.
- // Try one round of spinning *before* enqueueing current
- // and before going through the awkward and expensive state
- // transitions. The following spin is strictly optional ...
+ // Do one round of spinning.
// Note that if we acquire the monitor from an initial spin
// we forgo posting JVMTI events and firing DTRACE probes.
if (TrySpin(current)) {
assert(owner_raw() == current, "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw()));
assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);
- assert(object()->mark() == markWord::encode(this),
- "object mark must match encoded this: mark=" INTPTR_FORMAT
- ", encoded this=" INTPTR_FORMAT, object()->mark().value(),
- markWord::encode(this).value());
+ assert_mark_word_consistency();
+ return true;
+ }
+
+ return false;
+ }
+
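+ // Full monitor enter: take the spinning fast path first; on failure, pin the
+ // monitor with a contention mark and go through the contended slow path.
+ // Returns false only if this monitor is being async deflated and the caller
+ // must retry.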
+ bool ObjectMonitor::enter(JavaThread* current) {
+ assert(current == JavaThread::current(), "must be");
+
+ if (spin_enter(current)) {
return true;
}
assert(owner_raw() != current, "invariant");
assert(_succ != current, "invariant");
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(current->thread_state() != _thread_blocked, "invariant");
- // Keep track of contention for JVM/TI and M&M queries.
- add_to_contentions(1);
- if (is_being_async_deflated()) {
- // Async deflation is in progress and our contentions increment
- // above lost the race to async deflation. Undo the work and
- // force the caller to retry.
- const oop l_object = object();
- if (l_object != nullptr) {
- // Attempt to restore the header/dmw to the object's header so that
- // we only retry once if the deflater thread happens to be slow.
- install_displaced_markword_in_object(l_object);
- }
- add_to_contentions(-1);
+ // Keep is_being_async_deflated stable across the rest of enter
+ ObjectMonitorContentionMark contention_mark(this);
+
+ // Check for deflation.
+ if (enter_is_async_deflating()) {
return false;
}
+ // At this point this ObjectMonitor cannot be deflated, finish contended enter
+ enter_with_contention_mark(current, contention_mark);
+ return true;
+ }
+
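+ // Contended enter slow path. The caller holds a contention mark for this
+ // monitor, so it cannot be async deflated while the current thread blocks to
+ // acquire it.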
+ void ObjectMonitor::enter_with_contention_mark(JavaThread *current, ObjectMonitorContentionMark &cm) {
+ assert(current == JavaThread::current(), "must be");
+ assert(owner_raw() != current, "must be");
+ assert(cm._monitor == this, "must be");
+ assert(!is_being_async_deflated(), "must be");
+
JFR_ONLY(JfrConditionalFlush<EventJavaMonitorEnter> flush(current);)
EventJavaMonitorEnter event;
if (event.is_started()) {
event.set_monitorClass(object()->klass());
// Set an address that is 'unique enough', such that events close in
// We've just gotten past the enter-check-for-suspend dance and we now own
// the monitor free and clear.
}
- add_to_contentions(-1);
assert(contentions() >= 0, "must not be negative: contentions=%d", contentions());
// Must either set _recursions = 0 or ASSERT _recursions == 0.
assert(_recursions == 0, "invariant");
assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
- assert(object()->mark() == markWord::encode(this), "invariant");
+ assert_mark_word_consistency();
// The thread -- now the owner -- is back in vm mode.
// Report the glorious news via TI,DTrace and jvmstat.
// The probe effect is non-trivial. All the reportage occurs
// while we hold the monitor, increasing the length of the critical
if (event.should_commit()) {
event.set_previousOwner(_previous_owner_tid);
event.commit();
}
OM_PERFDATA_OP(ContendedLockAttempts, inc());
- return true;
}
// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.
//
// The ObjectMonitor has been successfully async deflated when:
// (contentions < 0)
// Contending threads that see that condition know to retry their operation.
//
- bool ObjectMonitor::deflate_monitor() {
+ bool ObjectMonitor::deflate_monitor(Thread* current) {
if (is_busy()) {
// Easy checks are first - the ObjectMonitor is busy so no deflation.
return false;
}
log_trace(monitorinflation)("deflate_monitor: object=" INTPTR_FORMAT
", mark=" INTPTR_FORMAT ", type='%s'",
p2i(obj), obj->mark().value(),
obj->klass()->external_name());
}
+ }
- // Install the old mark word if nobody else has already done it.
- install_displaced_markword_in_object(obj);
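+ // With the object monitor table there is no displaced mark word to restore;
+ // deflating the table state is delegated to LightweightSynchronizer.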
+ if (UseObjectMonitorTable) {
+ LightweightSynchronizer::deflate_monitor(current, obj, this);
+ } else {
+ if (obj != nullptr) {
+ // Install the old mark word if nobody else has already done it.
+ install_displaced_markword_in_object(obj);
+ }
}
// We leave owner == DEFLATER_MARKER and contentions < 0
// to force any racing threads to retry.
return true; // Success, ObjectMonitor has been deflated.
// into the header of the object associated with the monitor. This
// idempotent method is called by a thread that is deflating a
// monitor and by other threads that have detected a race with the
// deflation process.
void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
+ assert(!UseObjectMonitorTable, "Lightweight has no dmw");
// This function must only be called when (owner == DEFLATER_MARKER
// && contentions <= 0), but we can't guarantee that here because
// those values could change when the ObjectMonitor gets moved from
// the global free list to a per-thread free list.
//
// In the future we should reconcile EnterI() and ReenterI().
void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) {
assert(current != nullptr, "invariant");
+ assert(current->thread_state() != _thread_blocked, "invariant");
assert(currentNode != nullptr, "invariant");
assert(currentNode->_thread == current, "invariant");
assert(_waiters > 0, "invariant");
- assert(object()->mark() == markWord::encode(this), "invariant");
-
- assert(current->thread_state() != _thread_blocked, "invariant");
+ assert_mark_word_consistency();
for (;;) {
ObjectWaiter::TStates v = currentNode->TState;
guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
assert(owner_raw() != current, "invariant");
// EntryList is stable and cxq is prepend-only.
// The head of cxq is volatile but the interior is stable.
// In addition, current.TState is stable.
assert(owner_raw() == current, "invariant");
- assert(object()->mark() == markWord::encode(this), "invariant");
+ assert_mark_word_consistency();
UnlinkAfterAcquire(current, currentNode);
if (_succ == current) _succ = nullptr;
assert(_succ != current, "invariant");
currentNode->TState = ObjectWaiter::TS_RUN;
OrderAccess::fence(); // see comments at the end of EnterI()
_waiters--; // decrement the number of waiters
// Verify a few postconditions
assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
- assert(object()->mark() == markWord::encode(this), "invariant");
+ assert_mark_word_consistency();
// check if the notification happened
if (!WasNotified) {
// no, it could be timeout or Thread.interrupt() or both
// check for interrupt event, otherwise it is timeout
#ifdef ASSERT
// Print the ObjectMonitor like a debugger would:
//
// (ObjectMonitor) 0x00007fdfb6012e40 = {
- // _header = 0x0000000000000001
+ // _metadata = 0x0000000000000001
// _object = 0x000000070ff45fd0
// _pad_buf0 = {
// [0] = '\0'
// ...
// [43] = '\0'
// _WaitSetLock = 0
// }
//
void ObjectMonitor::print_debug_style_on(outputStream* st) const {
st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
- st->print_cr(" _header = " INTPTR_FORMAT, header().value());
+ st->print_cr(" _metadata = " INTPTR_FORMAT, _metadata);
st->print_cr(" _object = " INTPTR_FORMAT, p2i(object_peek()));
st->print_cr(" _pad_buf0 = {");
st->print_cr(" [0] = '\\0'");
st->print_cr(" ...");
st->print_cr(" [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);