src/hotspot/share/runtime/synchronizer.cpp
*/
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/jfrEvents.hpp"
+ #include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
+ #include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
assert(current->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // slow-path for invalid obj
const markWord mark = obj->mark();
! if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Degenerate notify
// stack-locked by caller so by definition the implied waitset is empty.
return true;
}
assert(current->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // slow-path for invalid obj
const markWord mark = obj->mark();
! if ((mark.is_fast_locked() && current->lock_stack().contains(oop(obj))) ||
+ (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
// Degenerate notify
// stack-locked by caller so by definition the implied waitset is empty.
return true;
}
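A note on the "degenerate notify" above: Object.wait() always inflates the lock to a full ObjectMonitor, so an object that is still thin-locked (fast-locked or stack-locked) by the caller cannot have any waiters, and notify()/notifyAll() can succeed without doing any work. As an illustration (not patch code):

    // Why the thin-locked case is trivially correct (illustration only):
    //   synchronized (o) { o.notify(); }  // o still thin-locked: the wait
    //                                     // set is necessarily empty
    //   synchronized (o) { o.wait(); }    // wait() inflates first, so any
    //                                     // waiter implies an ObjectMonitor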
// performance reasons, stack walkers generally first check for
// Biased Locking in the object's header, the second check is for
// stack-locking in the object's header, the third check is for
// recursive stack-locking in the displaced header in the BasicLock,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
! lock->set_displaced_header(markWord::unused_mark());
if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
assert(m->_recursions == 0, "invariant");
return true;
}
// performance reasons, stack walkers generally first check for
// Biased Locking in the object's header, the second check is for
// stack-locking in the object's header, the third check is for
// recursive stack-locking in the displaced header in the BasicLock,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
! if (!UseFastLocking) {
+ lock->set_displaced_header(markWord::unused_mark());
+ }
if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
assert(m->_recursions == 0, "invariant");
return true;
}
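The ordering described in the comment can be read as a cascade over the mark word. A schematic sketch of that ordering; the function name and return values are invented for illustration, and the real stack walkers live elsewhere:

    // Schematic of the stack-walker check order described above.
    static const char* classify_lock(markWord mark, JavaThread* thread) {
      if (mark.has_bias_pattern()) return "biased";            // first check
      if (mark.has_locker() && thread->is_lock_owned((address)mark.locker())) {
        // Third check in the comment: a NULL displaced header in the
        // BasicLock distinguishes a recursive stack-lock (not shown here).
        return "stack-locked";                                 // second check
      }
      if (mark.has_monitor()) return "inflated";               // last check
      return "unlocked";
    }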
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, current);
}
! if (UseBiasedLocking) {
! BiasedLocking::revoke(current, obj);
! }
!
! markWord mark = obj->mark();
! assert(!mark.has_bias_pattern(), "should not see bias pattern here");
! if (mark.is_neutral()) {
! // Anticipate successful CAS -- the ST of the displaced mark must
! // be visible <= the ST performed by the CAS.
! lock->set_displaced_header(mark);
! if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
return;
}
! // Fall through to inflate() ...
! } else if (mark.has_locker() &&
! current->is_lock_owned((address)mark.locker())) {
! assert(lock != mark.locker(), "must not re-lock the same lock");
! assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
! lock->set_displaced_header(markWord::from_pointer(NULL));
- return;
}
- // The object header will never be displaced to this lock,
- // so it does not matter what the value is, except that it
- // must be non-zero to avoid looking like a re-entrant lock,
- // and must not look locked either.
- lock->set_displaced_header(markWord::unused_mark());
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, current);
}
! if (UseFastLocking) {
! LockStack& lock_stack = current->lock_stack();
!
! markWord header = obj()->mark_acquire();
! while (true) {
! if (header.is_neutral()) {
+ assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
+ // Try to swing into 'fast-locked' state without inflating.
+ markWord locked_header = header.set_fast_locked();
+ markWord witness = obj()->cas_set_mark(locked_header, header);
+ if (witness == header) {
+ // Successfully fast-locked, push object to lock-stack and return.
+ lock_stack.push(obj());
+ return;
+ }
+ // Otherwise retry.
+ header = witness;
+ } else {
+ // Fall-through to inflate-enter.
+ break;
+ }
+ }
+ } else {
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke(current, obj);
+ }
! markWord mark = obj->mark();
! if (mark.is_neutral()) {
! // Anticipate successful CAS -- the ST of the displaced mark must
! // be visible <= the ST performed by the CAS.
! lock->set_displaced_header(mark);
+ if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
+ return;
+ }
+ // Fall through to inflate() ...
+ } else if (mark.has_locker() &&
+ current->is_lock_owned((address) mark.locker())) {
+ assert(lock != mark.locker(), "must not re-lock the same lock");
+ assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
+ lock->set_displaced_header(markWord::from_pointer(NULL));
return;
}
!
! // The object header will never be displaced to this lock,
! // so it does not matter what the value is, except that it
! // must be non-zero to avoid looking like a re-entrant lock,
! // and must not look locked either.
! lock->set_displaced_header(markWord::unused_mark());
}
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
}
}
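The fast path above depends on the new per-thread LockStack (runtime/lockStack.inline.hpp, included at the top), whose definition is not part of this diff. The following is a minimal sketch consistent with the operations the patch uses (push, pop, remove, contains); the fixed capacity and field names are invented, and the real lock stack is also visited by the GC, which is omitted here:

    // Hypothetical sketch of the per-thread lock stack; illustrative only.
    class LockStack {
      static const int CAPACITY = 8;   // invented bound for the sketch
      oop _base[CAPACITY];
      int _top;
    public:
      LockStack() : _top(0) {}
      void push(oop o)  { assert(_top < CAPACITY, "overflow"); _base[_top++] = o; }
      oop  pop()        { assert(_top > 0, "underflow"); return _base[--_top]; }
      bool contains(oop o) const {
        for (int i = 0; i < _top; i++) if (_base[i] == o) return true;
        return false;
      }
      // Remove a possibly non-top entry (needed when a lock inflates
      // while other locks sit above it on the stack).
      void remove(oop o) {
        for (int i = 0; i < _top; i++) {
          if (_base[i] == o) {
            for (int j = i; j < _top - 1; j++) _base[j] = _base[j + 1];
            _top--;
            return;
          }
        }
      }
    };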
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
markWord mark = object->mark();
! // We cannot check for Biased Locking if we are racing an inflation.
! assert(mark == markWord::INFLATING() ||
! !mark.has_bias_pattern(), "should not see bias pattern here");
!
! markWord dhw = lock->displaced_header();
! if (dhw.value() == 0) {
! // If the displaced header is NULL, then this exit matches up with
! // a recursive enter. No real work to do here except for diagnostics.
! #ifndef PRODUCT
! if (mark != markWord::INFLATING()) {
! // Only do diagnostics if we are not racing an inflation. Simply
! // exiting a recursive enter of a Java Monitor that is being
- // inflated is safe; see the has_monitor() comment below.
- assert(!mark.is_neutral(), "invariant");
- assert(!mark.has_locker() ||
- current->is_lock_owned((address)mark.locker()), "invariant");
- if (mark.has_monitor()) {
- // The BasicLock's displaced_header is marked as a recursive
- // enter and we have an inflated Java Monitor (ObjectMonitor).
- // This is a special case where the Java Monitor was inflated
- // after this thread entered the stack-lock recursively. When a
- // Java Monitor is inflated, we cannot safely walk the Java
- // Monitor owner's stack and update the BasicLocks because a
- // Java Monitor can be asynchronously inflated by a thread that
- // does not own the Java Monitor.
- ObjectMonitor* m = mark.monitor();
- assert(m->object()->mark() == mark, "invariant");
- assert(m->is_entered(current), "invariant");
}
}
#endif
- return;
- }
-
- if (mark == markWord::from_pointer(lock)) {
- // If the object is stack-locked by the current thread, try to
- // swing the displaced header from the BasicLock back to the mark.
- assert(dhw.is_neutral(), "invariant");
- if (object->cas_set_mark(dhw, mark) == mark) {
return;
}
}
// We have to take the slow-path of possible inflation and then exit.
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
monitor->exit(current);
}
// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
}
}
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
markWord mark = object->mark();
! if (UseFastLocking) {
! if (mark.is_fast_locked()) {
! markWord unlocked_header = mark.set_unlocked();
! markWord witness = object->cas_set_mark(unlocked_header, mark);
! if (witness != mark) {
!       // Another thread beat us; it can only have installed an anonymously locked monitor at this point.
!       // Fetch that monitor, set its owner to this thread, and exit it (allowing waiting threads to enter).
! assert(witness.has_monitor(), "must have monitor");
! ObjectMonitor* monitor = witness.monitor();
! assert(monitor->is_owner_anonymous(), "must be anonymous owner");
! monitor->set_owner_from_anonymous(current);
! monitor->exit(current);
}
+ LockStack& lock_stack = current->lock_stack();
+ lock_stack.remove(object);
+ return;
}
+ } else {
+ markWord dhw = lock->displaced_header();
+ if (dhw.value() == 0) {
+ // If the displaced header is NULL, then this exit matches up with
+ // a recursive enter. No real work to do here except for diagnostics.
+ #ifndef PRODUCT
+ if (mark != markWord::INFLATING()) {
+ // Only do diagnostics if we are not racing an inflation. Simply
+ // exiting a recursive enter of a Java Monitor that is being
+ // inflated is safe; see the has_monitor() comment below.
+ assert(!mark.is_neutral(), "invariant");
+ assert(!mark.has_locker() ||
+ current->is_lock_owned((address)mark.locker()), "invariant");
+ if (mark.has_monitor()) {
+ // The BasicLock's displaced_header is marked as a recursive
+ // enter and we have an inflated Java Monitor (ObjectMonitor).
+ // This is a special case where the Java Monitor was inflated
+ // after this thread entered the stack-lock recursively. When a
+ // Java Monitor is inflated, we cannot safely walk the Java
+ // Monitor owner's stack and update the BasicLocks because a
+ // Java Monitor can be asynchronously inflated by a thread that
+ // does not own the Java Monitor.
+ ObjectMonitor* m = mark.monitor();
+ assert(m->object()->mark() == mark, "invariant");
+ assert(m->is_entered(current), "invariant");
+ }
+ }
#endif
return;
}
+
+ if (mark == markWord::from_pointer(lock)) {
+ // If the object is stack-locked by the current thread, try to
+ // swing the displaced header from the BasicLock back to the mark.
+ assert(dhw.is_neutral(), "invariant");
+ if (object->cas_set_mark(dhw, mark) == mark) {
+ return;
+ }
+ }
}
// We have to take the slow-path of possible inflation and then exit.
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
+ if (UseFastLocking && monitor->is_owner_anonymous()) {
+ // It must be us. Pop lock object from lock stack.
+ LockStack& lock_stack = current->lock_stack();
+ oop popped = lock_stack.pop();
+ assert(popped == object, "must be owned by this thread");
+ monitor->set_owner_from_anonymous(current);
+ }
monitor->exit(current);
}
// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
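For orientation, these are the mark-word states the new enter()/exit() paths distinguish. The unlocked and monitor encodings and the INFLATING sentinel are long-standing HotSpot conventions; the fast_locked encoding is inferred from set_fast_locked()/is_fast_locked() above and is an assumption, not something this diff spells out:

    // Assumed mark-word states relevant to this patch (low two bits shown):
    //   unlocked     [hash | age | 01]   neutral; enter() CASes away from this
    //   fast_locked  [hash | age | 00]   owner found via per-thread lock stacks
    //   monitor      [ObjectMonitor* 10] inflated
    //   INFLATING    all zero            legacy protocol; unused when
    //                                    UseFastLocking is set (see inflate())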
BiasedLocking::revoke(current, obj);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
markWord mark = obj->mark();
! if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
BiasedLocking::revoke(current, obj);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
markWord mark = obj->mark();
! if ((mark.is_fast_locked() && current->lock_stack().contains(obj())) ||
+ (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
// Not inflated so there can't be any waiters to notify.
return;
}
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
BiasedLocking::revoke(current, obj);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
markWord mark = obj->mark();
! if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
BiasedLocking::revoke(current, obj);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
markWord mark = obj->mark();
! if ((mark.is_fast_locked() && current->lock_stack().contains(obj())) ||
+ (mark.has_locker() && current->is_lock_owned((address)mark.locker()))) {
// Not inflated so there can't be any waiters to notify.
return;
}
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
static SharedGlobals GVars;
static markWord read_stable_mark(oop obj) {
markWord mark = obj->mark();
! if (!mark.is_being_inflated()) {
return mark; // normal fast-path return
}
int its = 0;
for (;;) {
static SharedGlobals GVars;
static markWord read_stable_mark(oop obj) {
markWord mark = obj->mark();
! if (!mark.is_being_inflated() || UseFastLocking) {
return mark; // normal fast-path return
}
int its = 0;
for (;;) {
if (value == 0) value = 0xBAD;
assert(value != markWord::no_hash, "invariant");
return value;
}
+ static bool is_lock_owned(Thread* thread, oop obj) {
+ assert(UseFastLocking, "only call this with fast-locking enabled");
+ return thread->is_Java_thread() ? reinterpret_cast<JavaThread*>(thread)->lock_stack().contains(obj) : false;
+ }
+
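The helper takes a Thread* rather than a JavaThread* because its callers (FastHashCode and inflate) can run on non-Java threads, which never own fast locks. Its contract, as an illustration rather than additional patch code:

    // Illustration of the helper's expected behavior:
    //   is_lock_owned(JavaThread::current(), obj) // true iff obj is on that
    //                                             // thread's lock stack
    //   is_lock_owned(VMThread::vm_thread(), obj) // always false: the VM
    //                                             // thread is not a JavaThread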
intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
if (UseBiasedLocking) {
// NOTE: many places throughout the JVM do not expect a safepoint
// to be taken here. However, we only ever bias Java instances and all
// of the call sites of identity_hash that might revoke biases have
}
return hash;
}
// Fall thru so we only have one place that installs the hash in
// the ObjectMonitor.
! } else if (current->is_lock_owned((address)mark.locker())) {
// This is a stack lock owned by the calling thread so fetch the
// displaced markWord from the BasicLock on the stack.
temp = mark.displaced_mark_helper();
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
hash = temp.hash();
}
return hash;
}
// Fall thru so we only have one place that installs the hash in
// the ObjectMonitor.
! } else if (mark.is_fast_locked() && is_lock_owned(current, obj)) {
+ // This is a fast lock owned by the calling thread so use the
+ // markWord from the object.
+ hash = mark.hash();
+ if (hash != 0) { // if it has a hash, just return it
+ return hash;
+ }
+ } else if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// This is a stack lock owned by the calling thread so fetch the
// displaced markWord from the BasicLock on the stack.
temp = mark.displaced_mark_helper();
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
hash = temp.hash();
// Uncontended case, header points to stack
if (mark.has_locker()) {
return current->is_lock_owned((address)mark.locker());
}
+
+ // Fast-locking case.
+ if (mark.is_fast_locked()) {
+ return current->lock_stack().contains(h_obj());
+ }
+
// Contended case, header points to ObjectMonitor (tagged pointer)
if (mark.has_monitor()) {
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
ObjectMonitor* monitor = mark.monitor();
}
assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
oop obj = h_obj();
- address owner = NULL;
-
markWord mark = read_stable_mark(obj);
// Uncontended case, header points to stack
if (mark.has_locker()) {
! owner = (address) mark.locker();
}
// Contended case, header points to ObjectMonitor (tagged pointer)
! else if (mark.has_monitor()) {
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
ObjectMonitor* monitor = mark.monitor();
assert(monitor != NULL, "monitor should be non-null");
! owner = (address) monitor->owner();
- }
-
- if (owner != NULL) {
- // owning_thread_from_monitor_owner() may also return NULL here
- return Threads::owning_thread_from_monitor_owner(t_list, owner);
}
- // Unlocked case, header in place
- // Cannot have assertion since this object may have been
- // locked by another thread when reaching here.
- // assert(mark.is_neutral(), "sanity check");
-
return NULL;
}
// Visitors ...
}
assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
oop obj = h_obj();
markWord mark = read_stable_mark(obj);
// Uncontended case, header points to stack
if (mark.has_locker()) {
! return Threads::owning_thread_from_monitor_owner(t_list, (address) mark.locker());
+ }
+
+ if (mark.is_fast_locked()) {
+ return Threads::owning_thread_from_object(t_list, h_obj());
}
// Contended case, header points to ObjectMonitor (tagged pointer)
! if (mark.has_monitor()) {
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
ObjectMonitor* monitor = mark.monitor();
assert(monitor != NULL, "monitor should be non-null");
! return Threads::owning_thread_from_monitor(t_list, monitor);
}
return NULL;
}
// Visitors ...
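Threads::owning_thread_from_object is new with this change: a fast-locked mark no longer points into the owner's stack, so the owner can only be found by asking each thread whether the object sits on its lock stack. A hypothetical sketch of the required semantics; the real implementation in the threads code may differ:

    // Hypothetical sketch only; assumes ThreadsList::length()/thread_at().
    static JavaThread* owning_thread_from_object_sketch(ThreadsList* t_list, oop obj) {
      for (uint i = 0; i < t_list->length(); i++) {
        JavaThread* t = t_list->thread_at(i);
        if (t->lock_stack().contains(obj)) {
          return t;   // obj is fast-locked by t
        }
      }
      return NULL;    // not fast-locked, or the owner released it meanwhile
    }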
// CASE: inflated
if (mark.has_monitor()) {
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
return inf;
}
// CASE: inflation in progress - inflating over a stack-lock.
// Some other thread is converting from stack-locked to inflated.
// Only that thread can complete inflation -- other threads must wait.
// The INFLATING value is transient.
// Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
// We could always eliminate polling by parking the thread on some auxiliary list.
! if (mark == markWord::INFLATING()) {
read_stable_mark(object);
continue;
}
// CASE: stack-locked
// CASE: inflated
if (mark.has_monitor()) {
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
+ if (UseFastLocking && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
+ inf->set_owner_from_anonymous(current);
+ assert(current->is_Java_thread(), "must be Java thread");
+ reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
+ }
return inf;
}
// CASE: inflation in progress - inflating over a stack-lock.
// Some other thread is converting from stack-locked to inflated.
// Only that thread can complete inflation -- other threads must wait.
// The INFLATING value is transient.
// Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
// We could always eliminate polling by parking the thread on some auxiliary list.
! // NOTE: We need to check UseFastLocking here, because with fast-locking, the header
+ // may legitimately be zero: cleared lock-bits and all upper header bits zero.
+ // With fast-locking, the INFLATING protocol is not used.
+ if (mark == markWord::INFLATING() && !UseFastLocking) {
read_stable_mark(object);
continue;
}
// CASE: stack-locked
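The extra !UseFastLocking test guards against a value collision rather than a protocol interaction: markWord::INFLATING() is the all-zero word, and under fast-locking a fast-locked object with hash 0 and age 0 also has an all-zero mark, exactly as the comment notes. Spelled out:

    // Worked example of the collision the comment above describes:
    //   markWord::INFLATING().value()          == 0
    //   fast-locked mark, hash == 0, age == 0  == 0  (all bits clear)
    // So "mark == markWord::INFLATING()" alone would spin in
    // read_stable_mark() on a perfectly valid fast-locked mark; since the
    // INFLATING protocol is never used with UseFastLocking, the test can
    // simply be disabled there.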
// ObjectMonitor into the mark. This was correct, but artificially lengthened
// the interval in which INFLATING appeared in the mark, thus increasing
// the odds of inflation contention.
LogStreamHandle(Trace, monitorinflation) lsh;
+ if (mark.is_fast_locked()) {
+ assert(UseFastLocking, "can only happen with fast-locking");
+ ObjectMonitor* monitor = new ObjectMonitor(object);
+ monitor->set_header(mark.set_unlocked());
+ bool own = is_lock_owned(current, object);
+ if (own) {
+ // Owned by us.
+ monitor->set_owner_from(NULL, current);
+ } else {
+ // Owned by somebody else.
+ monitor->set_owner_anonymous();
+ }
+ markWord monitor_mark = markWord::encode(monitor);
+ markWord witness = object->cas_set_mark(monitor_mark, mark);
+ if (witness == mark) {
+ // Success! Return inflated monitor.
+ if (own) {
+ assert(current->is_Java_thread(), "must be: checked in is_lock_owned()");
+ reinterpret_cast<JavaThread*>(current)->lock_stack().remove(object);
+ }
+ // Once the ObjectMonitor is configured and object is associated
+ // with the ObjectMonitor, it is safe to allow async deflation:
+ _in_use_list.add(monitor);
+
+ // Hopefully the performance counters are allocated on distinct
+ // cache lines to avoid false sharing on MP systems ...
+ OM_PERFDATA_OP(Inflations, inc());
+ if (log_is_enabled(Trace, monitorinflation)) {
+ ResourceMark rm(current);
+ lsh.print_cr("inflate(locked): object=" INTPTR_FORMAT ", mark="
+ INTPTR_FORMAT ", type='%s'", p2i(object),
+ object->mark().value(), object->klass()->external_name());
+ }
+ if (event.should_commit()) {
+ post_monitor_inflate_event(&event, object, cause);
+ }
+ return monitor;
+ } else {
+ delete monitor;
+ continue;
+ }
+ }
if (mark.has_locker()) {
+ assert(!UseFastLocking, "cannot happen with fast-locking");
ObjectMonitor* m = new ObjectMonitor(object);
// Optimistically prepare the ObjectMonitor - anticipate successful CAS
// We do this before the CAS in order to minimize the length of time
// in which INFLATING appears in the mark.
// Hopefully the performance counters are allocated on distinct cache lines
// to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm(current);
! lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
// Hopefully the performance counters are allocated on distinct cache lines
// to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm(current);
! lsh.print_cr("inflate(locked): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
INTPTR_FORMAT, p2i(thread));
}
};
+ class VM_RendezvousGCThreads : public VM_Operation {
+ public:
+ bool evaluate_at_safepoint() const override { return false; }
+ VMOp_Type type() const override { return VMOp_RendezvousGCThreads; }
+ void doit() override {
+ SuspendibleThreadSet::synchronize();
+ SuspendibleThreadSet::desynchronize();
+ }
+ };
+
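VM_RendezvousGCThreads reuses the existing SuspendibleThreadSet mechanism: synchronize() blocks until every joined GC thread has reached a yield point, desynchronize() releases them, and the pair acts as a barrier between concurrent mark-word readers and monitor deflation. For context, the standard participation pattern on the GC worker side (not part of this patch; has_work()/do_step() are placeholders):

    // Standard SuspendibleThreadSet participation pattern (context only):
    {
      SuspendibleThreadSetJoiner sts;   // joins the set for this scope
      while (has_work()) {              // placeholder loop condition
        do_step();                      // placeholder unit of GC work
        if (sts.should_yield()) {
          sts.yield();                  // parks here while synchronize()
        }                               // holds the set
      }
    }                                   // destructor leaves the set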
// This function is called by the MonitorDeflationThread to deflate
// ObjectMonitors. It is also called via do_final_audit_and_print_stats()
// by the VMThread.
size_t ObjectSynchronizer::deflate_idle_monitors() {
Thread* current = Thread::current();
_in_use_list.count(), _in_use_list.max());
}
// A JavaThread needs to handshake in order to safely free the
// ObjectMonitors that were deflated in this cycle.
+ // Also, we sync and desync GC threads around the handshake so that they can
+ // safely read the mark-word and follow it through to the object-monitor
+ // without the object-monitor being deflated out from under them.
HandshakeForDeflation hfd_hc;
Handshake::execute(&hfd_hc);
+ VM_RendezvousGCThreads sync_gc;
+ VMThread::execute(&sync_gc);
if (ls != NULL) {
ls->print_cr("after handshaking: in_use_list stats: ceiling="
SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());