src/hotspot/share/runtime/deoptimization.cpp
  assert(thread == Thread::current(), "should be");
  HandleMark hm(thread);
#ifndef PRODUCT
  bool first = true;
#endif // !PRODUCT
+ DEBUG_ONLY(GrowableArray<oop> lock_order{0};)
  // Start locking from outermost/oldest frame
  for (int i = (chunk->length() - 1); i >= 0; i--) {
    compiledVFrame* cvf = chunk->at(i);
    assert(cvf->scope() != nullptr, "expect only compiled java frames");
    GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
    if (monitors->is_nonempty()) {
      bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
                                                     exec_mode, realloc_failures);
      deoptimized_objects = deoptimized_objects || relocked;
+ #ifdef ASSERT
+     if (LockingMode == LM_LIGHTWEIGHT && !realloc_failures) {
+       for (MonitorInfo* mi : *monitors) {
+         lock_order.push(mi->owner());
+       }
+     }
+ #endif // ASSERT
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        ResourceMark rm;
        stringStream st;
        for (int j = 0; j < monitors->length(); j++) {

        tty->print_raw(st.freeze());
      }
#endif // !PRODUCT
    }
  }
+ #ifdef ASSERT
+   if (LockingMode == LM_LIGHTWEIGHT && !realloc_failures) {
+     deoptee_thread->lock_stack().verify_consistent_lock_order(lock_order, exec_mode != Deoptimization::Unpack_none);
+   }
+ #endif // ASSERT
}
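
The debug-only additions above record the owner of each relocked monitor, outermost/oldest frame first, and then ask the deoptee thread's lock stack to confirm it holds the same objects in the same order. verify_consistent_lock_order itself lives in the lock-stack code and is not part of this page, so the standalone sketch below only illustrates the kind of order check such a verification could perform; the types (std::vector standing in for GrowableArray, a dummy Oop) and the checking logic are assumptions, not HotSpot code. The require_exact flag is a guess at what the boolean argument (exec_mode != Deoptimization::Unpack_none) distinguishes.

// Standalone sketch (not HotSpot code): models a plausible
// verify_consistent_lock_order. lock_order holds the monitor owners
// recorded during relocking, oldest frame first; entries holds what
// the thread actually has on its lock stack, oldest lock first.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

using Oop = const void*; // stand-in for a HotSpot oop

struct LockStackSketch {
  std::vector<Oop> entries; // bottom (oldest lock) first

  // Assumed semantics: every recorded owner must appear on the lock
  // stack in the same relative order; when the frames are actually
  // being unpacked, the two sequences should match exactly.
  void verify_consistent_lock_order(const std::vector<Oop>& lock_order,
                                    bool require_exact) const {
    std::size_t pos = 0;
    for (Oop o : lock_order) {
      while (pos < entries.size() && entries[pos] != o) {
        pos++; // skip locks acquired outside the deoptimized frames
      }
      assert(pos < entries.size() && "relocked object missing or out of order");
      pos++;
    }
    if (require_exact) {
      assert(lock_order.size() == entries.size() && "unexpected extra locks");
    }
  }
};

int main() {
  int a, b, c; // addresses serve as dummy object identities
  LockStackSketch ls;
  ls.entries = {&a, &b, &c};             // what the thread holds
  std::vector<Oop> order = {&a, &b, &c}; // what relocking recorded
  ls.verify_consistent_lock_order(order, /*require_exact=*/true);
  std::puts("lock order consistent");
  return 0;
}

Note that a subsequence check (rather than strict equality) tolerates duplicate entries, which matters once recursive lightweight locking pushes the same oop onto the lock stack more than once.
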
// Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI.
// The given vframes cover one physical frame.
bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
              JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
              continue;
            }
          }
        }
-       if (LockingMode == LM_LIGHTWEIGHT && exec_mode == Unpack_none) {
+       if (LockingMode == LM_LIGHTWEIGHT) {
          // We have lost information about the correct state of the lock stack.
          // Inflate the locks instead. Enter then inflate to avoid races with
          // deflation.
-         ObjectSynchronizer::enter(obj, nullptr, deoptee_thread);
+         ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
-         ObjectMonitor* mon = ObjectSynchronizer::inflate(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
+         ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
          assert(mon->owner() == deoptee_thread, "must be");
        } else {
          BasicLock* lock = mon_info->lock();
-         ObjectSynchronizer::enter(obj, lock, deoptee_thread);
+         ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
        }
      }
    }
  }
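
Two details of this hunk deserve a note. The enter/inflate pair runs in that order because, per the comment, inflating first would race with monitor deflation; once the deoptee owns the lock, the monitor can no longer be deflated out from under it. The switch to the _for variants matters because the thread running the relocking is not necessarily the deoptee whose frames are being restored, so the lock must be acquired on behalf of deoptee_thread. The sketch below models only that "act on behalf of another thread" idea with made-up types; it is not the ObjectSynchronizer implementation.

// Illustrative sketch only: made-up types modeling why relocking uses
// enter_for/inflate_for. The owner recorded on the monitor must be the
// deoptee thread, not whichever thread happens to run the relocking.
#include <cassert>
#include <cstdio>

struct ThreadSketch { const char* name; };

struct MonitorSketch {
  ThreadSketch* owner = nullptr;
  bool inflated = false;

  // enter_for: acquire the lock with locking_thread as the owner, even
  // when called from a different thread (hypothetical body).
  void enter_for(ThreadSketch* locking_thread) { owner = locking_thread; }

  // inflate_for: safe here only because the lock is already owned, so a
  // concurrent deflater would leave this monitor alone (assumed rule).
  MonitorSketch* inflate_for(ThreadSketch* locking_thread) {
    assert(owner == locking_thread && "inflate after enter, not before");
    inflated = true;
    return this;
  }
};

int main() {
  ThreadSketch current{"deoptimizing thread"}; // runs the relock
  ThreadSketch deoptee{"deoptee thread"};      // owns the frames
  (void)current; // the point is that current is never passed below
  MonitorSketch mon;
  mon.enter_for(&deoptee);                      // not &current
  MonitorSketch* m = mon.inflate_for(&deoptee); // enter, then inflate
  assert(m->owner == &deoptee && m->inflated);
  std::printf("monitor owned by %s\n", m->owner->name);
  return 0;
}
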