src/hotspot/share/runtime/continuationFreezeThaw.cpp
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
+ #include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
+ #include "interpreter/interpreterRuntime.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "oops/access.inline.hpp"
+ #include "oops/constantPool.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
+ #include "runtime/vframe.inline.hpp"
+ #include "runtime/vframe_hp.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ZGC
#include "gc/z/zStackChunkGCData.inline.hpp"
#endif
+ #ifdef COMPILER1
+ #include "c1/c1_Runtime1.hpp"
+ #endif
+ #ifdef COMPILER2
+ #include "opto/runtime.hpp"
+ #endif
#include <type_traits>
/*
* This file contains the implementation of continuation freezing (yield) and thawing (run).
static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }
static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
! static void log_frames(JavaThread* thread);
! static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
#define assert_pfl(p, ...) \
do { \
if (!(p)) { \
JavaThread* t = JavaThread::active(); \
static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }
static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
! static void log_frames(JavaThread* thread, bool dolog = false);
! static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
+ static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr = nullptr, const char** code_name_ptr = nullptr, int* bci_ptr = nullptr);
#define assert_pfl(p, ...) \
do { \
if (!(p)) { \
JavaThread* t = JavaThread::active(); \
DEBUG_ONLY(_last_write = nullptr;)
assert(_cont.chunk_invariant(), "");
assert(!Interpreter::contains(_cont.entryPC()), "");
#if !defined(PPC64) || defined(ZERO)
! static const int doYield_stub_frame_size = frame::metadata_words;
#else
! static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
#endif
// With preemption doYield() might not have been resolved yet
! assert(_preempt || SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");
if (preempt) {
_last_frame = _thread->last_frame();
}
// properties of the continuation on the stack; all sizes are in words
! _cont_stack_top = frame_sp + (!preempt ? doYield_stub_frame_size : 0); // we don't freeze the doYield stub frame
_cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
- ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw
log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
DEBUG_ONLY(_last_write = nullptr;)
assert(_cont.chunk_invariant(), "");
assert(!Interpreter::contains(_cont.entryPC()), "");
#if !defined(PPC64) || defined(ZERO)
! static const int do_yield_frame_size = frame::metadata_words;
#else
! static const int do_yield_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
#endif
// With preemption doYield() might not have been resolved yet
! assert(_preempt || ContinuationEntry::do_yield_nmethod()->frame_size() == do_yield_frame_size, "");
if (preempt) {
_last_frame = _thread->last_frame();
}
// properties of the continuation on the stack; all sizes are in words
! _cont_stack_top = frame_sp + (!preempt ? do_yield_frame_size : 0); // we don't freeze the doYield stub frame
_cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
- ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw
log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
}
}
frame FreezeBase::freeze_start_frame_yield_stub() {
frame f = _thread->last_frame();
! assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
return f;
}
}
}
frame FreezeBase::freeze_start_frame_yield_stub() {
frame f = _thread->last_frame();
! assert(ContinuationEntry::do_yield_nmethod()->contains(f.pc()), "must be");
f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
return f;
}
assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
if (_preempt) {
! frame f = _thread->last_frame();
! if (f.is_interpreted_frame()) {
// Some platforms do not save the last_sp in the top interpreter frame on VM calls.
// We need it so that on resume we can restore the sp to the right place, since
// thawing might add an alignment word to the expression stack (see finish_thaw()).
// We do it now that we know freezing will be successful.
! prepare_freeze_interpreted_top_frame(f);
}
}
// We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
// writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
if (_preempt) {
! frame top_frame = _thread->last_frame();
! if (top_frame.is_interpreted_frame()) {
// Some platforms do not save the last_sp in the top interpreter frame on VM calls.
// We need it so that on resume we can restore the sp to the right place, since
// thawing might add an alignment word to the expression stack (see finish_thaw()).
// We do it now that we know freezing will be successful.
! prepare_freeze_interpreted_top_frame(top_frame);
+ }
+
+ // Do this now so should_process_args_at_top() is set before calling finish_freeze
+ // in case we might need to apply GC barriers to frames in this stackChunk.
+ if (_thread->at_preemptable_init()) {
+ assert(top_frame.is_interpreted_frame(), "only InterpreterRuntime::_new/resolve_from_cache allowed");
+ chunk->set_at_klass_init(true);
+ Method* m = top_frame.interpreter_frame_method();
+ Bytecode current_bytecode = Bytecode(m, top_frame.interpreter_frame_bcp());
+ Bytecodes::Code code = current_bytecode.code();
+ int exp_size = top_frame.interpreter_frame_expression_stack_size();
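+ // An invokestatic with a non-empty expression stack means the outgoing
+ // arguments of the preempted call are still on the caller's stack. Flag the
+ // chunk so thaw includes those arguments when applying GC barriers
+ // (see _process_args_at_top in thaw_slow).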
+ if (code == Bytecodes::Code::_invokestatic && exp_size > 0) {
+ chunk->set_has_args_at_top(true);
+ }
}
}
// We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
// writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
}
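+ // RAII helper that keeps the stack walkable while we might safepoint: the
+ // constructor publishes the given top frame in the thread's frame anchor
+ // (after a platform-dependent sp adjustment) and the destructor clears it.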
+ class AnchorMark : public StackObj {
+ JavaThread* _current;
+ frame& _top_frame;
+ intptr_t* _last_sp_from_frame;
+ bool _is_interpreted;
+
+ public:
+ AnchorMark(JavaThread* current, frame& f) : _current(current), _top_frame(f), _is_interpreted(false) {
+ intptr_t* sp = anchor_mark_set_pd();
+ set_anchor(_current, sp);
+ }
+ ~AnchorMark() {
+ clear_anchor(_current);
+ anchor_mark_clear_pd();
+ }
+ inline intptr_t* anchor_mark_set_pd();
+ inline void anchor_mark_clear_pd();
+ };
+
#if INCLUDE_JVMTI
static int num_java_frames(ContinuationWrapper& cont) {
ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
int count = 0;
for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
}
invalidate_jvmti_stack(thread);
}
! static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top) {
assert(current->vthread() != nullptr, "must be");
! HandleMarkCleaner hm(current);
Handle vth(current, current->vthread());
-
ContinuationWrapper::SafepointOp so(current, cont);
! // Since we might safepoint set the anchor so that the stack can be walked.
- set_anchor(current, top.sp());
JRT_BLOCK
JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
if (current->pending_contended_entered_event()) {
! JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
current->set_contended_entered_monitor(nullptr);
}
JRT_BLOCK_END
-
- clear_anchor(current);
}
#endif // INCLUDE_JVMTI
#ifdef ASSERT
static bool monitors_on_stack(JavaThread* thread) {
JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
}
invalidate_jvmti_stack(thread);
}
! static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top, Continuation::preempt_kind pk) {
assert(current->vthread() != nullptr, "must be");
! HandleMarkCleaner hm(current); // Cleanup vth and so._conth Handles
Handle vth(current, current->vthread());
ContinuationWrapper::SafepointOp so(current, cont);
! AnchorMark am(current, top); // Set anchor so that the stack is walkable.
JRT_BLOCK
JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
if (current->pending_contended_entered_event()) {
! // No monitor JVMTI events for ObjectLocker case.
+ if (pk != Continuation::object_locker) {
+ JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
+ }
current->set_contended_entered_monitor(nullptr);
}
JRT_BLOCK_END
}
#endif // INCLUDE_JVMTI
#ifdef ASSERT
static bool monitors_on_stack(JavaThread* thread) {
return false;
}
}
return true;
}
+
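+ // Debug-only check that the top frame matches the claimed preemption kind:
+ // blocked on monitorenter (interpreted or runtime-stub frame), parked in
+ // Object.wait0, or stopped at a class-initialization bytecode (object_locker).
+ // Optionally returns the method, bytecode name and bci for logging.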
+ static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr, const char** code_name_ptr, int* bci_ptr) {
+ JavaThread* current = JavaThread::current();
+ ResourceMark rm(current);
+
+ Method* m;
+ const char* code_name;
+ int bci;
+ if (preempt_kind == Continuation::monitorenter) {
+ assert(top.is_interpreted_frame() || top.is_runtime_frame(), "");
+ bool at_sync_method;
+ if (top.is_interpreted_frame()) {
+ m = top.interpreter_frame_method();
+ assert(!m->is_native() || m->is_synchronized(), "invalid method %s", m->external_name());
+ address bcp = top.interpreter_frame_bcp();
+ assert(bcp != 0 || m->is_native(), "");
+ at_sync_method = m->is_synchronized() && (bcp == 0 || bcp == m->code_base());
+ // bcp is advanced on monitorenter before making the VM call, adjust for that.
+ bool at_sync_bytecode = bcp > m->code_base() && Bytecode(m, bcp - 1).code() == Bytecodes::Code::_monitorenter;
+ assert(at_sync_method || at_sync_bytecode, "");
+ bci = at_sync_method ? -1 : top.interpreter_frame_bci();
+ } else {
+ CodeBlob* cb = top.cb();
+ RegisterMap reg_map(current,
+ RegisterMap::UpdateMap::skip,
+ RegisterMap::ProcessFrames::skip,
+ RegisterMap::WalkContinuation::skip);
+ frame fr = top.sender(&reg_map);
+ vframe* vf = vframe::new_vframe(&fr, &reg_map, current);
+ compiledVFrame* cvf = compiledVFrame::cast(vf);
+ m = cvf->method();
+ bci = cvf->scope()->bci();
+ at_sync_method = bci == SynchronizationEntryBCI;
+ assert(!at_sync_method || m->is_synchronized(), "bci is %d but method %s is not synchronized", bci, m->external_name());
+ bool is_c1_monitorenter = false, is_c2_monitorenter = false;
+ COMPILER1_PRESENT(is_c1_monitorenter = cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
+ cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id);)
+ COMPILER2_PRESENT(is_c2_monitorenter = cb == CodeCache::find_blob(OptoRuntime::complete_monitor_locking_Java());)
+ assert(is_c1_monitorenter || is_c2_monitorenter, "wrong runtime stub frame");
+ }
+ code_name = at_sync_method ? "synchronized method" : "monitorenter";
+ } else if (preempt_kind == Continuation::object_wait) {
+ assert(top.is_interpreted_frame() || top.is_native_frame(), "");
+ m = top.is_interpreted_frame() ? top.interpreter_frame_method() : top.cb()->as_nmethod()->method();
+ assert(m->is_object_wait0(), "");
+ bci = 0;
+ code_name = "";
+ } else {
+ assert(preempt_kind == Continuation::object_locker, "invalid preempt kind");
+ assert(top.is_interpreted_frame(), "");
+ m = top.interpreter_frame_method();
+ Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
+ Bytecodes::Code code = current_bytecode.code();
+ assert(code == Bytecodes::Code::_new || code == Bytecodes::Code::_invokestatic ||
+ code == Bytecodes::Code::_getstatic || code == Bytecodes::Code::_putstatic, "invalid bytecode");
+ bci = top.interpreter_frame_bci();
+ code_name = Bytecodes::name(current_bytecode.code());
+ }
+ assert(bci >= 0 || m->is_synchronized(), "invalid bci:%d at method %s", bci, m->external_name());
+
+ if (m_ptr != nullptr) {
+ *m_ptr = m;
+ *code_name_ptr = code_name;
+ *bci_ptr = bci;
+ }
+ }
+
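+ // Debug-only: derive the preemption kind from the thread's state and log the
+ // exact spot (method, bytecode, bci) at which the vthread was preempted.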
+ static void log_preempt_after_freeze(ContinuationWrapper& cont) {
+ JavaThread* current = cont.thread();
+ StackChunkFrameStream<ChunkFrames::Mixed> sfs(cont.tail());
+ frame top_frame = sfs.to_frame();
+ bool at_init = current->at_preemptable_init();
+ bool at_enter = current->current_pending_monitor() != nullptr;
+ bool at_wait = current->current_waiting_monitor() != nullptr;
+ assert((at_enter && !at_wait) || (!at_enter && at_wait), "");
+ Continuation::preempt_kind pk = at_init ? Continuation::object_locker : at_enter ? Continuation::monitorenter : Continuation::object_wait;
+
+ Method* m = nullptr;
+ const char* code_name = nullptr;
+ int bci = InvalidFrameStateBci;
+ verify_frame_kind(top_frame, pk, &m, &code_name, &bci);
+ assert(m != nullptr && code_name != nullptr && bci != InvalidFrameStateBci, "should be set");
+
+ ResourceMark rm(current);
+ if (bci < 0) {
+ log_trace(continuations, preempt)("Preempted " INT64_FORMAT " while synchronizing on %smethod %s", current->monitor_owner_id(), m->is_native() ? "native " : "", m->external_name());
+ } else if (m->is_object_wait0()) {
+ log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at native method %s", current->monitor_owner_id(), m->external_name());
+ } else {
+ Klass* k = current->preempt_init_klass();
+ assert(k != nullptr || !at_init, "");
+ log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at %s(bci:%d) in method %s %s%s", current->monitor_owner_id(),
+ code_name, bci, m->external_name(), at_init ? "trying to initialize klass " : "", at_init ? k->external_name() : "");
+ }
+ }
#endif // ASSERT
static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
verify_continuation(cont.continuation());
assert(!cont.is_empty(), "");
verify_continuation(cont.continuation());
log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
return res;
}
patch_return_pc_with_preempt_stub(old_last_frame);
cont.tail()->set_preempted(true);
!
return freeze_epilog(cont);
}
template<typename ConfigT, bool preempt>
static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
assert(!current->has_pending_exception(), "");
#ifdef ASSERT
log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
! log_frames(current);
#endif
CONT_JFR_ONLY(EventContinuationFreeze event;)
ContinuationEntry* entry = current->last_continuation();
verify_continuation(cont.continuation());
log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
return res;
}
+ // Set up things so that on return to Java we jump to preempt stub.
patch_return_pc_with_preempt_stub(old_last_frame);
cont.tail()->set_preempted(true);
! DEBUG_ONLY(log_preempt_after_freeze(cont);)
return freeze_epilog(cont);
}
template<typename ConfigT, bool preempt>
static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
assert(!current->has_pending_exception(), "");
#ifdef ASSERT
log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
! log_frames(current, false);
#endif
CONT_JFR_ONLY(EventContinuationFreeze event;)
ContinuationEntry* entry = current->last_continuation();
CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
intptr_t* _fastpath;
bool _barriers;
bool _preempted_case;
+ bool _process_args_at_top;
intptr_t* _top_unextended_sp_before_thaw;
int _align_size;
DEBUG_ONLY(intptr_t* _top_stack_address);
+ // Only used for some preemption cases.
+ ObjectMonitor* _monitor;
+
StackChunkFrameStream<ChunkFrames::Mixed> _stream;
NOT_PRODUCT(int _frames;)
protected:
inline void prefetch_chunk_pd(void* start, int size_words);
void patch_return(intptr_t* sp, bool is_last);
intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
inline intptr_t* push_cleanup_continuation();
+ inline intptr_t* push_preempt_adapter();
+ intptr_t* redo_vmcall(JavaThread* current, frame& top);
void throw_interrupted_exception(JavaThread* current, frame& top);
void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
void finish_thaw(frame& f);
inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
inline void after_thaw_java_frame(const frame& f, bool bottom);
inline void patch(frame& f, const frame& caller, bool bottom);
void clear_bitmap_bits(address start, address end);
! NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
void push_return_frame(frame& f);
inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
inline void after_thaw_java_frame(const frame& f, bool bottom);
inline void patch(frame& f, const frame& caller, bool bottom);
void clear_bitmap_bits(address start, address end);
! NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
void push_return_frame(frame& f);
assert(!f.is_stub() || check_stub, "");
if (check_stub && f.is_stub()) {
// If we don't thaw the top compiled frame too, after restoring the saved
// registers back in Java, we would hit the return barrier to thaw one more
// frame effectively overwriting the restored registers during that call.
! f.next(SmallRegisterMap::instance(), true /* stop */);
assert(!f.is_done(), "");
f.get_cb();
assert(f.is_compiled(), "");
frame_size += f.cb()->frame_size();
assert(!f.is_stub() || check_stub, "");
if (check_stub && f.is_stub()) {
// If we don't thaw the top compiled frame too, after restoring the saved
// registers back in Java, we would hit the return barrier to thaw one more
// frame effectively overwriting the restored registers during that call.
! f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
assert(!f.is_done(), "");
f.get_cb();
assert(f.is_compiled(), "");
frame_size += f.cb()->frame_size();
log_develop_trace(continuations)("Deoptimizing runtime stub caller");
f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
}
}
! f.next(SmallRegisterMap::instance(), true /* stop */);
empty = f.is_done();
assert(!empty || argsize == chunk->argsize(), "");
if (empty) {
clear_chunk(chunk);
log_develop_trace(continuations)("Deoptimizing runtime stub caller");
f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
}
}
! f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
empty = f.is_done();
assert(!empty || argsize == chunk->argsize(), "");
if (empty) {
clear_chunk(chunk);
template <typename ConfigT>
NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
Continuation::preempt_kind preempt_kind;
bool retry_fast_path = false;
_preempted_case = chunk->preempted();
if (_preempted_case) {
ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
if (waiter != nullptr) {
// Mounted again after preemption. Resume the pending monitor operation,
// which will be either a monitorenter or Object.wait() call.
ObjectMonitor* mon = waiter->monitor();
! preempt_kind = waiter->is_wait() ? Continuation::freeze_on_wait : Continuation::freeze_on_monitorenter;
bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
assert(!mon_acquired || mon->has_owner(_thread), "invariant");
if (!mon_acquired) {
// Failed to acquire monitor. Return to enterSpecial to unmount again.
return push_cleanup_continuation();
}
chunk = _cont.tail(); // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
} else {
// Preemption cancelled in monitorenter case. We actually acquired
! // the monitor after freezing all frames so nothing to do.
! preempt_kind = Continuation::freeze_on_monitorenter;
}
// Call this first to avoid racing with GC threads later when modifying the chunk flags.
relativize_chunk_concurrently(chunk);
chunk->set_preempted(false);
retry_fast_path = true;
} else {
relativize_chunk_concurrently(chunk);
}
template <typename ConfigT>
NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
Continuation::preempt_kind preempt_kind;
bool retry_fast_path = false;
+ _process_args_at_top = false;
_preempted_case = chunk->preempted();
if (_preempted_case) {
ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
if (waiter != nullptr) {
// Mounted again after preemption. Resume the pending monitor operation,
// which will be either a monitorenter or Object.wait() call.
ObjectMonitor* mon = waiter->monitor();
! preempt_kind = waiter->is_wait() ? Continuation::object_wait : Continuation::monitorenter;
bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
assert(!mon_acquired || mon->has_owner(_thread), "invariant");
if (!mon_acquired) {
// Failed to acquire monitor. Return to enterSpecial to unmount again.
+ log_trace(continuations, tracking)("Failed to acquire monitor, unmounting again");
return push_cleanup_continuation();
}
+ _monitor = mon; // remember monitor since we might need it on handle_preempted_continuation()
chunk = _cont.tail(); // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
} else {
// Preemption cancelled in monitorenter case. We actually acquired
! // the monitor after freezing all frames so nothing to do. In case
! // of preemption on ObjectLocker during klass init, we released the
+ // monitor already at ~ObjectLocker so here we just set _monitor to
+ // nullptr so we know there is no need to release it later.
+ preempt_kind = Continuation::monitorenter;
+ _monitor = nullptr;
}
+
// Call this first to avoid racing with GC threads later when modifying the chunk flags.
relativize_chunk_concurrently(chunk);
+
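+ // Preempted while initializing a klass (ObjectLocker case): the original VM
+ // call will be redone on resume (see redo_vmcall), and the top interpreter
+ // frame might carry outgoing arguments that need GC processing.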
+ if (chunk->at_klass_init()) {
+ preempt_kind = Continuation::object_locker;
+ chunk->set_at_klass_init(false);
+ _process_args_at_top = chunk->has_args_at_top();
+ if (_process_args_at_top) chunk->set_has_args_at_top(false);
+ }
chunk->set_preempted(false);
retry_fast_path = true;
} else {
relativize_chunk_concurrently(chunk);
}
if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
} else if (!heap_frame.is_interpreted_frame()) {
recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
} else {
! recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
}
}
template<typename FKind>
bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
} else if (!heap_frame.is_interpreted_frame()) {
recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
} else {
! recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
}
}
template<typename FKind>
bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
DEBUG_ONLY(_frames++;)
int argsize = _stream.stack_argsize();
! _stream.next(SmallRegisterMap::instance());
assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
// we never leave a compiled caller of an interpreted frame as the top frame in the chunk
// as it makes detecting that situation and adjusting unextended_sp tricky
if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
DEBUG_ONLY(_frames++;)
int argsize = _stream.stack_argsize();
! _stream.next(SmallRegisterMap::instance_no_args());
assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
// we never leave a compiled caller of an interpreted frame as the top frame in the chunk
// as it makes detecting that situation and adjusting unextended_sp tricky
if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
}
intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
- assert(preempt_kind == Continuation::freeze_on_wait || preempt_kind == Continuation::freeze_on_monitorenter, "");
frame top(sp);
assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
#if INCLUDE_JVMTI
// Finish the VTMS transition.
assert(_thread->is_in_VTMS_transition(), "must be");
bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
if (is_vthread) {
if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
! jvmti_mount_end(_thread, _cont, top);
} else {
_thread->set_is_in_VTMS_transition(false);
java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
}
}
chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
}
intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
frame top(sp);
assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
+ DEBUG_ONLY(verify_frame_kind(top, preempt_kind);)
+ NOT_PRODUCT(int64_t tid = _thread->monitor_owner_id();)
#if INCLUDE_JVMTI
// Finish the VTMS transition.
assert(_thread->is_in_VTMS_transition(), "must be");
bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
if (is_vthread) {
if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
! jvmti_mount_end(_thread, _cont, top, preempt_kind);
} else {
_thread->set_is_in_VTMS_transition(false);
java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
}
}
assert(top.is_runtime_frame() || top.is_native_frame(), "");
int fsize = top.cb()->frame_size();
patch_pd(top, sp + fsize);
}
! if (preempt_kind == Continuation::freeze_on_wait) {
// Check now if we need to throw IE exception.
! if (_thread->pending_interrupted_exception()) {
throw_interrupted_exception(_thread, top);
_thread->set_pending_interrupted_exception(false);
}
! } else if (top.is_runtime_frame()) {
! // The continuation might now run on a different platform thread than the previous time so
! // we need to adjust the current thread saved in the stub frame before restoring registers.
! JavaThread** thread_addr = frame::saved_thread_address(top);
! if (thread_addr != nullptr) *thread_addr = _thread;
}
return sp;
}
void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
ContinuationWrapper::SafepointOp so(current, _cont);
// Since we might safepoint set the anchor so that the stack can be walked.
set_anchor(current, top.sp());
JRT_BLOCK
THROW(vmSymbols::java_lang_InterruptedException());
JRT_BLOCK_END
clear_anchor(current);
}
! NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
assert(hf.is_interpreted_frame(), "");
if (UNLIKELY(seen_by_gc())) {
! _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
}
const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
assert(top.is_runtime_frame() || top.is_native_frame(), "");
int fsize = top.cb()->frame_size();
patch_pd(top, sp + fsize);
}
! if (preempt_kind == Continuation::object_wait) {
// Check now if we need to throw IE exception.
! bool throw_ie = _thread->pending_interrupted_exception();
+ if (throw_ie) {
throw_interrupted_exception(_thread, top);
_thread->set_pending_interrupted_exception(false);
}
! log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on Object.wait%s", tid, throw_ie ? " (throwing IE)" : "");
! } else if (preempt_kind == Continuation::monitorenter) {
! if (top.is_runtime_frame()) {
! // The continuation might now run on a different platform thread than the previous time so
! // we need to adjust the current thread saved in the stub frame before restoring registers.
+ JavaThread** thread_addr = frame::saved_thread_address(top);
+ if (thread_addr != nullptr) *thread_addr = _thread;
+ }
+ log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on monitorenter", tid);
+ } else {
+ // We need to redo the original call into the VM. First though, we need
+ // to exit the monitor we just acquired (except on preemption cancelled
+ // case where it was already released).
+ assert(preempt_kind == Continuation::object_locker, "");
+ if (_monitor != nullptr) _monitor->exit(_thread);
+ sp = redo_vmcall(_thread, top);
+ }
+ return sp;
+ }
+
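+ // Redo the call into the VM that was interrupted by the preemption, i.e.
+ // InterpreterRuntime::_new or resolve_from_cache. If we get preempted again
+ // we return to the preempt stub to unmount the vthread once more.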
+ intptr_t* ThawBase::redo_vmcall(JavaThread* current, frame& top) {
+ assert(!current->preempting(), "");
+ NOT_PRODUCT(int64_t tid = current->monitor_owner_id();)
+ intptr_t* sp = top.sp();
+
+ {
+ HandleMarkCleaner hmc(current); // Cleanup so._conth Handle
+ ContinuationWrapper::SafepointOp so(current, _cont);
+ AnchorMark am(current, top); // Set the anchor so that the stack is walkable.
+
+ Method* m = top.interpreter_frame_method();
+ Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
+ Bytecodes::Code code = current_bytecode.code();
+ log_develop_trace(continuations, preempt)("Redoing InterpreterRuntime::%s for " INT64_FORMAT, code == Bytecodes::Code::_new ? "_new" : "resolve_from_cache", tid);
+
+ // These InterpreterRuntime entry points use JRT_ENTRY which uses a HandleMarkCleaner.
+ // Create a HandleMark to avoid destroying so._conth.
+ HandleMark hm(current);
+ if (code == Bytecodes::Code::_new) {
+ InterpreterRuntime::_new(current, m->constants(), current_bytecode.get_index_u2(code));
+ } else {
+ InterpreterRuntime::resolve_from_cache(current, code);
+ }
+ }
+
+ if (current->preempting()) {
+ // Preempted again so we just arrange to return to preempt stub to unmount.
+ sp = push_preempt_adapter();
+ current->set_preempt_alternate_return(nullptr);
+ bool cancelled = current->preemption_cancelled();
+ if (cancelled) {
+ // Instead of calling thaw again from the preempt stub just unmount anyways with
+ // state of YIELDING. This will give a chance for other vthreads to run while
+ // minimizing repeated loops of "thaw->redo_vmcall->try_preempt->preemption_cancelled->thaw..."
+ // in case of multiple vthreads contending for the same init_lock().
+ current->set_preemption_cancelled(false);
+ oop vthread = current->vthread();
+ assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
+ java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::YIELDING);
+ }
+ log_develop_trace(continuations, preempt)("Preempted " INT64_FORMAT " again%s", tid, cancelled ? " (preemption cancelled, setting state to YIELDING)" : "");
+ } else {
+ log_develop_trace(continuations, preempt)("Call successful, resuming " INT64_FORMAT, tid);
}
return sp;
}
void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
+ HandleMarkCleaner hm(current); // Cleanup so._conth Handle
ContinuationWrapper::SafepointOp so(current, _cont);
// Since we might safepoint set the anchor so that the stack can be walked.
set_anchor(current, top.sp());
JRT_BLOCK
THROW(vmSymbols::java_lang_InterruptedException());
JRT_BLOCK_END
clear_anchor(current);
}
! NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top) {
assert(hf.is_interpreted_frame(), "");
if (UNLIKELY(seen_by_gc())) {
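+ // For the top frame of the object_locker case the expression stack still
+ // holds the arguments of the preempted call, so include them in the
+ // barrier walk (instance_with_args); otherwise argument oops are skipped.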
! if (is_top && _process_args_at_top) {
+ log_trace(continuations, tracking)("Processing arguments in recurse_thaw_interpreted_frame");
+ _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_with_args());
+ } else {
+ _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
+ }
}
const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
const int locals = m->max_locals();
if (!is_bottom_frame) {
// can only fix caller once this frame is thawed (due to callee saved regs)
! _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
} else if (_cont.tail()->has_bitmap() && locals > 0) {
assert(hf.is_heap_frame(), "should be");
address start = (address)(heap_frame_bottom - locals);
address end = (address)heap_frame_bottom;
clear_bitmap_bits(start, end);
assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
const int locals = m->max_locals();
if (!is_bottom_frame) {
// can only fix caller once this frame is thawed (due to callee saved regs)
! _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
} else if (_cont.tail()->has_bitmap() && locals > 0) {
assert(hf.is_heap_frame(), "should be");
address start = (address)(heap_frame_bottom - locals);
address end = (address)heap_frame_bottom;
clear_bitmap_bits(start, end);
void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
assert(hf.is_compiled_frame(), "");
assert(_preempted_case || !stub_caller, "stub caller not at preemption");
if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
! _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
}
const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
assert(hf.is_compiled_frame(), "");
assert(_preempted_case || !stub_caller, "stub caller not at preemption");
if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
! _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
}
const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
maybe_set_fastpath(f.sp());
}
if (!is_bottom_frame) {
// can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
! _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
} else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
clear_bitmap_bits(start, start + argsize_in_bytes);
maybe_set_fastpath(f.sp());
}
if (!is_bottom_frame) {
// can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
! _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
} else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
clear_bitmap_bits(start, start + argsize_in_bytes);
map.set_include_argument_oops(false);
_stream.next(&map);
assert(!_stream.is_done(), "");
_cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
} else {
! _stream.next(SmallRegisterMap::instance());
assert(!_stream.is_done(), "");
}
recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
map.set_include_argument_oops(false);
_stream.next(&map);
assert(!_stream.is_done(), "");
_cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
} else {
! _stream.next(SmallRegisterMap::instance_no_args());
assert(!_stream.is_done(), "");
}
recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
assert(hf.is_native_frame(), "");
assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
! _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
}
const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
assert(!is_bottom_frame, "");
void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
assert(hf.is_native_frame(), "");
assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
! _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
}
const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
assert(!is_bottom_frame, "");
assert(!f.is_deoptimized_frame(), "");
assert(!hf.is_deoptimized_frame(), "");
assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
// can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
! _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
caller = f;
}
assert(!f.is_deoptimized_frame(), "");
assert(!hf.is_deoptimized_frame(), "");
assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
// can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
! _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
caller = f;
}
if (!is_aligned(f.sp(), frame::frame_alignment)) {
assert(f.is_interpreted_frame(), "");
f.set_sp(align_down(f.sp(), frame::frame_alignment));
}
push_return_frame(f);
! chunk->fix_thawed_frame(f, SmallRegisterMap::instance()); // can only fix caller after push_return_frame (due to callee saved regs)
assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
log_develop_trace(continuations)("thawed %d frames", _frames);
if (!is_aligned(f.sp(), frame::frame_alignment)) {
assert(f.is_interpreted_frame(), "");
f.set_sp(align_down(f.sp(), frame::frame_alignment));
}
push_return_frame(f);
! // can only fix caller after push_return_frame (due to callee saved regs)
+ if (_process_args_at_top) {
+ log_trace(continuations, tracking)("Processing arguments in finish_thaw");
+ chunk->fix_thawed_frame(f, SmallRegisterMap::instance_with_args());
+ } else {
+ chunk->fix_thawed_frame(f, SmallRegisterMap::instance_no_args());
+ }
assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
log_develop_trace(continuations)("thawed %d frames", _frames);
set_anchor_to_entry(thread, cont.entry());
log_frames(thread);
clear_anchor(thread);
#endif
- DEBUG_ONLY(bool preempted = cont.tail()->preempted();)
Thaw<ConfigT> thw(thread, cont);
intptr_t* const sp = thw.thaw(kind);
assert(is_aligned(sp, frame::frame_alignment), "");
! DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp, preempted);)
CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
verify_continuation(cont.continuation());
log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
set_anchor_to_entry(thread, cont.entry());
log_frames(thread);
clear_anchor(thread);
#endif
Thaw<ConfigT> thw(thread, cont);
intptr_t* const sp = thw.thaw(kind);
assert(is_aligned(sp, frame::frame_alignment), "");
! DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp);)
CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
verify_continuation(cont.continuation());
log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
}
}
return true;
}
! static void log_frames(JavaThread* thread) {
const static int show_entry_callers = 3;
! LogTarget(Trace, continuations) lt;
! if (!lt.develop_is_enabled()) {
return;
}
LogStream ls(lt);
ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
}
}
return true;
}
! static void log_frames(JavaThread* thread, bool dolog) {
const static int show_entry_callers = 3;
! LogTarget(Trace, continuations, tracking) lt;
! if (!lt.develop_is_enabled() || !dolog) {
return;
}
LogStream ls(lt);
ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
}
ls.print_cr("======= end frames =========");
}
! static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted) {
intptr_t* sp0 = sp;
address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
! if (preempted && sp0 == cont.entrySP()) {
// Still preempted (monitor not acquired) so no frames were thawed.
- assert(cont.tail()->preempted(), "");
set_anchor(thread, cont.entrySP(), cont.entryPC());
} else {
set_anchor(thread, sp0);
}
log_frames(thread);
if (LoomVerifyAfterThaw) {
assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
}
! assert(ContinuationEntry::assert_entry_frame_laid_out(thread), "");
clear_anchor(thread);
LogTarget(Trace, continuations) lt;
if (lt.develop_is_enabled()) {
LogStream ls(lt);
}
ls.print_cr("======= end frames =========");
}
! static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp) {
intptr_t* sp0 = sp;
address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
! bool preempted = false;
+ stackChunkOop tail = cont.tail();
+ if (tail != nullptr && tail->preempted()) {
// Still preempted (monitor not acquired) so no frames were thawed.
set_anchor(thread, cont.entrySP(), cont.entryPC());
+ preempted = true;
} else {
set_anchor(thread, sp0);
}
log_frames(thread);
if (LoomVerifyAfterThaw) {
assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
}
! assert(ContinuationEntry::assert_entry_frame_laid_out(thread, preempted), "");
clear_anchor(thread);
LogTarget(Trace, continuations) lt;
if (lt.develop_is_enabled()) {
LogStream ls(lt);