src/hotspot/share/runtime/continuationFreezeThaw.cpp
inline bool stack_overflow();
static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
: sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
template<typename FKind> static inline frame sender(const frame& f);
! template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
inline void set_top_frame_metadata_pd(const frame& hf);
! inline void patch_pd(frame& callee, const frame& caller);
void adjust_interpreted_frame_unextended_sp(frame& f);
static inline void prepare_freeze_interpreted_top_frame(frame& f);
static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
protected:
inline bool stack_overflow();
static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
: sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
template<typename FKind> static inline frame sender(const frame& f);
! template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
inline void set_top_frame_metadata_pd(const frame& hf);
! inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
void adjust_interpreted_frame_unextended_sp(frame& f);
static inline void prepare_freeze_interpreted_top_frame(frame& f);
static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
protected:
ContinuationHelper::Frame::patch_pc(caller, last_pc);
} else {
assert(!caller.is_empty(), "");
}
! patch_pd(hf, caller);
if (f.is_interpreted_frame()) {
assert(hf.is_heap_frame(), "should be");
ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
}
ContinuationHelper::Frame::patch_pc(caller, last_pc);
} else {
assert(!caller.is_empty(), "");
}
! patch_pd(hf, caller, is_bottom_frame);
if (f.is_interpreted_frame()) {
assert(hf.is_heap_frame(), "should be");
ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
}
bool callee_interpreted) {
// The frame's top never includes the stack arguments to the callee
intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
// including metadata between f and its stackargs
! const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
! const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
! log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
ContinuationHelper::Frame::frame_method(f) != nullptr ?
ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
! _freeze_size, fsize, argsize);
// we'd rather not yield inside methods annotated with @JvmtiMountTransition
assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
if (UNLIKELY(result > freeze_ok_bottom)) {
return result;
}
bool is_bottom_frame = result == freeze_ok_bottom;
assert(!caller.is_empty() || is_bottom_frame, "");
DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
! frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);
intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
bool callee_interpreted) {
// The frame's top never includes the stack arguments to the callee
intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
// including metadata between f and its stackargs
! int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
! int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
+
+ int real_frame_size = 0;
+ bool augmented = f.was_augmented_on_entry(real_frame_size);
+ if (augmented) {
+ // The args reside inside the frame, so clear argsize. If the caller is compiled,
+ // this will cause the stack arguments passed by the caller to be frozen when
+ // freezing the caller frame itself. If the caller is interpreted, this will have
+ // the effect of discarding the arg area created in the i2c stub.
+ argsize = 0;
+ fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
+ #ifdef ASSERT
+ nmethod* nm = f.cb()->as_nmethod();
+ Method* method = nm->method();
+ address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
+ CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
+ assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
+ assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
+ (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
+ (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
+ "frame should not be extended");
+ #endif
+ }
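// A minimal sketch of the invariant the ASSERT block above encodes, with the
// compiler-type queries modeled as plain booleans (sketch_* is a hypothetical
// name, not HotSpot API): a frame is only expected to be extended on entry
// across a calling-convention boundary, or for a C2-compiled instance method
// of an inline class.
static bool sketch_augmentation_expected(bool caller_is_nmethod, bool callee_is_c2,
                                         bool same_compiler_type,
                                         bool inline_klass_instance_method) {
  return (!caller_is_nmethod && callee_is_c2) ||   // caller is a stub/adapter
         !same_compiler_type ||                    // e.g. C1 caller, C2 callee
         (callee_is_c2 && inline_klass_instance_method);
}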
! log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
ContinuationHelper::Frame::frame_method(f) != nullptr ?
ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
! _freeze_size, fsize, argsize, augmented);
// we'd rather not yield inside methods annotated with @JvmtiMountTransition
assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
if (UNLIKELY(result > freeze_ok_bottom)) {
return result;
}
bool is_bottom_frame = result == freeze_ok_bottom;
assert(!caller.is_empty() || is_bottom_frame, "");
+ assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");
DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
! frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);
intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
}
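// A hedged sketch of the augmented-frame sizing above, using plain word
// counts (sketch_* names are illustrative, not HotSpot API). The scalarized
// args live inside the extended frame, so nothing is attributed to argsize;
// a compiled callee already accounts for its own arg slots, which is why
// callee_argsize is subtracted in that case.
struct SketchFreezeSizes { int fsize; int argsize; };

static SketchFreezeSizes sketch_augmented_freeze_sizes(int real_frame_size,
                                                       bool callee_interpreted,
                                                       int callee_argsize) {
  SketchFreezeSizes s;
  s.argsize = 0;  // args are part of the frame body, not copied separately
  s.fsize   = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
  return s;
}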
void clear_chunk(stackChunkOop chunk);
template<bool check_stub>
int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
+ int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, stackChunkOop chunk, int &argsize);
void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
void thaw_lockstack(stackChunkOop chunk);
// fast path
inline bool seen_by_gc();
inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
inline void after_thaw_java_frame(const frame& f, bool bottom);
! inline void patch(frame& f, const frame& caller, bool bottom);
void clear_bitmap_bits(address start, address end);
NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
void push_return_frame(frame& f);
inline frame new_entry_frame();
! template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
inline void patch_pd(frame& f, const frame& sender);
inline void patch_pd(frame& f, intptr_t* caller_sp);
inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
inline bool seen_by_gc();
inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
inline void after_thaw_java_frame(const frame& f, bool bottom);
! inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
void clear_bitmap_bits(address start, address end);
NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
void push_return_frame(frame& f);
inline frame new_entry_frame();
! template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
inline void patch_pd(frame& f, const frame& sender);
inline void patch_pd(frame& f, intptr_t* caller_sp);
inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
inline void ThawBase::clear_chunk(stackChunkOop chunk) {
chunk->set_sp(chunk->bottom());
chunk->set_max_thawing_size(0);
}
+ int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, stackChunkOop chunk, int &argsize) {
+ DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
+ intptr_t* top = f.sp();
+
+ while (f.cb()->as_nmethod()->needs_stack_repair()) {
+ f.next(SmallRegisterMap::instance(), false /* stop */);
+ }
+ assert(!f.is_done(), "");
+ assert(f.is_compiled(), "");
+
+ intptr_t* bottom = f.sp() + f.cb()->frame_size();
+ argsize = f.stack_argsize();
+ int frames_size = pointer_delta_as_int(bottom, top);
+
+ f.next(SmallRegisterMap::instance(), true /* stop */);
+ bool empty = f.is_done();
+ assert(!empty || argsize == chunk->argsize(), "");
+
+ if (empty) {
+ clear_chunk(chunk);
+ } else {
+ chunk->set_sp(chunk->sp() + frames_size);
+ chunk->set_max_thawing_size(chunk->max_thawing_size() - frames_size);
+ // We set chunk->pc to the return pc into the next frame
+ chunk->set_pc(f.pc());
+ #ifdef ASSERT
+ {
+ intptr_t* retaddr_slot = (chunk_sp
+ + frames_size
+ - frame::sender_sp_ret_address_offset());
+ assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
+ "unexpected pc");
+ }
+ #endif
+ }
+ assert(empty == chunk->is_empty(), "");
+ // Returns the size required to store the frame on the stack; because it is a
+ // compiled frame, it must include a copy of the arguments passed by the caller.
+ return frames_size + argsize + frame::metadata_words_at_top;
+ }
+
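// A minimal model of the walk above, with plain arrays standing in for the
// StackChunkFrameStream (names and layout are illustrative only): frames that
// need stack repair are removed together with their first "regular" caller,
// and the reported size includes that caller's incoming args plus the top
// metadata, since the bottom frame is compiled.
static int sketch_scalarized_span(const int* frame_sizes, const bool* needs_repair,
                                  int nframes, int bottom_argsize,
                                  int metadata_words_at_top) {
  int span = 0;
  int i = 0;
  // Assumes the chunk always contains a trailing frame that does not need
  // repair, mirroring the assert(!f.is_done(), "") above.
  while (i < nframes && needs_repair[i]) {
    span += frame_sizes[i++];
  }
  span += frame_sizes[i];  // the first caller that does not need repair
  return span + bottom_argsize + metadata_words_at_top;
}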
template<bool check_stub>
int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
bool empty = false;
StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
assert(chunk_sp == f.sp(), "");
assert(chunk_sp == f.unextended_sp(), "");
+ if (f.cb()->is_nmethod() && f.cb()->as_nmethod()->needs_stack_repair()) {
+ return remove_scalarized_frames(f, chunk, argsize);
+ }
+
int frame_size = f.cb()->frame_size();
argsize = f.stack_argsize();
assert(!f.is_stub() || check_stub, "");
if (check_stub && f.is_stub()) {
assert(num_frames > 0, "");
DEBUG_ONLY(_frames++;)
int argsize = _stream.stack_argsize();
_stream.next(SmallRegisterMap::instance());
assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
! // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
! // as it makes detecting that situation and adjusting unextended_sp tricky
! if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
num_frames++;
}
if (num_frames == 1 || _stream.is_done()) { // end recursion
assert(num_frames > 0, "");
DEBUG_ONLY(_frames++;)
int argsize = _stream.stack_argsize();
+ CodeBlob* cb = _stream.cb();
_stream.next(SmallRegisterMap::instance());
assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
! // We never leave a compiled caller of an interpreted frame as the top frame in the chunk
! // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
! // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
+ // - Regardless of whether the frame was extended or not, we would need to copy the right arg
+ //   size if it's greater than the one given by the normal (non-scalarized) method signature.
+ // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
+ // the chunk (we need unextended_sp, but we only have sp).
+ if (num_frames == 1 && !_stream.is_done() && ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
num_frames++;
}
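// A minimal sketch of the "thaw one more frame" decision above, with the
// template flags and code-blob queries modeled as booleans: neither a
// compiled caller of an interpreted frame nor the caller of a stack-repaired
// frame may be left as the new top of the chunk, because the top frame's
// unextended_sp could then no longer be reconstructed from sp alone.
static bool sketch_thaw_extra_frame(int num_frames, bool stream_done,
                                    bool thawed_interpreted, bool caller_compiled,
                                    bool thawed_compiled, bool thawed_needs_repair) {
  return num_frames == 1 && !stream_done &&
         ((thawed_interpreted && caller_compiled) ||
          (thawed_compiled && thawed_needs_repair));
}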
if (num_frames == 1 || _stream.is_done()) { // end recursion
print_frame_layout(f, false, &ls); // f.print_on(&ls);
}
#endif
}
! inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
assert(!bottom || caller.fp() == _cont.entryFP(), "");
if (bottom) {
ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
: StubRoutines::cont_returnBarrier());
! } else {
// caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
// If the caller is not deoptimized, pc is unchanged.
! ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
}
patch_pd(f, caller);
if (f.is_interpreted_frame()) {
print_frame_layout(f, false, &ls); // f.print_on(&ls);
}
#endif
}
! inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
assert(!bottom || caller.fp() == _cont.entryFP(), "");
if (bottom) {
ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
: StubRoutines::cont_returnBarrier());
! } else if (caller.is_compiled_frame()) {
// caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
// If the caller is not deoptimized, pc is unchanged.
! ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
}
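// A hedged sketch of the re-patch above. Copying the callee from the heap
// clobbered the return-address slot in the caller; frame::raw_pc() yields the
// deopt handler address for a deoptimized frame and the unchanged pc
// otherwise, so writing it back is a no-op unless the caller was deoptimized
// during the thaw. SketchFrame is illustrative, not the HotSpot frame class.
struct SketchFrame {
  const void* pc;           // pc as copied from the heap frame
  const void* deopt_pc;     // deopt handler entry (illustrative)
  bool deoptimized;
  const void* raw_pc() const { return deoptimized ? deopt_pc : pc; }
};

static void sketch_repatch_return_pc(const SketchFrame& caller, const void** ret_slot) {
  *ret_slot = caller.raw_pc();  // restores the deopt handler if needed
}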
patch_pd(f, caller);
if (f.is_interpreted_frame()) {
if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
_align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
}
// new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
// yet laid out in the stack, and so the original_pc is not stored in it.
// As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
! frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
intptr_t* const stack_frame_top = f.sp();
intptr_t* const heap_frame_top = hf.unextended_sp();
-
- const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
- int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
- assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
-
intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
// copy metadata, except the metadata at the top of the (unextended) entry frame
int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
_align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
}
+ int fsize = 0;
+ int added_argsize = 0;
+ bool augmented = hf.was_augmented_on_entry(fsize);
+ if (!augmented) {
+ added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
+ fsize += added_argsize;
+ }
+ assert(!is_bottom_frame || !augmented, "");
+
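// A minimal sketch of the thaw-side sizing above, assuming
// was_augmented_on_entry() reports the frame's real (possibly extended) size
// through its out-parameter. Only non-augmented frames carry a separate copy
// of their incoming args, and only when the caller will not provide them.
struct SketchThawSizes { int fsize; int added_argsize; int size_adjust; };

static SketchThawSizes sketch_thaw_sizes(bool augmented, int real_size,
                                         int declared_frame_size, int stack_argsize,
                                         bool bottom_or_interpreted_caller) {
  SketchThawSizes s = { real_size, 0, 0 };
  if (!augmented) {
    s.added_argsize = bottom_or_interpreted_caller ? stack_argsize : 0;
    s.fsize += s.added_argsize;
  } else {
    // Extra words beyond the nmethod's declared frame size, handed to
    // new_stack_frame as size_adjust so the stack frame is laid out extended.
    s.size_adjust = real_size - declared_frame_size;
  }
  return s;
}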
// new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
// yet laid out in the stack, and so the original_pc is not stored in it.
// As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
! frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
+ assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");
+
intptr_t* const stack_frame_top = f.sp();
intptr_t* const heap_frame_top = hf.unextended_sp();
intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
// copy metadata, except the metadata at the top of the (unextended) entry frame
int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
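// A minimal sketch of the copy-size computation above: the copied span covers
// the frame (plus any args frozen with it) and the metadata words below it,
// and, except for a bottom frame that carries no frozen args, the metadata
// words between the frame and its stack args as well.
static int sketch_thaw_copy_size(int fsize, int added_argsize, bool is_bottom_frame,
                                 int metadata_words_at_bottom,
                                 int metadata_words_at_top) {
  int sz = fsize + metadata_words_at_bottom;
  if (!(is_bottom_frame && added_argsize == 0)) {
    sz += metadata_words_at_top;
  }
  return sz;
}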
assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
! patch(f, caller, is_bottom_frame);
// f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
assert(!f.is_deoptimized_frame(), "");
if (hf.is_deoptimized_frame()) {
maybe_set_fastpath(f.sp());
assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
! patch(f, caller, is_bottom_frame, augmented);
// f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
assert(!f.is_deoptimized_frame(), "");
if (hf.is_deoptimized_frame()) {
maybe_set_fastpath(f.sp());