  inline frame freeze_start_frame_yield_stub();
  template<typename FKind>
  inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
  inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
  inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
  freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
  void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
  NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
  NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
  NOINLINE void finish_freeze(const frame& f, const frame& top);

  void freeze_lockstack(stackChunkOop chunk);

  inline bool stack_overflow();

  static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
                                                                        : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
  template<typename FKind> static inline frame sender(const frame& f);
  template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
  inline void set_top_frame_metadata_pd(const frame& hf);
  inline void patch_pd(frame& callee, const frame& caller);
  inline void patch_pd_unused(intptr_t* sp);
  void adjust_interpreted_frame_unextended_sp(frame& f);
  inline void prepare_freeze_interpreted_top_frame(frame& f);
  static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);

protected:
  void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
  bool freeze_fast_new_chunk(stackChunkOop chunk);
};

template <typename ConfigT>
class Freeze : public FreezeBase {
private:
  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

public:
  inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
    : FreezeBase(thread, cont, frame_sp, preempt) {}

  freeze_result try_freeze_fast();

// ... [code elided from this excerpt] ...

  assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
  assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
#endif

  return freeze_ok_bottom;
}

// After freezing a frame we need to possibly adjust some values related to the caller frame.
void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
  if (is_bottom_frame) {
    // If we're the bottom frame, we need to replace the return barrier with the real
    // caller's pc.
    address last_pc = caller.pc();
    assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
    ContinuationHelper::Frame::patch_pc(caller, last_pc);
  } else {
    assert(!caller.is_empty(), "");
  }

  patch_pd(hf, caller);

  if (f.is_interpreted_frame()) {
    assert(hf.is_heap_frame(), "should be");
    ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
  }

#ifdef ASSERT
  if (hf.is_compiled_frame()) {
    if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
      log_develop_trace(continuations)("Freezing deoptimized frame");
      assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
      assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
    }
  }
#endif
}

#ifdef ASSERT
static void verify_frame_top(const frame& f, intptr_t* top) {
  ResourceMark rm;
// ... [code elided from this excerpt] ...

  CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;

  // Mark frame_method's GC epoch for class redefinition on_stack calculation.
  frame_method->record_gc_epoch();

  return freeze_ok;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
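//
// An illustrative layout sketch (the exact metadata word counts are
// platform-dependent, so treat the labels as assumptions): a callee's
// incoming stack arguments live at the bottom of its caller's frame, so the
// frozen copies of caller and callee overlap by argsize words plus the
// metadata between them:
//
//   |  caller frame body      |      ^ higher addresses
//   |-------------------------|
//   |  stack args for callee  |  \
//   |-------------------------|   +- callee_argsize (incl. metadata)
//   |  metadata_words_at_top  |  /
//   |-------------------------|
//   |  callee frame body      |      v lower addresses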
freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
                                                        int callee_argsize /* incl. metadata */,
                                                        bool callee_interpreted) {
  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
  // including metadata between f and its stackargs
  const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
  const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);

  log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
                                   ContinuationHelper::Frame::frame_method(f) != nullptr ?
                                   ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
                                   _freeze_size, fsize, argsize);
  // we'd rather not yield inside methods annotated with @JvmtiMountTransition
  assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  bool is_bottom_frame = result == freeze_ok_bottom;
  assert(!caller.is_empty() || is_bottom_frame, "");

  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)

  frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);

  intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);

  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
  assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");

  if (caller.is_interpreted_frame()) {
    // When thawing the frame we might need to add alignment (see Thaw::align)
    _total_align_size += frame::align_wiggle;
  }

  patch(f, hf, caller, is_bottom_frame);

  assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");

  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;
  return freeze_ok;
}
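
// A worked example for the size computation above, with hypothetical numbers:
// if stack_argsize(f) == 2 and frame::metadata_words_at_top == 2, then
// argsize == 4; if the frame body spans 40 words from stack_frame_top to
// stack_frame_bottom, fsize == 40 + 4 == 44 words, i.e. the chunk copy covers
// the frame body plus the argument/metadata overlap shared with the caller.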

// ... [code elided from this excerpt] ...

  // Only used for preemption on ObjectLocker
  ObjectMonitor* _init_lock;

  StackChunkFrameStream<ChunkFrames::Mixed> _stream;

  NOT_PRODUCT(int _frames;)

protected:
  ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
      _thread(thread), _cont(cont),
      _fastpath(nullptr) {
    DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
    assert(cont.tail() != nullptr, "no last chunk");
    DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
  }

  void clear_chunk(stackChunkOop chunk);
  template<bool check_stub>
  int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
  void copy_from_chunk(intptr_t* from, intptr_t* to, int size);

  void thaw_lockstack(stackChunkOop chunk);

  // fast path
  inline void prefetch_chunk_pd(void* start, int size_words);
  void patch_return(intptr_t* sp, bool is_last);

  intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
  inline intptr_t* push_cleanup_continuation();
  inline intptr_t* push_preempt_adapter();
  intptr_t* redo_vmcall(JavaThread* current, frame& top);
  void throw_interrupted_exception(JavaThread* current, frame& top);

  void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
  void finish_thaw(frame& f);

private:
  template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
  void finalize_thaw(frame& entry, int argsize);

  inline bool seen_by_gc();

  inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
  inline void after_thaw_java_frame(const frame& f, bool bottom);
  inline void patch(frame& f, const frame& caller, bool bottom);
  void clear_bitmap_bits(address start, address end);

  NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
  void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
  void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
  void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);

  void push_return_frame(const frame& f);
  inline frame new_entry_frame();
  template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
  inline void patch_pd(frame& f, const frame& sender);
  inline void patch_pd(frame& f, intptr_t* caller_sp);
  inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);

  void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }

  static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);

public:
  CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
};

template <typename ConfigT>
class Thaw : public ThawBase {
public:
  Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}

  inline bool can_thaw_fast(stackChunkOop chunk) {
    return !_barriers
        && _thread->cont_fastpath_thread_state()
// ... [code elided from this excerpt] ...

    assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
  }

  int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }

  // top and bottom stack pointers
  intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
  intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }

  // several operations operate on the totality of the stack being reconstructed,
  // including the metadata words
  intptr_t* top() const { return sp() - frame::metadata_words_at_bottom; }
  int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
};
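
// An illustrative reading of the accessors above, with hypothetical numbers
// (the real metadata constants are platform-dependent): _base is the
// high-address end of the area being reconstructed. With _thaw_size == 50
// and frame::metadata_words_at_bottom == 2:
//   sp()           == frame_align_pointer(_base - 50)
//   top()          == sp() - 2     // the copy range also covers bottom metadata
//   total_size()   == 50 + 2
// bottom_sp() instead subtracts only the entry-frame extension: the bottom
// frame's stack args plus their metadata_words_at_top, when _argsize > 0.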

inline void ThawBase::clear_chunk(stackChunkOop chunk) {
  chunk->set_sp(chunk->bottom());
  chunk->set_max_thawing_size(0);
}

template<bool check_stub>
int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
  bool empty = false;
  StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
  DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
  assert(chunk_sp == f.sp(), "");
  assert(chunk_sp == f.unextended_sp(), "");

  int frame_size = f.cb()->frame_size();
  argsize = f.stack_argsize();

  assert(!f.is_stub() || check_stub, "");
  if (check_stub && f.is_stub()) {
    // If we don't thaw the top compiled frame too, after restoring the saved
    // registers back in Java, we would hit the return barrier to thaw one more
    // frame, effectively overwriting the restored registers during that call.
    f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
    assert(!f.is_done(), "");

    f.get_cb();
    assert(f.is_compiled(), "");
    frame_size += f.cb()->frame_size();
    argsize = f.stack_argsize();

    if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
      // The caller of the runtime stub when the continuation is preempted is not at a
      // Java call instruction, and so cannot rely on nmethod patching for deopt.
      log_develop_trace(continuations)("Deoptimizing runtime stub caller");
      f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize, which we're not set up for
    }
  }

  f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
  empty = f.is_done();
  assert(!empty || argsize == chunk->argsize(), "");

  if (empty) {
    clear_chunk(chunk);
  } else {
    chunk->set_sp(chunk->sp() + frame_size);
    chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
    // We set chunk->pc to the return pc into the next frame
    chunk->set_pc(f.pc());
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (chunk_sp
                                + frame_size
                                - frame::sender_sp_ret_address_offset());
      assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
             "unexpected pc");
// ... [code elided from this excerpt] ...

  assert(!_cont.is_empty(), "no more frames");
  assert(num_frames > 0, "");
  assert(!heap_frame.is_empty(), "");

  if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
    heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
  } else if (!heap_frame.is_interpreted_frame()) {
    recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
  } else {
    recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
  }
}

template<typename FKind>
bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
  assert(num_frames > 0, "");

  DEBUG_ONLY(_frames++;)

  int argsize = _stream.stack_argsize();

  _stream.next(SmallRegisterMap::instance_no_args());
  assert(_stream.to_frame().is_empty() == _stream.is_done(), "");

  // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
  // as it makes detecting that situation and adjusting unextended_sp tricky
  if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
    log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
    num_frames++;
  }

  if (num_frames == 1 || _stream.is_done()) { // end recursion
    finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
    return true; // bottom
  } else { // recurse
    recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
    return false;
  }
}
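
// An example of the special case above: suppose num_frames == 1, FKind is
// interpreted, and the next frame in the stream is compiled. Stopping here
// would leave that compiled caller as the chunk's top frame with an
// interpreted callee, the situation the comment above rules out, so
// num_frames is bumped to 2 and the compiled caller is thawed in the same pass.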

void ThawBase::finalize_thaw(frame& entry, int argsize) {
  stackChunkOop chunk = _cont.tail();

  if (!_stream.is_done()) {
    assert(_stream.sp() >= chunk->sp_address(), "");
    chunk->set_sp(chunk->to_offset(_stream.sp()));
    chunk->set_pc(_stream.pc());
// ... [code elided from this excerpt] ...

  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("======== THAWING FRAME: %d", num_frame);
    assert(hf.is_heap_frame(), "should be");
    hf.print_value_on(&ls);
  }
  assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
}

inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
#ifdef ASSERT
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("thawed frame:");
    print_frame_layout(f, false, &ls); // f.print_on(&ls);
  }
#endif
}

inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
  assert(!bottom || caller.fp() == _cont.entryFP(), "");
  if (bottom) {
    ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
                                                                 : StubRoutines::cont_returnBarrier());
  } else {
    // The caller might have been deoptimized during thaw, but we've overwritten the return address when copying f from the heap.
    // If the caller is not deoptimized, the pc is unchanged.
    ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
  }

  patch_pd(f, caller);

  if (f.is_interpreted_frame()) {
    ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
  }

  assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
  assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
}
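
// A note on the bottom case above: when frames remain in the continuation,
// the entry frame's return slot is patched with StubRoutines::cont_returnBarrier(),
// so returning from the thawed frames re-enters the VM to thaw the next batch;
// only when the continuation is empty is the real caller.pc() kept.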

void ThawBase::clear_bitmap_bits(address start, address end) {
  assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
  assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));

  // we need to clear the bits that correspond to arguments as they reside in the caller frame
  // or they will keep objects that are otherwise unreachable alive.

  // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
// ... [code elided from this excerpt] ...
}

void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
  assert(hf.is_compiled_frame(), "");
  assert(_preempted_case || !stub_caller, "stub caller not at preemption");

  if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
    _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
  }

  const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);

  DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)

  assert(caller.sp() == caller.unextended_sp(), "");

  if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
    _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
  }

  // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
  // yet laid out in the stack, and so the original_pc is not stored in it.
  // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
  frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
  intptr_t* const stack_frame_top = f.sp();
  intptr_t* const heap_frame_top = hf.unextended_sp();

  const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
  int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
  assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");

  intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
  intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
  // copy metadata, except the metadata at the top of the (unextended) entry frame
  int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);

  // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
  // (we might have one padding word for alignment)
  assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
  assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");

  copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above

  patch(f, caller, is_bottom_frame);

  // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
  assert(!f.is_deoptimized_frame(), "");
  if (hf.is_deoptimized_frame()) {
    maybe_set_fastpath(f.sp());
  } else if (_thread->is_interp_only_mode()
             || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
    // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
    // cannot rely on nmethod patching for deopt.
    assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");

    log_develop_trace(continuations)("Deoptimizing thawed frame");
    DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));

    f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize, which we're not set up for
    assert(f.is_deoptimized_frame(), "");
    assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
    maybe_set_fastpath(f.sp());
  }
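
  // A note on the deopt path above (an interpretation, not from the original
  // comments): patching the pc to nullptr in debug builds makes any use of the
  // stale value fail fast; f.deoptimize(nullptr) then redirects the frame's pc
  // to the deopt handler, which is what the is_deopt_return() assert verifies.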
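
// ... [rest of recurse_thaw_compiled_frame elided from this excerpt] ...

// ===========================================================================
// Below is a second copy of the same excerpt: a revised version of the code
// above, with support for scalarized ("augmented") frames, i.e. nmethods
// that needs_stack_repair() because their frames were extended on entry.
// ===========================================================================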
  inline frame freeze_start_frame_yield_stub();
  template<typename FKind>
  inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
  inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
  inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
  freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
  void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
  NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
  NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
  NOINLINE void finish_freeze(const frame& f, const frame& top);

  void freeze_lockstack(stackChunkOop chunk);

  inline bool stack_overflow();

  static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
                                                                        : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
  template<typename FKind> static inline frame sender(const frame& f);
  template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
  inline void set_top_frame_metadata_pd(const frame& hf);
  inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
  inline void patch_pd_unused(intptr_t* sp);
  void adjust_interpreted_frame_unextended_sp(frame& f);
  inline void prepare_freeze_interpreted_top_frame(frame& f);
  static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);

protected:
  void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
  bool freeze_fast_new_chunk(stackChunkOop chunk);
};

template <typename ConfigT>
class Freeze : public FreezeBase {
private:
  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

public:
  inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
    : FreezeBase(thread, cont, frame_sp, preempt) {}

  freeze_result try_freeze_fast();

// ... [code elided from this excerpt] ...

  assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
  assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
#endif

  return freeze_ok_bottom;
}

// After freezing a frame we need to possibly adjust some values related to the caller frame.
void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
  if (is_bottom_frame) {
    // If we're the bottom frame, we need to replace the return barrier with the real
    // caller's pc.
    address last_pc = caller.pc();
    assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
    ContinuationHelper::Frame::patch_pc(caller, last_pc);
  } else {
    assert(!caller.is_empty(), "");
  }

  patch_pd(hf, caller, is_bottom_frame);

  if (f.is_interpreted_frame()) {
    assert(hf.is_heap_frame(), "should be");
    ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
  }

#ifdef ASSERT
  if (hf.is_compiled_frame()) {
    if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
      log_develop_trace(continuations)("Freezing deoptimized frame");
      assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
      assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
    }
  }
#endif
}

#ifdef ASSERT
static void verify_frame_top(const frame& f, intptr_t* top) {
  ResourceMark rm;
// ... [code elided from this excerpt] ...

  CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;

  // Mark frame_method's GC epoch for class redefinition on_stack calculation.
  frame_method->record_gc_epoch();

  return freeze_ok;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
                                                        int callee_argsize /* incl. metadata */,
                                                        bool callee_interpreted) {
  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
  // including metadata between f and its stackargs
  int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
  int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);

  int real_frame_size = 0;
  bool augmented = f.was_augmented_on_entry(real_frame_size);
  if (augmented) {
    // The args reside inside the frame, so clear argsize. If the caller is compiled,
    // this will cause the stack arguments passed by the caller to be frozen when
    // freezing the caller frame itself. If the caller is interpreted, this will have
    // the effect of discarding the arg area created in the i2c stub.
    argsize = 0;
    fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
#ifdef ASSERT
    nmethod* nm = f.cb()->as_nmethod();
    Method* method = nm->method();
    address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
    CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
    assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
    assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
           (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
           (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
           "frame should not be extended");
#endif
  }

  log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
                                   ContinuationHelper::Frame::frame_method(f) != nullptr ?
                                   ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
                                   _freeze_size, fsize, argsize, augmented);
  // we'd rather not yield inside methods annotated with @JvmtiMountTransition
  assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  bool is_bottom_frame = result == freeze_ok_bottom;
  assert(!caller.is_empty() || is_bottom_frame, "");
  assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");

  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)

  frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);

  intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);

  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
  assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");

  if (caller.is_interpreted_frame()) {
    // When thawing the frame we might need to add alignment (see Thaw::align)
    _total_align_size += frame::align_wiggle;
  }

  patch(f, hf, caller, is_bottom_frame);

  assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");

  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;
  return freeze_ok;
}
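
// A worked example for the augmented path above, with hypothetical numbers:
// a callee extended on entry (needs_stack_repair) might report
// cb()->frame_size() == 6 while was_augmented_on_entry() yields
// real_frame_size == 10. Freezing then uses argsize == 0, since the arguments
// live inside the extended frame instead of overlapping the caller, and
// fsize == 10 - callee_argsize for a compiled callee; new_heap_frame reserves
// the extra 10 - 6 == 4 words for the heap copy.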

// ... [code elided from this excerpt] ...

  // Only used for preemption on ObjectLocker
  ObjectMonitor* _init_lock;

  StackChunkFrameStream<ChunkFrames::Mixed> _stream;

  NOT_PRODUCT(int _frames;)

protected:
  ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
      _thread(thread), _cont(cont),
      _fastpath(nullptr) {
    DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
    assert(cont.tail() != nullptr, "no last chunk");
    DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
  }

  void clear_chunk(stackChunkOop chunk);
  template<bool check_stub>
  int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
  int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, int &argsize);
  void copy_from_chunk(intptr_t* from, intptr_t* to, int size);

  void thaw_lockstack(stackChunkOop chunk);

  // fast path
  inline void prefetch_chunk_pd(void* start, int size_words);
  void patch_return(intptr_t* sp, bool is_last);

  intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
  inline intptr_t* push_cleanup_continuation();
  inline intptr_t* push_preempt_adapter();
  intptr_t* redo_vmcall(JavaThread* current, frame& top);
  void throw_interrupted_exception(JavaThread* current, frame& top);

  void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
  void finish_thaw(frame& f);

private:
  template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
  void finalize_thaw(frame& entry, int argsize);

  inline bool seen_by_gc();

  inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
  inline void after_thaw_java_frame(const frame& f, bool bottom);
  inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
  void clear_bitmap_bits(address start, address end);

  NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
  void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
  void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
  void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);

  void push_return_frame(const frame& f);
  inline frame new_entry_frame();
  template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
  inline void patch_pd(frame& f, const frame& sender);
  inline void patch_pd(frame& f, intptr_t* caller_sp);
  inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);

  void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }

  static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);

public:
  CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
};

template <typename ConfigT>
class Thaw : public ThawBase {
public:
  Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}

  inline bool can_thaw_fast(stackChunkOop chunk) {
    return !_barriers
        && _thread->cont_fastpath_thread_state()
// ... [code elided from this excerpt] ...

    assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
  }

  int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }

  // top and bottom stack pointers
  intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
  intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }

  // several operations operate on the totality of the stack being reconstructed,
  // including the metadata words
  intptr_t* top() const { return sp() - frame::metadata_words_at_bottom; }
  int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
};

inline void ThawBase::clear_chunk(stackChunkOop chunk) {
  chunk->set_sp(chunk->bottom());
  chunk->set_max_thawing_size(0);
}

int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, int &argsize) {
  intptr_t* top = f.sp();

  while (f.cb()->as_nmethod()->needs_stack_repair()) {
    f.next(SmallRegisterMap::instance_no_args(), false /* stop */);
  }
  assert(!f.is_done(), "");
  assert(f.is_compiled(), "");

  intptr_t* bottom = f.sp() + f.cb()->frame_size();
  argsize = f.stack_argsize();
  return bottom - top;
}
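
// An illustrative example: if the chunk's top frames are A <- B <- C, where A
// and B need stack repair and C does not, the loop steps past A and B and
// stops at C. The returned size, (C.sp + C.frame_size) - A.sp, spans all
// three frames, and argsize is taken from C, the first frame with a normal
// (non-repaired) layout.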

template<bool check_stub>
int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
  bool empty = false;
  StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
  DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
  assert(chunk_sp == f.sp(), "");
  assert(chunk_sp == f.unextended_sp(), "");

  int frame_size = f.cb()->frame_size();
  argsize = f.stack_argsize();

  assert(!f.is_stub() || check_stub, "");
  if (check_stub && f.is_stub()) {
    // If we don't thaw the top compiled frame too, after restoring the saved
    // registers back in Java, we would hit the return barrier to thaw one more
    // frame, effectively overwriting the restored registers during that call.
    f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
    assert(!f.is_done(), "");

    f.get_cb();
    assert(f.is_compiled(), "");
    if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
      // The caller of the runtime stub when the continuation is preempted is not at a
      // Java call instruction, and so cannot rely on nmethod patching for deopt.
      log_develop_trace(continuations)("Deoptimizing runtime stub caller");
      f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize, which we're not set up for
    }

    if (f.cb()->as_nmethod()->needs_stack_repair()) {
      frame_size += remove_scalarized_frames(f, argsize);
    } else {
      frame_size += f.cb()->frame_size();
      argsize = f.stack_argsize();
    }
  } else if (f.cb()->as_nmethod()->needs_stack_repair()) {
    frame_size = remove_scalarized_frames(f, argsize);
  }

  f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
  empty = f.is_done();
  assert(!empty || argsize == chunk->argsize(), "");

  if (empty) {
    clear_chunk(chunk);
  } else {
    chunk->set_sp(chunk->sp() + frame_size);
    chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
    // We set chunk->pc to the return pc into the next frame
    chunk->set_pc(f.pc());
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (chunk_sp
                                + frame_size
                                - frame::sender_sp_ret_address_offset());
      assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
             "unexpected pc");
// ... [code elided from this excerpt] ...

  assert(!_cont.is_empty(), "no more frames");
  assert(num_frames > 0, "");
  assert(!heap_frame.is_empty(), "");

  if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
    heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
  } else if (!heap_frame.is_interpreted_frame()) {
    recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
  } else {
    recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
  }
}

template<typename FKind>
bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
  assert(num_frames > 0, "");

  DEBUG_ONLY(_frames++;)

  int argsize = _stream.stack_argsize();
  CodeBlob* cb = _stream.cb();

  _stream.next(SmallRegisterMap::instance_no_args());
  assert(_stream.to_frame().is_empty() == _stream.is_done(), "");

  // We never leave a compiled caller of an interpreted frame as the top frame in the chunk,
  // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
  // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
  // - Regardless of whether the frame was extended or not, we would need to copy the right arg
  //   size if it's greater than the one given by the normal method signature (non-scalarized).
  // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
  //   the chunk (we need unextended_sp, but we only have sp).
  if (num_frames == 1 && !_stream.is_done() &&
      ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
    log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
    num_frames++;
  }

  if (num_frames == 1 || _stream.is_done()) { // end recursion
    finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
    return true; // bottom
  } else { // recurse
    recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
    return false;
  }
}
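
// An example of the second condition above: the frame just taken from the
// stream is compiled and needs_stack_repair(), so its caller is thawed in the
// same pass rather than left as the chunk's top frame, avoiding both the
// argument-size ambiguity and the sp-vs-unextended_sp problem described in
// the comment above.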

void ThawBase::finalize_thaw(frame& entry, int argsize) {
  stackChunkOop chunk = _cont.tail();

  if (!_stream.is_done()) {
    assert(_stream.sp() >= chunk->sp_address(), "");
    chunk->set_sp(chunk->to_offset(_stream.sp()));
    chunk->set_pc(_stream.pc());
// ... [code elided from this excerpt] ...

  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("======== THAWING FRAME: %d", num_frame);
    assert(hf.is_heap_frame(), "should be");
    hf.print_value_on(&ls);
  }
  assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
}

inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
#ifdef ASSERT
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("thawed frame:");
    print_frame_layout(f, false, &ls); // f.print_on(&ls);
  }
#endif
}

inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
  assert(!bottom || caller.fp() == _cont.entryFP(), "");
  if (bottom) {
    ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
                                                                 : StubRoutines::cont_returnBarrier());
  } else if (caller.is_compiled_frame()) {
    // The caller might have been deoptimized during thaw, but we've overwritten the return address when copying f from the heap.
    // If the caller is not deoptimized, the pc is unchanged.
    ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
  }

  patch_pd(f, caller);

  if (f.is_interpreted_frame()) {
    ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
  }

  assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
  assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
}

void ThawBase::clear_bitmap_bits(address start, address end) {
  assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
  assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));

  // we need to clear the bits that correspond to arguments as they reside in the caller frame
  // or they will keep objects that are otherwise unreachable alive.

  // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
// ... [code elided from this excerpt] ...
}

void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
  assert(hf.is_compiled_frame(), "");
  assert(_preempted_case || !stub_caller, "stub caller not at preemption");

  if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
    _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
  }

  const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);

  DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)

  assert(caller.sp() == caller.unextended_sp(), "");

  if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
    _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
  }

  int fsize = 0;
  int added_argsize = 0;
  bool augmented = hf.was_augmented_on_entry(fsize);
  if (!augmented) {
    added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
    fsize += added_argsize;
  }
  assert(!is_bottom_frame || !augmented, "");

  // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
  // yet laid out in the stack, and so the original_pc is not stored in it.
  // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
  frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
  assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");

  intptr_t* const stack_frame_top = f.sp();
  intptr_t* const heap_frame_top = hf.unextended_sp();
  intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
  intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
  // copy metadata, except the metadata at the top of the (unextended) entry frame
  int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);

  // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
  // (we might have one padding word for alignment)
  assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
  assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");

  copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above

  patch(f, caller, is_bottom_frame, augmented);

  // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
  assert(!f.is_deoptimized_frame(), "");
  if (hf.is_deoptimized_frame()) {
    maybe_set_fastpath(f.sp());
  } else if (_thread->is_interp_only_mode()
             || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
    // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
    // cannot rely on nmethod patching for deopt.
    assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");

    log_develop_trace(continuations)("Deoptimizing thawed frame");
    DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));

    f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize, which we're not set up for
    assert(f.is_deoptimized_frame(), "");
    assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
    maybe_set_fastpath(f.sp());
  }

// ... [rest of recurse_thaw_compiled_frame elided from this excerpt] ...