451 inline frame freeze_start_frame_yield_stub();
452 template<typename FKind>
453 inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
454 inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
455 inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
456 freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
457 void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
458 NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
459 freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
460 NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
461 NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
462 NOINLINE void finish_freeze(const frame& f, const frame& top);
463
464 void freeze_lockstack(stackChunkOop chunk);
465
466 inline bool stack_overflow();
467
468 static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
469 : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
470 template<typename FKind> static inline frame sender(const frame& f);
471 template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
472 inline void set_top_frame_metadata_pd(const frame& hf);
473 inline void patch_pd(frame& callee, const frame& caller);
474 inline void patch_pd_unused(intptr_t* sp);
475 void adjust_interpreted_frame_unextended_sp(frame& f);
476 inline void prepare_freeze_interpreted_top_frame(frame& f);
477 static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
478
479 protected:
480 void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
481 bool freeze_fast_new_chunk(stackChunkOop chunk);
482 };
483
484 template <typename ConfigT>
485 class Freeze : public FreezeBase {
486 private:
487 stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
488
489 public:
490 inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
491 : FreezeBase(thread, cont, frame_sp, preempt) {}
492
493 freeze_result try_freeze_fast();
1163
1164 assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1165 assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1166 #endif
1167
1168 return freeze_ok_bottom;
1169 }
1170
1171 // After freezing a frame we may need to adjust some values related to the caller frame.
1172 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1173 if (is_bottom_frame) {
1174 // If we're the bottom frame, we need to replace the return barrier with the real
1175 // caller's pc.
1176 address last_pc = caller.pc();
1177 assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1178 ContinuationHelper::Frame::patch_pc(caller, last_pc);
1179 } else {
1180 assert(!caller.is_empty(), "");
1181 }
1182
1183 patch_pd(hf, caller);
1184
1185 if (f.is_interpreted_frame()) {
1186 assert(hf.is_heap_frame(), "should be");
1187 ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1188 }
1189
1190 #ifdef ASSERT
1191 if (hf.is_compiled_frame()) {
1192 if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1193 log_develop_trace(continuations)("Freezing deoptimized frame");
1194 assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1195 assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1196 }
1197 }
1198 #endif
1199 }
1200
1201 #ifdef ASSERT
1202 static void verify_frame_top(const frame& f, intptr_t* top) {
1203 ResourceMark rm;
1260
1261 CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1262 DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1263 caller = hf;
1264
1265 // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1266 frame_method->record_gc_epoch();
1267
1268 return freeze_ok;
1269 }
1270
1271 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1272 // See also StackChunkFrameStream<frame_kind>::frame_size()
1273 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1274 int callee_argsize /* incl. metadata */,
1275 bool callee_interpreted) {
1276 // The frame's top never includes the stack arguments to the callee
1277 intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1278 intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1279 // including metadata between f and its stackargs
1280 const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
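       // the frozen copy extends past the frame bottom to include the frame's incoming
       // stack args (plus metadata), which reside in the caller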
1281 const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1282
1283 log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
1284 ContinuationHelper::Frame::frame_method(f) != nullptr ?
1285 ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1286 _freeze_size, fsize, argsize);
1287 // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1288 assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1289
1290 freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1291 if (UNLIKELY(result > freeze_ok_bottom)) {
1292 return result;
1293 }
1294
1295 bool is_bottom_frame = result == freeze_ok_bottom;
1296 assert(!caller.is_empty() || is_bottom_frame, "");
1297
1298 DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1299
1300 frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);
1301
1302 intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1303
1304 copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1305 assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1306
1307 if (caller.is_interpreted_frame()) {
1308 // When thawing the frame we might need to add alignment (see Thaw::align)
1309 _total_align_size += frame::align_wiggle;
1310 }
1311
1312 patch(f, hf, caller, is_bottom_frame);
1313
1314 assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1315
1316 DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1317 caller = hf;
1318 return freeze_ok;
1319 }
1320
2060
2061 // Only used for preemption on ObjectLocker
2062 ObjectMonitor* _init_lock;
2063
2064 StackChunkFrameStream<ChunkFrames::Mixed> _stream;
2065
2066 NOT_PRODUCT(int _frames;)
2067
2068 protected:
2069 ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
2070 _thread(thread), _cont(cont),
2071 _fastpath(nullptr) {
2072 DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
2073     assert(cont.tail() != nullptr, "no last chunk");
2074 DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
2075 }
2076
2077 void clear_chunk(stackChunkOop chunk);
2078 template<bool check_stub>
2079 int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
2080 void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
2081
2082 void thaw_lockstack(stackChunkOop chunk);
2083
2084 // fast path
2085 inline void prefetch_chunk_pd(void* start, int size_words);
2086 void patch_return(intptr_t* sp, bool is_last);
2087
2088 intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
2089 inline intptr_t* push_cleanup_continuation();
2090 inline intptr_t* push_preempt_adapter();
2091 intptr_t* redo_vmcall(JavaThread* current, frame& top);
2092 void throw_interrupted_exception(JavaThread* current, frame& top);
2093
2094 void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
2095 void finish_thaw(frame& f);
2096
2097 private:
2098 template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2099 void finalize_thaw(frame& entry, int argsize);
2100
2101 inline bool seen_by_gc();
2102
2103 inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2104 inline void after_thaw_java_frame(const frame& f, bool bottom);
2105 inline void patch(frame& f, const frame& caller, bool bottom);
2106 void clear_bitmap_bits(address start, address end);
2107
2108 NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
2109 void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2110 void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2111 void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2112
2113 void push_return_frame(const frame& f);
2114 inline frame new_entry_frame();
2115 template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
2116 inline void patch_pd(frame& f, const frame& sender);
2117 inline void patch_pd(frame& f, intptr_t* caller_sp);
2118 inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2119
2120 void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2121
2122 static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2123
2124 public:
2125 CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2126 };
2127
2128 template <typename ConfigT>
2129 class Thaw : public ThawBase {
2130 public:
2131 Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2132
2133 inline bool can_thaw_fast(stackChunkOop chunk) {
2134 return !_barriers
2135 && _thread->cont_fastpath_thread_state()
2172 assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2173 }
2174
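       // extra space (in words) the entry frame is extended by to hold the continuation's
       // stack arguments (plus their metadata), if it has any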
2175 int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2176
2177 // top and bottom stack pointers
2178 intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2179 intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2180
2181   // several operations act on the totality of the stack being reconstructed,
2182   // including the metadata words
2183 intptr_t* top() const { return sp() - frame::metadata_words_at_bottom; }
2184 int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2185 };
2186
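     // Makes the chunk empty: sp is set to the chunk's bottom and max_thawing_size is reset.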
2187 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2188 chunk->set_sp(chunk->bottom());
2189 chunk->set_max_thawing_size(0);
2190 }
2191
2192 template<bool check_stub>
2193 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2194 bool empty = false;
2195 StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2196 DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2197 assert(chunk_sp == f.sp(), "");
2198 assert(chunk_sp == f.unextended_sp(), "");
2199
2200 int frame_size = f.cb()->frame_size();
2201 argsize = f.stack_argsize();
2202
2203 assert(!f.is_stub() || check_stub, "");
2204 if (check_stub && f.is_stub()) {
2205 // If we don't thaw the top compiled frame too, after restoring the saved
2206 // registers back in Java, we would hit the return barrier to thaw one more
2207     // frame, effectively overwriting the restored registers during that call.
2208 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2209 assert(!f.is_done(), "");
2210
2211 f.get_cb();
2212 assert(f.is_compiled(), "");
2213 frame_size += f.cb()->frame_size();
2214 argsize = f.stack_argsize();
2215
2216 if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2217 // The caller of the runtime stub when the continuation is preempted is not at a
2218 // Java call instruction, and so cannot rely on nmethod patching for deopt.
2219 log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2220 f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2221 }
2222 }
2223
2224 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2225 empty = f.is_done();
2226 assert(!empty || argsize == chunk->argsize(), "");
2227
2228 if (empty) {
2229 clear_chunk(chunk);
2230 } else {
2231 chunk->set_sp(chunk->sp() + frame_size);
2232 chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2233 // We set chunk->pc to the return pc into the next frame
2234 chunk->set_pc(f.pc());
2235 #ifdef ASSERT
2236 {
2237 intptr_t* retaddr_slot = (chunk_sp
2238 + frame_size
2239 - frame::sender_sp_ret_address_offset());
2240 assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2241 "unexpected pc");
2501 assert(!_cont.is_empty(), "no more frames");
2502 assert(num_frames > 0, "");
2503 assert(!heap_frame.is_empty(), "");
2504
2505 if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2506 heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2507 } else if (!heap_frame.is_interpreted_frame()) {
2508 recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2509 } else {
2510 recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
2511 }
2512 }
2513
2514 template<typename FKind>
2515 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2516 assert(num_frames > 0, "");
2517
2518 DEBUG_ONLY(_frames++;)
2519
2520 int argsize = _stream.stack_argsize();
2521
2522 _stream.next(SmallRegisterMap::instance_no_args());
2523 assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2524
2525 // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2526 // as it makes detecting that situation and adjusting unextended_sp tricky
2527 if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2528 log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2529 num_frames++;
2530 }
2531
2532 if (num_frames == 1 || _stream.is_done()) { // end recursion
2533 finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2534 return true; // bottom
2535 } else { // recurse
2536 recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2537 return false;
2538 }
2539 }
2540
2541 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2542 stackChunkOop chunk = _cont.tail();
2543
2544 if (!_stream.is_done()) {
2545 assert(_stream.sp() >= chunk->sp_address(), "");
2546 chunk->set_sp(chunk->to_offset(_stream.sp()));
2547 chunk->set_pc(_stream.pc());
2567 if (lt.develop_is_enabled()) {
2568 LogStream ls(lt);
2569 ls.print_cr("======== THAWING FRAME: %d", num_frame);
2570 assert(hf.is_heap_frame(), "should be");
2571 hf.print_value_on(&ls);
2572 }
2573   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(caller));
2574 }
2575
2576 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2577 #ifdef ASSERT
2578 LogTarget(Trace, continuations) lt;
2579 if (lt.develop_is_enabled()) {
2580 LogStream ls(lt);
2581 ls.print_cr("thawed frame:");
2582 print_frame_layout(f, false, &ls); // f.print_on(&ls);
2583 }
2584 #endif
2585 }
2586
2587 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
2588 assert(!bottom || caller.fp() == _cont.entryFP(), "");
2589 if (bottom) {
2590 ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2591 : StubRoutines::cont_returnBarrier());
2592 } else {
2593 // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2594 // If the caller is not deoptimized, pc is unchanged.
2595 ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
2596 }
2597
2598 patch_pd(f, caller);
2599
2600 if (f.is_interpreted_frame()) {
2601 ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2602 }
2603
2604 assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2605 assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2606 }
2607
2608 void ThawBase::clear_bitmap_bits(address start, address end) {
2609 assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2610 assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2611
2612   // we need to clear the bits that correspond to arguments, as they reside in the caller
2613   // frame; otherwise they would keep otherwise-unreachable objects alive.
2614
2615 // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2806 }
2807
2808 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2809 assert(hf.is_compiled_frame(), "");
2810 assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2811
2812 if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2813 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2814 }
2815
2816 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2817
2818 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2819
2820 assert(caller.sp() == caller.unextended_sp(), "");
2821
2822 if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2823 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2824 }
2825
2826 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2827 // yet laid out in the stack, and so the original_pc is not stored in it.
2828 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2829 frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2830 intptr_t* const stack_frame_top = f.sp();
2831 intptr_t* const heap_frame_top = hf.unextended_sp();
2832
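       // The frame's incoming stack args overlap the caller. If the caller is compiled (and
       // not the entry), they are thawed as part of the caller; otherwise copy them here.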
2833 const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2834 int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
2835 assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2836
2837 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2838 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
2839 // copy metadata, except the metadata at the top of the (unextended) entry frame
2840 int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2841
2842 // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2843 // (we might have one padding word for alignment)
2844 assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2845   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz == _cont.entrySP()), "");
2846
2847 copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2848
2849 patch(f, caller, is_bottom_frame);
2850
2851 // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2852 assert(!f.is_deoptimized_frame(), "");
2853 if (hf.is_deoptimized_frame()) {
2854 maybe_set_fastpath(f.sp());
2855 } else if (_thread->is_interp_only_mode()
2856 || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2857 // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2858 // cannot rely on nmethod patching for deopt.
2859 assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2860
2861 log_develop_trace(continuations)("Deoptimizing thawed frame");
2862 DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2863
2864 f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2865 assert(f.is_deoptimized_frame(), "");
2866 assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2867 maybe_set_fastpath(f.sp());
2868 }
2869
451 inline frame freeze_start_frame_yield_stub();
452 template<typename FKind>
453 inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
454 inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
455 inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
456 freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
457 void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
458 NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
459 freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
460 NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
461 NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
462 NOINLINE void finish_freeze(const frame& f, const frame& top);
463
464 void freeze_lockstack(stackChunkOop chunk);
465
466 inline bool stack_overflow();
467
468 static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
469 : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
470 template<typename FKind> static inline frame sender(const frame& f);
471 template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
472 inline void set_top_frame_metadata_pd(const frame& hf);
473 inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
474 inline void patch_pd_unused(intptr_t* sp);
475 void adjust_interpreted_frame_unextended_sp(frame& f);
476 inline void prepare_freeze_interpreted_top_frame(frame& f);
477 static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
478
479 protected:
480 void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
481 bool freeze_fast_new_chunk(stackChunkOop chunk);
482 };
483
484 template <typename ConfigT>
485 class Freeze : public FreezeBase {
486 private:
487 stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
488
489 public:
490 inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
491 : FreezeBase(thread, cont, frame_sp, preempt) {}
492
493 freeze_result try_freeze_fast();
1163
1164 assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1165 assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1166 #endif
1167
1168 return freeze_ok_bottom;
1169 }
1170
1171 // After freezing a frame we may need to adjust some values related to the caller frame.
1172 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1173 if (is_bottom_frame) {
1174 // If we're the bottom frame, we need to replace the return barrier with the real
1175 // caller's pc.
1176 address last_pc = caller.pc();
1177 assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1178 ContinuationHelper::Frame::patch_pc(caller, last_pc);
1179 } else {
1180 assert(!caller.is_empty(), "");
1181 }
1182
1183 patch_pd(hf, caller, is_bottom_frame);
1184
1185 if (f.is_interpreted_frame()) {
1186 assert(hf.is_heap_frame(), "should be");
1187 ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1188 }
1189
1190 #ifdef ASSERT
1191 if (hf.is_compiled_frame()) {
1192 if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1193 log_develop_trace(continuations)("Freezing deoptimized frame");
1194 assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1195 assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1196 }
1197 }
1198 #endif
1199 }
1200
1201 #ifdef ASSERT
1202 static void verify_frame_top(const frame& f, intptr_t* top) {
1203 ResourceMark rm;
1260
1261 CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1262 DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1263 caller = hf;
1264
1265 // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1266 frame_method->record_gc_epoch();
1267
1268 return freeze_ok;
1269 }
1270
1271 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1272 // See also StackChunkFrameStream<frame_kind>::frame_size()
1273 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1274 int callee_argsize /* incl. metadata */,
1275 bool callee_interpreted) {
1276 // The frame's top never includes the stack arguments to the callee
1277 intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1278 intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1279 // including metadata between f and its stackargs
1280 int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
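       // the frozen copy extends past the frame bottom to include the frame's incoming
       // stack args (plus metadata), which reside in the caller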
1281 int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1282
1283 int real_frame_size = 0;
1284 bool augmented = f.was_augmented_on_entry(real_frame_size);
1285 if (augmented) {
1286 // The args reside inside the frame so clear argsize. If the caller is compiled,
1287     // this will cause the stack arguments passed by the caller to be frozen when
1288     // freezing the caller frame itself. If the caller is interpreted, this will have
1289     // the effect of discarding the arg area created in the i2c stub.
1290 argsize = 0;
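         // real_frame_size spans the whole extended frame starting at sp; the frozen region
         // starts above a compiled callee's stack args, so subtract those (they were already
         // frozen as part of the callee)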
1291 fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
1292 #ifdef ASSERT
1293 nmethod* nm = f.cb()->as_nmethod();
1294 Method* method = nm->method();
1295 address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
1296 CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
1297 assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
1298 assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
1299 (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
1300 (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
1301 "frame should not be extended");
1302 #endif
1303 }
1304
1305 log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
1306 ContinuationHelper::Frame::frame_method(f) != nullptr ?
1307 ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1308 _freeze_size, fsize, argsize, augmented);
1309 // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1310 assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1311
1312 freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1313 if (UNLIKELY(result > freeze_ok_bottom)) {
1314 return result;
1315 }
1316
1317 bool is_bottom_frame = result == freeze_ok_bottom;
1318 assert(!caller.is_empty() || is_bottom_frame, "");
1319 assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");
1320
1321 DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1322
1323 frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);
1324
1325 intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1326
1327 copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1328 assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1329
1330 if (caller.is_interpreted_frame()) {
1331 // When thawing the frame we might need to add alignment (see Thaw::align)
1332 _total_align_size += frame::align_wiggle;
1333 }
1334
1335 patch(f, hf, caller, is_bottom_frame);
1336
1337 assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1338
1339 DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1340 caller = hf;
1341 return freeze_ok;
1342 }
1343
2083
2084 // Only used for preemption on ObjectLocker
2085 ObjectMonitor* _init_lock;
2086
2087 StackChunkFrameStream<ChunkFrames::Mixed> _stream;
2088
2089 NOT_PRODUCT(int _frames;)
2090
2091 protected:
2092 ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
2093 _thread(thread), _cont(cont),
2094 _fastpath(nullptr) {
2095 DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
2096     assert(cont.tail() != nullptr, "no last chunk");
2097 DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
2098 }
2099
2100 void clear_chunk(stackChunkOop chunk);
2101 template<bool check_stub>
2102 int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
2103 int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, int &argsize);
2104 void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
2105
2106 void thaw_lockstack(stackChunkOop chunk);
2107
2108 // fast path
2109 inline void prefetch_chunk_pd(void* start, int size_words);
2110 void patch_return(intptr_t* sp, bool is_last);
2111
2112 intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
2113 inline intptr_t* push_cleanup_continuation();
2114 inline intptr_t* push_preempt_adapter();
2115 intptr_t* redo_vmcall(JavaThread* current, frame& top);
2116 void throw_interrupted_exception(JavaThread* current, frame& top);
2117
2118 void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
2119 void finish_thaw(frame& f);
2120
2121 private:
2122 template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2123 void finalize_thaw(frame& entry, int argsize);
2124
2125 inline bool seen_by_gc();
2126
2127 inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2128 inline void after_thaw_java_frame(const frame& f, bool bottom);
2129 inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
2130 void clear_bitmap_bits(address start, address end);
2131
2132 NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
2133 void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2134 void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2135 void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2136
2137 void push_return_frame(const frame& f);
2138 inline frame new_entry_frame();
2139 template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
2140 inline void patch_pd(frame& f, const frame& sender);
2141 inline void patch_pd(frame& f, intptr_t* caller_sp);
2142 inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2143
2144 void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2145
2146 static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2147
2148 public:
2149 CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2150 };
2151
2152 template <typename ConfigT>
2153 class Thaw : public ThawBase {
2154 public:
2155 Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2156
2157 inline bool can_thaw_fast(stackChunkOop chunk) {
2158 return !_barriers
2159 && _thread->cont_fastpath_thread_state()
2196 assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2197 }
2198
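       // extra space (in words) the entry frame is extended by to hold the continuation's
       // stack arguments (plus their metadata), if it has any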
2199 int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2200
2201 // top and bottom stack pointers
2202 intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2203 intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2204
2205   // several operations act on the totality of the stack being reconstructed,
2206   // including the metadata words
2207 intptr_t* top() const { return sp() - frame::metadata_words_at_bottom; }
2208 int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2209 };
2210
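     // Makes the chunk empty: sp is set to the chunk's bottom and max_thawing_size is reset.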
2211 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2212 chunk->set_sp(chunk->bottom());
2213 chunk->set_max_thawing_size(0);
2214 }
2215
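     // Walks past a run of consecutive frames that need stack repair (frames extended on entry
     // to accommodate scalarized arguments), plus the first frame that does not, which must be
     // thawed along with them (see recurse_thaw_java_frame). Returns the combined size of these
     // frames in words, sets argsize to the stack argsize of that last frame, and leaves the
     // stream positioned at it.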
2216 int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, int &argsize) {
2217 intptr_t* top = f.sp();
2218
2219 while (f.cb()->as_nmethod()->needs_stack_repair()) {
2220 f.next(SmallRegisterMap::instance_no_args(), false /* stop */);
2221 }
2222 assert(!f.is_done(), "");
2223 assert(f.is_compiled(), "");
2224
2225 intptr_t* bottom = f.sp() + f.cb()->frame_size();
2226 argsize = f.stack_argsize();
2227   return pointer_delta_as_int(bottom, top);
2228 }
2229
2230 template<bool check_stub>
2231 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2232 bool empty = false;
2233 StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2234 DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2235 assert(chunk_sp == f.sp(), "");
2236 assert(chunk_sp == f.unextended_sp(), "");
2237
2238 int frame_size = f.cb()->frame_size();
2239 argsize = f.stack_argsize();
2240
2241 assert(!f.is_stub() || check_stub, "");
2242 if (check_stub && f.is_stub()) {
2243 // If we don't thaw the top compiled frame too, after restoring the saved
2244 // registers back in Java, we would hit the return barrier to thaw one more
2245     // frame, effectively overwriting the restored registers during that call.
2246 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2247 assert(!f.is_done(), "");
2248
2249 f.get_cb();
2250 assert(f.is_compiled(), "");
2251 if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2252 // The caller of the runtime stub when the continuation is preempted is not at a
2253 // Java call instruction, and so cannot rely on nmethod patching for deopt.
2254 log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2255 f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2256 }
2257
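         // The stub's caller may itself need stack repair (its frame may have been extended on
         // entry for scalarized args), in which case cb()->frame_size() is not reliable; compute
         // the size to remove from the actual frame positions in the chunk instead.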
2258 if (f.cb()->as_nmethod()->needs_stack_repair()) {
2259 frame_size += remove_scalarized_frames(f, argsize);
2260 } else {
2261 frame_size += f.cb()->frame_size();
2262 argsize = f.stack_argsize();
2263 }
2264 } else if (f.cb()->as_nmethod()->needs_stack_repair()) {
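         // The top compiled frame needs stack repair: replace the nominal frame_size with the
         // true size of the repaired run, which also takes the first regular caller along.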
2265 frame_size = remove_scalarized_frames(f, argsize);
2266 }
2267
2268 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2269 empty = f.is_done();
2270 assert(!empty || argsize == chunk->argsize(), "");
2271
2272 if (empty) {
2273 clear_chunk(chunk);
2274 } else {
2275 chunk->set_sp(chunk->sp() + frame_size);
2276 chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2277 // We set chunk->pc to the return pc into the next frame
2278 chunk->set_pc(f.pc());
2279 #ifdef ASSERT
2280 {
2281 intptr_t* retaddr_slot = (chunk_sp
2282 + frame_size
2283 - frame::sender_sp_ret_address_offset());
2284 assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2285 "unexpected pc");
2545 assert(!_cont.is_empty(), "no more frames");
2546 assert(num_frames > 0, "");
2547 assert(!heap_frame.is_empty(), "");
2548
2549 if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2550 heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2551 } else if (!heap_frame.is_interpreted_frame()) {
2552 recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2553 } else {
2554 recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
2555 }
2556 }
2557
2558 template<typename FKind>
2559 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2560 assert(num_frames > 0, "");
2561
2562 DEBUG_ONLY(_frames++;)
2563
2564 int argsize = _stream.stack_argsize();
2565 CodeBlob* cb = _stream.cb();
2566
2567 _stream.next(SmallRegisterMap::instance_no_args());
2568 assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2569
2570 // We never leave a compiled caller of an interpreted frame as the top frame in the chunk
2571 // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
2572 // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
2573 // - Regardless of whether the frame was extended or not, we would need to copy the right arg
2574   //   size if it's greater than the one given by the normal method signature (non-scalarized).
2575 // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
2576 // the chunk (we need unextended_sp, but we only have sp).
2577 if (num_frames == 1 && !_stream.is_done() && ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
2578     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller or the caller of a frame needing stack repair at top");
2579 num_frames++;
2580 }
2581
2582 if (num_frames == 1 || _stream.is_done()) { // end recursion
2583 finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2584 return true; // bottom
2585 } else { // recurse
2586 recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2587 return false;
2588 }
2589 }
2590
2591 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2592 stackChunkOop chunk = _cont.tail();
2593
2594 if (!_stream.is_done()) {
2595 assert(_stream.sp() >= chunk->sp_address(), "");
2596 chunk->set_sp(chunk->to_offset(_stream.sp()));
2597 chunk->set_pc(_stream.pc());
2617 if (lt.develop_is_enabled()) {
2618 LogStream ls(lt);
2619 ls.print_cr("======== THAWING FRAME: %d", num_frame);
2620 assert(hf.is_heap_frame(), "should be");
2621 hf.print_value_on(&ls);
2622 }
2623   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(caller));
2624 }
2625
2626 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2627 #ifdef ASSERT
2628 LogTarget(Trace, continuations) lt;
2629 if (lt.develop_is_enabled()) {
2630 LogStream ls(lt);
2631 ls.print_cr("thawed frame:");
2632 print_frame_layout(f, false, &ls); // f.print_on(&ls);
2633 }
2634 #endif
2635 }
2636
2637 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
2638 assert(!bottom || caller.fp() == _cont.entryFP(), "");
2639 if (bottom) {
2640 ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2641 : StubRoutines::cont_returnBarrier());
2642   } else if (caller.is_compiled_frame()) {
2643 // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2644 // If the caller is not deoptimized, pc is unchanged.
2645 ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
2646 }
2647
2648 patch_pd(f, caller);
2649
2650 if (f.is_interpreted_frame()) {
2651 ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2652 }
2653
2654 assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2655 assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2656 }
2657
2658 void ThawBase::clear_bitmap_bits(address start, address end) {
2659 assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2660 assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2661
2662   // we need to clear the bits that correspond to arguments, as they reside in the caller
2663   // frame; otherwise they would keep otherwise-unreachable objects alive.
2664
2665 // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2856 }
2857
2858 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2859 assert(hf.is_compiled_frame(), "");
2860 assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2861
2862 if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2863 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2864 }
2865
2866 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2867
2868 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2869
2870 assert(caller.sp() == caller.unextended_sp(), "");
2871
2872 if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2873 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2874 }
2875
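       // was_augmented_on_entry() reports the frame's real size, including any extension. An
       // augmented frame keeps its stack args inside the frame (argsize was cleared on freeze),
       // so only a non-augmented frame adds the caller-overlap args here.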
2876 int fsize = 0;
2877 int added_argsize = 0;
2878 bool augmented = hf.was_augmented_on_entry(fsize);
2879 if (!augmented) {
2880 added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2881 fsize += added_argsize;
2882 }
2883 assert(!is_bottom_frame || !augmented, "");
2884
2886 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2887 // yet laid out in the stack, and so the original_pc is not stored in it.
2888 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2889 frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
2890 assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");
2891
2892 intptr_t* const stack_frame_top = f.sp();
2893 intptr_t* const heap_frame_top = hf.unextended_sp();
2894 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2895 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
2896 // copy metadata, except the metadata at the top of the (unextended) entry frame
2897 int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2898
2899 // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2900 // (we might have one padding word for alignment)
2901 assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2902   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz == _cont.entrySP()), "");
2903
2904 copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2905
2906 patch(f, caller, is_bottom_frame, augmented);
2907
2908 // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2909 assert(!f.is_deoptimized_frame(), "");
2910 if (hf.is_deoptimized_frame()) {
2911 maybe_set_fastpath(f.sp());
2912 } else if (_thread->is_interp_only_mode()
2913 || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2914 // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2915 // cannot rely on nmethod patching for deopt.
2916 assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2917
2918 log_develop_trace(continuations)("Deoptimizing thawed frame");
2919 DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2920
2921 f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2922 assert(f.is_deoptimized_frame(), "");
2923 assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2924 maybe_set_fastpath(f.sp());
2925 }
2926