
src/hotspot/share/runtime/continuationFreezeThaw.cpp

 440   inline frame freeze_start_frame_yield_stub();
 441   template<typename FKind>
 442   inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
 443   inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
 444   inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
 445   freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
 446   void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
 447   NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 448   freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 449   NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
 450   NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
 451   NOINLINE void finish_freeze(const frame& f, const frame& top);
 452 
 453   void freeze_lockstack(stackChunkOop chunk);
 454 
 455   inline bool stack_overflow();
 456 
 457   static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
 458                                                                         : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
 459   template<typename FKind> static inline frame sender(const frame& f);
 460   template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
 461   inline void set_top_frame_metadata_pd(const frame& hf);
 462   inline void patch_pd(frame& callee, const frame& caller);
 463   inline void patch_pd_unused(intptr_t* sp);
 464   void adjust_interpreted_frame_unextended_sp(frame& f);
 465   static inline void prepare_freeze_interpreted_top_frame(frame& f);
 466   static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
 467 
 468 protected:
 469   void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
 470   bool freeze_fast_new_chunk(stackChunkOop chunk);
 471 };
 472 
 473 template <typename ConfigT>
 474 class Freeze : public FreezeBase {
 475 private:
 476   stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
 477 
 478 public:
 479   inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
 480     : FreezeBase(thread, cont, frame_sp, preempt) {}
 481 
 482   freeze_result try_freeze_fast();

1138 
1139   assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1140   assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1141 #endif
1142 
1143   return freeze_ok_bottom;
1144 }
1145 
1146 // After freezing a frame, we may need to adjust some values related to the caller frame.
1147 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1148   if (is_bottom_frame) {
1149     // If we're the bottom frame, we need to replace the return barrier with the real
1150     // caller's pc.
1151     address last_pc = caller.pc();
1152     assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1153     ContinuationHelper::Frame::patch_pc(caller, last_pc);
1154   } else {
1155     assert(!caller.is_empty(), "");
1156   }
1157 
1158   patch_pd(hf, caller);
1159 
1160   if (f.is_interpreted_frame()) {
1161     assert(hf.is_heap_frame(), "should be");
1162     ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1163   }
1164 
1165 #ifdef ASSERT
1166   if (hf.is_compiled_frame()) {
1167     if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1168       log_develop_trace(continuations)("Freezing deoptimized frame");
1169       assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1170       assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1171     }
1172   }
1173 #endif
1174 }
1175 
1176 #ifdef ASSERT
1177 static void verify_frame_top(const frame& f, intptr_t* top) {
1178   ResourceMark rm;

1235 
1236   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1237   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1238   caller = hf;
1239 
1240   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1241   frame_method->record_gc_epoch();
1242 
1243   return freeze_ok;
1244 }
1245 
1246 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1247 // See also StackChunkFrameStream<frame_kind>::frame_size()
1248 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1249                                                         int callee_argsize /* incl. metadata */,
1250                                                         bool callee_interpreted) {
1251   // The frame's top never includes the stack arguments to the callee
1252   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1253   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1254   // including metadata between f and its stackargs
1255   const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1256   const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1257 
1258   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
1259                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1260                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1261                              _freeze_size, fsize, argsize);
1262   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1263   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1264 
1265   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1266   if (UNLIKELY(result > freeze_ok_bottom)) {
1267     return result;
1268   }
1269 
1270   bool is_bottom_frame = result == freeze_ok_bottom;
1271   assert(!caller.is_empty() || is_bottom_frame, "");

1272 
1273   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1274 
1275   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);
1276 
1277   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1278 
1279   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1280   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1281 
1282   if (caller.is_interpreted_frame()) {
1283     // When thawing the frame we might need to add alignment (see Thaw::align)
1284     _total_align_size += frame::align_wiggle;
1285   }
1286 
1287   patch(f, hf, caller, is_bottom_frame);
1288 
1289   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1290 
1291   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1292   caller = hf;
1293   return freeze_ok;
1294 }
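
The size computation at the top of this function reduces to plain pointer arithmetic: the frozen span runs from the frame's top (which excludes the callee's stack arguments) down past the frame's own stack arguments plus the metadata words between the frame and those arguments. A minimal standalone sketch of that arithmetic, with all names and word counts hypothetical:

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for ContinuationHelper::CompiledFrame::frame_top/frame_bottom.
// The stack grows down, so "top" is the lowest address; all sizes are in words.
static int compute_fsize(intptr_t* frame_top, intptr_t* frame_bottom,
                         int stack_argsize, int metadata_words_at_top) {
  int argsize = stack_argsize + metadata_words_at_top;            // incl. metadata between f and its stackargs
  return static_cast<int>((frame_bottom + argsize) - frame_top);  // pointer_delta_as_int equivalent
}

int main() {
  intptr_t stack[64] = {};
  intptr_t* top    = stack + 10;  // callee side
  intptr_t* bottom = stack + 40;  // caller side
  int fsize = compute_fsize(top, bottom, /*stack_argsize=*/4, /*metadata_words_at_top=*/2);
  assert(fsize == 36);            // 30 frame words + 4 arg words + 2 metadata words
  return 0;
}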
1295 

1908   intptr_t* _top_unextended_sp_before_thaw;
1909   int _align_size;
1910   DEBUG_ONLY(intptr_t* _top_stack_address);
1911 
1912   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1913 
1914   NOT_PRODUCT(int _frames;)
1915 
1916 protected:
1917   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1918       _thread(thread), _cont(cont),
1919       _fastpath(nullptr) {
1920     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1921     assert (cont.tail() != nullptr, "no last chunk");
1922     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1923   }
1924 
1925   void clear_chunk(stackChunkOop chunk);
1926   template<bool check_stub>
1927   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);

1928   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1929 
1930   void thaw_lockstack(stackChunkOop chunk);
1931 
1932   // fast path
1933   inline void prefetch_chunk_pd(void* start, int size_words);
1934   void patch_return(intptr_t* sp, bool is_last);
1935 
1936   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1937   inline intptr_t* push_cleanup_continuation();
1938   void throw_interrupted_exception(JavaThread* current, frame& top);
1939 
1940   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1941   void finish_thaw(frame& f);
1942 
1943 private:
1944   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1945   void finalize_thaw(frame& entry, int argsize);
1946 
1947   inline bool seen_by_gc();
1948 
1949   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1950   inline void after_thaw_java_frame(const frame& f, bool bottom);
1951   inline void patch(frame& f, const frame& caller, bool bottom);
1952   void clear_bitmap_bits(address start, address end);
1953 
1954   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1955   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1956   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1957   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1958 
1959   void push_return_frame(frame& f);
1960   inline frame new_entry_frame();
1961   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
1962   inline void patch_pd(frame& f, const frame& sender);
1963   inline void patch_pd(frame& f, intptr_t* caller_sp);
1964   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
1965 
1966   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
1967 
1968   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
1969 
1970  public:
1971   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
1972 };
1973 
1974 template <typename ConfigT>
1975 class Thaw : public ThawBase {
1976 public:
1977   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
1978 
1979   inline bool can_thaw_fast(stackChunkOop chunk) {
1980     return    !_barriers
1981            &&  _thread->cont_fastpath_thread_state()

2018     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2019   }
2020 
2021   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2022 
2023   // top and bottom stack pointers
2024   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2025   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2026 
2027   // several operations operate on the totality of the stack being reconstructed,
2028   // including the metadata words
2029   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2030   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2031 };
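
The accessors above all hang off _base and _thaw_size; with frame_align_pointer reduced to the identity, the relationships are easy to check standalone. A hedged model, with all word counts hypothetical:

#include <cassert>
#include <cstdint>

// Hypothetical mirror of the reconstructed-stack helper above; sizes in words.
struct StackModel {
  intptr_t* base;      // _base: bottom of the area being reconstructed
  int thaw_size;       // _thaw_size
  int argsize;         // _argsize
  static const int metadata_at_top = 2;     // stand-in for frame::metadata_words_at_top
  static const int metadata_at_bottom = 2;  // stand-in for frame::metadata_words_at_bottom

  int entry_frame_extension() const { return argsize + (argsize > 0 ? metadata_at_top : 0); }
  intptr_t* sp() const        { return base - thaw_size; }
  intptr_t* bottom_sp() const { return base - entry_frame_extension(); }
  intptr_t* top() const       { return sp() - metadata_at_bottom; }
  int total_size() const      { return thaw_size + metadata_at_bottom; }
};

int main() {
  intptr_t stack[64];
  StackModel m{stack + 60, /*thaw_size=*/20, /*argsize=*/4};
  assert(m.sp() == stack + 40);
  assert(m.top() == m.sp() - 2);
  assert(m.bottom_sp() == stack + 54);  // base - (argsize + metadata_at_top)
  assert(m.total_size() == 22);
  return 0;
}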
2032 
2033 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2034   chunk->set_sp(chunk->bottom());
2035   chunk->set_max_thawing_size(0);
2036 }
2037 
2038 template<bool check_stub>
2039 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2040   bool empty = false;
2041   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2042   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2043   assert(chunk_sp == f.sp(), "");
2044   assert(chunk_sp == f.unextended_sp(), "");
2045 
2046   int frame_size = f.cb()->frame_size();
2047   argsize = f.stack_argsize();
2048 
2049   assert(!f.is_stub() || check_stub, "");
2050   if (check_stub && f.is_stub()) {
2051     // If we don't thaw the top compiled frame too, then after restoring the saved
2052     // registers back in Java we would hit the return barrier to thaw one more
2053     // frame, effectively overwriting the restored registers during that call.
2054     f.next(SmallRegisterMap::instance(), true /* stop */);
2055     assert(!f.is_done(), "");
2056 
2057     f.get_cb();
2058     assert(f.is_compiled(), "");
2059     frame_size += f.cb()->frame_size();
2060     argsize = f.stack_argsize();
2061 
2062     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2063       // The caller of the runtime stub when the continuation is preempted is not at a
2064       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2065       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2066       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2067     }
2068   }
2069 
2070   f.next(SmallRegisterMap::instance(), true /* stop */);
2071   empty = f.is_done();
2072   assert(!empty || argsize == chunk->argsize(), "");
2073 
2074   if (empty) {
2075     clear_chunk(chunk);
2076   } else {
2077     chunk->set_sp(chunk->sp() + frame_size);
2078     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2079     // We set chunk->pc to the return pc into the next frame
2080     chunk->set_pc(f.pc());
2081 #ifdef ASSERT
2082     {
2083       intptr_t* retaddr_slot = (chunk_sp
2084                                 + frame_size
2085                                 - frame::sender_sp_ret_address_offset());
2086       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2087              "unexpected pc");

2327   assert(!_cont.is_empty(), "no more frames");
2328   assert(num_frames > 0, "");
2329   assert(!heap_frame.is_empty(), "");
2330 
2331   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2332     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2333   } else if (!heap_frame.is_interpreted_frame()) {
2334     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2335   } else {
2336     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2337   }
2338 }
2339 
2340 template<typename FKind>
2341 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2342   assert(num_frames > 0, "");
2343 
2344   DEBUG_ONLY(_frames++;)
2345 
2346   int argsize = _stream.stack_argsize();

2347 
2348   _stream.next(SmallRegisterMap::instance());
2349   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2350 
2351   // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2352   // as it makes detecting that situation and adjusting unextended_sp tricky
2353   if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2354     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2355     num_frames++;
2356   }
2357 
2358   if (num_frames == 1 || _stream.is_done()) { // end recursion
2359     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2360     return true; // bottom
2361   } else { // recurse
2362     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2363     return false;
2364   }
2365 }
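
The recursion above thaws at most num_frames frames eagerly and leaves the rest frozen for the return barrier to thaw lazily. A toy model of just that termination logic, with the chunk stream reduced to a frame count (everything here is hypothetical):

#include <cstdio>

// frames_left stands in for StackChunkFrameStream; returns true for the
// bottom-most thawed frame (the one whose caller keeps the return barrier).
static bool recurse_thaw_model(int& frames_left, int num_frames) {
  --frames_left;                                   // _stream.next(): step past this heap frame
  if (num_frames == 1 || frames_left == 0) {
    return true;                                   // end recursion (finalize_thaw)
  }
  recurse_thaw_model(frames_left, num_frames - 1); // thaw the caller first...
  return false;                                    // ...then this frame, on the way back up
}

int main() {
  int frames_left = 5;
  bool bottom = recurse_thaw_model(frames_left, 2);
  printf("top frame is bottom-most thawed: %d, frames left frozen: %d\n", bottom, frames_left);
  // prints: top frame is bottom-most thawed: 0, frames left frozen: 3
  return 0;
}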
2366 
2367 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2368   stackChunkOop chunk = _cont.tail();
2369 
2370   if (!_stream.is_done()) {
2371     assert(_stream.sp() >= chunk->sp_address(), "");
2372     chunk->set_sp(chunk->to_offset(_stream.sp()));
2373     chunk->set_pc(_stream.pc());

2393   if (lt.develop_is_enabled()) {
2394     LogStream ls(lt);
2395     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2396     assert(hf.is_heap_frame(), "should be");
2397     hf.print_value_on(&ls);
2398   }
2399   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2400 }
2401 
2402 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2403 #ifdef ASSERT
2404   LogTarget(Trace, continuations) lt;
2405   if (lt.develop_is_enabled()) {
2406     LogStream ls(lt);
2407     ls.print_cr("thawed frame:");
2408     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2409   }
2410 #endif
2411 }
2412 
2413 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
2414   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2415   if (bottom) {
2416     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2417                                                                  : StubRoutines::cont_returnBarrier());
2418   } else {
2419     // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2420     // If the caller is not deoptimized, pc is unchanged.
2421     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
2422   }
2423 
2424   patch_pd(f, caller);
2425 
2426   if (f.is_interpreted_frame()) {
2427     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2428   }
2429 
2430   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2431   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2432 }
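
The branch structure above decides which pc ends up in the caller's return-address slot. A condensed standalone sketch of just that choice (types and names hypothetical; in the real code the barrier pc comes from StubRoutines::cont_returnBarrier()):

#include <cstdio>

typedef const char* address;  // hypothetical stand-in for HotSpot's address type

// Bottom frame: re-arm the return barrier unless the continuation is now empty.
// Non-bottom frame: rewrite the raw pc so a caller deoptimized during the thaw
// returns through its deopt handler (the pc is unchanged if not deoptimized).
static address choose_patch_pc(bool bottom, bool cont_empty,
                               address caller_pc, address caller_raw_pc,
                               address return_barrier) {
  if (bottom) {
    return cont_empty ? caller_pc : return_barrier;
  }
  return caller_raw_pc;
}

int main() {
  printf("%s\n", choose_patch_pc(true, false, "pc", "raw_pc", "cont_returnBarrier"));
  // prints: cont_returnBarrier
  return 0;
}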
2433 
2434 void ThawBase::clear_bitmap_bits(address start, address end) {
2435   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2436   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2437 
2438   // we need to clear the bits that correspond to arguments as they reside in the caller frame
2439   // or they will keep objects that are otherwise unreachable alive.
2440 
2441   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since

2564 }
2565 
2566 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2567   assert(hf.is_compiled_frame(), "");
2568   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2569 
2570   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2571     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2572   }
2573 
2574   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2575 
2576   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2577 
2578   assert(caller.sp() == caller.unextended_sp(), "");
2579 
2580   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2581     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2582   }
2583 
2584   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2585   // yet laid out in the stack, and so the original_pc is not stored in it.
2586   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2587   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2588   intptr_t* const stack_frame_top = f.sp();
2589   intptr_t* const heap_frame_top = hf.unextended_sp();
2590 
2591   const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2592   int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
2593   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2594 
2595   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2596   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2597   // copy metadata, except the metadata at the top of the (unextended) entry frame
2598   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2599 
2600   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2601   // (we might have one padding word for alignment)
2602   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2603   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
2604 
2605   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2606 
2607   patch(f, caller, is_bottom_frame);
2608 
2609   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2610   assert(!f.is_deoptimized_frame(), "");
2611   if (hf.is_deoptimized_frame()) {
2612     maybe_set_fastpath(f.sp());
2613   } else if (_thread->is_interp_only_mode()
2614               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2615     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2616     // cannot rely on nmethod patching for deopt.
2617     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2618 
2619     log_develop_trace(continuations)("Deoptimizing thawed frame");
2620     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2621 
2622     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2623     assert(f.is_deoptimized_frame(), "");
2624     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2625     maybe_set_fastpath(f.sp());
2626   }
2627 

 440   inline frame freeze_start_frame_yield_stub();
 441   template<typename FKind>
 442   inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
 443   inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
 444   inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
 445   freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
 446   void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
 447   NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 448   freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 449   NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
 450   NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
 451   NOINLINE void finish_freeze(const frame& f, const frame& top);
 452 
 453   void freeze_lockstack(stackChunkOop chunk);
 454 
 455   inline bool stack_overflow();
 456 
 457   static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
 458                                                                         : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
 459   template<typename FKind> static inline frame sender(const frame& f);
 460   template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
 461   inline void set_top_frame_metadata_pd(const frame& hf);
 462   inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
 463   inline void patch_pd_unused(intptr_t* sp);
 464   void adjust_interpreted_frame_unextended_sp(frame& f);
 465   static inline void prepare_freeze_interpreted_top_frame(frame& f);
 466   static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
 467 
 468 protected:
 469   void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
 470   bool freeze_fast_new_chunk(stackChunkOop chunk);
 471 };
 472 
 473 template <typename ConfigT>
 474 class Freeze : public FreezeBase {
 475 private:
 476   stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
 477 
 478 public:
 479   inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
 480     : FreezeBase(thread, cont, frame_sp, preempt) {}
 481 
 482   freeze_result try_freeze_fast();

1138 
1139   assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1140   assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1141 #endif
1142 
1143   return freeze_ok_bottom;
1144 }
1145 
1146 // After freezing a frame, we may need to adjust some values related to the caller frame.
1147 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1148   if (is_bottom_frame) {
1149     // If we're the bottom frame, we need to replace the return barrier with the real
1150     // caller's pc.
1151     address last_pc = caller.pc();
1152     assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1153     ContinuationHelper::Frame::patch_pc(caller, last_pc);
1154   } else {
1155     assert(!caller.is_empty(), "");
1156   }
1157 
1158   patch_pd(hf, caller, is_bottom_frame);
1159 
1160   if (f.is_interpreted_frame()) {
1161     assert(hf.is_heap_frame(), "should be");
1162     ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1163   }
1164 
1165 #ifdef ASSERT
1166   if (hf.is_compiled_frame()) {
1167     if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1168       log_develop_trace(continuations)("Freezing deoptimized frame");
1169       assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1170       assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1171     }
1172   }
1173 #endif
1174 }
1175 
1176 #ifdef ASSERT
1177 static void verify_frame_top(const frame& f, intptr_t* top) {
1178   ResourceMark rm;

1235 
1236   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1237   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1238   caller = hf;
1239 
1240   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1241   frame_method->record_gc_epoch();
1242 
1243   return freeze_ok;
1244 }
1245 
1246 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1247 // See also StackChunkFrameStream<frame_kind>::frame_size()
1248 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1249                                                         int callee_argsize /* incl. metadata */,
1250                                                         bool callee_interpreted) {
1251   // The frame's top never includes the stack arguments to the callee
1252   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1253   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1254   // including metadata between f and its stackargs
1255   int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1256   int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1257 
1258   int real_frame_size = 0;
1259   bool augmented = f.was_augmented_on_entry(real_frame_size);
1260   if (augmented) {
1261     // The args reside inside the frame so clear argsize. If the caller is compiled,
1262     // this will cause the stack arguments passed by the caller to be frozen when
1263     // freezing the caller frame itself. If the caller is interpreted this will have
1264     // the effect of discarding the arg area created in the i2c stub.
1265     argsize = 0;
1266     fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
1267 #ifdef ASSERT
1268     nmethod* nm = f.cb()->as_nmethod();
1269     Method* method = nm->method();
1270     address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
1271     CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
1272     assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
1273     assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
1274            (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
1275            (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
1276            "frame should not be extended");
1277 #endif
1278   }
1279 
1280   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
1281                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1282                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1283                              _freeze_size, fsize, argsize, augmented);
1284   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1285   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1286 
1287   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1288   if (UNLIKELY(result > freeze_ok_bottom)) {
1289     return result;
1290   }
1291 
1292   bool is_bottom_frame = result == freeze_ok_bottom;
1293   assert(!caller.is_empty() || is_bottom_frame, "");
1294   assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");
1295 
1296   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1297 
1298   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);
1299 
1300   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1301 
1302   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1303   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1304 
1305   if (caller.is_interpreted_frame()) {
1306     // When thawing the frame we might need to add alignment (see Thaw::align)
1307     _total_align_size += frame::align_wiggle;
1308   }
1309 
1310   patch(f, hf, caller, is_bottom_frame);
1311 
1312   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1313 
1314   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1315   caller = hf;
1316   return freeze_ok;
1317 }
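
For an augmented frame, the branch near the top of this function flips the overlap logic: the arguments live inside the extended frame, so argsize drops to zero and fsize is derived from the real (extended) size minus any overlap with a compiled callee. A standalone sketch of that adjustment, with all values hypothetical:

#include <cassert>

struct FreezeSizes { int fsize; int argsize; };

// Hypothetical mirror of the augmented-frame size adjustment; sizes in words.
static FreezeSizes compiled_freeze_sizes(int nominal_fsize, int nominal_argsize,
                                         bool augmented, int real_frame_size,
                                         bool callee_interpreted, int callee_argsize) {
  if (!augmented) {
    return { nominal_fsize, nominal_argsize };
  }
  // Args reside inside the extended frame: nothing overlaps with the caller.
  int fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
  return { fsize, 0 };
}

int main() {
  FreezeSizes s = compiled_freeze_sizes(36, 6, /*augmented=*/true, /*real_frame_size=*/48,
                                        /*callee_interpreted=*/false, /*callee_argsize=*/4);
  assert(s.fsize == 44 && s.argsize == 0);
  return 0;
}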
1318 

1931   intptr_t* _top_unextended_sp_before_thaw;
1932   int _align_size;
1933   DEBUG_ONLY(intptr_t* _top_stack_address);
1934 
1935   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1936 
1937   NOT_PRODUCT(int _frames;)
1938 
1939 protected:
1940   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1941       _thread(thread), _cont(cont),
1942       _fastpath(nullptr) {
1943     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1944     assert (cont.tail() != nullptr, "no last chunk");
1945     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1946   }
1947 
1948   void clear_chunk(stackChunkOop chunk);
1949   template<bool check_stub>
1950   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1951   int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, int &argsize);
1952   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1953 
1954   void thaw_lockstack(stackChunkOop chunk);
1955 
1956   // fast path
1957   inline void prefetch_chunk_pd(void* start, int size_words);
1958   void patch_return(intptr_t* sp, bool is_last);
1959 
1960   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1961   inline intptr_t* push_cleanup_continuation();
1962   void throw_interrupted_exception(JavaThread* current, frame& top);
1963 
1964   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1965   void finish_thaw(frame& f);
1966 
1967 private:
1968   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1969   void finalize_thaw(frame& entry, int argsize);
1970 
1971   inline bool seen_by_gc();
1972 
1973   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1974   inline void after_thaw_java_frame(const frame& f, bool bottom);
1975   inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
1976   void clear_bitmap_bits(address start, address end);
1977 
1978   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1979   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1980   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1981   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1982 
1983   void push_return_frame(frame& f);
1984   inline frame new_entry_frame();
1985   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
1986   inline void patch_pd(frame& f, const frame& sender);
1987   inline void patch_pd(frame& f, intptr_t* caller_sp);
1988   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
1989 
1990   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
1991 
1992   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
1993 
1994  public:
1995   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
1996 };
1997 
1998 template <typename ConfigT>
1999 class Thaw : public ThawBase {
2000 public:
2001   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2002 
2003   inline bool can_thaw_fast(stackChunkOop chunk) {
2004     return    !_barriers
2005            &&  _thread->cont_fastpath_thread_state()

2042     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2043   }
2044 
2045   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2046 
2047   // top and bottom stack pointers
2048   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2049   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2050 
2051   // several operations operate on the totality of the stack being reconstructed,
2052   // including the metadata words
2053   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2054   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2055 };
2056 
2057 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2058   chunk->set_sp(chunk->bottom());
2059   chunk->set_max_thawing_size(0);
2060 }
2061 
2062 int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, int &argsize) {
2063   intptr_t* top = f.sp();
2064 
2065   while (f.cb()->as_nmethod()->needs_stack_repair()) {
2066     f.next(SmallRegisterMap::instance(), false /* stop */);
2067   }
2068   assert(!f.is_done(), "");
2069   assert(f.is_compiled(), "");
2070 
2071   intptr_t* bottom = f.sp() + f.cb()->frame_size();
2072   argsize = f.stack_argsize();
2073   return bottom - top;
2074 }
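
remove_scalarized_frames walks past every consecutive frame whose nmethod needs_stack_repair and returns the total span in words, leaving argsize set from the first normal frame it lands on. A self-contained model with the chunk stream replaced by an array (everything here is hypothetical; frame_size stands for the actual on-stack size the stream steps over):

#include <cassert>
#include <cstddef>

struct FakeFrame { int frame_size; int stack_argsize; bool needs_stack_repair; };

// Hypothetical model: i indexes frames[] the way the stream steps down the chunk.
static int remove_scalarized_model(const FakeFrame* frames, size_t n,
                                   size_t& i, int& argsize) {
  int words = 0;
  while (i < n && frames[i].needs_stack_repair) {  // skip the repaired (extended) frames
    words += frames[i].frame_size;
    ++i;
  }
  assert(i < n);                                   // a normal compiled frame must follow
  words += frames[i].frame_size;                   // include that last frame too
  argsize = frames[i].stack_argsize;
  return words;
}

int main() {
  FakeFrame frames[] = { {12, 0, true}, {20, 0, true}, {16, 4, false} };
  size_t i = 0; int argsize = 0;
  int total = remove_scalarized_model(frames, 3, i, argsize);
  assert(total == 48 && argsize == 4 && i == 2);
  return 0;
}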
2075 
2076 template<bool check_stub>
2077 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2078   bool empty = false;
2079   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2080   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2081   assert(chunk_sp == f.sp(), "");
2082   assert(chunk_sp == f.unextended_sp(), "");
2083 
2084   int frame_size = f.cb()->frame_size();
2085   argsize = f.stack_argsize();
2086 
2087   assert(!f.is_stub() || check_stub, "");
2088   if (check_stub && f.is_stub()) {
2089     // If we don't thaw the top compiled frame too, then after restoring the saved
2090     // registers back in Java we would hit the return barrier to thaw one more
2091     // frame, effectively overwriting the restored registers during that call.
2092     f.next(SmallRegisterMap::instance(), true /* stop */);
2093     assert(!f.is_done(), "");
2094 
2095     f.get_cb();
2096     assert(f.is_compiled(), "");
2097     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2098       // The caller of the runtime stub when the continuation is preempted is not at a
2099       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2100       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2101       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2102     }
2103 
2104     if (f.cb()->as_nmethod()->needs_stack_repair()) {
2105       frame_size += remove_scalarized_frames(f, argsize);
2106     } else {
2107       frame_size += f.cb()->frame_size();
2108       argsize = f.stack_argsize();
2109     }
2110   } else if (f.cb()->as_nmethod()->needs_stack_repair()) {
2111     frame_size = remove_scalarized_frames(f, argsize);
2112   }
2113 
2114   f.next(SmallRegisterMap::instance(), true /* stop */);
2115   empty = f.is_done();
2116   assert(!empty || argsize == chunk->argsize(), "");
2117 
2118   if (empty) {
2119     clear_chunk(chunk);
2120   } else {
2121     chunk->set_sp(chunk->sp() + frame_size);
2122     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2123     // We set chunk->pc to the return pc into the next frame
2124     chunk->set_pc(f.pc());
2125 #ifdef ASSERT
2126     {
2127       intptr_t* retaddr_slot = (chunk_sp
2128                                 + frame_size
2129                                 - frame::sender_sp_ret_address_offset());
2130       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2131              "unexpected pc");

2371   assert(!_cont.is_empty(), "no more frames");
2372   assert(num_frames > 0, "");
2373   assert(!heap_frame.is_empty(), "");
2374 
2375   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2376     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2377   } else if (!heap_frame.is_interpreted_frame()) {
2378     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2379   } else {
2380     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2381   }
2382 }
2383 
2384 template<typename FKind>
2385 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2386   assert(num_frames > 0, "");
2387 
2388   DEBUG_ONLY(_frames++;)
2389 
2390   int argsize = _stream.stack_argsize();
2391   CodeBlob* cb = _stream.cb();
2392 
2393   _stream.next(SmallRegisterMap::instance());
2394   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2395 
2396   // We never leave a compiled caller of an interpreted frame as the top frame in the chunk
2397   // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
2398   // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
2399   // - Regardless of whether the frame was extended or not, we would need to copy the right arg
2400   //   size if it's greater than the one given by the normal method signature (non-scalarized).
2401   // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
2402   //   the chunk (we need unextended_sp, but we only have sp).
2403   if (num_frames == 1 && !_stream.is_done() && ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
2404     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2405     num_frames++;
2406   }
2407 
2408   if (num_frames == 1 || _stream.is_done()) { // end recursion
2409     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2410     return true; // bottom
2411   } else { // recurse
2412     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2413     return false;
2414   }
2415 }
2416 
2417 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2418   stackChunkOop chunk = _cont.tail();
2419 
2420   if (!_stream.is_done()) {
2421     assert(_stream.sp() >= chunk->sp_address(), "");
2422     chunk->set_sp(chunk->to_offset(_stream.sp()));
2423     chunk->set_pc(_stream.pc());

2443   if (lt.develop_is_enabled()) {
2444     LogStream ls(lt);
2445     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2446     assert(hf.is_heap_frame(), "should be");
2447     hf.print_value_on(&ls);
2448   }
2449   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2450 }
2451 
2452 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2453 #ifdef ASSERT
2454   LogTarget(Trace, continuations) lt;
2455   if (lt.develop_is_enabled()) {
2456     LogStream ls(lt);
2457     ls.print_cr("thawed frame:");
2458     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2459   }
2460 #endif
2461 }
2462 
2463 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
2464   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2465   if (bottom) {
2466     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2467                                                                  : StubRoutines::cont_returnBarrier());
2468   } else if (caller.is_compiled_frame()) {
2469     // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2470     // If the caller is not deoptimized, pc is unchanged.
2471     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
2472   }
2473 
2474   patch_pd(f, caller);
2475 
2476   if (f.is_interpreted_frame()) {
2477     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2478   }
2479 
2480   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2481   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2482 }
2483 
2484 void ThawBase::clear_bitmap_bits(address start, address end) {
2485   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2486   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2487 
2488   // we need to clear the bits that correspond to arguments as they reside in the caller frame
2489   // or they will keep objects that are otherwise unreachable alive.
2490 
2491   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since

2614 }
2615 
2616 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2617   assert(hf.is_compiled_frame(), "");
2618   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2619 
2620   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2621     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2622   }
2623 
2624   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2625 
2626   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2627 
2628   assert(caller.sp() == caller.unextended_sp(), "");
2629 
2630   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2631     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2632   }
2633 
2634   int fsize = 0;
2635   int added_argsize = 0;
2636   bool augmented = hf.was_augmented_on_entry(fsize);
2637   if (!augmented) {
2638     added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2639     fsize += added_argsize;
2640   }
2641   assert(!is_bottom_frame || !augmented, "");
2642 
2643 
2644   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2645   // yet laid out in the stack, and so the original_pc is not stored in it.
2646   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2647   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
2648   assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");
2649 
2650   intptr_t* const stack_frame_top = f.sp();
2651   intptr_t* const heap_frame_top = hf.unextended_sp();
2652   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2653   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2654   // copy metadata, except the metadata at the top of the (unextended) entry frame
2655   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2656 
2657   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2658   // (we might have one padding word for alignment)
2659   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2660   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
2661 
2662   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2663 
2664   patch(f, caller, is_bottom_frame, augmented);
2665 
2666   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2667   assert(!f.is_deoptimized_frame(), "");
2668   if (hf.is_deoptimized_frame()) {
2669     maybe_set_fastpath(f.sp());
2670   } else if (_thread->is_interp_only_mode()
2671               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2672     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2673     // cannot rely on nmethod patching for deopt.
2674     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2675 
2676     log_develop_trace(continuations)("Deoptimizing thawed frame");
2677     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2678 
2679     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2680     assert(f.is_deoptimized_frame(), "");
2681     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2682     maybe_set_fastpath(f.sp());
2683   }
2684 