src/hotspot/share/runtime/continuationFreezeThaw.cpp

 440   inline frame freeze_start_frame_yield_stub();
 441   template<typename FKind>
 442   inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
 443   inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
 444   inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
 445   freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
 446   void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
 447   NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 448   freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 449   NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
 450   NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
 451   NOINLINE void finish_freeze(const frame& f, const frame& top);
 452 
 453   void freeze_lockstack(stackChunkOop chunk);
 454 
 455   inline bool stack_overflow();
 456 
 457   static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
 458                                                                         : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
 459   template<typename FKind> static inline frame sender(const frame& f);
 460   template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
 461   inline void set_top_frame_metadata_pd(const frame& hf);
 462   inline void patch_pd(frame& callee, const frame& caller);
 463   inline void patch_pd_unused(intptr_t* sp);
 464   void adjust_interpreted_frame_unextended_sp(frame& f);
 465   static inline void prepare_freeze_interpreted_top_frame(frame& f);
 466   static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
 467 
 468 protected:
 469   void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
 470   bool freeze_fast_new_chunk(stackChunkOop chunk);
 471 };
 472 
 473 template <typename ConfigT>
 474 class Freeze : public FreezeBase {
 475 private:
 476   stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
 477 
 478 public:
 479   inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
 480     : FreezeBase(thread, cont, frame_sp, preempt) {}
 481 
 482   freeze_result try_freeze_fast();

1163 
1164   assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1165   assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1166 #endif
1167 
1168   return freeze_ok_bottom;
1169 }
1170 
1171 // After freezing a frame we may need to adjust some values related to the caller frame.
1172 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1173   if (is_bottom_frame) {
1174     // If we're the bottom frame, we need to replace the return barrier with the real
1175     // caller's pc.
1176     address last_pc = caller.pc();
1177     assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1178     ContinuationHelper::Frame::patch_pc(caller, last_pc);
1179   } else {
1180     assert(!caller.is_empty(), "");
1181   }
1182 
1183   patch_pd(hf, caller);
1184 
1185   if (f.is_interpreted_frame()) {
1186     assert(hf.is_heap_frame(), "should be");
1187     ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1188   }
1189 
1190 #ifdef ASSERT
1191   if (hf.is_compiled_frame()) {
1192     if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1193       log_develop_trace(continuations)("Freezing deoptimized frame");
1194       assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1195       assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1196     }
1197   }
1198 #endif
1199 }
1200 
1201 #ifdef ASSERT
1202 static void verify_frame_top(const frame& f, intptr_t* top) {
1203   ResourceMark rm;

1260 
1261   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1262   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1263   caller = hf;
1264 
1265   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1266   frame_method->record_gc_epoch();
1267 
1268   return freeze_ok;
1269 }
1270 
1271 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1272 // See also StackChunkFrameStream<frame_kind>::frame_size()
1273 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1274                                                         int callee_argsize /* incl. metadata */,
1275                                                         bool callee_interpreted) {
1276   // The frame's top never includes the stack arguments to the callee
1277   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1278   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1279   // including metadata between f and its stackargs
1280   const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1281   const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
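         // fsize thus spans the frame body plus f's own incoming stack args (the
         // caller/callee overlap); the callee's stack args were excluded from
         // frame_top above since they were already frozen as part of the callee.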

1282 
1283   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
1284                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1285                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1286                              _freeze_size, fsize, argsize);
1287   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1288   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1289 
1290   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1291   if (UNLIKELY(result > freeze_ok_bottom)) {
1292     return result;
1293   }
1294 
1295   bool is_bottom_frame = result == freeze_ok_bottom;
1296   assert(!caller.is_empty() || is_bottom_frame, "");
1297 
1298   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1299 
1300   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);
1301 
1302   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1303 
1304   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1305   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
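         // i.e. for a bottom frame with a compiled caller, the copied frame must end
         // exactly where the caller's argument-overlap area sits in the chunk.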
1306 
1307   if (caller.is_interpreted_frame()) {
1308     // When thawing the frame we might need to add alignment (see Thaw::align)
1309     _total_align_size += frame::align_wiggle;
1310   }
1311 
1312   patch(f, hf, caller, is_bottom_frame);
1313 
1314   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1315 
1316   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1317   caller = hf;
1318   return freeze_ok;
1319 }
1320 

1942   intptr_t* _top_unextended_sp_before_thaw;
1943   int _align_size;
1944   DEBUG_ONLY(intptr_t* _top_stack_address);
1945 
1946   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1947 
1948   NOT_PRODUCT(int _frames;)
1949 
1950 protected:
1951   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1952       _thread(thread), _cont(cont),
1953       _fastpath(nullptr) {
1954     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1955     assert (cont.tail() != nullptr, "no last chunk");
1956     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1957   }
1958 
1959   void clear_chunk(stackChunkOop chunk);
1960   template<bool check_stub>
1961   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1962   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1963 
1964   void thaw_lockstack(stackChunkOop chunk);
1965 
1966   // fast path
1967   inline void prefetch_chunk_pd(void* start, int size_words);
1968   void patch_return(intptr_t* sp, bool is_last);
1969 
1970   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1971   inline intptr_t* push_cleanup_continuation();
1972   void throw_interrupted_exception(JavaThread* current, frame& top);
1973 
1974   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1975   void finish_thaw(frame& f);
1976 
1977 private:
1978   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1979   void finalize_thaw(frame& entry, int argsize);
1980 
1981   inline bool seen_by_gc();
1982 
1983   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1984   inline void after_thaw_java_frame(const frame& f, bool bottom);
1985   inline void patch(frame& f, const frame& caller, bool bottom);
1986   void clear_bitmap_bits(address start, address end);
1987 
1988   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1989   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1990   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1991   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1992 
1993   void push_return_frame(frame& f);
1994   inline frame new_entry_frame();
1995   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
1996   inline void patch_pd(frame& f, const frame& sender);
1997   inline void patch_pd(frame& f, intptr_t* caller_sp);
1998   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
1999 
2000   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2001 
2002   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2003 
2004  public:
2005   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2006 };
2007 
2008 template <typename ConfigT>
2009 class Thaw : public ThawBase {
2010 public:
2011   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2012 
2013   inline bool can_thaw_fast(stackChunkOop chunk) {
2014     return    !_barriers
2015            &&  _thread->cont_fastpath_thread_state()

2052     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2053   }
2054 
2055   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
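     // i.e. the words by which the entry frame must be extended to hold the bottom
     // frame's stack args plus their metadata (zero if there are no stack args)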
2056 
2057   // top and bottom stack pointers
2058   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2059   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2060 
2061   // several operations operate on the totality of the stack being reconstructed,
2062   // including the metadata words
2063   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2064   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2065 };
2066 
2067 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2068   chunk->set_sp(chunk->bottom());
2069   chunk->set_max_thawing_size(0);
2070 }
2071 
2072 template<bool check_stub>
2073 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2074   bool empty = false;
2075   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2076   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2077   assert(chunk_sp == f.sp(), "");
2078   assert(chunk_sp == f.unextended_sp(), "");
2079 
2080   int frame_size = f.cb()->frame_size();
2081   argsize = f.stack_argsize();
2082 
2083   assert(!f.is_stub() || check_stub, "");
2084   if (check_stub && f.is_stub()) {
2085     // If we don't thaw the top compiled frame too, then after restoring the saved
2086     // registers back in Java we would hit the return barrier to thaw one more frame,
2087     // effectively overwriting the restored registers during that call.
2088     f.next(SmallRegisterMap::instance(), true /* stop */);
2089     assert(!f.is_done(), "");
2090 
2091     f.get_cb();
2092     assert(f.is_compiled(), "");
2093     frame_size += f.cb()->frame_size();
2094     argsize = f.stack_argsize();
2095 
2096     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2097       // The caller of the runtime stub when the continuation is preempted is not at a
2098       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2099       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2100       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2101     }
2102   }
2103 
2104   f.next(SmallRegisterMap::instance(), true /* stop */);
2105   empty = f.is_done();
2106   assert(!empty || argsize == chunk->argsize(), "");
2107 
2108   if (empty) {
2109     clear_chunk(chunk);
2110   } else {
2111     chunk->set_sp(chunk->sp() + frame_size);
2112     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2113     // We set chunk->pc to the return pc into the next frame
2114     chunk->set_pc(f.pc());
2115 #ifdef ASSERT
2116     {
2117       intptr_t* retaddr_slot = (chunk_sp
2118                                 + frame_size
2119                                 - frame::sender_sp_ret_address_offset());
2120       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2121              "unexpected pc");

2361   assert(!_cont.is_empty(), "no more frames");
2362   assert(num_frames > 0, "");
2363   assert(!heap_frame.is_empty(), "");
2364 
2365   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2366     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2367   } else if (!heap_frame.is_interpreted_frame()) {
2368     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2369   } else {
2370     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2371   }
2372 }
2373 
2374 template<typename FKind>
2375 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2376   assert(num_frames > 0, "");
2377 
2378   DEBUG_ONLY(_frames++;)
2379 
2380   int argsize = _stream.stack_argsize();
2381 
2382   _stream.next(SmallRegisterMap::instance());
2383   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2384 
2385   // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2386   // as it makes detecting that situation and adjusting unextended_sp tricky
2387   if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2388     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2389     num_frames++;
2390   }
2391 
2392   if (num_frames == 1 || _stream.is_done()) { // end recursion
2393     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2394     return true; // bottom
2395   } else { // recurse
2396     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2397     return false;
2398   }
2399 }
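     // In other words: the recursion above walks the heap frames youngest-to-oldest and
     // ends after num_frames (or when the chunk is exhausted); the frames are then laid
     // out oldest-first as the recursion unwinds, each on top of its thawed caller.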
2400 
2401 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2402   stackChunkOop chunk = _cont.tail();
2403 
2404   if (!_stream.is_done()) {
2405     assert(_stream.sp() >= chunk->sp_address(), "");
2406     chunk->set_sp(chunk->to_offset(_stream.sp()));
2407     chunk->set_pc(_stream.pc());

2427   if (lt.develop_is_enabled()) {
2428     LogStream ls(lt);
2429     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2430     assert(hf.is_heap_frame(), "should be");
2431     hf.print_value_on(&ls);
2432   }
2433   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2434 }
2435 
2436 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2437 #ifdef ASSERT
2438   LogTarget(Trace, continuations) lt;
2439   if (lt.develop_is_enabled()) {
2440     LogStream ls(lt);
2441     ls.print_cr("thawed frame:");
2442     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2443   }
2444 #endif
2445 }
2446 
2447 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
2448   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2449   if (bottom) {
2450     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2451                                                                  : StubRoutines::cont_returnBarrier());
2452   } else {
2453     // The caller might have been deoptimized during thaw, but we've overwritten the return address when copying f from the heap.
2454     // If the caller is not deoptimized, pc is unchanged.
2455     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
2456   }
2457 
2458   patch_pd(f, caller);
2459 
2460   if (f.is_interpreted_frame()) {
2461     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2462   }
2463 
2464   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2465   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2466 }
2467 
2468 void ThawBase::clear_bitmap_bits(address start, address end) {
2469   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2470   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2471 
2472   // we need to clear the bits that correspond to arguments, as they reside in the caller frame;
2473   // left set, they would keep otherwise-unreachable objects alive.
2474 
2475   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since

2598 }
2599 
2600 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2601   assert(hf.is_compiled_frame(), "");
2602   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2603 
2604   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2605     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2606   }
2607 
2608   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2609 
2610   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2611 
2612   assert(caller.sp() == caller.unextended_sp(), "");
2613 
2614   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2615     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2616   }
2617 
2618   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2619   // yet laid out in the stack, and so the original_pc is not stored in it.
2620   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2621   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2622   intptr_t* const stack_frame_top = f.sp();
2623   intptr_t* const heap_frame_top = hf.unextended_sp();
2624 
2625   const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2626   int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
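         // The incoming stack args are normally copied as part of the compiled caller's
         // frame; only for the bottom frame, or when the caller is interpreted, must
         // they be copied together with this frame.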
2627   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2628 
2629   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2630   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2631   // copy metadata, except the metadata at the top of the (unextended) entry frame
2632   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2633 
2634   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2635   // (we might have one padding word for alignment)
2636   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2637   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
2638 
2639   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2640 
2641   patch(f, caller, is_bottom_frame);
2642 
2643   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2644   assert(!f.is_deoptimized_frame(), "");
2645   if (hf.is_deoptimized_frame()) {
2646     maybe_set_fastpath(f.sp());
2647   } else if (_thread->is_interp_only_mode()
2648               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2649     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2650     // cannot rely on nmethod patching for deopt.
2651     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2652 
2653     log_develop_trace(continuations)("Deoptimizing thawed frame");
2654     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2655 
2656     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2657     assert(f.is_deoptimized_frame(), "");
2658     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2659     maybe_set_fastpath(f.sp());
2660   }
2661 

 440   inline frame freeze_start_frame_yield_stub();
 441   template<typename FKind>
 442   inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
 443   inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
 444   inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
 445   freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
 446   void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
 447   NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 448   freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 449   NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
 450   NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
 451   NOINLINE void finish_freeze(const frame& f, const frame& top);
 452 
 453   void freeze_lockstack(stackChunkOop chunk);
 454 
 455   inline bool stack_overflow();
 456 
 457   static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
 458                                                                         : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
 459   template<typename FKind> static inline frame sender(const frame& f);
 460   template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
 461   inline void set_top_frame_metadata_pd(const frame& hf);
 462   inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
 463   inline void patch_pd_unused(intptr_t* sp);
 464   void adjust_interpreted_frame_unextended_sp(frame& f);
 465   static inline void prepare_freeze_interpreted_top_frame(frame& f);
 466   static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
 467 
 468 protected:
 469   void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
 470   bool freeze_fast_new_chunk(stackChunkOop chunk);
 471 };
 472 
 473 template <typename ConfigT>
 474 class Freeze : public FreezeBase {
 475 private:
 476   stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
 477 
 478 public:
 479   inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
 480     : FreezeBase(thread, cont, frame_sp, preempt) {}
 481 
 482   freeze_result try_freeze_fast();

1163 
1164   assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1165   assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1166 #endif
1167 
1168   return freeze_ok_bottom;
1169 }
1170 
1171 // After freezing a frame we may need to adjust some values related to the caller frame.
1172 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1173   if (is_bottom_frame) {
1174     // If we're the bottom frame, we need to replace the return barrier with the real
1175     // caller's pc.
1176     address last_pc = caller.pc();
1177     assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1178     ContinuationHelper::Frame::patch_pc(caller, last_pc);
1179   } else {
1180     assert(!caller.is_empty(), "");
1181   }
1182 
1183   patch_pd(hf, caller, is_bottom_frame);
1184 
1185   if (f.is_interpreted_frame()) {
1186     assert(hf.is_heap_frame(), "should be");
1187     ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1188   }
1189 
1190 #ifdef ASSERT
1191   if (hf.is_compiled_frame()) {
1192     if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1193       log_develop_trace(continuations)("Freezing deoptimized frame");
1194       assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1195       assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1196     }
1197   }
1198 #endif
1199 }
1200 
1201 #ifdef ASSERT
1202 static void verify_frame_top(const frame& f, intptr_t* top) {
1203   ResourceMark rm;

1260 
1261   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1262   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1263   caller = hf;
1264 
1265   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1266   frame_method->record_gc_epoch();
1267 
1268   return freeze_ok;
1269 }
1270 
1271 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1272 // See also StackChunkFrameStream<frame_kind>::frame_size()
1273 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1274                                                         int callee_argsize /* incl. metadata */,
1275                                                         bool callee_interpreted) {
1276   // The frame's top never includes the stack arguments to the callee
1277   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1278   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1279   // including metadata between f and its stackargs
1280   int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1281   int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1282 
1283   int real_frame_size = 0;
1284   bool augmented = f.was_augmented_on_entry(real_frame_size);
1285   if (augmented) {
1286     // The args reside inside the frame so clear argsize. If the caller is compiled,
1287     // this will cause the stack arguments passed by the caller to be frozen when
1288     // freezing the caller frame itself. If the caller is interpreted this will have
1289     // the effect of discarding the arg area created in the i2c stub.
1290     argsize = 0;
1291     fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
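           // real_frame_size is the extended size reported by was_augmented_on_entry();
           // the callee's overlap (its stack args incl. metadata) was already frozen
           // with the callee, so subtract it unless the callee is interpreted.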
1292 #ifdef ASSERT
1293     nmethod* nm = f.cb()->as_nmethod();
1294     Method* method = nm->method();
1295     address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
1296     CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
1297     assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
1298     assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
1299            (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
1300            (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
1301            "frame should not be extended");
1302 #endif
1303   }
1304 
1305   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
1306                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1307                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1308                              _freeze_size, fsize, argsize, augmented);
1309   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1310   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1311 
1312   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1313   if (UNLIKELY(result > freeze_ok_bottom)) {
1314     return result;
1315   }
1316 
1317   bool is_bottom_frame = result == freeze_ok_bottom;
1318   assert(!caller.is_empty() || is_bottom_frame, "");
1319   assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");
1320 
1321   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1322 
1323   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);
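         // For an augmented frame, pass the number of words it was extended by so the
         // heap copy is sized beyond the nmethod's nominal frame_size().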
1324 
1325   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1326 
1327   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1328   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1329 
1330   if (caller.is_interpreted_frame()) {
1331     // When thawing the frame we might need to add alignment (see Thaw::align)
1332     _total_align_size += frame::align_wiggle;
1333   }
1334 
1335   patch(f, hf, caller, is_bottom_frame);
1336 
1337   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1338 
1339   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1340   caller = hf;
1341   return freeze_ok;
1342 }
1343 

1965   intptr_t* _top_unextended_sp_before_thaw;
1966   int _align_size;
1967   DEBUG_ONLY(intptr_t* _top_stack_address);
1968 
1969   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1970 
1971   NOT_PRODUCT(int _frames;)
1972 
1973 protected:
1974   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1975       _thread(thread), _cont(cont),
1976       _fastpath(nullptr) {
1977     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1978     assert (cont.tail() != nullptr, "no last chunk");
1979     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1980   }
1981 
1982   void clear_chunk(stackChunkOop chunk);
1983   template<bool check_stub>
1984   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1985   int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, int &argsize);
1986   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1987 
1988   void thaw_lockstack(stackChunkOop chunk);
1989 
1990   // fast path
1991   inline void prefetch_chunk_pd(void* start, int size_words);
1992   void patch_return(intptr_t* sp, bool is_last);
1993 
1994   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1995   inline intptr_t* push_cleanup_continuation();
1996   void throw_interrupted_exception(JavaThread* current, frame& top);
1997 
1998   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1999   void finish_thaw(frame& f);
2000 
2001 private:
2002   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2003   void finalize_thaw(frame& entry, int argsize);
2004 
2005   inline bool seen_by_gc();
2006 
2007   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2008   inline void after_thaw_java_frame(const frame& f, bool bottom);
2009   inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
2010   void clear_bitmap_bits(address start, address end);
2011 
2012   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
2013   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2014   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2015   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2016 
2017   void push_return_frame(frame& f);
2018   inline frame new_entry_frame();
2019   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
2020   inline void patch_pd(frame& f, const frame& sender);
2021   inline void patch_pd(frame& f, intptr_t* caller_sp);
2022   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2023 
2024   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2025 
2026   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2027 
2028  public:
2029   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2030 };
2031 
2032 template <typename ConfigT>
2033 class Thaw : public ThawBase {
2034 public:
2035   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2036 
2037   inline bool can_thaw_fast(stackChunkOop chunk) {
2038     return    !_barriers
2039            &&  _thread->cont_fastpath_thread_state()

2076     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2077   }
2078 
2079   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2080 
2081   // top and bottom stack pointers
2082   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2083   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2084 
2085   // several operations operate on the totality of the stack being reconstructed,
2086   // including the metadata words
2087   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2088   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2089 };
2090 
2091 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2092   chunk->set_sp(chunk->bottom());
2093   chunk->set_max_thawing_size(0);
2094 }
2095 
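// Walks past all consecutive frames whose nmethod needs_stack_repair() (frames possibly
// extended on entry, which cannot be sized from their sp alone) and also past their first
// "normal" compiled caller. Returns the combined size in words of the frames removed,
// starting at the stream's current sp, and sets argsize to that caller's stack argsize.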
2096 int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, int &argsize) {
2097   intptr_t* top = f.sp();
2098 
2099   while (f.cb()->as_nmethod()->needs_stack_repair()) {
2100     f.next(SmallRegisterMap::instance(), false /* stop */);
2101   }
2102   assert(!f.is_done(), "");
2103   assert(f.is_compiled(), "");
2104 
2105   intptr_t* bottom = f.sp() + f.cb()->frame_size();
2106   argsize = f.stack_argsize();
2107   return bottom - top;
2108 }
2109 
2110 template<bool check_stub>
2111 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2112   bool empty = false;
2113   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2114   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2115   assert(chunk_sp == f.sp(), "");
2116   assert(chunk_sp == f.unextended_sp(), "");
2117 
2118   int frame_size = f.cb()->frame_size();
2119   argsize = f.stack_argsize();
2120 
2121   assert(!f.is_stub() || check_stub, "");
2122   if (check_stub && f.is_stub()) {
2123     // If we don't thaw the top compiled frame too, then after restoring the saved
2124     // registers back in Java we would hit the return barrier to thaw one more frame,
2125     // effectively overwriting the restored registers during that call.
2126     f.next(SmallRegisterMap::instance(), true /* stop */);
2127     assert(!f.is_done(), "");
2128 
2129     f.get_cb();
2130     assert(f.is_compiled(), "");
2131     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2132       // The caller of the runtime stub when the continuation is preempted is not at a
2133       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2134       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2135       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2136     }
2137 
2138     if (f.cb()->as_nmethod()->needs_stack_repair()) {
2139       frame_size += remove_scalarized_frames(f, argsize);
2140     } else {
2141       frame_size += f.cb()->frame_size();
2142       argsize = f.stack_argsize();
2143     }
2144   } else if (f.cb()->as_nmethod()->needs_stack_repair()) {
2145     frame_size = remove_scalarized_frames(f, argsize);
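           // '=' rather than '+=': the walk starts at the top frame, so its size is
           // already included in the returned total.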
2146   }
2147 
2148   f.next(SmallRegisterMap::instance(), true /* stop */);
2149   empty = f.is_done();
2150   assert(!empty || argsize == chunk->argsize(), "");
2151 
2152   if (empty) {
2153     clear_chunk(chunk);
2154   } else {
2155     chunk->set_sp(chunk->sp() + frame_size);
2156     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2157     // We set chunk->pc to the return pc into the next frame
2158     chunk->set_pc(f.pc());
2159 #ifdef ASSERT
2160     {
2161       intptr_t* retaddr_slot = (chunk_sp
2162                                 + frame_size
2163                                 - frame::sender_sp_ret_address_offset());
2164       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2165              "unexpected pc");

2405   assert(!_cont.is_empty(), "no more frames");
2406   assert(num_frames > 0, "");
2407   assert(!heap_frame.is_empty(), "");
2408 
2409   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2410     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2411   } else if (!heap_frame.is_interpreted_frame()) {
2412     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2413   } else {
2414     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2415   }
2416 }
2417 
2418 template<typename FKind>
2419 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2420   assert(num_frames > 0, "");
2421 
2422   DEBUG_ONLY(_frames++;)
2423 
2424   int argsize = _stream.stack_argsize();
2425   CodeBlob* cb = _stream.cb();
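         // Save the current frame's code blob before advancing the stream; the
         // needs_stack_repair() check below needs it after _stream has moved to the caller.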
2426 
2427   _stream.next(SmallRegisterMap::instance());
2428   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2429 
2430   // We never leave a compiled caller of an interpreted frame as the top frame in the chunk
2431   // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
2432   // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
2433   // - Regardless of whether the frame was extended or not, we would need to copy the right arg
2434   //   size if it's greater than the one given by the normal method signature (non-scalarized).
2435   // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
2436   //   the chunk (we need unextended_sp, but we only have sp).
2437   if (num_frames == 1 && !_stream.is_done() && ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
2438     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2439     num_frames++;
2440   }
2441 
2442   if (num_frames == 1 || _stream.is_done()) { // end recursion
2443     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2444     return true; // bottom
2445   } else { // recurse
2446     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2447     return false;
2448   }
2449 }
2450 
2451 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2452   stackChunkOop chunk = _cont.tail();
2453 
2454   if (!_stream.is_done()) {
2455     assert(_stream.sp() >= chunk->sp_address(), "");
2456     chunk->set_sp(chunk->to_offset(_stream.sp()));
2457     chunk->set_pc(_stream.pc());

2477   if (lt.develop_is_enabled()) {
2478     LogStream ls(lt);
2479     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2480     assert(hf.is_heap_frame(), "should be");
2481     hf.print_value_on(&ls);
2482   }
2483   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2484 }
2485 
2486 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2487 #ifdef ASSERT
2488   LogTarget(Trace, continuations) lt;
2489   if (lt.develop_is_enabled()) {
2490     LogStream ls(lt);
2491     ls.print_cr("thawed frame:");
2492     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2493   }
2494 #endif
2495 }
2496 
2497 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
2498   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2499   if (bottom) {
2500     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2501                                                                  : StubRoutines::cont_returnBarrier());
2502   } else if (caller.is_compiled_frame()) {
2503     // The caller might have been deoptimized during thaw, but we've overwritten the return address when copying f from the heap.
2504     // If the caller is not deoptimized, pc is unchanged.
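           // An interpreted caller is not subject to deoptimization, so the pc copied
           // from the heap is already correct (hence the is_compiled_frame() guard).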
2505     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
2506   }
2507 
2508   patch_pd(f, caller);
2509 
2510   if (f.is_interpreted_frame()) {
2511     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2512   }
2513 
2514   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2515   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2516 }
2517 
2518 void ThawBase::clear_bitmap_bits(address start, address end) {
2519   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2520   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2521 
2522   // we need to clear the bits that correspond to arguments, as they reside in the caller frame;
2523   // left set, they would keep otherwise-unreachable objects alive.
2524 
2525   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since

2648 }
2649 
2650 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2651   assert(hf.is_compiled_frame(), "");
2652   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2653 
2654   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2655     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2656   }
2657 
2658   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2659 
2660   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2661 
2662   assert(caller.sp() == caller.unextended_sp(), "");
2663 
2664   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2665     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2666   }
2667 
2668   int fsize = 0;
2669   int added_argsize = 0;
2670   bool augmented = hf.was_augmented_on_entry(fsize);
2671   if (!augmented) {
2672     added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2673     fsize += added_argsize;
2674   }
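         // For an augmented frame, was_augmented_on_entry() already returned the full
         // extended size in fsize, and the incoming args were frozen inside the frame
         // itself (see recurse_freeze_compiled_frame), so nothing is added here.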
2675   assert(!is_bottom_frame || !augmented, "");
2676 
2677 
2678   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2679   // yet laid out in the stack, and so the original_pc is not stored in it.
2680   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2681   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
2682   assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");
2683 
2684   intptr_t* const stack_frame_top = f.sp();
2685   intptr_t* const heap_frame_top = hf.unextended_sp();
2686   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2687   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2688   // copy metadata, except the metadata at the top of the (unextended) entry frame
2689   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2690 
2691   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2692   // (we might have one padding word for alignment)
2693   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2694   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
2695 
2696   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2697 
2698   patch(f, caller, is_bottom_frame, augmented);
2699 
2700   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2701   assert(!f.is_deoptimized_frame(), "");
2702   if (hf.is_deoptimized_frame()) {
2703     maybe_set_fastpath(f.sp());
2704   } else if (_thread->is_interp_only_mode()
2705               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2706     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2707     // cannot rely on nmethod patching for deopt.
2708     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2709 
2710     log_develop_trace(continuations)("Deoptimizing thawed frame");
2711     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2712 
2713     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2714     assert(f.is_deoptimized_frame(), "");
2715     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2716     maybe_set_fastpath(f.sp());
2717   }
2718 