src/hotspot/share/runtime/continuationFreezeThaw.cpp

 440   inline frame freeze_start_frame_yield_stub();
 441   template<typename FKind>
 442   inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
 443   inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
 444   inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
 445   freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
 446   void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
 447   NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 448   freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 449   NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
 450   NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
 451   NOINLINE void finish_freeze(const frame& f, const frame& top);
 452 
 453   void freeze_lockstack(stackChunkOop chunk);
 454 
 455   inline bool stack_overflow();
 456 
 457   static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
 458                                                                         : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
 459   template<typename FKind> static inline frame sender(const frame& f);
 460   template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
 461   inline void set_top_frame_metadata_pd(const frame& hf);
 462   inline void patch_pd(frame& callee, const frame& caller);
 463   void adjust_interpreted_frame_unextended_sp(frame& f);
 464   static inline void prepare_freeze_interpreted_top_frame(frame& f);
 465   static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
 466 
 467 protected:
 468   void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
 469   bool freeze_fast_new_chunk(stackChunkOop chunk);
 470 };
 471 
 472 template <typename ConfigT>
 473 class Freeze : public FreezeBase {
 474 private:
 475   stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
 476 
 477 public:
 478   inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
 479     : FreezeBase(thread, cont, frame_sp, preempt) {}
 480 
 481   freeze_result try_freeze_fast();
 482 

1142 
1143   assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1144   assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1145 #endif
1146 
1147   return freeze_ok_bottom;
1148 }
1149 
1150 // After freezing a frame, we may need to adjust some values related to the caller frame.
1151 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1152   if (is_bottom_frame) {
1153     // If we're the bottom frame, we need to replace the return barrier with the real
1154     // caller's pc.
1155     address last_pc = caller.pc();
1156     assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1157     ContinuationHelper::Frame::patch_pc(caller, last_pc);
1158   } else {
1159     assert(!caller.is_empty(), "");
1160   }
1161 
1162   patch_pd(hf, caller);
1163 
1164   if (f.is_interpreted_frame()) {
1165     assert(hf.is_heap_frame(), "should be");
1166     ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1167   }
1168 
1169 #ifdef ASSERT
1170   if (hf.is_compiled_frame()) {
1171     if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1172       log_develop_trace(continuations)("Freezing deoptimized frame");
1173       assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1174       assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1175     }
1176   }
1177 #endif
1178 }
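The bottom-frame branch above swaps the chunk's return barrier for the real caller pc. A minimal standalone sketch of that logic (plain C++ with toy types; ToyFrame, kReturnBarrier, and toy_patch_bottom are illustrative stand-ins, not HotSpot API):

#include <cassert>

struct ToyFrame {
  const void* return_pc_slot;   // slot holding the address the callee returns to
};

static const char kReturnBarrier = 0;   // placeholder for the return-barrier stub

void toy_patch_bottom(ToyFrame& caller, const void* real_caller_pc, bool chunk_empty) {
  // mirrors: assert((last_pc == nullptr) == _cont.tail()->is_empty(), "")
  assert((real_caller_pc == nullptr) == chunk_empty);
  caller.return_pc_slot = real_caller_pc;   // replace the barrier with the real pc
}

int main() {
  ToyFrame caller{&kReturnBarrier};
  static int some_code;                     // pretend pc inside the real caller
  toy_patch_bottom(caller, &some_code, /*chunk_empty=*/false);
  assert(caller.return_pc_slot == &some_code);
  return 0;
}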
1179 
1180 #ifdef ASSERT
1181 static void verify_frame_top(const frame& f, intptr_t* top) {
1182   ResourceMark rm;

1239 
1240   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1241   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1242   caller = hf;
1243 
1244   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1245   frame_method->record_gc_epoch();
1246 
1247   return freeze_ok;
1248 }
1249 
1250 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1251 // See also StackChunkFrameStream<frame_kind>::frame_size()
1252 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1253                                                         int callee_argsize /* incl. metadata */,
1254                                                         bool callee_interpreted) {
1255   // The frame's top never includes the stack arguments to the callee
1256   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1257   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1258   // including metadata between f and its stackargs
1259   const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1260   const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1261 
1262   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
1263                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1264                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1265                              _freeze_size, fsize, argsize);
1266   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1267   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1268 
1269   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1270   if (UNLIKELY(result > freeze_ok_bottom)) {
1271     return result;
1272   }
1273 
1274   bool is_bottom_frame = result == freeze_ok_bottom;
1275   assert(!caller.is_empty() || is_bottom_frame, "");
1276 
1277   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1278 
1279   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);
1280 
1281   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1282 
1283   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1284   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1285 
1286   if (caller.is_interpreted_frame()) {
1287     // When thawing the frame we might need to add alignment (see Thaw::align)
1288     _total_align_size += frame::align_wiggle;
1289   }
1290 
1291   patch(f, hf, caller, is_bottom_frame);
1292 
1293   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1294 
1295   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1296   caller = hf;
1297   return freeze_ok;
1298 }
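To make the size arithmetic above concrete, here is a small self-contained calculation (all values in words and purely hypothetical; metadata_words_at_top is platform dependent in the real code): the frozen size spans from the frame top, which excludes the callee's stack arguments, up to the sender sp plus the caller-passed arguments and the metadata between them.

#include <cassert>
#include <cstdint>

int main() {
  const int metadata_words_at_top = 2;   // assumed: return pc + saved fp
  const int stack_argsize = 3;           // stack args f receives from its caller

  intptr_t stack[64] = {};
  intptr_t* frame_bottom = stack + 40;   // f's sender sp
  intptr_t* frame_top    = stack + 20;   // excludes the callee's stack arguments

  // including metadata between f and its stack args
  int argsize = stack_argsize + metadata_words_at_top;        // 5
  int fsize   = (int)(frame_bottom + argsize - frame_top);    // 25 words frozen

  assert(argsize == 5);
  assert(fsize == 25);
  return 0;
}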
1299 

1921   intptr_t* _top_unextended_sp_before_thaw;
1922   int _align_size;
1923   DEBUG_ONLY(intptr_t* _top_stack_address);
1924 
1925   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1926 
1927   NOT_PRODUCT(int _frames;)
1928 
1929 protected:
1930   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1931       _thread(thread), _cont(cont),
1932       _fastpath(nullptr) {
1933     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1934     assert (cont.tail() != nullptr, "no last chunk");
1935     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1936   }
1937 
1938   void clear_chunk(stackChunkOop chunk);
1939   template<bool check_stub>
1940   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1941   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1942 
1943   void thaw_lockstack(stackChunkOop chunk);
1944 
1945   // fast path
1946   inline void prefetch_chunk_pd(void* start, int size_words);
1947   void patch_return(intptr_t* sp, bool is_last);
1948 
1949   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1950   inline intptr_t* push_cleanup_continuation();
1951   void throw_interrupted_exception(JavaThread* current, frame& top);
1952 
1953   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1954   void finish_thaw(frame& f);
1955 
1956 private:
1957   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1958   void finalize_thaw(frame& entry, int argsize);
1959 
1960   inline bool seen_by_gc();
1961 
1962   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1963   inline void after_thaw_java_frame(const frame& f, bool bottom);
1964   inline void patch(frame& f, const frame& caller, bool bottom);
1965   void clear_bitmap_bits(address start, address end);
1966 
1967   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1968   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1969   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1970   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1971 
1972   void push_return_frame(frame& f);
1973   inline frame new_entry_frame();
1974   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
1975   inline void patch_pd(frame& f, const frame& sender);
1976   inline void patch_pd(frame& f, intptr_t* caller_sp);
1977   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
1978 
1979   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
1980 
1981   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
1982 
1983  public:
1984   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
1985 };
1986 
1987 template <typename ConfigT>
1988 class Thaw : public ThawBase {
1989 public:
1990   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
1991 
1992   inline bool can_thaw_fast(stackChunkOop chunk) {
1993     return    !_barriers
1994            &&  _thread->cont_fastpath_thread_state()

2031     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2032   }
2033 
2034   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2035 
2036   // top and bottom stack pointers
2037   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2038   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2039 
2040   // several operations operate on the totality of the stack being reconstructed,
2041   // including the metadata words
2042   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2043   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2044 };
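A numeric illustration of the helpers above (assumed values; frame_align_pointer is treated as the identity and all sizes are in words):

#include <cassert>
#include <cstdint>

int main() {
  const int metadata_words_at_top = 2, metadata_words_at_bottom = 2;  // assumed
  intptr_t stack[256] = {};
  intptr_t* base = stack + 200;   // _base: bottom of the area being reconstructed
  const int thaw_size = 100;      // _thaw_size
  const int argsize = 3;          // _argsize

  intptr_t* sp = base - thaw_size;                                      // sp()
  int entry_ext = argsize + (argsize > 0 ? metadata_words_at_top : 0);  // 5
  intptr_t* bottom_sp = base - entry_ext;                               // bottom_sp()
  intptr_t* top = sp - metadata_words_at_bottom;                        // top()
  int total_size = thaw_size + metadata_words_at_bottom;                // total_size()

  assert(bottom_sp == base - 5);
  assert(top + total_size == base);   // the copied region ends exactly at _base
  return 0;
}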
2045 
2046 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2047   chunk->set_sp(chunk->bottom());
2048   chunk->set_max_thawing_size(0);
2049 }
2050 
2051 template<bool check_stub>
2052 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2053   bool empty = false;
2054   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2055   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2056   assert(chunk_sp == f.sp(), "");
2057   assert(chunk_sp == f.unextended_sp(), "");
2058 
2059   int frame_size = f.cb()->frame_size();
2060   argsize = f.stack_argsize();
2061 
2062   assert(!f.is_stub() || check_stub, "");
2063   if (check_stub && f.is_stub()) {
2064     // If we don't thaw the top compiled frame too, after restoring the saved
2065     // registers back in Java, we would hit the return barrier to thaw one more
2066   // frame, effectively overwriting the restored registers during that call.
2067     f.next(SmallRegisterMap::instance(), true /* stop */);
2068     assert(!f.is_done(), "");
2069 
2070     f.get_cb();
2071     assert(f.is_compiled(), "");
2072     frame_size += f.cb()->frame_size();
2073     argsize = f.stack_argsize();
2074 
2075     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2076       // The caller of the runtime stub when the continuation is preempted is not at a
2077       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2078       log_develop_trace(continuations)("Deoptimizing runtime stub caller");

2340   assert(!_cont.is_empty(), "no more frames");
2341   assert(num_frames > 0, "");
2342   assert(!heap_frame.is_empty(), "");
2343 
2344   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2345     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2346   } else if (!heap_frame.is_interpreted_frame()) {
2347     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2348   } else {
2349     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2350   }
2351 }
2352 
2353 template<typename FKind>
2354 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2355   assert(num_frames > 0, "");
2356 
2357   DEBUG_ONLY(_frames++;)
2358 
2359   int argsize = _stream.stack_argsize();
2360 
2361   _stream.next(SmallRegisterMap::instance());
2362   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2363 
2364   // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2365   // as it makes detecting that situation and adjusting unextended_sp tricky
2366   if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2367     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2368     num_frames++;
2369   }
2370 
2371   if (num_frames == 1 || _stream.is_done()) { // end recursion
2372     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2373     return true; // bottom
2374   } else { // recurse
2375     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2376     return false;
2377   }
2378 }
2379 
2380 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2381   stackChunkOop chunk = _cont.tail();
2382 
2383   if (!_stream.is_done()) {
2384     assert(_stream.sp() >= chunk->sp_address(), "");
2385     chunk->set_sp(chunk->to_offset(_stream.sp()));
2386     chunk->set_pc(_stream.pc());

2406   if (lt.develop_is_enabled()) {
2407     LogStream ls(lt);
2408     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2409     assert(hf.is_heap_frame(), "should be");
2410     hf.print_value_on(&ls);
2411   }
2412   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2413 }
2414 
2415 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2416 #ifdef ASSERT
2417   LogTarget(Trace, continuations) lt;
2418   if (lt.develop_is_enabled()) {
2419     LogStream ls(lt);
2420     ls.print_cr("thawed frame:");
2421     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2422   }
2423 #endif
2424 }
2425 
2426 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
2427   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2428   if (bottom) {
2429     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2430                                                                  : StubRoutines::cont_returnBarrier());
2431   } else {
2432     // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2433     // If the caller is not deoptimized, pc is unchanged.
2434     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
2435   }
2436 
2437   patch_pd(f, caller);
2438 
2439   if (f.is_interpreted_frame()) {
2440     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2441   }
2442 
2443   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2444   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2445 }
2446 
2447 void ThawBase::clear_bitmap_bits(address start, address end) {
2448   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2449   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2450 
2451   // we need to clear the bits that correspond to arguments as they reside in the caller frame
2452   // or they will keep objects that are otherwise unreachable alive.
2453 
2454   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since

2577 }
2578 
2579 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2580   assert(hf.is_compiled_frame(), "");
2581   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2582 
2583   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2584     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2585   }
2586 
2587   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2588 
2589   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2590 
2591   assert(caller.sp() == caller.unextended_sp(), "");
2592 
2593   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2594     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2595   }
2596 
2597   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2598   // yet laid out in the stack, and so the original_pc is not stored in it.
2599   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2600   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2601   intptr_t* const stack_frame_top = f.sp();
2602   intptr_t* const heap_frame_top = hf.unextended_sp();
2603 
2604   const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2605   int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
2606   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2607 
2608   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2609   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2610   // copy metadata, except the metadata at the top of the (unextended) entry frame
2611   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2612 
2613   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2614   // (we might have one padding word for alignment)
2615   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2616   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
2617 
2618   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2619 
2620   patch(f, caller, is_bottom_frame);
2621 
2622   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2623   assert(!f.is_deoptimized_frame(), "");
2624   if (hf.is_deoptimized_frame()) {
2625     maybe_set_fastpath(f.sp());
2626   } else if (_thread->is_interp_only_mode()
2627               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2628     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2629     // cannot rely on nmethod patching for deopt.
2630     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2631 
2632     log_develop_trace(continuations)("Deoptimizing thawed frame");
2633     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2634 
2635     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2636     assert(f.is_deoptimized_frame(), "");
2637     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2638     maybe_set_fastpath(f.sp());
2639   }
2640 

src/hotspot/share/runtime/continuationFreezeThaw.cpp

 440   inline frame freeze_start_frame_yield_stub();
 441   template<typename FKind>
 442   inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
 443   inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
 444   inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
 445   freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
 446   void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
 447   NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 448   freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
 449   NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
 450   NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
 451   NOINLINE void finish_freeze(const frame& f, const frame& top);
 452 
 453   void freeze_lockstack(stackChunkOop chunk);
 454 
 455   inline bool stack_overflow();
 456 
 457   static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
 458                                                                         : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
 459   template<typename FKind> static inline frame sender(const frame& f);
 460   template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
 461   inline void set_top_frame_metadata_pd(const frame& hf);
 462   inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
 463   void adjust_interpreted_frame_unextended_sp(frame& f);
 464   static inline void prepare_freeze_interpreted_top_frame(frame& f);
 465   static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
 466 
 467 protected:
 468   void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
 469   bool freeze_fast_new_chunk(stackChunkOop chunk);
 470 };
 471 
 472 template <typename ConfigT>
 473 class Freeze : public FreezeBase {
 474 private:
 475   stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
 476 
 477 public:
 478   inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
 479     : FreezeBase(thread, cont, frame_sp, preempt) {}
 480 
 481   freeze_result try_freeze_fast();
 482 

1142 
1143   assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1144   assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1145 #endif
1146 
1147   return freeze_ok_bottom;
1148 }
1149 
1150 // After freezing a frame, we may need to adjust some values related to the caller frame.
1151 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1152   if (is_bottom_frame) {
1153     // If we're the bottom frame, we need to replace the return barrier with the real
1154     // caller's pc.
1155     address last_pc = caller.pc();
1156     assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1157     ContinuationHelper::Frame::patch_pc(caller, last_pc);
1158   } else {
1159     assert(!caller.is_empty(), "");
1160   }
1161 
1162   patch_pd(hf, caller, is_bottom_frame);
1163 
1164   if (f.is_interpreted_frame()) {
1165     assert(hf.is_heap_frame(), "should be");
1166     ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1167   }
1168 
1169 #ifdef ASSERT
1170   if (hf.is_compiled_frame()) {
1171     if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1172       log_develop_trace(continuations)("Freezing deoptimized frame");
1173       assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1174       assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1175     }
1176   }
1177 #endif
1178 }
1179 
1180 #ifdef ASSERT
1181 static void verify_frame_top(const frame& f, intptr_t* top) {
1182   ResourceMark rm;

1239 
1240   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1241   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1242   caller = hf;
1243 
1244   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1245   frame_method->record_gc_epoch();
1246 
1247   return freeze_ok;
1248 }
1249 
1250 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1251 // See also StackChunkFrameStream<frame_kind>::frame_size()
1252 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1253                                                         int callee_argsize /* incl. metadata */,
1254                                                         bool callee_interpreted) {
1255   // The frame's top never includes the stack arguments to the callee
1256   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1257   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1258   // including metadata between f and its stackargs
1259   int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1260   int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1261 
1262   int real_frame_size = 0;
1263   bool augmented = f.was_augmented_on_entry(real_frame_size);
1264   if (augmented) {
1265     // The args reside inside the frame so clear argsize. If the caller is compiled,
1266   // this will cause the stack arguments passed by the caller to be frozen when
1267     // freezing the caller frame itself. If the caller is interpreted this will have
1268     // the effect of discarding the arg area created in the i2c stub.
1269     argsize = 0;
1270     fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
1271 #ifdef ASSERT
1272     nmethod* nm = f.cb()->as_nmethod();
1273     Method* method = nm->method();
1274     address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
1275     CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
1276     assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
1277     assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
1278            (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
1279            (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
1280            "frame should not be extended");
1281 #endif
1282   }
1283 
1284   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
1285                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1286                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1287                              _freeze_size, fsize, argsize, augmented);
1288   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1289   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1290 
1291   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1292   if (UNLIKELY(result > freeze_ok_bottom)) {
1293     return result;
1294   }
1295 
1296   bool is_bottom_frame = result == freeze_ok_bottom;
1297   assert(!caller.is_empty() || is_bottom_frame, "");
1298   assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");
1299 
1300   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1301 
1302   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);
1303 
1304   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1305 
1306   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1307   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1308 
1309   if (caller.is_interpreted_frame()) {
1310     // When thawing the frame we might need to add alignment (see Thaw::align)
1311     _total_align_size += frame::align_wiggle;
1312   }
1313 
1314   patch(f, hf, caller, is_bottom_frame);
1315 
1316   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1317 
1318   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1319   caller = hf;
1320   return freeze_ok;
1321 }
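A sketch of the augmented-frame sizing introduced above (hypothetical numbers; was_augmented_on_entry's out-parameter is modeled as real_frame_size): when the frame was extended on entry for scalarized arguments, argsize is cleared and fsize comes from the real size minus any compiled-callee overlap, while the heap frame is grown by the difference between real and declared size.

#include <cassert>

int main() {
  const int declared_frame_size = 30;   // f.cb()->frame_size(), in words
  const int real_frame_size = 38;       // out-param of was_augmented_on_entry()
  const int callee_argsize = 4;         // incl. metadata
  const bool callee_interpreted = false;

  // args reside inside the augmented frame, so nothing overlaps the caller
  int argsize = 0;
  int fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);   // 34

  // extra words the heap frame needs beyond the declared size
  int size_adjust = real_frame_size - declared_frame_size;                   // 8

  assert(argsize == 0 && fsize == 34 && size_adjust == 8);
  return 0;
}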
1322 

1944   intptr_t* _top_unextended_sp_before_thaw;
1945   int _align_size;
1946   DEBUG_ONLY(intptr_t* _top_stack_address);
1947 
1948   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1949 
1950   NOT_PRODUCT(int _frames;)
1951 
1952 protected:
1953   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1954       _thread(thread), _cont(cont),
1955       _fastpath(nullptr) {
1956     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1957     assert (cont.tail() != nullptr, "no last chunk");
1958     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1959   }
1960 
1961   void clear_chunk(stackChunkOop chunk);
1962   template<bool check_stub>
1963   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1964   int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, stackChunkOop chunk, int &argsize);
1965   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1966 
1967   void thaw_lockstack(stackChunkOop chunk);
1968 
1969   // fast path
1970   inline void prefetch_chunk_pd(void* start, int size_words);
1971   void patch_return(intptr_t* sp, bool is_last);
1972 
1973   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1974   inline intptr_t* push_cleanup_continuation();
1975   void throw_interrupted_exception(JavaThread* current, frame& top);
1976 
1977   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1978   void finish_thaw(frame& f);
1979 
1980 private:
1981   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1982   void finalize_thaw(frame& entry, int argsize);
1983 
1984   inline bool seen_by_gc();
1985 
1986   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1987   inline void after_thaw_java_frame(const frame& f, bool bottom);
1988   inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
1989   void clear_bitmap_bits(address start, address end);
1990 
1991   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1992   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1993   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1994   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1995 
1996   void push_return_frame(frame& f);
1997   inline frame new_entry_frame();
1998   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
1999   inline void patch_pd(frame& f, const frame& sender);
2000   inline void patch_pd(frame& f, intptr_t* caller_sp);
2001   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2002 
2003   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2004 
2005   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2006 
2007  public:
2008   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2009 };
2010 
2011 template <typename ConfigT>
2012 class Thaw : public ThawBase {
2013 public:
2014   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2015 
2016   inline bool can_thaw_fast(stackChunkOop chunk) {
2017     return    !_barriers
2018            &&  _thread->cont_fastpath_thread_state()

2055     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2056   }
2057 
2058   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2059 
2060   // top and bottom stack pointers
2061   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2062   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2063 
2064   // several operations operate on the totality of the stack being reconstructed,
2065   // including the metadata words
2066   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2067   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2068 };
2069 
2070 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2071   chunk->set_sp(chunk->bottom());
2072   chunk->set_max_thawing_size(0);
2073 }
2074 
2075 int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, stackChunkOop chunk, int &argsize) {
2076   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2077   intptr_t* top = f.sp();
2078 
2079   while (f.cb()->as_nmethod_or_null()->needs_stack_repair()) {
2080     f.next(SmallRegisterMap::instance(), false /* stop */);
2081   }
2082   assert(!f.is_done(), "");
2083   assert(f.is_compiled(), "");
2084 
2085   intptr_t* bottom = f.sp() + f.cb()->frame_size();
2086   argsize = f.stack_argsize();
2087   int frames_size = bottom - top;
2088 
2089   f.next(SmallRegisterMap::instance(), true /* stop */);
2090   bool empty = f.is_done();
2091   assert(!empty || argsize == chunk->argsize(), "");
2092 
2093   if (empty) {
2094     clear_chunk(chunk);
2095   } else {
2096     chunk->set_sp(chunk->sp() + frames_size);
2097     chunk->set_max_thawing_size(chunk->max_thawing_size() - frames_size);
2098     // We set chunk->pc to the return pc into the next frame
2099     chunk->set_pc(f.pc());
2100 #ifdef ASSERT
2101     {
2102       intptr_t* retaddr_slot = (chunk_sp
2103                                 + frames_size
2104                                 - frame::sender_sp_ret_address_offset());
2105       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2106              "unexpected pc");
2107     }
2108 #endif
2109   }
2110   assert(empty == chunk->is_empty(), "");
2111   // returns the size required to store the frame on stack, and because it is a
2112   // compiled frame, it must include a copy of the arguments passed by the caller
2113   return frames_size + argsize + frame::metadata_words_at_top;
2114 }
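A standalone model of the walk above (made-up frame sizes and flags, not HotSpot's StackChunkFrameStream): the stream is advanced past the run of frames needing stack repair, the first frame past the run is included in the removed size, and the result adds the caller-passed args plus top metadata.

#include <cassert>
#include <vector>

struct ToyFrame { int frame_size; int stack_argsize; bool needs_stack_repair; };

int main() {
  const int metadata_words_at_top = 2;   // assumed value
  std::vector<ToyFrame> frames = {
    {20, 1, true},    // top frame, extended for scalarized args
    {15, 2, true},    // its caller, also needs repair
    {25, 3, false},   // first frame that does not need repair; still removed
  };

  // advance past the run of frames needing repair (the while loop above)
  size_t i = 0;
  while (frames[i].needs_stack_repair) { i++; }

  // bottom = f.sp() + f.cb()->frame_size(): include every frame up to i
  int frames_size = 0;
  for (size_t k = 0; k <= i; k++) { frames_size += frames[k].frame_size; }   // 60
  int argsize = frames[i].stack_argsize;                                     // 3

  // size needed on the stack: the frames, the caller-passed args, top metadata
  assert(frames_size + argsize + metadata_words_at_top == 65);
  return 0;
}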
2115 
2116 template<bool check_stub>
2117 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2118   bool empty = false;
2119   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2120   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2121   assert(chunk_sp == f.sp(), "");
2122   assert(chunk_sp == f.unextended_sp(), "");
2123 
2124   if (f.cb()->is_nmethod() && f.cb()->as_nmethod()->needs_stack_repair()) { // cb may be a stub here when preempted
2125     return remove_scalarized_frames(f, chunk, argsize);
2126   }
2127 
2128   int frame_size = f.cb()->frame_size();
2129   argsize = f.stack_argsize();
2130 
2131   assert(!f.is_stub() || check_stub, "");
2132   if (check_stub && f.is_stub()) {
2133     // If we don't thaw the top compiled frame too, after restoring the saved
2134     // registers back in Java, we would hit the return barrier to thaw one more
2135   // frame, effectively overwriting the restored registers during that call.
2136     f.next(SmallRegisterMap::instance(), true /* stop */);
2137     assert(!f.is_done(), "");
2138 
2139     f.get_cb();
2140     assert(f.is_compiled(), "");
2141     frame_size += f.cb()->frame_size();
2142     argsize = f.stack_argsize();
2143 
2144     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2145       // The caller of the runtime stub when the continuation is preempted is not at a
2146       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2147       log_develop_trace(continuations)("Deoptimizing runtime stub caller");

2409   assert(!_cont.is_empty(), "no more frames");
2410   assert(num_frames > 0, "");
2411   assert(!heap_frame.is_empty(), "");
2412 
2413   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2414     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2415   } else if (!heap_frame.is_interpreted_frame()) {
2416     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2417   } else {
2418     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2419   }
2420 }
2421 
2422 template<typename FKind>
2423 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2424   assert(num_frames > 0, "");
2425 
2426   DEBUG_ONLY(_frames++;)
2427 
2428   int argsize = _stream.stack_argsize();
2429   CodeBlob* cb = _stream.cb();
2430 
2431   _stream.next(SmallRegisterMap::instance());
2432   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2433 
2434   // We never leave a compiled caller of an interpreted frame as the top frame in the chunk
2435   // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
2436   // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
2437   // - Regardless of whether the frame was extended or not, we would need to copy the right arg
2438   //   size if it's greater than the one given by the normal method signature (non-scalarized).
2439   // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
2440   //   the chunk (we need unextended_sp, but we only have sp).
2441   if (num_frames == 1 && !_stream.is_done() && ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
2442     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller or scalarized frame at top");
2443     num_frames++;
2444   }
2445 
2446   if (num_frames == 1 || _stream.is_done()) { // end recursion
2447     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2448     return true; // bottom
2449   } else { // recurse
2450     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2451     return false;
2452   }
2453 }
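The extended top-frame rule above condenses to a small predicate; a sketch with the template flags and the nmethod query modeled as plain booleans:

#include <cassert>

// Never leave a compiled frame at the top of the chunk if it is either the
// caller of an interpreted frame or the caller of a frame needing stack repair.
bool thaw_extra_frame(int num_frames, bool stream_done,
                      bool fkind_interpreted, bool fkind_compiled,
                      bool next_is_compiled, bool this_needs_stack_repair) {
  return num_frames == 1 && !stream_done &&
         ((fkind_interpreted && next_is_compiled) ||
          (fkind_compiled && this_needs_stack_repair));
}

int main() {
  // Interpreted frame would leave a compiled caller on top: thaw one more.
  assert(thaw_extra_frame(1, false, true, false, true, false));
  // Compiled frame extended for scalarized args: also thaw its caller.
  assert(thaw_extra_frame(1, false, false, true, false, true));
  // Plenty of frames left to thaw anyway: no adjustment needed.
  assert(!thaw_extra_frame(3, false, true, false, true, false));
  return 0;
}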
2454 
2455 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2456   stackChunkOop chunk = _cont.tail();
2457 
2458   if (!_stream.is_done()) {
2459     assert(_stream.sp() >= chunk->sp_address(), "");
2460     chunk->set_sp(chunk->to_offset(_stream.sp()));
2461     chunk->set_pc(_stream.pc());

2481   if (lt.develop_is_enabled()) {
2482     LogStream ls(lt);
2483     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2484     assert(hf.is_heap_frame(), "should be");
2485     hf.print_value_on(&ls);
2486   }
2487   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2488 }
2489 
2490 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2491 #ifdef ASSERT
2492   LogTarget(Trace, continuations) lt;
2493   if (lt.develop_is_enabled()) {
2494     LogStream ls(lt);
2495     ls.print_cr("thawed frame:");
2496     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2497   }
2498 #endif
2499 }
2500 
2501 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
2502   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2503   if (bottom) {
2504     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2505                                                                  : StubRoutines::cont_returnBarrier());
2506   } else if (caller.is_compiled_frame()){
2507     // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2508     // If the caller is not deoptimized, pc is unchanged.
2509     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
2510   }
2511 
2512   patch_pd(f, caller);
2513 
2514   if (f.is_interpreted_frame()) {
2515     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2516   }
2517 
2518   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2519   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2520 }
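A toy model of the return-address choices above (illustrative only; the real code patches only when the caller is compiled in the non-bottom case, and uses StubRoutines::cont_returnBarrier() rather than a placeholder):

#include <cassert>

static const char kReturnBarrier = 0;   // stands in for the return-barrier stub

const void* patched_return_pc(bool bottom, bool cont_empty,
                              const void* caller_pc, const void* caller_raw_pc) {
  if (bottom) {
    // Last thawed batch: return straight out; otherwise return into the barrier
    // so the next batch of frames is thawed lazily on return.
    return cont_empty ? caller_pc : &kReturnBarrier;
  }
  // Caller may have been deoptimized during the thaw; raw_pc reflects that.
  return caller_raw_pc;
}

int main() {
  static int real_pc, raw_pc;
  assert(patched_return_pc(true,  true,  &real_pc, &raw_pc) == &real_pc);
  assert(patched_return_pc(true,  false, &real_pc, &raw_pc) == &kReturnBarrier);
  assert(patched_return_pc(false, true,  &real_pc, &raw_pc) == &raw_pc);
  return 0;
}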
2521 
2522 void ThawBase::clear_bitmap_bits(address start, address end) {
2523   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2524   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2525 
2526   // we need to clear the bits that correspond to arguments as they reside in the caller frame
2527   // or they will keep objects that are otherwise unreachable alive.
2528 
2529   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since

2652 }
2653 
2654 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2655   assert(hf.is_compiled_frame(), "");
2656   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2657 
2658   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2659     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2660   }
2661 
2662   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2663 
2664   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2665 
2666   assert(caller.sp() == caller.unextended_sp(), "");
2667 
2668   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2669     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2670   }
2671 
2672   int fsize = 0;
2673   int added_argsize = 0;
2674   bool augmented = hf.was_augmented_on_entry(fsize);
2675   if (!augmented) {
2676     added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2677     fsize += added_argsize;
2678   }
2679   assert(!is_bottom_frame || !augmented, "");
2680 
2681 
2682   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2683   // yet laid out in the stack, and so the original_pc is not stored in it.
2684   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2685   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
2686   assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");
2687 
2688   intptr_t* const stack_frame_top = f.sp();
2689   intptr_t* const heap_frame_top = hf.unextended_sp();
2690   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2691   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2692   // copy metadata, except the metadata at the top of the (unextended) entry frame
2693   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2694 
2695   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2696   // (we might have one padding word for alignment)
2697   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2698   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
2699 
2700   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2701 
2702   patch(f, caller, is_bottom_frame, augmented);
2703 
2704   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2705   assert(!f.is_deoptimized_frame(), "");
2706   if (hf.is_deoptimized_frame()) {
2707     maybe_set_fastpath(f.sp());
2708   } else if (_thread->is_interp_only_mode()
2709               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2710     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2711     // cannot rely on nmethod patching for deopt.
2712     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2713 
2714     log_develop_trace(continuations)("Deoptimizing thawed frame");
2715     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2716 
2717     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2718     assert(f.is_deoptimized_frame(), "");
2719     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2720     maybe_set_fastpath(f.sp());
2721   }
2722 