src/hotspot/share/runtime/continuationFreezeThaw.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/javaClasses.inline.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "code/codeCache.inline.hpp"
  28 #include "code/nmethod.inline.hpp"
  29 #include "code/vmreg.inline.hpp"
  30 #include "compiler/oopMap.inline.hpp"
  31 #include "gc/shared/continuationGCSupport.inline.hpp"
  32 #include "gc/shared/gc_globals.hpp"
  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/memAllocator.hpp"
  35 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"

  36 #include "interpreter/interpreter.hpp"

  37 #include "jfr/jfrEvents.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "oops/access.inline.hpp"

  41 #include "oops/method.inline.hpp"
  42 #include "oops/oopsHierarchy.hpp"
  43 #include "oops/objArrayOop.inline.hpp"
  44 #include "oops/stackChunkOop.inline.hpp"
  45 #include "prims/jvmtiThreadState.hpp"
  46 #include "runtime/arguments.hpp"
  47 #include "runtime/continuation.hpp"
  48 #include "runtime/continuationEntry.inline.hpp"
  49 #include "runtime/continuationHelper.inline.hpp"
  50 #include "runtime/continuationJavaClasses.inline.hpp"
  51 #include "runtime/continuationWrapper.inline.hpp"
  52 #include "runtime/frame.inline.hpp"
  53 #include "runtime/interfaceSupport.inline.hpp"
  54 #include "runtime/javaThread.inline.hpp"
  55 #include "runtime/jniHandles.inline.hpp"
  56 #include "runtime/keepStackGCProcessed.hpp"
  57 #include "runtime/objectMonitor.inline.hpp"
  58 #include "runtime/orderAccess.hpp"
  59 #include "runtime/prefetch.inline.hpp"
  60 #include "runtime/smallRegisterMap.inline.hpp"
  61 #include "runtime/sharedRuntime.hpp"
  62 #include "runtime/stackChunkFrameStream.inline.hpp"
  63 #include "runtime/stackFrameStream.inline.hpp"
  64 #include "runtime/stackOverflow.hpp"
  65 #include "runtime/stackWatermarkSet.inline.hpp"


  66 #include "utilities/debug.hpp"
  67 #include "utilities/exceptions.hpp"
  68 #include "utilities/macros.hpp"
  69 #include "utilities/vmError.hpp"
  70 #if INCLUDE_ZGC
  71 #include "gc/z/zStackChunkGCData.inline.hpp"
  72 #endif
  73 #if INCLUDE_JFR
  74 #include "jfr/jfr.inline.hpp"
  75 #endif






  76 
  77 #include <type_traits>
  78 
  79 /*
  80  * This file contains the implementation of continuation freezing (yield) and thawing (run).
  81  *
  82  * This code is very latency-critical and very hot. An ordinary and well-behaved server application
  83  * would likely call these operations many thousands of times per second, on every core.
  84  *
  85  * Freeze might be called every time the application performs any I/O operation, every time it
  86  * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
  87  * multiple times in each of those cases, as it is called by the return barrier, which may be
  88  * invoked on method return.
  89  *
  90  * The amortized budget for each of those two operations is ~100-150ns. That is why, for
  91  * example, every effort is made to avoid Java-VM transitions as much as possible.
  92  *
  93  * On the fast path, all frames are known to be compiled, and the chunk requires no barriers
  94  * and so the frames are simply copied, and the bottom-most one is patched.
  95  * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets

 165 #endif
 166 
 167 // TODO: See AbstractAssembler::generate_stack_overflow_check,
 168 // Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
 169 // when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.
 170 
 171 // Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk
 172 
 173 // Used just to annotate cold/hot branches
 174 #define LIKELY(condition)   (condition)
 175 #define UNLIKELY(condition) (condition)
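     // A sketch of how these could map to real compiler branch hints (an assumption:
     // GCC/Clang-style builtins, not something this file currently does):
     //   #define LIKELY(condition)   (__builtin_expect(!!(condition), 1))
     //   #define UNLIKELY(condition) (__builtin_expect(!!(condition), 0))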
 176 
 177 // debugging functions
 178 #ifdef ASSERT
 179 extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue
 180 
 181 static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }
 182 
 183 static void do_deopt_after_thaw(JavaThread* thread);
 184 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
 185 static void log_frames(JavaThread* thread);
 186 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted);
 187 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);

 188 
 189 #define assert_pfl(p, ...) \
 190 do {                                           \
 191   if (!(p)) {                                  \
 192     JavaThread* t = JavaThread::active();      \
 193     if (t->has_last_Java_frame()) {            \
 194       tty->print_cr("assert(" #p ") failed:"); \
 195       t->print_frame_layout();                 \
 196     }                                          \
 197   }                                            \
 198   vmassert(p, __VA_ARGS__);                    \
 199 } while(0)
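     // Hypothetical usage sketch (the frame `f` here is illustrative only):
     //   assert_pfl(f.sp() <= _cont.entrySP(), "sp " INTPTR_FORMAT " above entry", p2i(f.sp()));
     // On failure, the macro prints the active thread's frame layout before vmassert fires.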
 200 
 201 #else
 202 static void verify_continuation(oop continuation) { }
 203 #define assert_pfl(p, ...)
 204 #endif
 205 
 206 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
 207 template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);

 495 
 496   assert(!Interpreter::contains(_cont.entryPC()), "");
 497 
 498   _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
 499 #ifdef _LP64
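       // Align down to a 16-byte boundary: _bottom_address is an intptr_t*, so on a
       // 64-bit VM the decrement below moves it exactly one word (8 bytes) lower.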
 500   if (((intptr_t)_bottom_address & 0xf) != 0) {
 501     _bottom_address--;
 502   }
 503   assert(is_aligned(_bottom_address, frame::frame_alignment), "");
 504 #endif
 505 
 506   log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
 507                 p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
 508   assert(_bottom_address != nullptr, "");
 509   assert(_bottom_address <= _cont.entrySP(), "");
 510   DEBUG_ONLY(_last_write = nullptr;)
 511 
 512   assert(_cont.chunk_invariant(), "");
 513   assert(!Interpreter::contains(_cont.entryPC()), "");
 514 #if !defined(PPC64) || defined(ZERO)
 515   static const int doYield_stub_frame_size = frame::metadata_words;
 516 #else
 517   static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
 518 #endif
 519   // With preemption doYield() might not have been resolved yet
 520   assert(_preempt || SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");
 521 
 522   if (preempt) {
 523     _last_frame = _thread->last_frame();
 524   }
 525 
 526   // properties of the continuation on the stack; all sizes are in words
 527   _cont_stack_top    = frame_sp + (!preempt ? doYield_stub_frame_size : 0); // we don't freeze the doYield stub frame
 528   _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
 529       - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw
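       // _cont_stack_top and _cont_stack_bottom now delimit the words to be frozen;
       // cont_size() (presumably bottom minus top, logged below) is the amount to copy.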
 530 
 531   log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
 532     cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
 533   assert(cont_size() > 0, "");
 534 
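       // Note: only LM_LIGHTWEIGHT keeps a per-thread lock stack; the monitors
       // recorded there are counted here so they can be frozen along with the
       // frames (see freeze_lockstack()).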
 535   if (LockingMode != LM_LIGHTWEIGHT) {
 536     _monitors_in_lockstack = 0;
 537   } else {
 538     _monitors_in_lockstack = _thread->lock_stack().monitor_count();
 539   }
 540 }
 541 
 542 void FreezeBase::init_rest() { // we want to postpone some initialization until after chunk handling
 543   _freeze_size = 0;
 544   _total_align_size = 0;
 545   NOT_PRODUCT(_frames = 0;)
 546 }
 547 

 881   freeze_result res = recurse_freeze(f, caller, 0, false, true);
 882 
 883   if (res == freeze_ok) {
 884     finish_freeze(f, caller);
 885     _cont.write();
 886   }
 887 
 888   return res;
 889 }
 890 
 891 frame FreezeBase::freeze_start_frame() {
 892   if (LIKELY(!_preempt)) {
 893     return freeze_start_frame_yield_stub();
 894   } else {
 895     return freeze_start_frame_on_preempt();
 896   }
 897 }
 898 
 899 frame FreezeBase::freeze_start_frame_yield_stub() {
 900   frame f = _thread->last_frame();
 901   assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
 902   f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
 903   assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
 904   return f;
 905 }
 906 
 907 frame FreezeBase::freeze_start_frame_on_preempt() {
 908   assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
 909   assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
 910   return _last_frame;
 911 }
 912 
 913 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
 914 NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
 915   assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
 916   assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
 917          || ((top && _preempt) == f.is_native_frame()), "");
 918 
 919   if (stack_overflow()) {
 920     return freeze_exception;
 921   }

1077     log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
1078     if (chunk->is_empty()) {
1079       int sp = chunk->stack_size() - argsize_md;
1080       chunk->set_sp(sp);
1081       chunk->set_bottom(sp);
1082       _freeze_size += overlap;
1083       assert(chunk->max_thawing_size() == 0, "");
1084     } DEBUG_ONLY(else empty_chunk = false;)
1085   }
1086   assert(!chunk->is_gc_mode(), "");
1087   assert(!chunk->has_bitmap(), "");
1088   chunk->set_has_mixed_frames(true);
1089 
1090   assert(chunk->requires_barriers() == _barriers, "");
1091   assert(!_barriers || chunk->is_empty(), "");
1092 
1093   assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
1094   assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
1095 
1096   if (_preempt) {
1097     frame f = _thread->last_frame();
1098     if (f.is_interpreted_frame()) {
1099       // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
1100       // We need it so that on resume we can restore the sp to the right place, since
1101       // thawing might add an alignment word to the expression stack (see finish_thaw()).
1102       // We do it now that we know freezing will be successful.
1103       prepare_freeze_interpreted_top_frame(f);

1104     }
1105   }
1106 
1107   // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
1108   // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
1109   // will either see no continuation or a consistent chunk.
1110   unwind_frames();
1111 
1112   chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);
1113 
1114   if (lt.develop_is_enabled()) {
1115     LogStream ls(lt);
1116     ls.print_cr("top chunk:");
1117     chunk->print_on(&ls);
1118   }
1119 
1120   if (_monitors_in_lockstack > 0) {
1121     freeze_lockstack(chunk);
1122   }
1123 

1595       // Some GCs could put direct allocations in old gen for slow-path
1596       // allocations; need to explicitly check if that was the case.
1597       _barriers = chunk->requires_barriers();
1598     }
1599   }
1600 
1601   if (_barriers) {
1602     log_develop_trace(continuations)("allocation requires barriers");
1603   }
1604 
1605   assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1606 
1607   return chunk;
1608 }
1609 
1610 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1611   ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1612   Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1613 }
1614 
1615 #if INCLUDE_JVMTI
1616 static int num_java_frames(ContinuationWrapper& cont) {
1617   ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1618   int count = 0;
1619   for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1620     count += chunk->num_java_frames();
1621   }
1622   return count;
1623 }
1624 
1625 static void invalidate_jvmti_stack(JavaThread* thread) {
1626   if (thread->is_interp_only_mode()) {
1627     JvmtiThreadState *state = thread->jvmti_thread_state();
1628     if (state != nullptr)
1629       state->invalidate_cur_stack_depth();
1630   }
1631 }
1632 
1633 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1634   if (JvmtiExport::can_post_frame_pop()) {
1635     int num_frames = num_java_frames(cont);
1636 
1637     ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1638     JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1639   }
1640   invalidate_jvmti_stack(thread);
1641 }
1642 
1643 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top) {
1644   assert(current->vthread() != nullptr, "must be");
1645 
1646   HandleMarkCleaner hm(current);
1647   Handle vth(current, current->vthread());
1648 
1649   ContinuationWrapper::SafepointOp so(current, cont);
1650 
 1651   // Since we might safepoint, set the anchor so that the stack can be walked.
1652   set_anchor(current, top.sp());
1653 
1654   JRT_BLOCK
1655     JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
1656 
1657     if (current->pending_contended_entered_event()) {
1658       JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());



1659       current->set_contended_entered_monitor(nullptr);
1660     }
1661   JRT_BLOCK_END
1662 
1663   clear_anchor(current);
1664 }
1665 #endif // INCLUDE_JVMTI
1666 
1667 #ifdef ASSERT
 1668 // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1669 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1670 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1671 bool FreezeBase::check_valid_fast_path() {
1672   ContinuationEntry* ce = _thread->last_continuation();
1673   RegisterMap map(_thread,
1674                   RegisterMap::UpdateMap::skip,
1675                   RegisterMap::ProcessFrames::skip,
1676                   RegisterMap::WalkContinuation::skip);
1677   map.set_include_argument_oops(false);
1678   bool is_top_frame = true;
1679   for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1680     if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1681       return false;
1682     }
1683   }
1684   return true;
1685 }
1686 #endif // ASSERT
1687 
1688 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1689   verify_continuation(cont.continuation());
1690   assert(!cont.is_empty(), "");
1691 
1692   log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1693   return freeze_ok;
1694 }
1695 
1696 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1697   if (UNLIKELY(res != freeze_ok)) {
1698     JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1699     verify_continuation(cont.continuation());
1700     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1701     return res;
1702   }
1703 
1704   JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1705   return freeze_epilog(cont);
1706 }
1707 
1708 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1709   if (UNLIKELY(res != freeze_ok)) {
1710     verify_continuation(cont.continuation());
1711     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1712     return res;
1713   }
1714 

1715   patch_return_pc_with_preempt_stub(old_last_frame);
1716   cont.tail()->set_preempted(true);
1717 
1718   return freeze_epilog(cont);
1719 }
1720 
1721 template<typename ConfigT, bool preempt>
1722 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1723   assert(!current->has_pending_exception(), "");
1724 
1725 #ifdef ASSERT
 1726   log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1727   log_frames(current);
1728 #endif
1729 
1730   CONT_JFR_ONLY(EventContinuationFreeze event;)
1731 
1732   ContinuationEntry* entry = current->last_continuation();
1733 
1734   oop oopCont = entry->cont_oop(current);
1735   assert(oopCont == current->last_continuation()->cont_oop(current), "");
1736   assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1737 
1738   verify_continuation(oopCont);
1739   ContinuationWrapper cont(current, oopCont);
1740   log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1741 
1742   assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1743 
1744   assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
1745          "Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1746 
1747   if (entry->is_pinned() || current->held_monitor_count() > 0) {

1901   // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
1902   // for the Java frames in the check below.
1903   if (!stack_overflow_check(thread, size + 300, bottom)) {
1904     return 0;
1905   }
1906 
1907   log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
1908                               p2i(bottom), p2i(bottom - size), size);
1909   return size;
1910 }
1911 
1912 class ThawBase : public StackObj {
1913 protected:
1914   JavaThread* _thread;
1915   ContinuationWrapper& _cont;
1916   CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
1917 
1918   intptr_t* _fastpath;
1919   bool _barriers;
1920   bool _preempted_case;

1921   intptr_t* _top_unextended_sp_before_thaw;
1922   int _align_size;
1923   DEBUG_ONLY(intptr_t* _top_stack_address);
1924 



1925   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1926 
1927   NOT_PRODUCT(int _frames;)
1928 
1929 protected:
1930   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1931       _thread(thread), _cont(cont),
1932       _fastpath(nullptr) {
1933     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1934     assert (cont.tail() != nullptr, "no last chunk");
1935     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1936   }
1937 
1938   void clear_chunk(stackChunkOop chunk);
1939   template<bool check_stub>
1940   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1941   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1942 
1943   void thaw_lockstack(stackChunkOop chunk);
1944 
1945   // fast path
1946   inline void prefetch_chunk_pd(void* start, int size_words);
1947   void patch_return(intptr_t* sp, bool is_last);
1948 
1949   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1950   inline intptr_t* push_cleanup_continuation();


1951   void throw_interrupted_exception(JavaThread* current, frame& top);
1952 
1953   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1954   void finish_thaw(frame& f);
1955 
1956 private:
1957   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1958   void finalize_thaw(frame& entry, int argsize);
1959 
1960   inline bool seen_by_gc();
1961 
1962   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1963   inline void after_thaw_java_frame(const frame& f, bool bottom);
1964   inline void patch(frame& f, const frame& caller, bool bottom);
1965   void clear_bitmap_bits(address start, address end);
1966 
1967   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1968   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1969   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1970   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1971 
1972   void push_return_frame(frame& f);
1973   inline frame new_entry_frame();
1974   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
1975   inline void patch_pd(frame& f, const frame& sender);
1976   inline void patch_pd(frame& f, intptr_t* caller_sp);
1977   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
1978 
1979   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
1980 
1981   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
1982 
1983  public:
1984   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
1985 };
1986 
1987 template <typename ConfigT>

2047   chunk->set_sp(chunk->bottom());
2048   chunk->set_max_thawing_size(0);
2049 }
2050 
2051 template<bool check_stub>
2052 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2053   bool empty = false;
2054   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2055   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2056   assert(chunk_sp == f.sp(), "");
2057   assert(chunk_sp == f.unextended_sp(), "");
2058 
2059   int frame_size = f.cb()->frame_size();
2060   argsize = f.stack_argsize();
2061 
2062   assert(!f.is_stub() || check_stub, "");
2063   if (check_stub && f.is_stub()) {
2064     // If we don't thaw the top compiled frame too, after restoring the saved
2065     // registers back in Java, we would hit the return barrier to thaw one more
 2066     // frame, effectively overwriting the restored registers during that call.
2067     f.next(SmallRegisterMap::instance(), true /* stop */);
2068     assert(!f.is_done(), "");
2069 
2070     f.get_cb();
2071     assert(f.is_compiled(), "");
2072     frame_size += f.cb()->frame_size();
2073     argsize = f.stack_argsize();
2074 
2075     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2076       // The caller of the runtime stub when the continuation is preempted is not at a
2077       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2078       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2079       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2080     }
2081   }
2082 
2083   f.next(SmallRegisterMap::instance(), true /* stop */);
2084   empty = f.is_done();
2085   assert(!empty || argsize == chunk->argsize(), "");
2086 
2087   if (empty) {
2088     clear_chunk(chunk);
2089   } else {
2090     chunk->set_sp(chunk->sp() + frame_size);
2091     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2092     // We set chunk->pc to the return pc into the next frame
2093     chunk->set_pc(f.pc());
2094 #ifdef ASSERT
2095     {
2096       intptr_t* retaddr_slot = (chunk_sp
2097                                 + frame_size
2098                                 - frame::sender_sp_ret_address_offset());
2099       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2100              "unexpected pc");
2101     }
2102 #endif
2103   }

2223   return rs.sp();
2224 }
2225 
2226 inline bool ThawBase::seen_by_gc() {
2227   return _barriers || _cont.tail()->is_gc_mode();
2228 }
2229 
2230 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2231 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2232   if (UseZGC || UseShenandoahGC) {
2233     chunk->relativize_derived_pointers_concurrently();
2234   }
2235 #endif
2236 }
2237 
2238 template <typename ConfigT>
2239 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2240   Continuation::preempt_kind preempt_kind;
2241   bool retry_fast_path = false;
2242 

2243   _preempted_case = chunk->preempted();
2244   if (_preempted_case) {
2245     ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2246     if (waiter != nullptr) {
2247       // Mounted again after preemption. Resume the pending monitor operation,
2248       // which will be either a monitorenter or Object.wait() call.
2249       ObjectMonitor* mon = waiter->monitor();
2250       preempt_kind = waiter->is_wait() ? Continuation::freeze_on_wait : Continuation::freeze_on_monitorenter;
2251 
2252       bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2253       assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2254       if (!mon_acquired) {
2255         // Failed to acquire monitor. Return to enterSpecial to unmount again.

2256         return push_cleanup_continuation();
2257       }

2258       chunk = _cont.tail();  // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2259     } else {
 2260       // Preemption cancelled in the monitorenter case. We actually acquired
 2261       // the monitor after freezing all frames, so there is nothing to do.
2262       preempt_kind = Continuation::freeze_on_monitorenter;




2263     }

2264     // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2265     relativize_chunk_concurrently(chunk);







2266     chunk->set_preempted(false);
2267     retry_fast_path = true;
2268   } else {
2269     relativize_chunk_concurrently(chunk);
2270   }
2271 
2272   // On first thaw after freeze restore oops to the lockstack if any.
2273   assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2274   if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2275     thaw_lockstack(chunk);
2276     retry_fast_path = true;
2277   }
2278 
2279   // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2280   // and FLAG_PREEMPTED flags from the stackChunk.
2281   if (retry_fast_path && can_thaw_fast(chunk)) {
2282     intptr_t* sp = thaw_fast<true>(chunk);
2283     if (_preempted_case) {
2284       return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2285     }

2329 
2330   intptr_t* sp = caller.sp();
2331 
2332   if (_preempted_case) {
2333     return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2334   }
2335   return sp;
2336 }
2337 
2338 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2339   log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2340   assert(!_cont.is_empty(), "no more frames");
2341   assert(num_frames > 0, "");
2342   assert(!heap_frame.is_empty(), "");
2343 
2344   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2345     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2346   } else if (!heap_frame.is_interpreted_frame()) {
2347     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2348   } else {
2349     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2350   }
2351 }
2352 
2353 template<typename FKind>
2354 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2355   assert(num_frames > 0, "");
2356 
2357   DEBUG_ONLY(_frames++;)
2358 
2359   int argsize = _stream.stack_argsize();
2360 
2361   _stream.next(SmallRegisterMap::instance());
2362   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2363 
2364   // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2365   // as it makes detecting that situation and adjusting unextended_sp tricky
2366   if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2367     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2368     num_frames++;
2369   }
2370 
2371   if (num_frames == 1 || _stream.is_done()) { // end recursion
2372     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2373     return true; // bottom
2374   } else { // recurse
2375     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2376     return false;
2377   }
2378 }
2379 
2380 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2381   stackChunkOop chunk = _cont.tail();

2446 
2447 void ThawBase::clear_bitmap_bits(address start, address end) {
2448   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2449   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2450 
 2451   // We need to clear the bits that correspond to arguments as they reside in the caller frame,
 2452   // or they will keep alive objects that are otherwise unreachable.
2453 
2454   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
 2455   // `end` could be at an odd number of stack slots from `start`, i.e. it might not be oop aligned.
 2456   // If that's the case, the bit range corresponding to the last stack slot should not have bits set
 2457   // anyway, and we assert that before returning.
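       // Worked example (a sketch, assuming a 64-bit VM and !UseCompressedOops): with
       // start = X and end = X + 12 (three 4-byte slots), effective_end becomes
       // align_down(X + 12, 8) = X + 8; the partial last slot is excluded, and the
       // assert below checks that its bit was never set anyway.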
2458   address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2459   log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2460   stackChunkOop chunk = _cont.tail();
2461   chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2462   assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2463 }
2464 
2465 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2466   assert(preempt_kind == Continuation::freeze_on_wait || preempt_kind == Continuation::freeze_on_monitorenter, "");
2467   frame top(sp);
2468   assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");


2469 
2470 #if INCLUDE_JVMTI
2471   // Finish the VTMS transition.
2472   assert(_thread->is_in_VTMS_transition(), "must be");
2473   bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2474   if (is_vthread) {
2475     if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
2476       jvmti_mount_end(_thread, _cont, top);
2477     } else {
2478       _thread->set_is_in_VTMS_transition(false);
2479       java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
2480     }
2481   }
2482 #endif
2483 
2484   if (fast_case) {
 2485     // If we thawed in the slow path, the runtime stub/native wrapper frame already
2486     // has the correct fp (see ThawBase::new_stack_frame). On the fast path though,
2487     // we copied the original fp at the time of freeze which now will have to be fixed.
2488     assert(top.is_runtime_frame() || top.is_native_frame(), "");
2489     int fsize = top.cb()->frame_size();
2490     patch_pd(top, sp + fsize);
2491   }
2492 
2493   if (preempt_kind == Continuation::freeze_on_wait) {
 2494     // Check now if we need to throw an InterruptedException.
2495     if (_thread->pending_interrupted_exception()) {

2496       throw_interrupted_exception(_thread, top);
2497       _thread->set_pending_interrupted_exception(false);
2498     }
2499   } else if (top.is_runtime_frame()) {
2500     // The continuation might now run on a different platform thread than the previous time so
2501     // we need to adjust the current thread saved in the stub frame before restoring registers.
2502     JavaThread** thread_addr = frame::saved_thread_address(top);
2503     if (thread_addr != nullptr) *thread_addr = _thread;
2504   }
2505   return sp;
2506 }
2507 
2508 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {

2509   ContinuationWrapper::SafepointOp so(current, _cont);
 2510   // Since we might safepoint, set the anchor so that the stack can be walked.
2511   set_anchor(current, top.sp());
2512   JRT_BLOCK
2513     THROW(vmSymbols::java_lang_InterruptedException());
2514   JRT_BLOCK_END
2515   clear_anchor(current);
2516 }
2517 
2518 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
2519   assert(hf.is_interpreted_frame(), "");
2520 
2521   if (UNLIKELY(seen_by_gc())) {
2522     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());





2523   }
2524 
2525   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2526 
2527   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2528 
 2529   _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2530 
2531   frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2532 
2533   intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2534   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2535   intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2536   intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2537 
2538   assert(hf.is_heap_frame(), "should be");
2539   assert(!f.is_heap_frame(), "should not be");
2540 
2541   const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2542   assert((stack_frame_bottom == stack_frame_top + fsize), "");

2547 
 2548   // Make sure the relativized locals pointer is already set.
2549   assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2550 
2551   derelativize_interpreted_frame_metadata(hf, f);
2552   patch(f, caller, is_bottom_frame);
2553 
2554   assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2555   assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2556 
2557   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2558 
2559   maybe_set_fastpath(f.sp());
2560 
2561   Method* m = hf.interpreter_frame_method();
2562   assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2563   const int locals = m->max_locals();
2564 
2565   if (!is_bottom_frame) {
2566     // can only fix caller once this frame is thawed (due to callee saved regs)
2567     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2568   } else if (_cont.tail()->has_bitmap() && locals > 0) {
2569     assert(hf.is_heap_frame(), "should be");
2570     address start = (address)(heap_frame_bottom - locals);
2571     address end = (address)heap_frame_bottom;
2572     clear_bitmap_bits(start, end);
2573   }
2574 
2575   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2576   caller = f;
2577 }
2578 
2579 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2580   assert(hf.is_compiled_frame(), "");
2581   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2582 
2583   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2584     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2585   }
2586 
2587   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2588 
2589   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2590 
2591   assert(caller.sp() == caller.unextended_sp(), "");
2592 
2593   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2594     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2595   }
2596 
2597   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2598   // yet laid out in the stack, and so the original_pc is not stored in it.
2599   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2600   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2601   intptr_t* const stack_frame_top = f.sp();
2602   intptr_t* const heap_frame_top = hf.unextended_sp();
2603 
2604   const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;

2623   assert(!f.is_deoptimized_frame(), "");
2624   if (hf.is_deoptimized_frame()) {
2625     maybe_set_fastpath(f.sp());
2626   } else if (_thread->is_interp_only_mode()
2627               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2628     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2629     // cannot rely on nmethod patching for deopt.
2630     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2631 
2632     log_develop_trace(continuations)("Deoptimizing thawed frame");
2633     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2634 
2635     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2636     assert(f.is_deoptimized_frame(), "");
2637     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2638     maybe_set_fastpath(f.sp());
2639   }
2640 
2641   if (!is_bottom_frame) {
2642     // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2643     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2644   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2645     address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2646     int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2647     int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2648     clear_bitmap_bits(start, start + argsize_in_bytes);
2649   }
2650 
2651   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2652   caller = f;
2653 }
2654 
2655 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2656   DEBUG_ONLY(_frames++;)
2657 
2658   if (UNLIKELY(seen_by_gc())) {
2659     // Process the stub's caller here since we might need the full map.
2660     RegisterMap map(nullptr,
2661                     RegisterMap::UpdateMap::include,
2662                     RegisterMap::ProcessFrames::skip,
2663                     RegisterMap::WalkContinuation::skip);
2664     map.set_include_argument_oops(false);
2665     _stream.next(&map);
2666     assert(!_stream.is_done(), "");
2667     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2668   } else {
2669     _stream.next(SmallRegisterMap::instance());
2670     assert(!_stream.is_done(), "");
2671   }
2672 
2673   recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2674 
2675   assert(caller.is_compiled_frame(), "");
2676   assert(caller.sp() == caller.unextended_sp(), "");
2677 
2678   DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2679 
2680   frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2681   intptr_t* stack_frame_top = f.sp();
2682   intptr_t* heap_frame_top = hf.sp();
2683   int fsize = ContinuationHelper::StubFrame::size(hf);
2684 
2685   copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2686                   fsize + frame::metadata_words);
2687 
2688   patch(f, caller, false /*is_bottom_frame*/);
2689 
2690   // can only fix caller once this frame is thawed (due to callee saved regs)
2691   RegisterMap map(nullptr,
2692                   RegisterMap::UpdateMap::include,
2693                   RegisterMap::ProcessFrames::skip,
2694                   RegisterMap::WalkContinuation::skip);
2695   map.set_include_argument_oops(false);
2696   f.oop_map()->update_register_map(&f, &map);
2697   ContinuationHelper::update_register_map_with_callee(caller, &map);
2698   _cont.tail()->fix_thawed_frame(caller, &map);
2699 
2700   DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2701   caller = f;
2702 }
2703 
2704 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2705   assert(hf.is_native_frame(), "");
2706   assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2707 
2708   if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2709     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2710   }
2711 
2712   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2713   assert(!is_bottom_frame, "");
2714 
2715   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2716 
2717   assert(caller.sp() == caller.unextended_sp(), "");
2718 
2719   if (caller.is_interpreted_frame()) {
2720     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame
2721   }
2722 
2723   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2724   // yet laid out in the stack, and so the original_pc is not stored in it.
2725   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2726   frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2727   intptr_t* const stack_frame_top = f.sp();
2728   intptr_t* const heap_frame_top = hf.unextended_sp();
2729 
2730   int fsize = ContinuationHelper::NativeFrame::size(hf);
2731   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2732 
2733   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2734   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2735   int sz = fsize + frame::metadata_words_at_bottom;
2736 
2737   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2738 
2739   patch(f, caller, false /* bottom */);
2740 
2741   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2742   assert(!f.is_deoptimized_frame(), "");
2743   assert(!hf.is_deoptimized_frame(), "");
2744   assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2745 
2746   // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2747   _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2748 
2749   DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2750   caller = f;
2751 }
2752 
2753 void ThawBase::finish_thaw(frame& f) {
2754   stackChunkOop chunk = _cont.tail();
2755 
2756   if (chunk->is_empty()) {
2757     // Only remove chunk from list if it can't be reused for another freeze
2758     if (seen_by_gc()) {
2759       _cont.set_tail(chunk->parent());
2760     } else {
2761       chunk->set_has_mixed_frames(false);
2762     }
2763     chunk->set_max_thawing_size(0);
2764   } else {
2765     chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2766   }
2767   assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
2768 
2769   if (!is_aligned(f.sp(), frame::frame_alignment)) {
2770     assert(f.is_interpreted_frame(), "");
2771     f.set_sp(align_down(f.sp(), frame::frame_alignment));
2772   }
2773   push_return_frame(f);
2774   chunk->fix_thawed_frame(f, SmallRegisterMap::instance()); // can only fix caller after push_return_frame (due to callee saved regs)






2775 
2776   assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
2777 
2778   log_develop_trace(continuations)("thawed %d frames", _frames);
2779 
2780   LogTarget(Trace, continuations) lt;
2781   if (lt.develop_is_enabled()) {
2782     LogStream ls(lt);
2783     ls.print_cr("top hframe after (thaw):");
2784     _cont.last_frame().print_value_on(&ls);
2785   }
2786 }
2787 
2788 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
2789   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
2790   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
2791 
2792   LogTarget(Trace, continuations) lt;
2793   if (lt.develop_is_enabled()) {
2794     LogStream ls(lt);

2816 
2817   ContinuationEntry* entry = thread->last_continuation();
2818   assert(entry != nullptr, "");
2819   oop oopCont = entry->cont_oop(thread);
2820 
2821   assert(!jdk_internal_vm_Continuation::done(oopCont), "");
2822   assert(oopCont == get_continuation(thread), "");
2823   verify_continuation(oopCont);
2824 
2825   assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
2826 
2827   ContinuationWrapper cont(thread, oopCont);
2828   log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
2829 
2830 #ifdef ASSERT
2831   set_anchor_to_entry(thread, cont.entry());
2832   log_frames(thread);
2833   clear_anchor(thread);
2834 #endif
2835 
2836   DEBUG_ONLY(bool preempted = cont.tail()->preempted();)
2837   Thaw<ConfigT> thw(thread, cont);
2838   intptr_t* const sp = thw.thaw(kind);
2839   assert(is_aligned(sp, frame::frame_alignment), "");
2840   DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp, preempted);)
2841 
2842   CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
2843 
2844   verify_continuation(cont.continuation());
2845   log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
2846 
2847   return sp;
2848 }
2849 
2850 #ifdef ASSERT
2851 static void do_deopt_after_thaw(JavaThread* thread) {
2852   int i = 0;
2853   StackFrameStream fst(thread, true, false);
2854   fst.register_map()->set_include_argument_oops(false);
2855   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
2856   for (; !fst.is_done(); fst.next()) {
2857     if (fst.current()->cb()->is_nmethod()) {
2858       nmethod* nm = fst.current()->cb()->as_nmethod();
2859       if (!nm->method()->is_continuation_native_intrinsic()) {
2860         nm->make_deoptimized();

2917       if (!fr.is_interpreted_frame()) {
2918         st->print_cr("size: %d argsize: %d",
2919                      ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
2920                      ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
2921       }
2922       VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
2923       if (reg != nullptr) {
2924         st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
2925       }
2926       cl.reset();
2927       DEBUG_ONLY(thread->print_frame_layout();)
2928       if (chunk != nullptr) {
2929         chunk->print_on(true, st);
2930       }
2931       return false;
2932     }
2933   }
2934   return true;
2935 }
2936 
2937 static void log_frames(JavaThread* thread) {
2938   const static int show_entry_callers = 3;
2939   LogTarget(Trace, continuations) lt;
2940   if (!lt.develop_is_enabled()) {
2941     return;
2942   }
2943   LogStream ls(lt);
2944 
2945   ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
2946   if (!thread->has_last_Java_frame()) {
2947     ls.print_cr("NO ANCHOR!");
2948   }
2949 
2950   RegisterMap map(thread,
2951                   RegisterMap::UpdateMap::include,
2952                   RegisterMap::ProcessFrames::include,
2953                   RegisterMap::WalkContinuation::skip);
2954   map.set_include_argument_oops(false);
2955 
2956   if (false) {
2957     for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
2958       f.print_on(&ls);
2959     }
2960   } else {

2962     ResetNoHandleMark rnhm;
2963     ResourceMark rm;
2964     HandleMark hm(Thread::current());
2965     FrameValues values;
2966 
2967     int i = 0;
2968     int post_entry = -1;
2969     for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
2970       f.describe(values, i, &map, i == 0);
2971       if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
2972         post_entry++;
2973       if (post_entry >= show_entry_callers)
2974         break;
2975     }
2976     values.print_on(thread, &ls);
2977   }
2978 
2979   ls.print_cr("======= end frames =========");
2980 }
2981 
2982 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted) {
2983   intptr_t* sp0 = sp;
2984   address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
2985 
2986   if (preempted && sp0 == cont.entrySP()) {


2987     // Still preempted (monitor not acquired) so no frames were thawed.
2988     assert(cont.tail()->preempted(), "");
2989     set_anchor(thread, cont.entrySP(), cont.entryPC());

2990   } else {
2991     set_anchor(thread, sp0);
2992   }
2993 
2994   log_frames(thread);
2995   if (LoomVerifyAfterThaw) {
2996     assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
2997   }
2998   assert(ContinuationEntry::assert_entry_frame_laid_out(thread), "");
2999   clear_anchor(thread);
3000 
3001   LogTarget(Trace, continuations) lt;
3002   if (lt.develop_is_enabled()) {
3003     LogStream ls(lt);
3004     ls.print_cr("Jumping to frame (thaw):");
3005     frame(sp).print_value_on(&ls);
3006   }
3007 }
3008 #endif // ASSERT
3009 
3010 #include CPU_HEADER_INLINE(continuationFreezeThaw)
3011 
3012 #ifdef ASSERT
3013 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
3014   ResourceMark rm;
3015   FrameValues values;
3016   assert(f.get_cb() != nullptr, "");
3017   RegisterMap map(f.is_heap_frame() ?
3018                     nullptr :

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/javaClasses.inline.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "code/codeCache.inline.hpp"
  28 #include "code/nmethod.inline.hpp"
  29 #include "code/vmreg.inline.hpp"
  30 #include "compiler/oopMap.inline.hpp"
  31 #include "gc/shared/continuationGCSupport.inline.hpp"
  32 #include "gc/shared/gc_globals.hpp"
  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/memAllocator.hpp"
  35 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  36 #include "interpreter/bytecodeStream.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "interpreter/interpreterRuntime.hpp"
  39 #include "jfr/jfrEvents.hpp"
  40 #include "logging/log.hpp"
  41 #include "logging/logStream.hpp"
  42 #include "oops/access.inline.hpp"
  43 #include "oops/constantPool.inline.hpp"
  44 #include "oops/method.inline.hpp"
  45 #include "oops/oopsHierarchy.hpp"
  46 #include "oops/objArrayOop.inline.hpp"
  47 #include "oops/stackChunkOop.inline.hpp"
  48 #include "prims/jvmtiThreadState.hpp"
  49 #include "runtime/arguments.hpp"
  50 #include "runtime/continuation.hpp"
  51 #include "runtime/continuationEntry.inline.hpp"
  52 #include "runtime/continuationHelper.inline.hpp"
  53 #include "runtime/continuationJavaClasses.inline.hpp"
  54 #include "runtime/continuationWrapper.inline.hpp"
  55 #include "runtime/frame.inline.hpp"
  56 #include "runtime/interfaceSupport.inline.hpp"
  57 #include "runtime/javaThread.inline.hpp"
  58 #include "runtime/jniHandles.inline.hpp"
  59 #include "runtime/keepStackGCProcessed.hpp"
  60 #include "runtime/objectMonitor.inline.hpp"
  61 #include "runtime/orderAccess.hpp"
  62 #include "runtime/prefetch.inline.hpp"
  63 #include "runtime/smallRegisterMap.inline.hpp"
  64 #include "runtime/sharedRuntime.hpp"
  65 #include "runtime/stackChunkFrameStream.inline.hpp"
  66 #include "runtime/stackFrameStream.inline.hpp"
  67 #include "runtime/stackOverflow.hpp"
  68 #include "runtime/stackWatermarkSet.inline.hpp"
  69 #include "runtime/vframe.inline.hpp"
  70 #include "runtime/vframe_hp.hpp"
  71 #include "utilities/debug.hpp"
  72 #include "utilities/exceptions.hpp"
  73 #include "utilities/macros.hpp"
  74 #include "utilities/vmError.hpp"
  75 #if INCLUDE_ZGC
  76 #include "gc/z/zStackChunkGCData.inline.hpp"
  77 #endif
  78 #if INCLUDE_JFR
  79 #include "jfr/jfr.inline.hpp"
  80 #endif
  81 #ifdef COMPILER1
  82 #include "c1/c1_Runtime1.hpp"
  83 #endif
  84 #ifdef COMPILER2
  85 #include "opto/runtime.hpp"
  86 #endif
  87 
  88 #include <type_traits>
  89 
  90 /*
  91  * This file contains the implementation of continuation freezing (yield) and thawing (run).
  92  *
  93  * This code is very latency-critical and very hot. An ordinary and well-behaved server application
  94  * would likely call these operations many thousands of times per second, on every core.
  95  *
  96  * Freeze might be called every time the application performs any I/O operation, every time it
  97  * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
  98  * multiple times in each of those cases, as it is called by the return barrier, which may be
  99  * invoked on method return.
 100  *
 101  * The amortized budget for each of those two operations is ~100-150ns. That is why, for
 102  * example, every effort is made to avoid Java-VM transitions as much as possible.
 103  *
 104  * On the fast path, all frames are known to be compiled, and the chunk requires no barriers
 105  * and so the frames are simply copied, and the bottom-most one is patched.
 106  * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets

 176 #endif
 177 
 178 // TODO: See AbstractAssembler::generate_stack_overflow_check,
 179 // Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
 180 // when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.
 181 
 182 // Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk
 183 
 184 // Used just to annotate cold/hot branches
 185 #define LIKELY(condition)   (condition)
 186 #define UNLIKELY(condition) (condition)
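     // These are plain pass-throughs. A minimal sketch of how such hints are
     // commonly defined when real branch weighting is wanted (illustrative,
     // using the GCC/Clang builtin; not this file's code):
     //
     //   #define LIKELY(condition)   (__builtin_expect(!!(condition), 1))
     //   #define UNLIKELY(condition) (__builtin_expect(!!(condition), 0))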
 187 
 188 // debugging functions
 189 #ifdef ASSERT
 190 extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue
 191 
 192 static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }
 193 
 194 static void do_deopt_after_thaw(JavaThread* thread);
 195 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
 196 static void log_frames(JavaThread* thread, bool dolog = false);
 197 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
 198 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
 199 static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr = nullptr, const char** code_name_ptr = nullptr, int* bci_ptr = nullptr);
 200 
 201 #define assert_pfl(p, ...) \
 202 do {                                           \
 203   if (!(p)) {                                  \
 204     JavaThread* t = JavaThread::active();      \
 205     if (t->has_last_Java_frame()) {            \
 206       tty->print_cr("assert(" #p ") failed:"); \
 207       t->print_frame_layout();                 \
 208     }                                          \
 209   }                                            \
 210   vmassert(p, __VA_ARGS__);                    \
 211 } while(0)
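     // Hypothetical usage sketch (names are illustrative, not from this file):
     //
     //   assert_pfl(sp <= _cont.entrySP(), "sp " INTPTR_FORMAT " above entry", p2i(sp));
     //
     // On failure this first prints the current thread's frame layout, then
     // fails through vmassert with the given message.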
 212 
 213 #else
 214 static void verify_continuation(oop continuation) { }
 215 #define assert_pfl(p, ...)
 216 #endif
 217 
 218 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
 219 template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);

 507 
 508   assert(!Interpreter::contains(_cont.entryPC()), "");
 509 
 510   _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
 511 #ifdef _LP64
 512   if (((intptr_t)_bottom_address & 0xf) != 0) {
 513     _bottom_address--;
 514   }
 515   assert(is_aligned(_bottom_address, frame::frame_alignment), "");
 516 #endif
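       // Worked example (illustrative): with 8-byte words and 16-byte frame
       // alignment, a _bottom_address ending in 0x8 fails the & 0xf test above,
       // so the decrement moves it down one word onto a 16-byte boundary.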
 517 
 518   log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
 519                 p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
 520   assert(_bottom_address != nullptr, "");
 521   assert(_bottom_address <= _cont.entrySP(), "");
 522   DEBUG_ONLY(_last_write = nullptr;)
 523 
 524   assert(_cont.chunk_invariant(), "");
 525   assert(!Interpreter::contains(_cont.entryPC()), "");
 526 #if !defined(PPC64) || defined(ZERO)
 527   static const int do_yield_frame_size = frame::metadata_words;
 528 #else
 529   static const int do_yield_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
 530 #endif
 531   // With preemption doYield() might not have been resolved yet
 532   assert(_preempt || ContinuationEntry::do_yield_nmethod()->frame_size() == do_yield_frame_size, "");
 533 
 534   if (preempt) {
 535     _last_frame = _thread->last_frame();
 536   }
 537 
 538   // properties of the continuation on the stack; all sizes are in words
 539   _cont_stack_top    = frame_sp + (!preempt ? do_yield_frame_size : 0); // we don't freeze the doYield stub frame
 540   _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
 541       - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw
 542 
 543   log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
 544     cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
 545   assert(cont_size() > 0, "");
 546 
 547   if (LockingMode != LM_LIGHTWEIGHT) {
 548     _monitors_in_lockstack = 0;
 549   } else {
 550     _monitors_in_lockstack = _thread->lock_stack().monitor_count();
 551   }
 552 }
 553 
 554 void FreezeBase::init_rest() { // we want to postpone some initialization after chunk handling
 555   _freeze_size = 0;
 556   _total_align_size = 0;
 557   NOT_PRODUCT(_frames = 0;)
 558 }
 559 

 893   freeze_result res = recurse_freeze(f, caller, 0, false, true);
 894 
 895   if (res == freeze_ok) {
 896     finish_freeze(f, caller);
 897     _cont.write();
 898   }
 899 
 900   return res;
 901 }
 902 
 903 frame FreezeBase::freeze_start_frame() {
 904   if (LIKELY(!_preempt)) {
 905     return freeze_start_frame_yield_stub();
 906   } else {
 907     return freeze_start_frame_on_preempt();
 908   }
 909 }
 910 
 911 frame FreezeBase::freeze_start_frame_yield_stub() {
 912   frame f = _thread->last_frame();
 913   assert(ContinuationEntry::do_yield_nmethod()->contains(f.pc()), "must be");
 914   f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
 915   assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
 916   return f;
 917 }
 918 
 919 frame FreezeBase::freeze_start_frame_on_preempt() {
 920   assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
 921   assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
 922   return _last_frame;
 923 }
 924 
 925 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
 926 NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
 927   assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
 928   assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
 929          || ((top && _preempt) == f.is_native_frame()), "");
 930 
 931   if (stack_overflow()) {
 932     return freeze_exception;
 933   }

1089     log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
1090     if (chunk->is_empty()) {
1091       int sp = chunk->stack_size() - argsize_md;
1092       chunk->set_sp(sp);
1093       chunk->set_bottom(sp);
1094       _freeze_size += overlap;
1095       assert(chunk->max_thawing_size() == 0, "");
1096     } DEBUG_ONLY(else empty_chunk = false;)
1097   }
1098   assert(!chunk->is_gc_mode(), "");
1099   assert(!chunk->has_bitmap(), "");
1100   chunk->set_has_mixed_frames(true);
1101 
1102   assert(chunk->requires_barriers() == _barriers, "");
1103   assert(!_barriers || chunk->is_empty(), "");
1104 
1105   assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
1106   assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
1107 
1108   if (_preempt) {
1109     frame top_frame = _thread->last_frame();
1110     if (top_frame.is_interpreted_frame()) {
1111       // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
1112       // We need it so that on resume we can restore the sp to the right place, since
1113       // thawing might add an alignment word to the expression stack (see finish_thaw()).
1114       // We do it now that we know freezing will be successful.
1115       prepare_freeze_interpreted_top_frame(top_frame);
1116     }
1117 
1118     // Do this now so should_process_args_at_top() is set before calling finish_freeze,
1119     // in case we need to apply GC barriers to frames in this stackChunk.
1120     if (_thread->at_preemptable_init()) {
1121       assert(top_frame.is_interpreted_frame(), "only InterpreterRuntime::_new/resolve_from_cache allowed");
1122       chunk->set_at_klass_init(true);
1123       Method* m = top_frame.interpreter_frame_method();
1124       Bytecode current_bytecode = Bytecode(m, top_frame.interpreter_frame_bcp());
1125       Bytecodes::Code code = current_bytecode.code();
1126       int exp_size = top_frame.interpreter_frame_expression_stack_size();
1127       if (code == Bytecodes::Code::_invokestatic && exp_size > 0) {
1128         chunk->set_has_args_at_top(true);
1129       }
1130     }
1131   }
1132 
1133   // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
1134   // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
1135   // will either see no continuation or a consistent chunk.
1136   unwind_frames();
1137 
1138   chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);
1139 
1140   if (lt.develop_is_enabled()) {
1141     LogStream ls(lt);
1142     ls.print_cr("top chunk:");
1143     chunk->print_on(&ls);
1144   }
1145 
1146   if (_monitors_in_lockstack > 0) {
1147     freeze_lockstack(chunk);
1148   }
1149 

1621       // Some GCs could satisfy slow-path allocations directly in the old
1622       // gen; we need to explicitly check if that was the case.
1623       _barriers = chunk->requires_barriers();
1624     }
1625   }
1626 
1627   if (_barriers) {
1628     log_develop_trace(continuations)("allocation requires barriers");
1629   }
1630 
1631   assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1632 
1633   return chunk;
1634 }
1635 
1636 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1637   ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1638   Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1639 }
1640 
1641 class AnchorMark : public StackObj {
1642   JavaThread* _current;
1643   frame& _top_frame;
1644   intptr_t* _last_sp_from_frame;
1645   bool _is_interpreted;
1646 
1647  public:
1648   AnchorMark(JavaThread* current, frame& f) : _current(current), _top_frame(f), _is_interpreted(false) {
1649     intptr_t* sp = anchor_mark_set_pd();
1650     set_anchor(_current, sp);
1651   }
1652   ~AnchorMark() {
1653     clear_anchor(_current);
1654     anchor_mark_clear_pd();
1655   }
1656   inline intptr_t* anchor_mark_set_pd();
1657   inline void anchor_mark_clear_pd();
1658 };
1659 
1660 #if INCLUDE_JVMTI
1661 static int num_java_frames(ContinuationWrapper& cont) {
1662   ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1663   int count = 0;
1664   for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1665     count += chunk->num_java_frames();
1666   }
1667   return count;
1668 }
1669 
1670 static void invalidate_jvmti_stack(JavaThread* thread) {
1671   if (thread->is_interp_only_mode()) {
1672     JvmtiThreadState *state = thread->jvmti_thread_state();
1673     if (state != nullptr)
1674       state->invalidate_cur_stack_depth();
1675   }
1676 }
1677 
1678 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1679   if (JvmtiExport::can_post_frame_pop()) {
1680     int num_frames = num_java_frames(cont);
1681 
1682     ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1683     JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1684   }
1685   invalidate_jvmti_stack(thread);
1686 }
1687 
1688 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top, Continuation::preempt_kind pk) {
1689   assert(current->vthread() != nullptr, "must be");
1690 
1691   HandleMarkCleaner hm(current);  // Cleanup vth and so._conth Handles
1692   Handle vth(current, current->vthread());

1693   ContinuationWrapper::SafepointOp so(current, cont);
1694 
1695   AnchorMark am(current, top);  // Set anchor so that the stack is walkable.

1696 
1697   JRT_BLOCK
1698     JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
1699 
1700     if (current->pending_contended_entered_event()) {
1701       // No monitor JVMTI events for ObjectLocker case.
1702       if (pk != Continuation::object_locker) {
1703         JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1704       }
1705       current->set_contended_entered_monitor(nullptr);
1706     }
1707   JRT_BLOCK_END


1708 }
1709 #endif // INCLUDE_JVMTI
1710 
1711 #ifdef ASSERT
1712 // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1713 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1714 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1715 bool FreezeBase::check_valid_fast_path() {
1716   ContinuationEntry* ce = _thread->last_continuation();
1717   RegisterMap map(_thread,
1718                   RegisterMap::UpdateMap::skip,
1719                   RegisterMap::ProcessFrames::skip,
1720                   RegisterMap::WalkContinuation::skip);
1721   map.set_include_argument_oops(false);
1722   bool is_top_frame = true;
1723   for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1724     if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1725       return false;
1726     }
1727   }
1728   return true;
1729 }
1730 
1731 static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr, const char** code_name_ptr, int* bci_ptr) {
1732   JavaThread* current = JavaThread::current();
1733   ResourceMark rm(current);
1734 
1735   Method* m;
1736   const char* code_name;
1737   int bci;
1738   if (preempt_kind == Continuation::monitorenter) {
1739     assert(top.is_interpreted_frame() || top.is_runtime_frame(), "");
1740     bool at_sync_method;
1741     if (top.is_interpreted_frame()) {
1742       m = top.interpreter_frame_method();
1743       assert(!m->is_native() || m->is_synchronized(), "invalid method %s", m->external_name());
1744       address bcp = top.interpreter_frame_bcp();
1745       assert(bcp != 0 || m->is_native(), "");
1746       at_sync_method = m->is_synchronized() && (bcp == 0 || bcp == m->code_base());
1747       // bcp is advanced on monitorenter before making the VM call, adjust for that.
1748       bool at_sync_bytecode = bcp > m->code_base() && Bytecode(m, bcp - 1).code() == Bytecodes::Code::_monitorenter;
1749       assert(at_sync_method || at_sync_bytecode, "");
1750       bci = at_sync_method ? -1 : top.interpreter_frame_bci();
1751     } else {
1752       CodeBlob* cb = top.cb();
1753       RegisterMap reg_map(current,
1754                   RegisterMap::UpdateMap::skip,
1755                   RegisterMap::ProcessFrames::skip,
1756                   RegisterMap::WalkContinuation::skip);
1757       frame fr = top.sender(&reg_map);
1758       vframe*  vf  = vframe::new_vframe(&fr, &reg_map, current);
1759       compiledVFrame* cvf = compiledVFrame::cast(vf);
1760       m = cvf->method();
1761       bci = cvf->scope()->bci();
1762       at_sync_method = bci == SynchronizationEntryBCI;
1763       assert(!at_sync_method || m->is_synchronized(), "bci is %d but method %s is not synchronized", bci, m->external_name());
1764       bool is_c1_monitorenter = false, is_c2_monitorenter = false;
1765       COMPILER1_PRESENT(is_c1_monitorenter = cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
1766                                              cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id);)
1767       COMPILER2_PRESENT(is_c2_monitorenter = cb == CodeCache::find_blob(OptoRuntime::complete_monitor_locking_Java());)
1768       assert(is_c1_monitorenter || is_c2_monitorenter, "wrong runtime stub frame");
1769     }
1770     code_name = at_sync_method ? "synchronized method" : "monitorenter";
1771   } else if (preempt_kind == Continuation::object_wait) {
1772     assert(top.is_interpreted_frame() || top.is_native_frame(), "");
1773     m  = top.is_interpreted_frame() ? top.interpreter_frame_method() : top.cb()->as_nmethod()->method();
1774     assert(m->is_object_wait0(), "");
1775     bci = 0;
1776     code_name = "";
1777   } else {
1778     assert(preempt_kind == Continuation::object_locker, "invalid preempt kind");
1779     assert(top.is_interpreted_frame(), "");
1780     m = top.interpreter_frame_method();
1781     Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
1782     Bytecodes::Code code = current_bytecode.code();
1783     assert(code == Bytecodes::Code::_new || code == Bytecodes::Code::_invokestatic ||
1784            (code == Bytecodes::Code::_getstatic || code == Bytecodes::Code::_putstatic), "invalid bytecode");
1785     bci = top.interpreter_frame_bci();
1786     code_name = Bytecodes::name(current_bytecode.code());
1787   }
1788   assert(bci >= 0 || m->is_synchronized(), "invalid bci:%d at method %s", bci, m->external_name());
1789 
1790   if (m_ptr != nullptr) {
1791     *m_ptr = m;
1792     *code_name_ptr = code_name;
1793     *bci_ptr = bci;
1794   }
1795 }
1796 
1797 static void log_preempt_after_freeze(ContinuationWrapper& cont) {
1798   JavaThread* current = cont.thread();
1799   StackChunkFrameStream<ChunkFrames::Mixed> sfs(cont.tail());
1800   frame top_frame = sfs.to_frame();
1801   bool at_init = current->at_preemptable_init();
1802   bool at_enter = current->current_pending_monitor() != nullptr;
1803   bool at_wait = current->current_waiting_monitor() != nullptr;
1804   assert((at_enter && !at_wait) || (!at_enter && at_wait), "");
1805   Continuation::preempt_kind pk = at_init ? Continuation::object_locker : at_enter ? Continuation::monitorenter : Continuation::object_wait;
1806 
1807   Method* m = nullptr;
1808   const char* code_name = nullptr;
1809   int bci = InvalidFrameStateBci;
1810   verify_frame_kind(top_frame, pk, &m, &code_name, &bci);
1811   assert(m != nullptr && code_name != nullptr && bci != InvalidFrameStateBci, "should be set");
1812 
1813   ResourceMark rm(current);
1814   if (bci < 0) {
1815     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " while synchronizing on %smethod %s", current->monitor_owner_id(), m->is_native() ? "native " : "", m->external_name());
1816   } else if (m->is_object_wait0()) {
1817     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at native method %s", current->monitor_owner_id(), m->external_name());
1818   } else {
1819     Klass* k = current->preempt_init_klass();
1820     assert(k != nullptr || !at_init, "");
1821     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at %s(bci:%d) in method %s %s%s", current->monitor_owner_id(),
1822             code_name, bci, m->external_name(), at_init ? "trying to initialize klass " : "", at_init ? k->external_name() : "");
1823   }
1824 }
1825 #endif // ASSERT
1826 
1827 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1828   verify_continuation(cont.continuation());
1829   assert(!cont.is_empty(), "");
1830 
1831   log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1832   return freeze_ok;
1833 }
1834 
1835 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1836   if (UNLIKELY(res != freeze_ok)) {
1837     JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1838     verify_continuation(cont.continuation());
1839     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1840     return res;
1841   }
1842 
1843   JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1844   return freeze_epilog(cont);
1845 }
1846 
1847 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1848   if (UNLIKELY(res != freeze_ok)) {
1849     verify_continuation(cont.continuation());
1850     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1851     return res;
1852   }
1853 
1854   // Set up things so that on return to Java we jump to preempt stub.
1855   patch_return_pc_with_preempt_stub(old_last_frame);
1856   cont.tail()->set_preempted(true);
1857   DEBUG_ONLY(log_preempt_after_freeze(cont);)
1858   return freeze_epilog(cont);
1859 }
1860 
1861 template<typename ConfigT, bool preempt>
1862 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1863   assert(!current->has_pending_exception(), "");
1864 
1865 #ifdef ASSERT
1866   log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1867   log_frames(current, false);
1868 #endif
1869 
1870   CONT_JFR_ONLY(EventContinuationFreeze event;)
1871 
1872   ContinuationEntry* entry = current->last_continuation();
1873 
1874   oop oopCont = entry->cont_oop(current);
1875   assert(oopCont == current->last_continuation()->cont_oop(current), "");
1876   assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1877 
1878   verify_continuation(oopCont);
1879   ContinuationWrapper cont(current, oopCont);
1880   log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1881 
1882   assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1883 
1884   assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
1885          "Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1886 
1887   if (entry->is_pinned() || current->held_monitor_count() > 0) {

2041   // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
2042   // for the Java frames in the check below.
2043   if (!stack_overflow_check(thread, size + 300, bottom)) {
2044     return 0;
2045   }
2046 
2047   log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
2048                               p2i(bottom), p2i(bottom - size), size);
2049   return size;
2050 }
2051 
2052 class ThawBase : public StackObj {
2053 protected:
2054   JavaThread* _thread;
2055   ContinuationWrapper& _cont;
2056   CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
2057 
2058   intptr_t* _fastpath;
2059   bool _barriers;
2060   bool _preempted_case;
2061   bool _process_args_at_top;
2062   intptr_t* _top_unextended_sp_before_thaw;
2063   int _align_size;
2064   DEBUG_ONLY(intptr_t* _top_stack_address);
2065 
2066   // Only used for some preemption cases.
2067   ObjectMonitor* _monitor;
2068 
2069   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
2070 
2071   NOT_PRODUCT(int _frames;)
2072 
2073 protected:
2074   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
2075       _thread(thread), _cont(cont),
2076       _fastpath(nullptr) {
2077     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
2078     assert(cont.tail() != nullptr, "no last chunk");
2079     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
2080   }
2081 
2082   void clear_chunk(stackChunkOop chunk);
2083   template<bool check_stub>
2084   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
2085   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
2086 
2087   void thaw_lockstack(stackChunkOop chunk);
2088 
2089   // fast path
2090   inline void prefetch_chunk_pd(void* start, int size_words);
2091   void patch_return(intptr_t* sp, bool is_last);
2092 
2093   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
2094   inline intptr_t* push_cleanup_continuation();
2095   inline intptr_t* push_preempt_adapter();
2096   intptr_t* redo_vmcall(JavaThread* current, frame& top);
2097   void throw_interrupted_exception(JavaThread* current, frame& top);
2098 
2099   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
2100   void finish_thaw(frame& f);
2101 
2102 private:
2103   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2104   void finalize_thaw(frame& entry, int argsize);
2105 
2106   inline bool seen_by_gc();
2107 
2108   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2109   inline void after_thaw_java_frame(const frame& f, bool bottom);
2110   inline void patch(frame& f, const frame& caller, bool bottom);
2111   void clear_bitmap_bits(address start, address end);
2112 
2113   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
2114   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2115   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2116   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2117 
2118   void push_return_frame(frame& f);
2119   inline frame new_entry_frame();
2120   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
2121   inline void patch_pd(frame& f, const frame& sender);
2122   inline void patch_pd(frame& f, intptr_t* caller_sp);
2123   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2124 
2125   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2126 
2127   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2128 
2129  public:
2130   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2131 };
2132 
2133 template <typename ConfigT>

2193   chunk->set_sp(chunk->bottom());
2194   chunk->set_max_thawing_size(0);
2195 }
2196 
2197 template<bool check_stub>
2198 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2199   bool empty = false;
2200   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2201   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2202   assert(chunk_sp == f.sp(), "");
2203   assert(chunk_sp == f.unextended_sp(), "");
2204 
2205   int frame_size = f.cb()->frame_size();
2206   argsize = f.stack_argsize();
2207 
2208   assert(!f.is_stub() || check_stub, "");
2209   if (check_stub && f.is_stub()) {
2210     // If we don't thaw the top compiled frame too, after restoring the saved
2211     // registers back in Java, we would hit the return barrier to thaw one more
2212     // frame effectively overwriting the restored registers during that call.
2213     f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2214     assert(!f.is_done(), "");
2215 
2216     f.get_cb();
2217     assert(f.is_compiled(), "");
2218     frame_size += f.cb()->frame_size();
2219     argsize = f.stack_argsize();
2220 
2221     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2222       // The caller of the runtime stub when the continuation is preempted is not at a
2223       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2224       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2225       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2226     }
2227   }
2228 
2229   f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2230   empty = f.is_done();
2231   assert(!empty || argsize == chunk->argsize(), "");
2232 
2233   if (empty) {
2234     clear_chunk(chunk);
2235   } else {
2236     chunk->set_sp(chunk->sp() + frame_size);
2237     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2238     // We set chunk->pc to the return pc into the next frame
2239     chunk->set_pc(f.pc());
2240 #ifdef ASSERT
2241     {
2242       intptr_t* retaddr_slot = (chunk_sp
2243                                 + frame_size
2244                                 - frame::sender_sp_ret_address_offset());
2245       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2246              "unexpected pc");
2247     }
2248 #endif
2249   }

2369   return rs.sp();
2370 }
2371 
2372 inline bool ThawBase::seen_by_gc() {
2373   return _barriers || _cont.tail()->is_gc_mode();
2374 }
2375 
2376 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2377 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2378   if (UseZGC || UseShenandoahGC) {
2379     chunk->relativize_derived_pointers_concurrently();
2380   }
2381 #endif
2382 }
2383 
2384 template <typename ConfigT>
2385 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2386   Continuation::preempt_kind preempt_kind;
2387   bool retry_fast_path = false;
2388 
2389   _process_args_at_top = false;
2390   _preempted_case = chunk->preempted();
2391   if (_preempted_case) {
2392     ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2393     if (waiter != nullptr) {
2394       // Mounted again after preemption. Resume the pending monitor operation,
2395       // which will be either a monitorenter or Object.wait() call.
2396       ObjectMonitor* mon = waiter->monitor();
2397       preempt_kind = waiter->is_wait() ? Continuation::object_wait : Continuation::monitorenter;
2398 
2399       bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2400       assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2401       if (!mon_acquired) {
2402         // Failed to acquire monitor. Return to enterSpecial to unmount again.
2403         log_trace(continuations, tracking)("Failed to acquire monitor, unmounting again");
2404         return push_cleanup_continuation();
2405       }
2406       _monitor = mon;        // remember monitor since we might need it on handle_preempted_continuation()
2407       chunk = _cont.tail();  // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2408     } else {
2409       // Preemption cancelled in the monitorenter case. We actually acquired
2410       // the monitor after freezing all frames, so there is nothing to do. In case
2411       // of preemption on ObjectLocker during klass init, we released the
2412       // monitor already at ~ObjectLocker so here we just set _monitor to
2413       // nullptr so we know there is no need to release it later.
2414       preempt_kind = Continuation::monitorenter;
2415       _monitor = nullptr;
2416     }
2417 
2418     // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2419     relativize_chunk_concurrently(chunk);
2420 
2421     if (chunk->at_klass_init()) {
2422       preempt_kind = Continuation::object_locker;
2423       chunk->set_at_klass_init(false);
2424       _process_args_at_top = chunk->has_args_at_top();
2425       if (_process_args_at_top) chunk->set_has_args_at_top(false);
2426     }
2427     chunk->set_preempted(false);
2428     retry_fast_path = true;
2429   } else {
2430     relativize_chunk_concurrently(chunk);
2431   }
2432 
2433   // On first thaw after freeze restore oops to the lockstack if any.
2434   assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2435   if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2436     thaw_lockstack(chunk);
2437     retry_fast_path = true;
2438   }
2439 
2440   // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2441   // and FLAG_PREEMPTED flags from the stackChunk.
2442   if (retry_fast_path && can_thaw_fast(chunk)) {
2443     intptr_t* sp = thaw_fast<true>(chunk);
2444     if (_preempted_case) {
2445       return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2446     }

2490 
2491   intptr_t* sp = caller.sp();
2492 
2493   if (_preempted_case) {
2494     return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2495   }
2496   return sp;
2497 }
2498 
2499 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2500   log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2501   assert(!_cont.is_empty(), "no more frames");
2502   assert(num_frames > 0, "");
2503   assert(!heap_frame.is_empty(), "");
2504 
2505   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2506     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2507   } else if (!heap_frame.is_interpreted_frame()) {
2508     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2509   } else {
2510     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
2511   }
2512 }
2513 
2514 template<typename FKind>
2515 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2516   assert(num_frames > 0, "");
2517 
2518   DEBUG_ONLY(_frames++;)
2519 
2520   int argsize = _stream.stack_argsize();
2521 
2522   _stream.next(SmallRegisterMap::instance_no_args());
2523   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2524 
2525   // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2526   // as it makes detecting that situation and adjusting unextended_sp tricky
2527   if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2528     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2529     num_frames++;
2530   }
2531 
2532   if (num_frames == 1 || _stream.is_done()) { // end recursion
2533     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2534     return true; // bottom
2535   } else { // recurse
2536     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2537     return false;
2538   }
2539 }
2540 
2541 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2542   stackChunkOop chunk = _cont.tail();

2607 
2608 void ThawBase::clear_bitmap_bits(address start, address end) {
2609   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2610   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2611 
2612   // we need to clear the bits that correspond to arguments as they reside in the caller frame
2613   // or they will keep objects that are otherwise unreachable alive.
2614 
2615   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2616   // `end` could be at an odd number of stack slots from `start`, i.e. it might not be oop aligned.
2617   // If that's the case, the bit range corresponding to the last stack slot should not have bits set
2618   // anyway, and we assert that before returning.
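       // Worked example (illustrative): with 4-byte stack slots and 8-byte
       // words, 3 argument slots give end = start + 12, which is not word
       // aligned. Without compressed oops the bitmap tracks one bit per word,
       // so `end` is aligned down to start + 8; the trailing half-word cannot
       // hold a full-width oop, so no set bit is dropped.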
2619   address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2620   log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2621   stackChunkOop chunk = _cont.tail();
2622   chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2623   assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2624 }
2625 
2626 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {

2627   frame top(sp);
2628   assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2629   DEBUG_ONLY(verify_frame_kind(top, preempt_kind);)
2630   NOT_PRODUCT(int64_t tid = _thread->monitor_owner_id();)
2631 
2632 #if INCLUDE_JVMTI
2633   // Finish the VTMS transition.
2634   assert(_thread->is_in_VTMS_transition(), "must be");
2635   bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2636   if (is_vthread) {
2637     if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
2638       jvmti_mount_end(_thread, _cont, top, preempt_kind);
2639     } else {
2640       _thread->set_is_in_VTMS_transition(false);
2641       java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
2642     }
2643   }
2644 #endif
2645 
2646   if (fast_case) {
2647     // If we thawed in the slow path the runtime stub/native wrapper frame already
2648     // has the correct fp (see ThawBase::new_stack_frame). On the fast path though,
2649     // we copied the original fp at the time of freeze which now will have to be fixed.
2650     assert(top.is_runtime_frame() || top.is_native_frame(), "");
2651     int fsize = top.cb()->frame_size();
2652     patch_pd(top, sp + fsize);
2653   }
2654 
2655   if (preempt_kind == Continuation::object_wait) {
2656     // Check now if we need to throw IE exception.
2657     bool throw_ie = _thread->pending_interrupted_exception();
2658     if (throw_ie) {
2659       throw_interrupted_exception(_thread, top);
2660       _thread->set_pending_interrupted_exception(false);
2661     }
2662     log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on Object.wait%s", tid, throw_ie ? " (throwing IE)" : "");
2663   } else if (preempt_kind == Continuation::monitorenter) {
2664     if (top.is_runtime_frame()) {
2665       // The continuation might now run on a different platform thread than the previous time so
2666       // we need to adjust the current thread saved in the stub frame before restoring registers.
2667       JavaThread** thread_addr = frame::saved_thread_address(top);
2668       if (thread_addr != nullptr) *thread_addr = _thread;
2669     }
2670     log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on monitorenter", tid);
2671   } else {
2672     // We need to redo the original call into the VM. First though, we need
2673     // to exit the monitor we just acquired (except on preemption cancelled
2674     // case where it was already released).
2675     assert(preempt_kind == Continuation::object_locker, "");
2676     if (_monitor != nullptr) _monitor->exit(_thread);
2677     sp = redo_vmcall(_thread, top);
2678   }
2679   return sp;
2680 }
2681 
2682 intptr_t* ThawBase::redo_vmcall(JavaThread* current, frame& top) {
2683   assert(!current->preempting(), "");
2684   NOT_PRODUCT(int64_t tid = current->monitor_owner_id();)
2685   intptr_t* sp = top.sp();
2686 
2687   {
2688     HandleMarkCleaner hmc(current);  // Cleanup so._conth Handle
2689     ContinuationWrapper::SafepointOp so(current, _cont);
2690     AnchorMark am(current, top);    // Set the anchor so that the stack is walkable.
2691 
2692     Method* m = top.interpreter_frame_method();
2693     Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
2694     Bytecodes::Code code = current_bytecode.code();
2695     log_develop_trace(continuations, preempt)("Redoing InterpreterRuntime::%s for " INT64_FORMAT, code == Bytecodes::Code::_new ? "_new" : "resolve_from_cache", tid);
2696 
2697     // These InterpreterRuntime entry points use JRT_ENTRY which uses a HandleMarkCleaner.
2698     // Create a HandleMark to avoid destroying so._conth.
2699     HandleMark hm(current);
2700     if (code == Bytecodes::Code::_new) {
2701       InterpreterRuntime::_new(current, m->constants(), current_bytecode.get_index_u2(code));
2702     } else {
2703       InterpreterRuntime::resolve_from_cache(current, code);
2704     }
2705   }
2706 
2707   if (current->preempting()) {
2708     // Preempted again so we just arrange to return to preempt stub to unmount.
2709     sp = push_preempt_adapter();
2710     current->set_preempt_alternate_return(nullptr);
2711     bool cancelled = current->preemption_cancelled();
2712     if (cancelled) {
2713       // Instead of calling thaw again from the preempt stub, just unmount anyway with
2714       // state of YIELDING. This will give a chance for other vthreads to run while
2715       // minimizing repeated loops of "thaw->redo_vmcall->try_preempt->preemption_cancelled->thaw..."
2716       // in case of multiple vthreads contending for the same init_lock().
2717       current->set_preemption_cancelled(false);
2718       oop vthread = current->vthread();
2719       assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2720       java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::YIELDING);
2721     }
2722     log_develop_trace(continuations, preempt)("Preempted " INT64_FORMAT " again%s", tid, cancelled ? " (preemption cancelled, setting state to YIELDING)" : "");
2723   } else {
2724     log_develop_trace(continuations, preempt)("Call successful, resuming " INT64_FORMAT, tid);
2725   }
2726   return sp;
2727 }
2728 
2729 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2730   HandleMarkCleaner hm(current);  // Cleanup so._conth Handle
2731   ContinuationWrapper::SafepointOp so(current, _cont);
2732   // Since we might safepoint set the anchor so that the stack can be walked.
2733   set_anchor(current, top.sp());
2734   JRT_BLOCK
2735     THROW(vmSymbols::java_lang_InterruptedException());
2736   JRT_BLOCK_END
2737   clear_anchor(current);
2738 }
2739 
2740 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top) {
2741   assert(hf.is_interpreted_frame(), "");
2742 
2743   if (UNLIKELY(seen_by_gc())) {
2744     if (is_top && _process_args_at_top) {
2745       log_trace(continuations, tracking)("Processing arguments in recurse_thaw_interpreted_frame");
2746       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_with_args());
2747     } else {
2748       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2749     }
2750   }
2751 
2752   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2753 
2754   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2755 
2756   _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2757 
2758   frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2759 
2760   intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2761   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2762   intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2763   intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2764 
2765   assert(hf.is_heap_frame(), "should be");
2766   assert(!f.is_heap_frame(), "should not be");
2767 
2768   const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2769   assert((stack_frame_bottom == stack_frame_top + fsize), "");

2774 
2775   // Make sure the relativized locals is already set.
2776   assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2777 
2778   derelativize_interpreted_frame_metadata(hf, f);
2779   patch(f, caller, is_bottom_frame);
2780 
2781   assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2782   assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2783 
2784   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2785 
2786   maybe_set_fastpath(f.sp());
2787 
2788   Method* m = hf.interpreter_frame_method();
2789   assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2790   const int locals = m->max_locals();
2791 
2792   if (!is_bottom_frame) {
2793     // can only fix caller once this frame is thawed (due to callee saved regs)
2794     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2795   } else if (_cont.tail()->has_bitmap() && locals > 0) {
2796     assert(hf.is_heap_frame(), "should be");
2797     address start = (address)(heap_frame_bottom - locals);
2798     address end = (address)heap_frame_bottom;
2799     clear_bitmap_bits(start, end);
2800   }
2801 
2802   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2803   caller = f;
2804 }
2805 
2806 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2807   assert(hf.is_compiled_frame(), "");
2808   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2809 
2810   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2811     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2812   }
2813 
2814   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2815 
2816   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2817 
2818   assert(caller.sp() == caller.unextended_sp(), "");
2819 
2820   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2821     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2822   }
2823 
2824   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2825   // yet laid out in the stack, and so the original_pc is not stored in it.
2826   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2827   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2828   intptr_t* const stack_frame_top = f.sp();
2829   intptr_t* const heap_frame_top = hf.unextended_sp();
2830 
2831   const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;

2850   assert(!f.is_deoptimized_frame(), "");
2851   if (hf.is_deoptimized_frame()) {
2852     maybe_set_fastpath(f.sp());
2853   } else if (_thread->is_interp_only_mode()
2854               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2855     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2856     // cannot rely on nmethod patching for deopt.
2857     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2858 
2859     log_develop_trace(continuations)("Deoptimizing thawed frame");
2860     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2861 
2862     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2863     assert(f.is_deoptimized_frame(), "");
2864     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2865     maybe_set_fastpath(f.sp());
2866   }
2867 
2868   if (!is_bottom_frame) {
2869     // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2870     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2871   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2872     address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2873     int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2874     int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2875     clear_bitmap_bits(start, start + argsize_in_bytes);
2876   }
2877 
2878   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2879   caller = f;
2880 }
2881 
2882 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2883   DEBUG_ONLY(_frames++;)
2884 
2885   if (UNLIKELY(seen_by_gc())) {
2886     // Process the stub's caller here since we might need the full map.
2887     RegisterMap map(nullptr,
2888                     RegisterMap::UpdateMap::include,
2889                     RegisterMap::ProcessFrames::skip,
2890                     RegisterMap::WalkContinuation::skip);
2891     map.set_include_argument_oops(false);
2892     _stream.next(&map);
2893     assert(!_stream.is_done(), "");
2894     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2895   } else {
2896     _stream.next(SmallRegisterMap::instance_no_args());
2897     assert(!_stream.is_done(), "");
2898   }
2899 
2900   recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2901 
2902   assert(caller.is_compiled_frame(), "");
2903   assert(caller.sp() == caller.unextended_sp(), "");
2904 
2905   DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2906 
2907   frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2908   intptr_t* stack_frame_top = f.sp();
2909   intptr_t* heap_frame_top = hf.sp();
2910   int fsize = ContinuationHelper::StubFrame::size(hf);
2911 
2912   copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2913                   fsize + frame::metadata_words);
2914 
2915   patch(f, caller, false /*is_bottom_frame*/);
2916 
2917   // can only fix caller once this frame is thawed (due to callee saved regs)
2918   RegisterMap map(nullptr,
2919                   RegisterMap::UpdateMap::include,
2920                   RegisterMap::ProcessFrames::skip,
2921                   RegisterMap::WalkContinuation::skip);
2922   map.set_include_argument_oops(false);
2923   f.oop_map()->update_register_map(&f, &map);
2924   ContinuationHelper::update_register_map_with_callee(caller, &map);
2925   _cont.tail()->fix_thawed_frame(caller, &map);
2926 
2927   DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2928   caller = f;
2929 }
2930 
2931 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2932   assert(hf.is_native_frame(), "");
2933   assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2934 
2935   if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2936     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2937   }
2938 
2939   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2940   assert(!is_bottom_frame, "");
2941 
2942   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2943 
2944   assert(caller.sp() == caller.unextended_sp(), "");
2945 
2946   if (caller.is_interpreted_frame()) {
2947     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame
2948   }
2949 
2950   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2951   // yet laid out in the stack, and so the original_pc is not stored in it.
2952   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2953   frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2954   intptr_t* const stack_frame_top = f.sp();
2955   intptr_t* const heap_frame_top = hf.unextended_sp();
2956 
2957   int fsize = ContinuationHelper::NativeFrame::size(hf);
2958   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2959 
2960   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2961   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2962   int sz = fsize + frame::metadata_words_at_bottom;
2963 
2964   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2965 
2966   patch(f, caller, false /* bottom */);
2967 
2968   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2969   assert(!f.is_deoptimized_frame(), "");
2970   assert(!hf.is_deoptimized_frame(), "");
2971   assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2972 
2973   // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2974   _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2975 
2976   DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2977   caller = f;
2978 }
2979 
2980 void ThawBase::finish_thaw(frame& f) {
2981   stackChunkOop chunk = _cont.tail();
2982 
2983   if (chunk->is_empty()) {
2984     // Only remove chunk from list if it can't be reused for another freeze
2985     if (seen_by_gc()) {
2986       _cont.set_tail(chunk->parent());
2987     } else {
2988       chunk->set_has_mixed_frames(false);
2989     }
2990     chunk->set_max_thawing_size(0);
2991   } else {
2992     chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2993   }
2994   assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
2995 
2996   if (!is_aligned(f.sp(), frame::frame_alignment)) {
2997     assert(f.is_interpreted_frame(), "");
2998     f.set_sp(align_down(f.sp(), frame::frame_alignment));
2999   }
3000   push_return_frame(f);
3001   // can only fix caller after push_return_frame (due to callee saved regs)
3002   if (_process_args_at_top) {
3003     log_trace(continuations, tracking)("Processing arguments in finish_thaw");
3004     chunk->fix_thawed_frame(f, SmallRegisterMap::instance_with_args());
3005   } else {
3006     chunk->fix_thawed_frame(f, SmallRegisterMap::instance_no_args());
3007   }
3008 
3009   assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
3010 
3011   log_develop_trace(continuations)("thawed %d frames", _frames);
3012 
3013   LogTarget(Trace, continuations) lt;
3014   if (lt.develop_is_enabled()) {
3015     LogStream ls(lt);
3016     ls.print_cr("top hframe after (thaw):");
3017     _cont.last_frame().print_value_on(&ls);
3018   }
3019 }
3020 
3021 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
3022   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
3023   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
3024 
3025   LogTarget(Trace, continuations) lt;
3026   if (lt.develop_is_enabled()) {
3027     LogStream ls(lt);

3049 
3050   ContinuationEntry* entry = thread->last_continuation();
3051   assert(entry != nullptr, "");
3052   oop oopCont = entry->cont_oop(thread);
3053 
3054   assert(!jdk_internal_vm_Continuation::done(oopCont), "");
3055   assert(oopCont == get_continuation(thread), "");
3056   verify_continuation(oopCont);
3057 
3058   assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
3059 
3060   ContinuationWrapper cont(thread, oopCont);
3061   log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
3062 
3063 #ifdef ASSERT
3064   set_anchor_to_entry(thread, cont.entry());
3065   log_frames(thread);
3066   clear_anchor(thread);
3067 #endif
3068 

3069   Thaw<ConfigT> thw(thread, cont);
3070   intptr_t* const sp = thw.thaw(kind);
3071   assert(is_aligned(sp, frame::frame_alignment), "");
3072   DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp);)
3073 
3074   CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
3075 
3076   verify_continuation(cont.continuation());
3077   log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
3078 
3079   return sp;
3080 }
3081 
3082 #ifdef ASSERT
3083 static void do_deopt_after_thaw(JavaThread* thread) {
3084   int i = 0;
3085   StackFrameStream fst(thread, true, false);
3086   fst.register_map()->set_include_argument_oops(false);
3087   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3088   for (; !fst.is_done(); fst.next()) {
3089     if (fst.current()->cb()->is_nmethod()) {
3090       nmethod* nm = fst.current()->cb()->as_nmethod();
3091       if (!nm->method()->is_continuation_native_intrinsic()) {
3092         nm->make_deoptimized();

3149       if (!fr.is_interpreted_frame()) {
3150         st->print_cr("size: %d argsize: %d",
3151                      ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
3152                      ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
3153       }
3154       VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
3155       if (reg != nullptr) {
3156         st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
3157       }
3158       cl.reset();
3159       DEBUG_ONLY(thread->print_frame_layout();)
3160       if (chunk != nullptr) {
3161         chunk->print_on(true, st);
3162       }
3163       return false;
3164     }
3165   }
3166   return true;
3167 }
3168 
3169 static void log_frames(JavaThread* thread, bool dolog) {
3170   const static int show_entry_callers = 3;
3171   LogTarget(Trace, continuations, tracking) lt;
3172   if (!lt.develop_is_enabled() || !dolog) {
3173     return;
3174   }
3175   LogStream ls(lt);
3176 
3177   ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
3178   if (!thread->has_last_Java_frame()) {
3179     ls.print_cr("NO ANCHOR!");
3180   }
3181 
3182   RegisterMap map(thread,
3183                   RegisterMap::UpdateMap::include,
3184                   RegisterMap::ProcessFrames::include,
3185                   RegisterMap::WalkContinuation::skip);
3186   map.set_include_argument_oops(false);
3187 
3188   if (false) {
3189     for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
3190       f.print_on(&ls);
3191     }
3192   } else {

3194     ResetNoHandleMark rnhm;
3195     ResourceMark rm;
3196     HandleMark hm(Thread::current());
3197     FrameValues values;
3198 
3199     int i = 0;
3200     int post_entry = -1;
3201     for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
3202       f.describe(values, i, &map, i == 0);
3203       if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
3204         post_entry++;
3205       if (post_entry >= show_entry_callers)
3206         break;
3207     }
3208     values.print_on(thread, &ls);
3209   }
3210 
3211   ls.print_cr("======= end frames =========");
3212 }
3213 
3214 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp) {
3215   intptr_t* sp0 = sp;
3216   address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
3217 
3218   bool preempted = false;
3219   stackChunkOop tail = cont.tail();
3220   if (tail != nullptr && tail->preempted()) {
3221     // Still preempted (monitor not acquired) so no frames were thawed.

3222     set_anchor(thread, cont.entrySP(), cont.entryPC());
3223     preempted = true;
3224   } else {
3225     set_anchor(thread, sp0);
3226   }
3227 
3228   log_frames(thread);
3229   if (LoomVerifyAfterThaw) {
3230     assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
3231   }
3232   assert(ContinuationEntry::assert_entry_frame_laid_out(thread, preempted), "");
3233   clear_anchor(thread);
3234 
3235   LogTarget(Trace, continuations) lt;
3236   if (lt.develop_is_enabled()) {
3237     LogStream ls(lt);
3238     ls.print_cr("Jumping to frame (thaw):");
3239     frame(sp).print_value_on(&ls);
3240   }
3241 }
3242 #endif // ASSERT
3243 
3244 #include CPU_HEADER_INLINE(continuationFreezeThaw)
3245 
3246 #ifdef ASSERT
3247 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
3248   ResourceMark rm;
3249   FrameValues values;
3250   assert(f.get_cb() != nullptr, "");
3251   RegisterMap map(f.is_heap_frame() ?
3252                     nullptr :
< prev index next >