/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.inline.hpp"
#include "code/nmethod.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "oops/access.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/continuationHelper.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ZGC
#include "gc/z/zStackChunkGCData.inline.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif

#include <type_traits>

/*
 * This file contains the implementation of continuation freezing (yield) and thawing (run).
 *
 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
 * would likely call these operations many thousands of times per second, on every core.
 *
 * Freeze might be called every time the application performs any I/O operation, every time it
 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
 * multiple times in each of those cases, as it is called by the return barrier, which may be
 * invoked on method return.
 *
 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
 * example, every effort is made to avoid Java-VM transitions as much as possible.
 *
 * On the fast path, all frames are known to be compiled and the chunk requires no barriers,
 * so the frames are simply copied and the bottom-most one is patched.
 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
 * and absolute pointers, and barriers are invoked.
 */
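
// An illustrative, non-normative sketch of how these operations are reached
// (names as used in the diagram and comments below; the exact Java-side entry
// points in jdk.internal.vm.Continuation are an assumption here):
//
//   Continuation.run()           -> enterSpecial frame -> thaw (on re-run)
//   Continuation.yield()         -> doYield stub       -> freeze()
//   return into a frozen cont.   -> return barrier     -> prepare_thaw()/thaw()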

/************************************************

Thread-stack layout on freeze/thaw.
See corresponding stack-chunk layout in instanceStackChunkKlass.hpp

            +----------------------------+
            |      .                     |
            |      .                     |
            |      .                     |
            |   carrier frames           |
            |                            |
            |----------------------------|
            |                            |
            |    Continuation.run        |
            |                            |
            |============================|
            |    enterSpecial frame      |
            |  pc                        |
            |  rbp                       |
            |  -----                     |
        ^   |  int argsize               | = ContinuationEntry
        |   |  oopDesc* cont             |
        |   |  oopDesc* chunk            |
        |   |  ContinuationEntry* parent |
        |   |  ...                       |
        |   |============================| <------ JavaThread::_cont_entry = entry->sp()
        |   |  ? alignment word ?        |
        |   |----------------------------| <--\
        |   |                            |    |
        |   |  ? caller stack args ?     |    |   argsize (might not be 2-word aligned) words
Address |   |                            |    |   Caller is still in the chunk.
        |   |----------------------------|    |
        |   |  pc (? return barrier ?)   |    |  This pc contains the return barrier when the bottom-most frame
        |   |  rbp                       |    |  isn't the last one in the continuation.
        |   |                            |    |
        |   |    frame                   |    |
        |   |                            |    |
            +----------------------------|     \__ Continuation frames to be frozen/thawed
            |                            |     /
            |    frame                   |    |
            |                            |    |
            |----------------------------|    |
            |                            |    |
            |    frame                   |    |
            |                            |    |
            |----------------------------| <--/
            |                            |
            |    doYield/safepoint stub  | When preempting forcefully, we could have a safepoint stub
            |                            | instead of a doYield stub
            |============================| <- the sp passed to freeze
            |                            |
            |  Native freeze/thaw frames |
            |      .                     |
            |      .                     |
            |      .                     |
            +----------------------------+

************************************************/

static const bool TEST_THAW_ONE_CHUNK_FRAME = false; // force thawing frames one-at-a-time for testing

#define CONT_JFR false // emit low-level JFR events that count slow/fast path for continuation performance debugging only
#if CONT_JFR
  #define CONT_JFR_ONLY(code) code
#else
  #define CONT_JFR_ONLY(code)
#endif
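
// Example expansion (see FreezeBase::copy_to_chunk below): with CONT_JFR enabled,
//   CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)
// becomes the enclosed statement; otherwise it compiles away to nothing.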

// TODO: See AbstractAssembler::generate_stack_overflow_check,
// Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
// when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.

// Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk

// Used just to annotate cold/hot branches
#define LIKELY(condition)   (condition)
#define UNLIKELY(condition) (condition)
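
// Both currently expand to the bare condition. A sketch of what a real branch
// hint could look like on GCC/Clang (an assumption, not what this file does):
//   #define LIKELY(condition)   __builtin_expect(!!(condition), 1)
//   #define UNLIKELY(condition) __builtin_expect(!!(condition), 0)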

// debugging functions
#ifdef ASSERT
extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue

static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }

static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
static void log_frames(JavaThread* thread);
static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);

#define assert_pfl(p, ...) \
do {                                           \
  if (!(p)) {                                  \
    JavaThread* t = JavaThread::active();      \
    if (t->has_last_Java_frame()) {            \
      tty->print_cr("assert(" #p ") failed:"); \
      t->print_frame_layout();                 \
    }                                          \
  }                                            \
  vmassert(p, __VA_ARGS__);                    \
} while(0)
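
// Hypothetical usage sketch (assert_pfl behaves like vmassert, but first prints
// the thread's frame layout when the condition fails):
//   assert_pfl(sp <= bottom, "sp: " INTPTR_FORMAT, p2i(sp));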

#else
static void verify_continuation(oop continuation) { }
#define assert_pfl(p, ...)
#endif

static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);

static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier);
template<typename ConfigT> static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind);


// Entry point to freeze. Transitions are handled manually.
// Called from gen_continuation_yield() in sharedRuntime_<cpu>.cpp through Continuation::freeze_entry().
template<typename ConfigT>
static JRT_BLOCK_ENTRY(int, freeze(JavaThread* current, intptr_t* sp))
  assert(sp == current->frame_anchor()->last_Java_sp(), "");

  if (current->raw_cont_fastpath() > current->last_continuation()->entry_sp() || current->raw_cont_fastpath() < sp) {
    current->set_cont_fastpath(nullptr);
  }

  return checked_cast<int>(ConfigT::freeze(current, sp));
JRT_END

JRT_LEAF(int, Continuation::prepare_thaw(JavaThread* thread, bool return_barrier))
  return prepare_thaw_internal(thread, return_barrier);
JRT_END

template<typename ConfigT>
static JRT_LEAF(intptr_t*, thaw(JavaThread* thread, int kind))
  // TODO: JRT_LEAF and NoHandleMark is problematic for JFR events.
  // vFrameStreamCommon allocates Handles in RegisterMap for continuations.
  // Also the preemption case with JVMTI events enabled might safepoint so
  // undo the NoSafepointVerifier here and rely on handling by ContinuationWrapper.
  // JRT_ENTRY instead?
  ResetNoHandleMark rnhm;
  DEBUG_ONLY(PauseNoSafepointVerifier pnsv(&__nsv);)

  // we might modify the code cache via BarrierSetNMethod::nmethod_entry_barrier
  MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));
  return ConfigT::thaw(thread, (Continuation::thaw_kind)kind);
JRT_END

JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
  JavaThread* thread = JavaThread::thread_from_jni_environment(env);
  return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
}
JVM_END

///////////

enum class oop_kind { NARROW, WIDE };
template <oop_kind oops, typename BarrierSetT>
class Config {
public:
  typedef Config<oops, BarrierSetT> SelfT;
  using OopT = std::conditional_t<oops == oop_kind::NARROW, narrowOop, oop>;

  static freeze_result freeze(JavaThread* thread, intptr_t* const sp) {
    freeze_result res = freeze_internal<SelfT, false>(thread, sp);
    JFR_ONLY(assert((res == freeze_ok) || (res == thread->last_freeze_fail_result()), "freeze failure not set"));
    return res;
  }

  static freeze_result freeze_preempt(JavaThread* thread, intptr_t* const sp) {
    return freeze_internal<SelfT, true>(thread, sp);
  }

  static intptr_t* thaw(JavaThread* thread, Continuation::thaw_kind kind) {
    return thaw_internal<SelfT>(thread, kind);
  }
};

#ifdef _WINDOWS
static void map_stack_pages(JavaThread* thread, size_t size, address sp) {
  address new_sp = sp - size;
  address watermark = thread->stack_overflow_state()->shadow_zone_growth_watermark();

  if (new_sp < watermark) {
    size_t page_size = os::vm_page_size();
    address last_touched_page = watermark - StackOverflow::stack_shadow_zone_size();
    size_t pages_to_touch = align_up(watermark - new_sp, page_size) / page_size;
    while (pages_to_touch-- > 0) {
      last_touched_page -= page_size;
      *last_touched_page = 0;
    }
    thread->stack_overflow_state()->set_shadow_zone_growth_watermark(new_sp);
  }
}
#endif

static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
  const size_t page_size = os::vm_page_size();
  if (size > page_size) {
    if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
      return false;
    }
    WINDOWS_ONLY(map_stack_pages(thread, size, sp));
  }
  return true;
}

#ifdef ASSERT
static oop get_continuation(JavaThread* thread) {
  assert(thread != nullptr, "");
  assert(thread->threadObj() != nullptr, "");
  return java_lang_Thread::continuation(thread->threadObj());
}
#endif // ASSERT

inline void clear_anchor(JavaThread* thread) {
  thread->frame_anchor()->clear();
}

static void set_anchor(JavaThread* thread, intptr_t* sp, address pc) {
  assert(pc != nullptr, "");

  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(sp);
  anchor->set_last_Java_pc(pc);
  ContinuationHelper::set_anchor_pd(anchor, sp);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

static void set_anchor(JavaThread* thread, intptr_t* sp) {
  address pc = ContinuationHelper::return_address_at(
           sp - frame::sender_sp_ret_address_offset());
  set_anchor(thread, sp, pc);
}

static void set_anchor_to_entry(JavaThread* thread, ContinuationEntry* entry) {
  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(entry->entry_sp());
  anchor->set_last_Java_pc(entry->entry_pc());
  ContinuationHelper::set_anchor_to_entry_pd(anchor, entry);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

#if CONT_JFR
class FreezeThawJfrInfo : public StackObj {
  short _e_size;
  short _e_num_interpreted_frames;
 public:

  FreezeThawJfrInfo() : _e_size(0), _e_num_interpreted_frames(0) {}
  inline void record_interpreted_frame() { _e_num_interpreted_frames++; }
  inline void record_size_copied(int size) { _e_size += size << LogBytesPerWord; }
  template<typename Event> void post_jfr_event(Event *e, oop continuation, JavaThread* jt);
};

template<typename Event> void FreezeThawJfrInfo::post_jfr_event(Event* e, oop continuation, JavaThread* jt) {
  if (e->should_commit()) {
    log_develop_trace(continuations)("JFR event: iframes: %d size: %d", _e_num_interpreted_frames, _e_size);
    e->set_carrierThread(JFR_JVM_THREAD_ID(jt));
    e->set_continuationClass(continuation->klass());
    e->set_interpretedFrames(_e_num_interpreted_frames);
    e->set_size(_e_size);
    e->commit();
  }
}
#endif // CONT_JFR

/////////////// FREEZE ////

class FreezeBase : public StackObj {
protected:
  JavaThread* const _thread;
  ContinuationWrapper& _cont;
  bool _barriers; // only set when we allocate a chunk

  intptr_t* _bottom_address;

  // Used for preemption only
  const bool _preempt;
  frame _last_frame;

  // Used to support freezing with held monitors
  int _monitors_in_lockstack;

  int _freeze_size; // total size of all frames plus metadata in words.
  int _total_align_size;

  intptr_t* _cont_stack_top;
  intptr_t* _cont_stack_bottom;

  CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)

#ifdef ASSERT
  intptr_t* _orig_chunk_sp;
  int _fast_freeze_size;
  bool _empty;
#endif

  JvmtiSampledObjectAllocEventCollector* _jvmti_event_collector;

  NOT_PRODUCT(int _frames;)
  DEBUG_ONLY(intptr_t* _last_write;)

  inline FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempt);

public:
  NOINLINE freeze_result freeze_slow();
  void freeze_fast_existing_chunk();

  CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
  void set_jvmti_event_collector(JvmtiSampledObjectAllocEventCollector* jsoaec) { _jvmti_event_collector = jsoaec; }

  inline int size_if_fast_freeze_available();

  inline frame& last_frame() { return _last_frame; }

#ifdef ASSERT
  bool check_valid_fast_path();
#endif

protected:
  inline void init_rest();
  void throw_stack_overflow_on_humongous_chunk();

  // fast path
  inline void copy_to_chunk(intptr_t* from, intptr_t* to, int size);
  inline void unwind_frames();
  inline void patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp);

  // slow path
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) = 0;

  int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }

private:
  // slow path
  frame freeze_start_frame();
  frame freeze_start_frame_on_preempt();
  NOINLINE freeze_result recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top);
  inline frame freeze_start_frame_yield_stub();
  template<typename FKind>
  inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
  inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
  inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
  freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
  void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
  NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
  NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
  NOINLINE void finish_freeze(const frame& f, const frame& top);

  void freeze_lockstack(stackChunkOop chunk);

  inline bool stack_overflow();

  static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
                                                                        : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
  template<typename FKind> static inline frame sender(const frame& f);
  template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
  inline void set_top_frame_metadata_pd(const frame& hf);
  inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
  inline void patch_pd_unused(intptr_t* sp);
  void adjust_interpreted_frame_unextended_sp(frame& f);
  static inline void prepare_freeze_interpreted_top_frame(frame& f);
  static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);

protected:
  void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
  bool freeze_fast_new_chunk(stackChunkOop chunk);
};

template <typename ConfigT>
class Freeze : public FreezeBase {
private:
  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

public:
  inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
    : FreezeBase(thread, cont, frame_sp, preempt) {}

  freeze_result try_freeze_fast();

protected:
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) override { return allocate_chunk(stack_size, argsize_md); }
};

FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt) :
    _thread(thread), _cont(cont), _barriers(false), _preempt(preempt), _last_frame(false /* no initialization */) {
  DEBUG_ONLY(_jvmti_event_collector = nullptr;)

  assert(_thread != nullptr, "");
  assert(_thread->last_continuation()->entry_sp() == _cont.entrySP(), "");

  DEBUG_ONLY(_cont.entry()->verify_cookie();)

  assert(!Interpreter::contains(_cont.entryPC()), "");

  _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
#ifdef _LP64
  if (((intptr_t)_bottom_address & 0xf) != 0) {
    _bottom_address--;
  }
  assert(is_aligned(_bottom_address, frame::frame_alignment), "");
#endif

  log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
                p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
  assert(_bottom_address != nullptr, "");
  assert(_bottom_address <= _cont.entrySP(), "");
  DEBUG_ONLY(_last_write = nullptr;)

  assert(_cont.chunk_invariant(), "");
  assert(!Interpreter::contains(_cont.entryPC()), "");
#if !defined(PPC64) || defined(ZERO)
  static const int doYield_stub_frame_size = frame::metadata_words;
#else
  static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
#endif
  // With preemption doYield() might not have been resolved yet
  assert(_preempt || SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");

  if (preempt) {
    _last_frame = _thread->last_frame();
  }

  // properties of the continuation on the stack; all sizes are in words
  _cont_stack_top    = frame_sp + (!preempt ? doYield_stub_frame_size : 0); // we don't freeze the doYield stub frame
  _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
      - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw

  log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  assert(cont_size() > 0, "");

  if (LockingMode != LM_LIGHTWEIGHT) {
    _monitors_in_lockstack = 0;
  } else {
    _monitors_in_lockstack = _thread->lock_stack().monitor_count();
  }
}

void FreezeBase::init_rest() { // we postpone some initialization until after chunk handling
  _freeze_size = 0;
  _total_align_size = 0;
  NOT_PRODUCT(_frames = 0;)
}

void FreezeBase::freeze_lockstack(stackChunkOop chunk) {
  assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "no room for lockstack");

  _thread->lock_stack().move_to_address((oop*)chunk->start_address());
  chunk->set_lockstack_size(checked_cast<uint8_t>(_monitors_in_lockstack));
  chunk->set_has_lockstack(true);
}

void FreezeBase::copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
  stackChunkOop chunk = _cont.tail();
  chunk->copy_from_stack_to_chunk(from, to, size);
  CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)

#ifdef ASSERT
  if (_last_write != nullptr) {
    assert(_last_write == to + size, "Missed a spot: _last_write: " INTPTR_FORMAT " to+size: " INTPTR_FORMAT
        " stack_size: %d _last_write offset: " PTR_FORMAT " to+size: " PTR_FORMAT, p2i(_last_write), p2i(to+size),
        chunk->stack_size(), _last_write-chunk->start_address(), to+size-chunk->start_address());
    _last_write = to;
  }
#endif
}

static void assert_frames_in_continuation_are_safe(JavaThread* thread) {
#ifdef ASSERT
  StackWatermark* watermark = StackWatermarkSet::get(thread, StackWatermarkKind::gc);
  if (watermark == nullptr) {
    return;
  }
  ContinuationEntry* ce = thread->last_continuation();
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip);
  map.set_include_argument_oops(false);
  for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
    watermark->assert_is_frame_safe(f);
  }
#endif // ASSERT
}

#ifdef ASSERT
static bool monitors_on_stack(JavaThread* thread) {
  assert_frames_in_continuation_are_safe(thread);
  ContinuationEntry* ce = thread->last_continuation();
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip);
  map.set_include_argument_oops(false);
  for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
    if ((f.is_interpreted_frame() && ContinuationHelper::InterpretedFrame::is_owning_locks(f)) ||
        (f.is_compiled_frame() && ContinuationHelper::CompiledFrame::is_owning_locks(map.thread(), &map, f)) ||
        (f.is_native_frame() && ContinuationHelper::NativeFrame::is_owning_locks(map.thread(), f))) {
      return true;
    }
  }
  return false;
}
#endif // ASSERT

// Called _after_ the last possible safepoint during the freeze operation (chunk allocation)
void FreezeBase::unwind_frames() {
  ContinuationEntry* entry = _cont.entry();
  entry->flush_stack_processing(_thread);
  assert_frames_in_continuation_are_safe(_thread);
  JFR_ONLY(Jfr::check_and_process_sample_request(_thread);)
  assert(LockingMode != LM_LEGACY || !monitors_on_stack(_thread), "unexpected monitors on stack");
  set_anchor_to_entry(_thread, entry);
}

template <typename ConfigT>
freeze_result Freeze<ConfigT>::try_freeze_fast() {
  assert(_thread->thread_state() == _thread_in_vm, "");
  assert(_thread->cont_fastpath(), "");

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size == 0, "");

  stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words + _monitors_in_lockstack, _cont.argsize() + frame::metadata_words_at_top);
  if (freeze_fast_new_chunk(chunk)) {
    return freeze_ok;
  }
  if (_thread->has_pending_exception()) {
    return freeze_exception;
  }

  // TODO R REMOVE when deopt change is fixed
  assert(!_thread->cont_fastpath() || _barriers, "");
  log_develop_trace(continuations)("-- RETRYING SLOW --");
  return freeze_slow();
}

// Returns size needed if the continuation fits, otherwise 0.
int FreezeBase::size_if_fast_freeze_available() {
  stackChunkOop chunk = _cont.tail();
  if (chunk == nullptr || chunk->is_gc_mode() || chunk->requires_barriers() || chunk->has_mixed_frames()) {
    log_develop_trace(continuations)("chunk available %s", chunk == nullptr ? "no chunk" : "chunk requires barriers");
    return 0;
  }

  int total_size_needed = cont_size();
  const int chunk_sp = chunk->sp();

  // argsize can be nonzero if we have a caller, but the caller could be in a non-empty parent chunk,
  // so we subtract it only if we overlap with the caller, i.e. the current chunk isn't empty.
  // Consider leaving the chunk's argsize set when emptying it and removing the following branch,
  // although that would require changing stackChunkOopDesc::is_empty
  if (!chunk->is_empty()) {
    total_size_needed -= _cont.argsize() + frame::metadata_words_at_top;
  }

  total_size_needed += _monitors_in_lockstack;

  int chunk_free_room = chunk_sp - frame::metadata_words_at_bottom;
  bool available = chunk_free_room >= total_size_needed;
  log_develop_trace(continuations)("chunk available: %s size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    available ? "yes" : "no" , total_size_needed, _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  return available ? total_size_needed : 0;
}
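
// A worked example with purely illustrative numbers (not any particular
// platform's values): suppose cont_size() == 100 words, _cont.argsize() == 4,
// frame::metadata_words_at_top == 2 and the chunk is non-empty. Then
// total_size_needed == 100 - (4 + 2) == 94 words plus any lockstack slots,
// and freezing fits iff chunk->sp() - frame::metadata_words_at_bottom >= 94.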

void FreezeBase::freeze_fast_existing_chunk() {
  stackChunkOop chunk = _cont.tail();

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size > 0, "");

  if (!chunk->is_empty()) { // we are copying into a non-empty chunk
    DEBUG_ONLY(_empty = false;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (chunk->sp_address()
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
             "unexpected saved return address");
    }
#endif

    // the chunk's sp before the freeze, adjusted to point beyond the stack-passed arguments in the topmost frame
    // we overlap; we'll overwrite the chunk's top frame's callee arguments
    const int chunk_start_sp = chunk->sp() + _cont.argsize() + frame::metadata_words_at_top;
    assert(chunk_start_sp <= chunk->stack_size(), "sp not pointing into stack");

    // increase max_size by what we're freezing minus the overlap
    chunk->set_max_thawing_size(chunk->max_thawing_size() + cont_size() - _cont.argsize() - frame::metadata_words_at_top);

    intptr_t* const bottom_sp = _cont_stack_bottom - _cont.argsize() - frame::metadata_words_at_top;
    assert(bottom_sp == _bottom_address, "");
    // Because the chunk isn't empty, we know there's a caller in the chunk, therefore the bottom-most frame
    // should have a return barrier (installed back when we thawed it).
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (bottom_sp
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot)
             == StubRoutines::cont_returnBarrier(),
             "should be the continuation return barrier");
    }
#endif
    // We copy the fp from the chunk back to the stack because it contains some caller data,
    // including, possibly, an oop that might have gone stale since we thawed.
    patch_stack_pd(bottom_sp, chunk->sp_address());
    // we don't patch the return pc at this time, so as not to make the stack unwalkable for async walks

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  } else { // the chunk is empty
    const int chunk_start_sp = chunk->stack_size();

    DEBUG_ONLY(_empty = true;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

    chunk->set_max_thawing_size(cont_size());
    chunk->set_bottom(chunk_start_sp - _cont.argsize() - frame::metadata_words_at_top);
    chunk->set_sp(chunk->bottom());

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  }
}

bool FreezeBase::freeze_fast_new_chunk(stackChunkOop chunk) {
  DEBUG_ONLY(_empty = true;)

  // Install new chunk
  _cont.set_tail(chunk);

  if (UNLIKELY(chunk == nullptr || !_thread->cont_fastpath() || _barriers)) { // OOME/probably humongous
    log_develop_trace(continuations)("Retrying slow. Barriers: %d", _barriers);
    return false;
  }

  chunk->set_max_thawing_size(cont_size());

  // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
  // They'll then be stored twice: in the chunk and in the parent chunk's top frame
  const int chunk_start_sp = cont_size() + frame::metadata_words + _monitors_in_lockstack;
  assert(chunk_start_sp == chunk->stack_size(), "");

  DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

  freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA true));

  return true;
}

void FreezeBase::freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated)) {
  assert(chunk != nullptr, "");
  assert(!chunk->has_mixed_frames(), "");
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  assert(!chunk->requires_barriers(), "");
  assert(chunk == _cont.tail(), "");

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation on the stack, or a consistent chunk.
  unwind_frames();

  log_develop_trace(continuations)("freeze_fast start: chunk " INTPTR_FORMAT " size: %d orig sp: %d argsize: %d",
    p2i((oopDesc*)chunk), chunk->stack_size(), chunk_start_sp, _cont.argsize());
  assert(chunk_start_sp <= chunk->stack_size(), "");
  assert(chunk_start_sp >= cont_size(), "no room in the chunk");

  const int chunk_new_sp = chunk_start_sp - cont_size(); // the chunk's new sp, after freeze
  assert(!(_fast_freeze_size > 0) || (_orig_chunk_sp - (chunk->start_address() + chunk_new_sp)) == (_fast_freeze_size - _monitors_in_lockstack), "");

  intptr_t* chunk_top = chunk->start_address() + chunk_new_sp;
#ifdef ASSERT
  if (!_empty) {
    intptr_t* retaddr_slot = (_orig_chunk_sp
                              - frame::sender_sp_ret_address_offset());
    assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
           "unexpected saved return address");
  }
#endif

  log_develop_trace(continuations)("freeze_fast start: " INTPTR_FORMAT " sp: %d chunk_top: " INTPTR_FORMAT,
                              p2i(chunk->start_address()), chunk_new_sp, p2i(chunk_top));

  int adjust = frame::metadata_words_at_bottom;
#if INCLUDE_ASAN && defined(AARCH64)
  // Reading at offset frame::metadata_words_at_bottom from _cont_stack_top
  // will access memory in the callee frame, which in the preemption case will
  // be the VM native method being called. The Arm 64-bit ABI doesn't specify
  // a location where the frame record (return pc + fp) has to be stored within
  // a stack frame, and GCC currently chooses to save it at the top of the
  // frame (lowest address). ASan treats this memory access in the callee as
  // an overflow access to one of the locals stored in that frame. For these
  // preemption cases we don't need to read these words anyway, so we avoid it.
  if (_preempt) {
    adjust = 0;
  }
#endif
  intptr_t* from = _cont_stack_top - adjust;
  intptr_t* to   = chunk_top - adjust;
  copy_to_chunk(from, to, cont_size() + adjust);
  // Because we're not patched yet, the chunk is now in a bad state

  // patch return pc of the bottom-most frozen frame (now in the chunk)
  // with the actual caller's return address
  intptr_t* chunk_bottom_retaddr_slot = (chunk_top + cont_size()
                                         - _cont.argsize()
                                         - frame::metadata_words_at_top
                                         - frame::sender_sp_ret_address_offset());
#ifdef ASSERT
  if (!_empty) {
    assert(ContinuationHelper::return_address_at(chunk_bottom_retaddr_slot)
           == StubRoutines::cont_returnBarrier(),
           "should be the continuation return barrier");
  }
#endif
  ContinuationHelper::patch_return_address_at(chunk_bottom_retaddr_slot,
                                              chunk->pc());

  // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
  chunk->set_sp(chunk_new_sp);

  // set chunk->pc to the return address of the topmost frame in the chunk
  if (_preempt) {
    // On aarch64/riscv64, the return pc of the top frame won't necessarily be at sp[-1].
    // Also, on x64, if the top frame is the native wrapper frame, sp[-1] will not
    // be the pc we used when creating the oopmap. Get the top frame's last pc from
    // the anchor instead.
    address last_pc = _last_frame.pc();
    ContinuationHelper::patch_return_address_at(chunk_top - frame::sender_sp_ret_address_offset(), last_pc);
    chunk->set_pc(last_pc);
    // For stub/native frames the fp is not used while frozen, and will be constructed
    // again when thawing the frame (see ThawBase::handle_preempted_continuation). We
    // patch it with a special bad address to help with debugging, particularly when
    // inspecting frames and identifying invalid accesses.
    patch_pd_unused(chunk_top);
  } else {
    chunk->set_pc(ContinuationHelper::return_address_at(
                  _cont_stack_top - frame::sender_sp_ret_address_offset()));
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  _cont.write();

  log_develop_trace(continuations)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    chunk->print_on(true, &ls);
  }

  // Verification
  assert(_cont.chunk_invariant(), "");
  chunk->verify();

#if CONT_JFR
  EventContinuationFreezeFast e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(chunk));
    DEBUG_ONLY(e.set_allocate(chunk_is_allocated);)
    e.set_size(cont_size() << LogBytesPerWord);
    e.commit();
  }
#endif
}

NOINLINE freeze_result FreezeBase::freeze_slow() {
#ifdef ASSERT
  ResourceMark rm;
#endif

  log_develop_trace(continuations)("freeze_slow  #" INTPTR_FORMAT, _cont.hash());
  assert(_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");

#if CONT_JFR
  EventContinuationFreezeSlow e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(_cont.continuation()));
    e.commit();
  }
#endif

  init_rest();

  HandleMark hm(Thread::current());

  frame f = freeze_start_frame();

  LogTarget(Debug, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    f.print_on(&ls);
  }

  frame caller; // the frozen caller in the chunk
  freeze_result res = recurse_freeze(f, caller, 0, false, true);

  if (res == freeze_ok) {
    finish_freeze(f, caller);
    _cont.write();
  }

  return res;
}

frame FreezeBase::freeze_start_frame() {
  if (LIKELY(!_preempt)) {
    return freeze_start_frame_yield_stub();
  } else {
    return freeze_start_frame_on_preempt();
  }
}

frame FreezeBase::freeze_start_frame_yield_stub() {
  frame f = _thread->last_frame();
  assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
  f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
  return f;
}

frame FreezeBase::freeze_start_frame_on_preempt() {
  assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
  return _last_frame;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
  assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
  assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
         || ((top && _preempt) == f.is_native_frame()), "");

  if (stack_overflow()) {
    return freeze_exception;
  }

  if (f.is_compiled_frame()) {
    if (UNLIKELY(f.oop_map() == nullptr)) {
      // special native frame
      return freeze_pinned_native;
    }
    return recurse_freeze_compiled_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (f.is_interpreted_frame()) {
    assert(!f.interpreter_frame_method()->is_native() || (top && _preempt), "");
    return recurse_freeze_interpreted_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (top && _preempt) {
    assert(f.is_native_frame() || f.is_runtime_frame(), "");
    return f.is_native_frame() ? recurse_freeze_native_frame(f, caller) : recurse_freeze_stub_frame(f, caller);
  } else {
    // Frame can't be frozen. Most likely the call_stub or upcall_stub,
    // which indicates there are further native frames up the stack.
    return freeze_pinned_native;
  }
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
template<typename FKind>
inline freeze_result FreezeBase::recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize) {
  assert(FKind::is_instance(f), "");

  assert(fsize > 0, "");
  assert(argsize >= 0, "");
  _freeze_size += fsize;
  NOT_PRODUCT(_frames++;)

  assert(FKind::frame_bottom(f) <= _bottom_address, "");

  // We don't use FKind::frame_bottom(f) == _bottom_address because on x64 there's sometimes an extra word between
  // enterSpecial and an interpreted frame
  if (FKind::frame_bottom(f) >= _bottom_address - 1) {
    return finalize_freeze(f, caller, argsize); // recursion end
  } else {
    frame senderf = sender<FKind>(f);
    assert(FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
    freeze_result result = recurse_freeze(senderf, caller, argsize, FKind::interpreted, false); // recursive call
    return result;
  }
}

inline void FreezeBase::before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("======== FREEZING FRAME interpreted: %d bottom: %d", f.is_interpreted_frame(), is_bottom_frame);
    ls.print_cr("fsize: %d argsize: %d", fsize, argsize);
    f.print_value_on(&ls);
  }
  assert(caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
}

inline void FreezeBase::after_freeze_java_frame(const frame& hf, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    DEBUG_ONLY(hf.print_value_on(&ls);)
    assert(hf.is_heap_frame(), "should be");
    DEBUG_ONLY(print_frame_layout(hf, false, &ls);)
    if (is_bottom_frame) {
      ls.print_cr("bottom h-frame:");
      hf.print_on(&ls);
    }
  }
}

// The parameter argsize_md includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, int argsize_md) {
  int argsize = argsize_md - frame::metadata_words_at_top;
  assert(callee.is_interpreted_frame()
    || ContinuationHelper::Frame::is_stub(callee.cb())
    || callee.cb()->as_nmethod()->is_osr_method()
    || argsize == _cont.argsize(), "argsize: %d cont.argsize: %d", argsize, _cont.argsize());
  log_develop_trace(continuations)("bottom: " INTPTR_FORMAT " count %d size: %d argsize: %d",
    p2i(_bottom_address), _frames, _freeze_size << LogBytesPerWord, argsize);

  LogTarget(Trace, continuations) lt;

#ifdef ASSERT
  bool empty = _cont.is_empty();
  log_develop_trace(continuations)("empty: %d", empty);
#endif

  stackChunkOop chunk = _cont.tail();

  assert(chunk == nullptr || (chunk->max_thawing_size() == 0) == chunk->is_empty(), "");

  _freeze_size += frame::metadata_words; // for top frame's metadata

  int overlap = 0; // the args overlap the caller -- if there is one in this chunk and is of the same kind
  int unextended_sp = -1;
  if (chunk != nullptr) {
    if (!chunk->is_empty()) {
      StackChunkFrameStream<ChunkFrames::Mixed> last(chunk);
      unextended_sp = chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp());
      bool top_interpreted = Interpreter::contains(chunk->pc());
      if (callee.is_interpreted_frame() == top_interpreted) {
        overlap = argsize_md;
      }
    } else {
      unextended_sp = chunk->stack_size() - frame::metadata_words_at_top;
    }
  }

  log_develop_trace(continuations)("finalize _size: %d overlap: %d unextended_sp: %d", _freeze_size, overlap, unextended_sp);

  _freeze_size -= overlap;
  assert(_freeze_size >= 0, "");

  assert(chunk == nullptr || chunk->is_empty()
          || unextended_sp == chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp()), "");
  assert(chunk != nullptr || unextended_sp < _freeze_size, "");

  _freeze_size += _monitors_in_lockstack;

  // _barriers can be set to true by an allocation in freeze_fast, in which case the chunk is available
  bool allocated_old_in_freeze_fast = _barriers;
  assert(!allocated_old_in_freeze_fast || (unextended_sp >= _freeze_size && chunk->is_empty()),
    "Chunk allocated in freeze_fast is of insufficient size "
    "unextended_sp: %d size: %d is_empty: %d", unextended_sp, _freeze_size, chunk->is_empty());
  assert(!allocated_old_in_freeze_fast || (!UseZGC && !UseG1GC), "Unexpected allocation");

  DEBUG_ONLY(bool empty_chunk = true);
  if (unextended_sp < _freeze_size || chunk->is_gc_mode() || (!allocated_old_in_freeze_fast && chunk->requires_barriers())) {
    // ALLOCATE NEW CHUNK

    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      if (chunk == nullptr) {
        ls.print_cr("no chunk");
      } else {
        ls.print_cr("chunk barriers: %d _size: %d free size: %d",
          chunk->requires_barriers(), _freeze_size, chunk->sp() - frame::metadata_words);
        chunk->print_on(&ls);
      }
    }

    _freeze_size += overlap; // we're allocating a new chunk, so no overlap
    // overlap = 0;

    chunk = allocate_chunk_slow(_freeze_size, argsize_md);
    if (chunk == nullptr) {
      return freeze_exception;
    }

    // Install new chunk
    _cont.set_tail(chunk);
    assert(chunk->is_empty(), "");
  } else {
    // REUSE EXISTING CHUNK
    log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
    if (chunk->is_empty()) {
      int sp = chunk->stack_size() - argsize_md;
      chunk->set_sp(sp);
      chunk->set_bottom(sp);
      _freeze_size += overlap;
      assert(chunk->max_thawing_size() == 0, "");
    } DEBUG_ONLY(else empty_chunk = false;)
  }
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  chunk->set_has_mixed_frames(true);

  assert(chunk->requires_barriers() == _barriers, "");
  assert(!_barriers || chunk->is_empty(), "");

  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");

  if (_preempt) {
    frame f = _thread->last_frame();
    if (f.is_interpreted_frame()) {
      // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
      // We need it so that on resume we can restore the sp to the right place, since
      // thawing might add an alignment word to the expression stack (see finish_thaw()).
      // We do it now that we know freezing will be successful.
      prepare_freeze_interpreted_top_frame(f);
    }
  }

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation or a consistent chunk.
  unwind_frames();

  chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);

  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top chunk:");
    chunk->print_on(&ls);
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  // The topmost existing frame in the chunk; or an empty frame if the chunk is empty
  caller = StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame();

  DEBUG_ONLY(_last_write = caller.unextended_sp() + (empty_chunk ? argsize_md : overlap);)

  assert(chunk->is_in_chunk(_last_write - _freeze_size),
    "last_write-size: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(_last_write-_freeze_size), p2i(chunk->start_address()));
#ifdef ASSERT
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top hframe before (freeze):");
    assert(caller.is_heap_frame(), "should be");
    caller.print_on(&ls);
  }

  assert(!empty || Continuation::is_continuation_entry_frame(callee, nullptr), "");

  frame entry = sender(callee);

  assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
  assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
#endif

  return freeze_ok_bottom;
}

// After freezing a frame we may need to adjust some values related to the caller frame.
void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
  if (is_bottom_frame) {
    // If we're the bottom frame, we need to replace the return barrier with the real
    // caller's pc.
    address last_pc = caller.pc();
    assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
    ContinuationHelper::Frame::patch_pc(caller, last_pc);
  } else {
    assert(!caller.is_empty(), "");
  }

  patch_pd(hf, caller, is_bottom_frame);

  if (f.is_interpreted_frame()) {
    assert(hf.is_heap_frame(), "should be");
    ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
  }

#ifdef ASSERT
  if (hf.is_compiled_frame()) {
    if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
      log_develop_trace(continuations)("Freezing deoptimized frame");
      assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
      assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
    }
  }
#endif
}

#ifdef ASSERT
static void verify_frame_top(const frame& f, intptr_t* top) {
  ResourceMark rm;
  InterpreterOopMap mask;
  f.interpreted_frame_oop_map(&mask);
  assert(top <= ContinuationHelper::InterpretedFrame::frame_top(f, &mask),
         "frame_top: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT,
           p2i(top), p2i(ContinuationHelper::InterpretedFrame::frame_top(f, &mask)));
}
#endif // ASSERT

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, frame& caller,
                                                                    int callee_argsize /* incl. metadata */,
                                                                    bool callee_interpreted) {
  adjust_interpreted_frame_unextended_sp(f);

  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
  const int fsize = pointer_delta_as_int(stack_frame_bottom, stack_frame_top);

  DEBUG_ONLY(verify_frame_top(f, stack_frame_top));

  Method* frame_method = ContinuationHelper::Frame::frame_method(f);
  // including metadata between f and its args
  const int argsize = ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top;

  log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d",
    frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize, callee_interpreted);
  // we'd rather not yield inside methods annotated with @JvmtiMountTransition
  assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::InterpretedFrame>(f, caller, fsize, argsize);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  bool is_bottom_frame = result == freeze_ok_bottom;
  assert(!caller.is_empty() || is_bottom_frame, "");

  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, is_bottom_frame);)

  frame hf = new_heap_frame<ContinuationHelper::InterpretedFrame>(f, caller);
  _total_align_size += frame::align_wiggle; // add alignment room for internal interpreted frame alignment on AArch64/PPC64

  intptr_t* heap_frame_top = ContinuationHelper::InterpretedFrame::frame_top(hf, callee_argsize, callee_interpreted);
  intptr_t* heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
  assert(heap_frame_bottom == heap_frame_top + fsize, "");

  // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
  // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
  assert(!is_bottom_frame || !caller.is_interpreted_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");

  relativize_interpreted_frame_metadata(f, hf);

  patch(f, hf, caller, is_bottom_frame);

  CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;

  // Mark frame_method's GC epoch for class redefinition on_stack calculation.
  frame_method->record_gc_epoch();

  return freeze_ok;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
                                                        int callee_argsize /* incl. metadata */,
                                                        bool callee_interpreted) {
  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
  // including metadata between f and its stackargs
  int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
  int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);

  int real_frame_size = 0;
  bool augmented = f.was_augmented_on_entry(real_frame_size);
  if (augmented) {
1286     // The args reside inside the frame so clear argsize. If the caller is compiled,
1287     // this will cause the stack arguments passed by the caller to be frozen when
1288     // freezing the caller frame itself. If the caller is interpreted, this will have
1289     // the effect of discarding the arg area created in the i2c stub.
1290     argsize = 0;
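    // real_frame_size covers the whole extended frame; exclude the words that
    // overlap a compiled callee's incoming stack arguments, since those were
    // already frozen as part of the callee frame.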
1291     fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
1292 #ifdef ASSERT
1293     nmethod* nm = f.cb()->as_nmethod();
1294     Method* method = nm->method();
1295     address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
1296     CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
1297     assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
1298     assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
1299            (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
1300            (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
1301            "frame should not be extended");
1302 #endif
1303   }
1304 
1305   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
1306                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1307                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1308                              _freeze_size, fsize, argsize, augmented);
1309   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1310   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1311 
1312   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1313   if (UNLIKELY(result > freeze_ok_bottom)) {
1314     return result;
1315   }
1316 
1317   bool is_bottom_frame = result == freeze_ok_bottom;
1318   assert(!caller.is_empty() || is_bottom_frame, "");
1319   assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");
1320 
1321   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1322 
1323   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);
1324 
1325   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1326 
1327   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1328   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1329 
1330   if (caller.is_interpreted_frame()) {
1331     // When thawing the frame we might need to add alignment (see Thaw::align)
1332     _total_align_size += frame::align_wiggle;
1333   }
1334 
1335   patch(f, hf, caller, is_bottom_frame);
1336 
1337   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1338 
1339   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1340   caller = hf;
1341   return freeze_ok;
1342 }
1343 
1344 NOINLINE freeze_result FreezeBase::recurse_freeze_stub_frame(frame& f, frame& caller) {
1345   DEBUG_ONLY(frame fsender = sender(f);)
1346   assert(fsender.is_compiled_frame(), "sender should be compiled frame");
1347 
1348   intptr_t* const stack_frame_top = ContinuationHelper::StubFrame::frame_top(f);
1349   const int fsize = f.cb()->frame_size();
1350 
1351   log_develop_trace(continuations)("recurse_freeze_stub_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1352     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1353 
1354   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::StubFrame>(f, caller, fsize, 0);
1355   if (UNLIKELY(result > freeze_ok_bottom)) {
1356     return result;
1357   }
1358 
1359   assert(result == freeze_ok, "should have caller");
1360   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, false /*is_bottom_frame*/);)
1361 
1362   frame hf = new_heap_frame<ContinuationHelper::StubFrame>(f, caller);
1363   intptr_t* heap_frame_top = ContinuationHelper::StubFrame::frame_top(hf);
1364 
1365   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1366 
1367   patch(f, hf, caller, false /*is_bottom_frame*/);
1368 
1369   DEBUG_ONLY(after_freeze_java_frame(hf, false /*is_bottom_frame*/);)
1370 
1371   caller = hf;
1372   return freeze_ok;
1373 }
1374 
1375 NOINLINE freeze_result FreezeBase::recurse_freeze_native_frame(frame& f, frame& caller) {
1376   if (!f.cb()->as_nmethod()->method()->is_object_wait0()) {
1377     assert(f.cb()->as_nmethod()->method()->is_synchronized(), "");
1378     // Synchronized native method case. Unlike the interpreter native wrapper, the compiled
1379     // native wrapper tries to acquire the monitor after marshalling the arguments from the
1380     // caller into the native convention. This is so that we have a valid oopMap in case of
1381     // having to block in the slow path. But that would require freezing those registers too
1382     // and then fixing them back on thaw in case of oops. To avoid complicating things and
1383     // given that this would be a rare case anyway, just pin the vthread to the carrier.
1384     return freeze_pinned_native;
1385   }
1386 
1387   intptr_t* const stack_frame_top = ContinuationHelper::NativeFrame::frame_top(f);
1388   // There are no stack args, but argsize must include the metadata
1389   const int argsize = frame::metadata_words_at_top;
1390   const int fsize = f.cb()->frame_size() + argsize;
1391 
1392   log_develop_trace(continuations)("recurse_freeze_native_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1393     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1394 
1395   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::NativeFrame>(f, caller, fsize, argsize);
1396   if (UNLIKELY(result > freeze_ok_bottom)) {
1397     return result;
1398   }
1399 
1400   assert(result == freeze_ok, "should have caller frame");
1401   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, false /* is_bottom_frame */);)
1402 
1403   frame hf = new_heap_frame<ContinuationHelper::NativeFrame>(f, caller);
1404   intptr_t* heap_frame_top = ContinuationHelper::NativeFrame::frame_top(hf);
1405 
1406   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1407 
1408   if (caller.is_interpreted_frame()) {
1409     // When thawing the frame we might need to add alignment (see Thaw::align)
1410     _total_align_size += frame::align_wiggle;
1411   }
1412 
1413   patch(f, hf, caller, false /* is_bottom_frame */);
1414 
1415   DEBUG_ONLY(after_freeze_java_frame(hf, false /* is_bottom_frame */);)
1416 
1417   caller = hf;
1418   return freeze_ok;
1419 }
1420 
1421 NOINLINE void FreezeBase::finish_freeze(const frame& f, const frame& top) {
1422   stackChunkOop chunk = _cont.tail();
1423 
1424   LogTarget(Trace, continuations) lt;
1425   if (lt.develop_is_enabled()) {
1426     LogStream ls(lt);
1427     assert(top.is_heap_frame(), "should be");
1428     top.print_on(&ls);
1429   }
1430 
1431   set_top_frame_metadata_pd(top);
1432 
1433   chunk->set_sp(chunk->to_offset(top.sp()));
1434   chunk->set_pc(top.pc());
1435 
1436   chunk->set_max_thawing_size(chunk->max_thawing_size() + _total_align_size);
1437 
1438   assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "clash with lockstack");
1439 
1440   // At this point the chunk is consistent
1441 
1442   if (UNLIKELY(_barriers)) {
1443     log_develop_trace(continuations)("do barriers on old chunk");
1444     // Serial and Parallel GC can allocate objects directly into the old generation.
1445     // In that case we want to relativize the derived pointers eagerly so that
1446     // old chunks are always in GC mode.
1447     assert(!UseG1GC, "G1 cannot deal with allocating outside of eden");
1448     assert(!UseZGC, "ZGC cannot deal with allocating chunks visible to marking");
1449     if (UseShenandoahGC) {
1450       _cont.tail()->relativize_derived_pointers_concurrently();
1451     } else {
1452       ContinuationGCSupport::transform_stack_chunk(_cont.tail());
1453     }
1454     // For objects in the old generation we must maintain the remembered set
1455     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>();
1456   }
1457 
1458   log_develop_trace(continuations)("finish_freeze: has_mixed_frames: %d", chunk->has_mixed_frames());
1459   if (lt.develop_is_enabled()) {
1460     LogStream ls(lt);
1461     chunk->print_on(true, &ls);
1462   }
1463 
1464   if (lt.develop_is_enabled()) {
1465     LogStream ls(lt);
1466     ls.print_cr("top hframe after (freeze):");
1467     assert(_cont.last_frame().is_heap_frame(), "should be");
1468     _cont.last_frame().print_on(&ls);
1469     DEBUG_ONLY(print_frame_layout(top, false, &ls);)
1470   }
1471 
1472   assert(_cont.chunk_invariant(), "");
1473 }
1474 
1475 inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive native code
1476   JavaThread* t = !_preempt ? _thread : JavaThread::current();
1477   assert(t == JavaThread::current(), "");
1478   if (os::current_stack_pointer() < t->stack_overflow_state()->shadow_zone_safe_limit()) {
1479     if (!_preempt) {
1480       ContinuationWrapper::SafepointOp so(t, _cont); // could also call _cont.done() instead
1481       Exceptions::_throw_msg(t, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Stack overflow while freezing");
1482     }
1483     return true;
1484   }
1485   return false;
1486 }
1487 
1488 class StackChunkAllocator : public MemAllocator {
1489   const size_t                                 _stack_size;
1490   int                                          _argsize_md;
1491   ContinuationWrapper&                         _continuation_wrapper;
1492   JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector;
1493   mutable bool                                 _took_slow_path;
1494 
1495   // Does the minimal amount of initialization needed for a TLAB allocation.
1496   // We don't need to do a full initialization, as such an allocation need not be immediately walkable.
1497   virtual oop initialize(HeapWord* mem) const override {
1498     assert(_stack_size > 0, "");
1499     assert(_stack_size <= max_jint, "");
1500     assert(_word_size > _stack_size, "");
1501 
1502     // zero out fields (but not the stack)
1503     const size_t hs = oopDesc::header_size();
1504     if (oopDesc::has_klass_gap()) {
1505       oopDesc::set_klass_gap(mem, 0);
1506     }
1507     Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);
1508 
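    // A new chunk starts out empty: sp == bottom, with the bottom offset
    // reserving room for the bottom-most frame's stack args and metadata.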
1509     int bottom = (int)_stack_size - _argsize_md;
1510 
1511     jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
1512     jdk_internal_vm_StackChunk::set_bottom(mem, bottom);
1513     jdk_internal_vm_StackChunk::set_sp(mem, bottom);
1514 
1515     return finish(mem);
1516   }
1517 
1518   stackChunkOop allocate_fast() const {
1519     if (!UseTLAB) {
1520       return nullptr;
1521     }
1522 
1523     HeapWord* const mem = MemAllocator::mem_allocate_inside_tlab_fast();
1524     if (mem == nullptr) {
1525       return nullptr;
1526     }
1527 
1528     oop obj = initialize(mem);
1529     return stackChunkOopDesc::cast(obj);
1530   }
1531 
1532 public:
1533   StackChunkAllocator(Klass* klass,
1534                       size_t word_size,
1535                       Thread* thread,
1536                       size_t stack_size,
1537                       int argsize_md,
1538                       ContinuationWrapper& continuation_wrapper,
1539                       JvmtiSampledObjectAllocEventCollector* jvmti_event_collector)
1540     : MemAllocator(klass, word_size, thread),
1541       _stack_size(stack_size),
1542       _argsize_md(argsize_md),
1543       _continuation_wrapper(continuation_wrapper),
1544       _jvmti_event_collector(jvmti_event_collector),
1545       _took_slow_path(false) {}
1546 
1547   // Provides its own specialized allocation which skips instrumentation
1548   // if the memory can be allocated without going to a slow-path.
1549   stackChunkOop allocate() const {
1550     // First try to allocate without any slow-paths or instrumentation.
1551     stackChunkOop obj = allocate_fast();
1552     if (obj != nullptr) {
1553       return obj;
1554     }
1555 
1556     // Now try full-blown allocation with all expensive operations,
1557     // including potentially safepoint operations.
1558     _took_slow_path = true;
1559 
1560     // Protect unhandled Loom oops
1561     ContinuationWrapper::SafepointOp so(_thread, _continuation_wrapper);
1562 
1563     // Can safepoint
1564     _jvmti_event_collector->start();
1565 
1566     // Can safepoint
1567     return stackChunkOopDesc::cast(MemAllocator::allocate());
1568   }
1569 
1570   bool took_slow_path() const {
1571     return _took_slow_path;
1572   }
1573 };
1574 
1575 template <typename ConfigT>
1576 stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size, int argsize_md) {
1577   log_develop_trace(continuations)("allocate_chunk allocating new chunk");
1578 
1579   InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass());
1580   size_t size_in_words = klass->instance_size(stack_size);
1581 
1582   if (CollectedHeap::stack_chunk_max_size() > 0 && size_in_words >= CollectedHeap::stack_chunk_max_size()) {
1583     if (!_preempt) {
1584       throw_stack_overflow_on_humongous_chunk();
1585     }
1586     return nullptr;
1587   }
1588 
1589   JavaThread* current = _preempt ? JavaThread::current() : _thread;
1590   assert(current == JavaThread::current(), "should be current");
1591 
1592   // Allocate the chunk.
1593   //
1594   // This might safepoint while allocating, but all safepointing due to
1595   // instrumentation has been deferred. This property is important for
1596   // some GCs, as this ensures that the allocated object is in the young
1597   // generation / newly allocated memory.
1598   StackChunkAllocator allocator(klass, size_in_words, current, stack_size, argsize_md, _cont, _jvmti_event_collector);
1599   stackChunkOop chunk = allocator.allocate();
1600 
1601   if (chunk == nullptr) {
1602     return nullptr; // OOME
1603   }
1604 
1605   // assert that chunk is properly initialized
1606   assert(chunk->stack_size() == (int)stack_size, "");
1607   assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size);
1608   assert(chunk->sp() == chunk->bottom(), "");
1609   assert((intptr_t)chunk->start_address() % 8 == 0, "");
1610   assert(chunk->max_thawing_size() == 0, "");
1611   assert(chunk->pc() == nullptr, "");
1612   assert(chunk->is_empty(), "");
1613   assert(chunk->flags() == 0, "");
1614   assert(chunk->is_gc_mode() == false, "");
1615   assert(chunk->lockstack_size() == 0, "");
1616 
1617   // fields are uninitialized
1618   chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk());
1619   chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation());
1620 
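  // Determine whether the chunk needs GC barriers. This is collector-specific;
  // in the generic case it depends on whether the allocation could have landed
  // outside the young generation.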
1621 #if INCLUDE_ZGC
1622   if (UseZGC) {
1623     ZStackChunkGCData::initialize(chunk);
1624     assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation");
1625     _barriers = false;
1626   } else
1627 #endif
1628 #if INCLUDE_SHENANDOAHGC
1629   if (UseShenandoahGC) {
1630     _barriers = chunk->requires_barriers();
1631   } else
1632 #endif
1633   {
1634     if (!allocator.took_slow_path()) {
1635       // Guaranteed to be in young gen / newly allocated memory
1636       assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation");
1637       _barriers = false;
1638     } else {
1639       // Some GCs could place slow-path allocations directly in the old gen;
1640       // we need to explicitly check whether that was the case.
1641       _barriers = chunk->requires_barriers();
1642     }
1643   }
1644 
1645   if (_barriers) {
1646     log_develop_trace(continuations)("allocation requires barriers");
1647   }
1648 
1649   assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1650 
1651   return chunk;
1652 }
1653 
1654 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1655   ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1656   Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1657 }
1658 
1659 #if INCLUDE_JVMTI
1660 static int num_java_frames(ContinuationWrapper& cont) {
1661   ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1662   int count = 0;
1663   for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1664     count += chunk->num_java_frames();
1665   }
1666   return count;
1667 }
1668 
1669 static void invalidate_jvmti_stack(JavaThread* thread) {
1670   if (thread->is_interp_only_mode()) {
1671     JvmtiThreadState *state = thread->jvmti_thread_state();
1672     if (state != nullptr)
1673       state->invalidate_cur_stack_depth();
1674   }
1675 }
1676 
1677 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1678   if (JvmtiExport::can_post_frame_pop()) {
1679     int num_frames = num_java_frames(cont);
1680 
1681     ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1682     JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1683   }
1684   invalidate_jvmti_stack(thread);
1685 }
1686 
1687 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top) {
1688   assert(current->vthread() != nullptr, "must be");
1689 
1690   HandleMarkCleaner hm(current);
1691   Handle vth(current, current->vthread());
1692 
1693   ContinuationWrapper::SafepointOp so(current, cont);
1694 
1695   // Since we might safepoint, set the anchor so that the stack can be walked.
1696   set_anchor(current, top.sp());
1697 
1698   JRT_BLOCK
1699     JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
1700 
1701     if (current->pending_contended_entered_event()) {
1702       JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1703       current->set_contended_entered_monitor(nullptr);
1704     }
1705   JRT_BLOCK_END
1706 
1707   clear_anchor(current);
1708 }
1709 #endif // INCLUDE_JVMTI
1710 
1711 #ifdef ASSERT
1712 // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1713 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1714 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1715 bool FreezeBase::check_valid_fast_path() {
1716   ContinuationEntry* ce = _thread->last_continuation();
1717   RegisterMap map(_thread,
1718                   RegisterMap::UpdateMap::skip,
1719                   RegisterMap::ProcessFrames::skip,
1720                   RegisterMap::WalkContinuation::skip);
1721   map.set_include_argument_oops(false);
1722   bool is_top_frame = true;
1723   for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1724     if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1725       return false;
1726     }
1727   }
1728   return true;
1729 }
1730 #endif // ASSERT
1731 
1732 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1733   verify_continuation(cont.continuation());
1734   assert(!cont.is_empty(), "");
1735 
1736   log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1737   return freeze_ok;
1738 }
1739 
1740 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1741   if (UNLIKELY(res != freeze_ok)) {
1742     JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1743     verify_continuation(cont.continuation());
1744     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1745     return res;
1746   }
1747 
1748   JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1749   return freeze_epilog(cont);
1750 }
1751 
1752 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1753   if (UNLIKELY(res != freeze_ok)) {
1754     verify_continuation(cont.continuation());
1755     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1756     return res;
1757   }
1758 
1759   patch_return_pc_with_preempt_stub(old_last_frame);
1760   cont.tail()->set_preempted(true);
1761 
1762   return freeze_epilog(cont);
1763 }
1764 
1765 template<typename ConfigT, bool preempt>
1766 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1767   assert(!current->has_pending_exception(), "");
1768 
1769 #ifdef ASSERT
1770   log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1771   log_frames(current);
1772 #endif
1773 
1774   CONT_JFR_ONLY(EventContinuationFreeze event;)
1775 
1776   ContinuationEntry* entry = current->last_continuation();
1777 
1778   oop oopCont = entry->cont_oop(current);
1779   assert(oopCont == current->last_continuation()->cont_oop(current), "");
1780   assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1781 
1782   verify_continuation(oopCont);
1783   ContinuationWrapper cont(current, oopCont);
1784   log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1785 
1786   assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1787 
1788   assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
1789          "Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1790 
1791   if (entry->is_pinned() || current->held_monitor_count() > 0) {
1792     log_develop_debug(continuations)("PINNED due to critical section/hold monitor");
1793     verify_continuation(cont.continuation());
1794     freeze_result res = entry->is_pinned() ? freeze_pinned_cs : freeze_pinned_monitor;
1795     if (!preempt) {
1796       JFR_ONLY(current->set_last_freeze_fail_result(res);)
1797     }
1798     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1799     // Avoid Thread.yield() loops without safepoint polls.
1800     if (SafepointMechanism::should_process(current) && !preempt) {
1801       cont.done(); // allow safepoint
1802       ThreadInVMfromJava tivmfj(current);
1803     }
1804     return res;
1805   }
1806 
1807   Freeze<ConfigT> freeze(current, cont, sp, preempt);
1808 
1809   assert(!current->cont_fastpath() || freeze.check_valid_fast_path(), "");
1810   bool fast = UseContinuationFastPath && current->cont_fastpath();
1811   if (fast && freeze.size_if_fast_freeze_available() > 0) {
1812     freeze.freeze_fast_existing_chunk();
1813     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1814     return !preempt ? freeze_epilog(cont) : preempt_epilog(cont, freeze_ok, freeze.last_frame());
1815   }
1816 
1817   if (preempt) {
1818     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1819     freeze.set_jvmti_event_collector(&jsoaec);
1820 
1821     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1822 
1823     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1824     preempt_epilog(cont, res, freeze.last_frame());
1825     return res;
1826   }
1827 
1828   log_develop_trace(continuations)("chunk unavailable; transitioning to VM");
1829   assert(current == JavaThread::current(), "must be current thread");
1830   JRT_BLOCK
1831     // delays a possible JvmtiSampledObjectAllocEventCollector in alloc_chunk
1832     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1833     freeze.set_jvmti_event_collector(&jsoaec);
1834 
1835     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1836 
1837     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1838     freeze_epilog(current, cont, res);
1839     cont.done(); // allow safepoint in the transition back to Java
1840     return res;
1841   JRT_BLOCK_END
1842 }
1843 
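// Walks the frames of the current continuation(s) up to the one matching
// cont_scope, checking for conditions that pin the continuation to its carrier:
// an active critical section, held monitors, or native frames.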
1844 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) {
1845   ContinuationEntry* entry = thread->last_continuation();
1846   if (entry == nullptr) {
1847     return freeze_ok;
1848   }
1849   if (entry->is_pinned()) {
1850     return freeze_pinned_cs;
1851   } else if (thread->held_monitor_count() > 0) {
1852     return freeze_pinned_monitor;
1853   }
1854 
1855   RegisterMap map(thread,
1856                   RegisterMap::UpdateMap::include,
1857                   RegisterMap::ProcessFrames::skip,
1858                   RegisterMap::WalkContinuation::skip);
1859   map.set_include_argument_oops(false);
1860   frame f = thread->last_frame();
1861 
1862   if (!safepoint) {
1863     f = f.sender(&map); // this is the yield frame
1864   } else { // safepoint yield
1865 #if (defined(X86) || defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
1866     f.set_fp(f.real_fp()); // Instead of this, maybe in ContinuationWrapper::set_last_frame always use the real_fp?
1867 #else
1868     Unimplemented();
1869 #endif
1870     if (!Interpreter::contains(f.pc())) {
1871       assert(ContinuationHelper::Frame::is_stub(f.cb()), "must be");
1872       assert(f.oop_map() != nullptr, "must be");
1873       f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
1874     }
1875   }
1876 
1877   while (true) {
1878     if ((f.is_interpreted_frame() && f.interpreter_frame_method()->is_native()) || f.is_native_frame()) {
1879       return freeze_pinned_native;
1880     }
1881 
1882     f = f.sender(&map);
1883     if (!Continuation::is_frame_in_continuation(entry, f)) {
1884       oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop(thread));
1885       if (scope == cont_scope) {
1886         break;
1887       }
1888       intx monitor_count = entry->parent_held_monitor_count();
1889       entry = entry->parent();
1890       if (entry == nullptr) {
1891         break;
1892       }
1893       if (entry->is_pinned()) {
1894         return freeze_pinned_cs;
1895       } else if (monitor_count > 0) {
1896         return freeze_pinned_monitor;
1897       }
1898     }
1899   }
1900   return freeze_ok;
1901 }
1902 
1903 /////////////// THAW ////
1904 
1905 static int thaw_size(stackChunkOop chunk) {
1906   int size = chunk->max_thawing_size();
1907   size += frame::metadata_words; // For the top pc+fp in push_return_frame or top = stack_sp - frame::metadata_words in thaw_fast
1908   size += 2*frame::align_wiggle; // in case of alignments at the top and bottom
1909   return size;
1910 }
1911 
1912 // make room on the stack for thaw
1913 // returns the size in bytes, or 0 on failure
1914 static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier) {
1915   log_develop_trace(continuations)("~~~~ prepare_thaw return_barrier: %d", return_barrier);
1916 
1917   assert(thread == JavaThread::current(), "");
1918 
1919   ContinuationEntry* ce = thread->last_continuation();
1920   assert(ce != nullptr, "");
1921   oop continuation = ce->cont_oop(thread);
1922   assert(continuation == get_continuation(thread), "");
1923   verify_continuation(continuation);
1924 
1925   stackChunkOop chunk = jdk_internal_vm_Continuation::tail(continuation);
1926   assert(chunk != nullptr, "");
1927 
1928   // The tail can be empty because it might still be available for another freeze.
1929   // However, here we want to thaw, so we get rid of it (it will be GCed).
1930   if (UNLIKELY(chunk->is_empty())) {
1931     chunk = chunk->parent();
1932     assert(chunk != nullptr, "");
1933     assert(!chunk->is_empty(), "");
1934     jdk_internal_vm_Continuation::set_tail(continuation, chunk);
1935   }
1936 
1937   // Verification
1938   chunk->verify();
1939   assert(chunk->max_thawing_size() > 0, "chunk invariant violated; expected to not be empty");
1940 
1941   // Only make space for the last chunk because we only thaw from the last chunk
1942   int size = thaw_size(chunk) << LogBytesPerWord;
1943 
1944   const address bottom = (address)thread->last_continuation()->entry_sp();
1945   // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
1946   // for the Java frames in the check below.
1947   if (!stack_overflow_check(thread, size + 300, bottom)) {
1948     return 0;
1949   }
1950 
1951   log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
1952                               p2i(bottom), p2i(bottom - size), size);
1953   return size;
1954 }
1955 
1956 class ThawBase : public StackObj {
1957 protected:
1958   JavaThread* _thread;
1959   ContinuationWrapper& _cont;
1960   CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
1961 
1962   intptr_t* _fastpath;
1963   bool _barriers;
1964   bool _preempted_case;
1965   intptr_t* _top_unextended_sp_before_thaw;
1966   int _align_size;
1967   DEBUG_ONLY(intptr_t* _top_stack_address);
1968 
1969   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1970 
1971   NOT_PRODUCT(int _frames;)
1972 
1973 protected:
1974   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1975       _thread(thread), _cont(cont),
1976       _fastpath(nullptr) {
1977     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1978     assert(cont.tail() != nullptr, "no last chunk");
1979     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1980   }
1981 
1982   void clear_chunk(stackChunkOop chunk);
1983   template<bool check_stub>
1984   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1985   int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, int &argsize);
1986   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1987 
1988   void thaw_lockstack(stackChunkOop chunk);
1989 
1990   // fast path
1991   inline void prefetch_chunk_pd(void* start, int size_words);
1992   void patch_return(intptr_t* sp, bool is_last);
1993 
1994   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1995   inline intptr_t* push_cleanup_continuation();
1996   void throw_interrupted_exception(JavaThread* current, frame& top);
1997 
1998   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1999   void finish_thaw(frame& f);
2000 
2001 private:
2002   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2003   void finalize_thaw(frame& entry, int argsize);
2004 
2005   inline bool seen_by_gc();
2006 
2007   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2008   inline void after_thaw_java_frame(const frame& f, bool bottom);
2009   inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
2010   void clear_bitmap_bits(address start, address end);
2011 
2012   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
2013   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2014   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2015   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2016 
2017   void push_return_frame(frame& f);
2018   inline frame new_entry_frame();
2019   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
2020   inline void patch_pd(frame& f, const frame& sender);
2021   inline void patch_pd(frame& f, intptr_t* caller_sp);
2022   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2023 
2024   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2025 
2026   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2027 
2028  public:
2029   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2030 };
2031 
2032 template <typename ConfigT>
2033 class Thaw : public ThawBase {
2034 public:
2035   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2036 
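  // The fast path copies the frames back to the thread's stack in bulk, without
  // processing individual frames; it is only valid when the thread's state allows
  // it, no GC barriers are required, and no slow-path condition is set on the chunk.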
2037   inline bool can_thaw_fast(stackChunkOop chunk) {
2038     return    !_barriers
2039            &&  _thread->cont_fastpath_thread_state()
2040            && !chunk->has_thaw_slowpath_condition()
2041            && !PreserveFramePointer;
2042   }
2043 
2044   inline intptr_t* thaw(Continuation::thaw_kind kind);
2045   template<bool check_stub = false>
2046   NOINLINE intptr_t* thaw_fast(stackChunkOop chunk);
2047   NOINLINE intptr_t* thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind);
2048   inline void patch_caller_links(intptr_t* sp, intptr_t* bottom);
2049 };
2050 
2051 template <typename ConfigT>
2052 inline intptr_t* Thaw<ConfigT>::thaw(Continuation::thaw_kind kind) {
2053   verify_continuation(_cont.continuation());
2054   assert(!jdk_internal_vm_Continuation::done(_cont.continuation()), "");
2055   assert(!_cont.is_empty(), "");
2056 
2057   stackChunkOop chunk = _cont.tail();
2058   assert(chunk != nullptr, "guaranteed by prepare_thaw");
2059   assert(!chunk->is_empty(), "guaranteed by prepare_thaw");
2060 
2061   _barriers = chunk->requires_barriers();
2062   return (LIKELY(can_thaw_fast(chunk))) ? thaw_fast(chunk)
2063                                         : thaw_slow(chunk, kind);
2064 }
2065 
2066 class ReconstructedStack : public StackObj {
2067   intptr_t* _base;  // _cont.entrySP(); // top of the entry frame
2068   int _thaw_size;
2069   int _argsize;
2070 public:
2071   ReconstructedStack(intptr_t* base, int thaw_size, int argsize)
2072   : _base(base), _thaw_size(thaw_size - (argsize == 0 ? frame::metadata_words_at_top : 0)), _argsize(argsize) {
2073     // The only possible source of misalignment is stack-passed arguments b/c compiled frames are 16-byte aligned.
2074     assert(argsize != 0 || (_base - _thaw_size) == ContinuationHelper::frame_align_pointer(_base - _thaw_size), "");
2075     // We're at most one alignment word away from entrySP
2076     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2077   }
2078 
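  // Addresses grow downwards from _base (the entry frame's sp): sp() is the sp of
  // the topmost thawed frame, bottom_sp() the sp of the bottom-most one (whose
  // stack args, if any, overlap the entry frame), and top()/total_size() extend
  // sp() by metadata_words_at_bottom to cover the metadata copied with the frames.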
2079   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2080 
2081   // top and bottom stack pointers
2082   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2083   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2084 
2085   // several operations operate on the totality of the stack being reconstructed,
2086   // including the metadata words
2087   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2088   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2089 };
2090 
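// Marks the chunk empty; by convention sp == bottom means no frames remain.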
2091 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2092   chunk->set_sp(chunk->bottom());
2093   chunk->set_max_thawing_size(0);
2094 }
2095 
2096 int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, int &argsize) {
2097   intptr_t* top = f.sp();
2098 
2099   while (f.cb()->as_nmethod()->needs_stack_repair()) {
2100     f.next(SmallRegisterMap::instance(), false /* stop */);
2101   }
2102   assert(!f.is_done(), "");
2103   assert(f.is_compiled(), "");
2104 
2105   intptr_t* bottom = f.sp() + f.cb()->frame_size();
2106   argsize = f.stack_argsize();
2107   return bottom - top;
2108 }
2109 
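// Removes the top frame(s) from the chunk, updating its sp/pc/max_thawing_size,
// and returns the size in words needed to store them on the thread's stack,
// including the caller-overlapping argument area and the top metadata. With
// check_stub, a top stub frame is removed together with its compiled caller.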
2110 template<bool check_stub>
2111 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2112   bool empty = false;
2113   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2114   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2115   assert(chunk_sp == f.sp(), "");
2116   assert(chunk_sp == f.unextended_sp(), "");
2117 
2118   int frame_size = f.cb()->frame_size();
2119   argsize = f.stack_argsize();
2120 
2121   assert(!f.is_stub() || check_stub, "");
2122   if (check_stub && f.is_stub()) {
2123     // If we don't thaw the top compiled frame too, then after restoring the saved
2124     // registers back in Java we would hit the return barrier to thaw one more
2125     // frame, effectively overwriting the restored registers during that call.
2126     f.next(SmallRegisterMap::instance(), true /* stop */);
2127     assert(!f.is_done(), "");
2128 
2129     f.get_cb();
2130     assert(f.is_compiled(), "");
2131     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2132       // The caller of the runtime stub when the continuation is preempted is not at a
2133       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2134       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2135       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2136     }
2137 
2138     if (f.cb()->as_nmethod()->needs_stack_repair()) {
2139       frame_size += remove_scalarized_frames(f, argsize);
2140     } else {
2141       frame_size += f.cb()->frame_size();
2142       argsize = f.stack_argsize();
2143     }
2144   } else if (f.cb()->as_nmethod()->needs_stack_repair()) {
2145     frame_size = remove_scalarized_frames(f, argsize);
2146   }
2147 
2148   f.next(SmallRegisterMap::instance(), true /* stop */);
2149   empty = f.is_done();
2150   assert(!empty || argsize == chunk->argsize(), "");
2151 
2152   if (empty) {
2153     clear_chunk(chunk);
2154   } else {
2155     chunk->set_sp(chunk->sp() + frame_size);
2156     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2157     // We set chunk->pc to the return pc into the next frame
2158     chunk->set_pc(f.pc());
2159 #ifdef ASSERT
2160     {
2161       intptr_t* retaddr_slot = (chunk_sp
2162                                 + frame_size
2163                                 - frame::sender_sp_ret_address_offset());
2164       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2165              "unexpected pc");
2166     }
2167 #endif
2168   }
2169   assert(empty == chunk->is_empty(), "");
2170   // returns the size required to store the frame on stack, and because it is a
2171   // compiled frame, it must include a copy of the arguments passed by the caller
2172   return frame_size + argsize + frame::metadata_words_at_top;
2173 }
2174 
2175 void ThawBase::thaw_lockstack(stackChunkOop chunk) {
2176   int lockStackSize = chunk->lockstack_size();
2177   assert(lockStackSize > 0 && lockStackSize <= LockStack::CAPACITY, "");
2178 
2179   oop tmp_lockstack[LockStack::CAPACITY];
2180   chunk->transfer_lockstack(tmp_lockstack, _barriers);
2181   _thread->lock_stack().move_from_address(tmp_lockstack, lockStackSize);
2182 
2183   chunk->set_lockstack_size(0);
2184   chunk->set_has_lockstack(false);
2185 }
2186 
2187 void ThawBase::copy_from_chunk(intptr_t* from, intptr_t* to, int size) {
2188   assert(to >= _top_stack_address, "overwrote past thawing space"
2189     " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(to), p2i(_top_stack_address));
2190   assert(to + size <= _cont.entrySP(), "overwrote past thawing space");
2191   _cont.tail()->copy_from_chunk_to_stack(from, to, size);
2192   CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)
2193 }
2194 
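// Patches the return-address slot of the bottom-most thawed frame: with the
// return barrier if more frames remain to be thawed, or with the continuation
// entry's pc if this was the last one.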
2195 void ThawBase::patch_return(intptr_t* sp, bool is_last) {
2196   log_develop_trace(continuations)("thaw_fast patching -- sp: " INTPTR_FORMAT, p2i(sp));
2197 
2198   address pc = !is_last ? StubRoutines::cont_returnBarrier() : _cont.entryPC();
2199   ContinuationHelper::patch_return_address_at(
2200     sp - frame::sender_sp_ret_address_offset(),
2201     pc);
2202 }
2203 
2204 template <typename ConfigT>
2205 template<bool check_stub>
2206 NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) {
2207   assert(chunk == _cont.tail(), "");
2208   assert(!chunk->has_mixed_frames(), "");
2209   assert(!chunk->requires_barriers(), "");
2210   assert(!chunk->has_bitmap(), "");
2211   assert(!_thread->is_interp_only_mode(), "");
2212 
2213   LogTarget(Trace, continuations) lt;
2214   if (lt.develop_is_enabled()) {
2215     LogStream ls(lt);
2216     ls.print_cr("thaw_fast");
2217     chunk->print_on(true, &ls);
2218   }
2219 
2220   // Below this threshold we thaw the whole chunk; above it we thaw just one frame.
2221   static const int threshold = 500; // words
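  // In the partial case the return barrier installed by patch_return will thaw
  // the remaining frames on demand.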
2222 
2223   const int full_chunk_size = chunk->stack_size() - chunk->sp(); // this initial size could be reduced if it's a partial thaw
2224   int argsize, thaw_size;
2225 
2226   intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();
2227 
2228   bool partial, empty;
2229   if (LIKELY(!TEST_THAW_ONE_CHUNK_FRAME && (full_chunk_size < threshold))) {
2230     prefetch_chunk_pd(chunk->start_address(), full_chunk_size); // prefetch anticipating memcpy starting at highest address
2231 
2232     partial = false;
2233     argsize = chunk->argsize(); // must be called *before* clearing the chunk
2234     clear_chunk(chunk);
2235     thaw_size = full_chunk_size;
2236     empty = true;
2237   } else { // thaw a single frame
2238     partial = true;
2239     thaw_size = remove_top_compiled_frame_from_chunk<check_stub>(chunk, argsize);
2240     empty = chunk->is_empty();
2241   }
2242 
2243   // Are we thawing the last frame(s) in the continuation
2244   const bool is_last = empty && chunk->parent() == nullptr;
2245   assert(!is_last || argsize == 0, "");
2246 
2247   log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT,
2248                               partial, is_last, empty, thaw_size, argsize, p2i(_cont.entrySP()));
2249 
2250   ReconstructedStack rs(_cont.entrySP(), thaw_size, argsize);
2251 
2252   // also copy metadata words at frame bottom
2253   copy_from_chunk(chunk_sp - frame::metadata_words_at_bottom, rs.top(), rs.total_size());
2254 
2255   // update the ContinuationEntry
2256   _cont.set_argsize(argsize);
2257   log_develop_trace(continuations)("setting entry argsize: %d", _cont.argsize());
2258   assert(rs.bottom_sp() == _cont.entry()->bottom_sender_sp(), "");
2259 
2260   // install the return barrier if not last frame, or the entry's pc if last
2261   patch_return(rs.bottom_sp(), is_last);
2262 
2263   // insert the back links from callee to caller frames
2264   patch_caller_links(rs.top(), rs.top() + rs.total_size());
2265 
2266   assert(is_last == _cont.is_empty(), "");
2267   assert(_cont.chunk_invariant(), "");
2268 
2269 #if CONT_JFR
2270   EventContinuationThawFast e;
2271   if (e.should_commit()) {
2272     e.set_id(cast_from_oop<u8>(chunk));
2273     e.set_size(thaw_size << LogBytesPerWord);
2274     e.set_full(!partial);
2275     e.commit();
2276   }
2277 #endif
2278 
2279 #ifdef ASSERT
2280   set_anchor(_thread, rs.sp());
2281   log_frames(_thread);
2282   if (LoomDeoptAfterThaw) {
2283     do_deopt_after_thaw(_thread);
2284   }
2285   clear_anchor(_thread);
2286 #endif
2287 
2288   return rs.sp();
2289 }
2290 
2291 inline bool ThawBase::seen_by_gc() {
2292   return _barriers || _cont.tail()->is_gc_mode();
2293 }
2294 
2295 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2296 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2297   if (UseZGC || UseShenandoahGC) {
2298     chunk->relativize_derived_pointers_concurrently();
2299   }
2300 #endif
2301 }
2302 
2303 template <typename ConfigT>
2304 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2305   Continuation::preempt_kind preempt_kind;
2306   bool retry_fast_path = false;
2307 
2308   _preempted_case = chunk->preempted();
2309   if (_preempted_case) {
2310     ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2311     if (waiter != nullptr) {
2312       // Mounted again after preemption. Resume the pending monitor operation,
2313       // which will be either a monitorenter or Object.wait() call.
2314       ObjectMonitor* mon = waiter->monitor();
2315       preempt_kind = waiter->is_wait() ? Continuation::freeze_on_wait : Continuation::freeze_on_monitorenter;
2316 
2317       bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2318       assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2319       if (!mon_acquired) {
2320         // Failed to acquire monitor. Return to enterSpecial to unmount again.
2321         return push_cleanup_continuation();
2322       }
2323       chunk = _cont.tail();  // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2324     } else {
2325       // Preemption was cancelled in the monitorenter case. We actually acquired
2326       // the monitor after freezing all frames, so there is nothing to do.
2327       preempt_kind = Continuation::freeze_on_monitorenter;
2328     }
2329     // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2330     relativize_chunk_concurrently(chunk);
2331     chunk->set_preempted(false);
2332     retry_fast_path = true;
2333   } else {
2334     relativize_chunk_concurrently(chunk);
2335   }
2336 
2337   // On first thaw after freeze restore oops to the lockstack if any.
2338   assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2339   if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2340     thaw_lockstack(chunk);
2341     retry_fast_path = true;
2342   }
2343 
2344   // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2345   // and FLAG_PREEMPTED flags from the stackChunk.
2346   if (retry_fast_path && can_thaw_fast(chunk)) {
2347     intptr_t* sp = thaw_fast<true>(chunk);
2348     if (_preempted_case) {
2349       return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2350     }
2351     return sp;
2352   }
2353 
2354   LogTarget(Trace, continuations) lt;
2355   if (lt.develop_is_enabled()) {
2356     LogStream ls(lt);
2357     ls.print_cr("thaw slow return_barrier: %d " INTPTR_FORMAT, kind, p2i(chunk));
2358     chunk->print_on(true, &ls);
2359   }
2360 
2361 #if CONT_JFR
2362   EventContinuationThawSlow e;
2363   if (e.should_commit()) {
2364     e.set_id(cast_from_oop<u8>(_cont.continuation()));
2365     e.commit();
2366   }
2367 #endif
2368 
2369   DEBUG_ONLY(_frames = 0;)
2370   _align_size = 0;
2371   int num_frames = kind == Continuation::thaw_top ? 2 : 1;
2372 
2373   _stream = StackChunkFrameStream<ChunkFrames::Mixed>(chunk);
2374   _top_unextended_sp_before_thaw = _stream.unextended_sp();
2375 
2376   frame heap_frame = _stream.to_frame();
2377   if (lt.develop_is_enabled()) {
2378     LogStream ls(lt);
2379     ls.print_cr("top hframe before (thaw):");
2380     assert(heap_frame.is_heap_frame(), "should have created a relative frame");
2381     heap_frame.print_value_on(&ls);
2382   }
2383 
2384   frame caller; // the thawed caller on the stack
2385   recurse_thaw(heap_frame, caller, num_frames, _preempted_case);
2386   finish_thaw(caller); // caller is now the topmost thawed frame
2387   _cont.write();
2388 
2389   assert(_cont.chunk_invariant(), "");
2390 
2391   JVMTI_ONLY(invalidate_jvmti_stack(_thread));
2392 
2393   _thread->set_cont_fastpath(_fastpath);
2394 
2395   intptr_t* sp = caller.sp();
2396 
2397   if (_preempted_case) {
2398     return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2399   }
2400   return sp;
2401 }
2402 
2403 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2404   log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2405   assert(!_cont.is_empty(), "no more frames");
2406   assert(num_frames > 0, "");
2407   assert(!heap_frame.is_empty(), "");
2408 
2409   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2410     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2411   } else if (!heap_frame.is_interpreted_frame()) {
2412     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2413   } else {
2414     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2415   }
2416 }
2417 
2418 template<typename FKind>
2419 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2420   assert(num_frames > 0, "");
2421 
2422   DEBUG_ONLY(_frames++;)
2423 
2424   int argsize = _stream.stack_argsize();
2425   CodeBlob* cb = _stream.cb();
2426 
2427   _stream.next(SmallRegisterMap::instance());
2428   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2429 
2430   // We never leave a compiled caller of an interpreted frame as the top frame in the chunk
2431   // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
2432   // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
2433   // - Regardless of whether the frame was extended or not, we would need to copy the right arg
2434   //   size if it's greater than the one given by the normal method signature (non-scalarized).
2435   // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
2436   //   the chunk (we need unextended_sp, but we only have sp).
2437   if (num_frames == 1 && !_stream.is_done() && ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
2438     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2439     num_frames++;
2440   }
2441 
2442   if (num_frames == 1 || _stream.is_done()) { // end recursion
2443     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2444     return true; // bottom
2445   } else { // recurse
2446     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2447     return false;
2448   }
2449 }
2450 
2451 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2452   stackChunkOop chunk = _cont.tail();
2453 
2454   if (!_stream.is_done()) {
2455     assert(_stream.sp() >= chunk->sp_address(), "");
2456     chunk->set_sp(chunk->to_offset(_stream.sp()));
2457     chunk->set_pc(_stream.pc());
2458   } else {
2459     chunk->set_sp(chunk->bottom());
2460     chunk->set_pc(nullptr);
2461   }
2462   assert(_stream.is_done() == chunk->is_empty(), "");
2463 
2464   int total_thawed = pointer_delta_as_int(_stream.unextended_sp(), _top_unextended_sp_before_thaw);
2465   chunk->set_max_thawing_size(chunk->max_thawing_size() - total_thawed);
2466 
2467   _cont.set_argsize(argsize);
2468   entry = new_entry_frame();
2469 
2470   assert(entry.sp() == _cont.entrySP(), "");
2471   assert(Continuation::is_continuation_enterSpecial(entry), "");
2472   assert(_cont.is_entry_frame(entry), "");
2473 }
2474 
2475 inline void ThawBase::before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) {
2476   LogTarget(Trace, continuations) lt;
2477   if (lt.develop_is_enabled()) {
2478     LogStream ls(lt);
2479     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2480     assert(hf.is_heap_frame(), "should be");
2481     hf.print_value_on(&ls);
2482   }
2483   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(caller));
2484 }
2485 
2486 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2487 #ifdef ASSERT
2488   LogTarget(Trace, continuations) lt;
2489   if (lt.develop_is_enabled()) {
2490     LogStream ls(lt);
2491     ls.print_cr("thawed frame:");
2492     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2493   }
2494 #endif
2495 }
2496 
2497 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
2498   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2499   if (bottom) {
2500     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2501                                                                  : StubRoutines::cont_returnBarrier());
2502   } else if (caller.is_compiled_frame()) {
2503     // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2504     // If the caller is not deoptimized, pc is unchanged.
2505     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
2506   }
2507 
2508   patch_pd(f, caller);
2509 
2510   if (f.is_interpreted_frame()) {
2511     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2512   }
2513 
2514   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2515   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2516 }
2517 
2518 void ThawBase::clear_bitmap_bits(address start, address end) {
2519   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2520   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2521 
  // We need to clear the bits that correspond to arguments, since they reside in the
  // caller's frame; otherwise they would keep otherwise-unreachable objects alive.
2524 
  // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
  // `end` could be at an odd number of stack slots from `start`, i.e. it might not be oop aligned.
  // In that case the bit range corresponding to the last stack slot should not have bits set
  // anyway, and we assert that before returning.
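  // Worked example (values hypothetical, !UseCompressedOops): with 4-byte stack
  // slots and an 8-byte wordSize, end == start + 5 slots (20 bytes) is not word
  // aligned; align_down(end, wordSize) yields start + 16, dropping the trailing
  // half-word, whose bit could not mark a full-width oop anyway (asserted below).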
2529   address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2530   log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2531   stackChunkOop chunk = _cont.tail();
2532   chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2533   assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2534 }
2535 
2536 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2537   assert(preempt_kind == Continuation::freeze_on_wait || preempt_kind == Continuation::freeze_on_monitorenter, "");
2538   frame top(sp);
2539   assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2540 
2541 #if INCLUDE_JVMTI
2542   // Finish the VTMS transition.
2543   assert(_thread->is_in_VTMS_transition(), "must be");
2544   bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2545   if (is_vthread) {
2546     if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
2547       jvmti_mount_end(_thread, _cont, top);
2548     } else {
2549       _thread->set_is_in_VTMS_transition(false);
2550       java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
2551     }
2552   }
2553 #endif
2554 
2555   if (fast_case) {
    // If we thawed in the slow path, the runtime stub/native wrapper frame already
    // has the correct fp (see ThawBase::new_stack_frame). On the fast path, though,
    // we copied the fp that was patched during freeze, which now has to be fixed.
2559     assert(top.is_runtime_frame() || top.is_native_frame(), "");
2560     int fsize = top.cb()->frame_size();
2561     patch_pd(top, sp + fsize);
2562   }
2563 
2564   if (preempt_kind == Continuation::freeze_on_wait) {
    // Check now if we need to throw an InterruptedException.
2566     if (_thread->pending_interrupted_exception()) {
2567       throw_interrupted_exception(_thread, top);
2568       _thread->set_pending_interrupted_exception(false);
2569     }
2570   } else if (top.is_runtime_frame()) {
    // The continuation might now run on a different platform thread than the previous time, so
    // we need to adjust the current thread saved in the stub frame before restoring registers.
2573     JavaThread** thread_addr = frame::saved_thread_address(top);
2574     if (thread_addr != nullptr) *thread_addr = _thread;
2575   }
2576   return sp;
2577 }
2578 
2579 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2580   ContinuationWrapper::SafepointOp so(current, _cont);
  // Since we might safepoint, set the anchor so that the stack can be walked.
2582   set_anchor(current, top.sp());
2583   JRT_BLOCK
2584     THROW(vmSymbols::java_lang_InterruptedException());
2585   JRT_BLOCK_END
2586   clear_anchor(current);
2587 }
2588 
2589 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
2590   assert(hf.is_interpreted_frame(), "");
2591 
2592   if (UNLIKELY(seen_by_gc())) {
2593     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2594   }
2595 
2596   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2597 
2598   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2599 
  _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2601 
2602   frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2603 
2604   intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2605   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2606   intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2607   intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2608 
2609   assert(hf.is_heap_frame(), "should be");
2610   assert(!f.is_heap_frame(), "should not be");
2611 
2612   const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2613   assert((stack_frame_bottom == stack_frame_top + fsize), "");
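  // Copy sketch: fsize words are moved from the heap chunk to the stack,
  //   [heap_frame_top, heap_frame_bottom)  -->  [stack_frame_top, stack_frame_bottom)
  // so the locals and expression stack keep the same frame-relative offsets.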
2614 
2615   // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
2616   // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
2617   copy_from_chunk(heap_frame_top, stack_frame_top, fsize);
2618 
  // Make sure the relativized locals are already set.
2620   assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2621 
2622   derelativize_interpreted_frame_metadata(hf, f);
2623   patch(f, caller, is_bottom_frame);
2624 
2625   assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2626   assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2627 
2628   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2629 
2630   maybe_set_fastpath(f.sp());
2631 
2632   Method* m = hf.interpreter_frame_method();
2633   assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2634   const int locals = m->max_locals();
2635 
2636   if (!is_bottom_frame) {
2637     // can only fix caller once this frame is thawed (due to callee saved regs)
2638     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2639   } else if (_cont.tail()->has_bitmap() && locals > 0) {
2640     assert(hf.is_heap_frame(), "should be");
2641     address start = (address)(heap_frame_bottom - locals);
2642     address end = (address)heap_frame_bottom;
2643     clear_bitmap_bits(start, end);
2644   }
2645 
2646   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2647   caller = f;
2648 }
2649 
2650 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2651   assert(hf.is_compiled_frame(), "");
2652   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2653 
2654   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2655     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2656   }
2657 
2658   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2659 
2660   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2661 
2662   assert(caller.sp() == caller.unextended_sp(), "");
2663 
2664   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
    _align_size += frame::align_wiggle; // we add one whether or not we actually aligned, because recurse_freeze_compiled_frame adds it unconditionally
2666   }
2667 
2668   int fsize = 0;
2669   int added_argsize = 0;
2670   bool augmented = hf.was_augmented_on_entry(fsize);
2671   if (!augmented) {
2672     added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2673     fsize += added_argsize;
2674   }
  assert(!is_bottom_frame || !augmented, "");

2678   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2679   // yet laid out in the stack, and so the original_pc is not stored in it.
2680   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2681   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
2682   assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");
2683 
2684   intptr_t* const stack_frame_top = f.sp();
2685   intptr_t* const heap_frame_top = hf.unextended_sp();
2686   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2687   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2688   // copy metadata, except the metadata at the top of the (unextended) entry frame
2689   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2690 
2691   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2692   // (we might have one padding word for alignment)
2693   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
  assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || to + sz == _cont.entrySP(), "");
2695 
2696   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2697 
2698   patch(f, caller, is_bottom_frame, augmented);
2699 
2700   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2701   assert(!f.is_deoptimized_frame(), "");
2702   if (hf.is_deoptimized_frame()) {
2703     maybe_set_fastpath(f.sp());
2704   } else if (_thread->is_interp_only_mode()
2705               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2706     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2707     // cannot rely on nmethod patching for deopt.
2708     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2709 
2710     log_develop_trace(continuations)("Deoptimizing thawed frame");
2711     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2712 
2713     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2714     assert(f.is_deoptimized_frame(), "");
2715     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2716     maybe_set_fastpath(f.sp());
2717   }
2718 
2719   if (!is_bottom_frame) {
2720     // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2721     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2722   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2723     address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2724     int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2725     int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2726     clear_bitmap_bits(start, start + argsize_in_bytes);
2727   }
2728 
2729   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2730   caller = f;
2731 }
2732 
2733 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2734   DEBUG_ONLY(_frames++;)
2735 
2736   if (UNLIKELY(seen_by_gc())) {
2737     // Process the stub's caller here since we might need the full map.
2738     RegisterMap map(nullptr,
2739                     RegisterMap::UpdateMap::include,
2740                     RegisterMap::ProcessFrames::skip,
2741                     RegisterMap::WalkContinuation::skip);
2742     map.set_include_argument_oops(false);
2743     _stream.next(&map);
2744     assert(!_stream.is_done(), "");
2745     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2746   } else {
2747     _stream.next(SmallRegisterMap::instance());
2748     assert(!_stream.is_done(), "");
2749   }
2750 
2751   recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2752 
2753   assert(caller.is_compiled_frame(), "");
2754   assert(caller.sp() == caller.unextended_sp(), "");
2755 
2756   DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2757 
2758   frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2759   intptr_t* stack_frame_top = f.sp();
2760   intptr_t* heap_frame_top = hf.sp();
2761   int fsize = ContinuationHelper::StubFrame::size(hf);
2762 
2763   copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2764                   fsize + frame::metadata_words);
2765 
2766   patch(f, caller, false /*is_bottom_frame*/);
2767 
2768   // can only fix caller once this frame is thawed (due to callee saved regs)
2769   RegisterMap map(nullptr,
2770                   RegisterMap::UpdateMap::include,
2771                   RegisterMap::ProcessFrames::skip,
2772                   RegisterMap::WalkContinuation::skip);
2773   map.set_include_argument_oops(false);
2774   f.oop_map()->update_register_map(&f, &map);
2775   ContinuationHelper::update_register_map_with_callee(caller, &map);
2776   _cont.tail()->fix_thawed_frame(caller, &map);
2777 
2778   DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2779   caller = f;
2780 }
2781 
2782 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2783   assert(hf.is_native_frame(), "");
2784   assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2785 
  if (UNLIKELY(seen_by_gc())) {
2787     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2788   }
2789 
2790   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2791   assert(!is_bottom_frame, "");
2792 
2793   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2794 
2795   assert(caller.sp() == caller.unextended_sp(), "");
2796 
2797   if (caller.is_interpreted_frame()) {
    _align_size += frame::align_wiggle; // we add one whether or not we actually aligned, because recurse_freeze_native_frame adds it unconditionally
2799   }
2800 
2801   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2802   // yet laid out in the stack, and so the original_pc is not stored in it.
2803   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2804   frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2805   intptr_t* const stack_frame_top = f.sp();
2806   intptr_t* const heap_frame_top = hf.unextended_sp();
2807 
2808   int fsize = ContinuationHelper::NativeFrame::size(hf);
2809   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2810 
2811   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2812   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2813   int sz = fsize + frame::metadata_words_at_bottom;
2814 
2815   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2816 
2817   patch(f, caller, false /* bottom */);
2818 
2819   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2820   assert(!f.is_deoptimized_frame(), "");
2821   assert(!hf.is_deoptimized_frame(), "");
2822   assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2823 
2824   // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2825   _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2826 
2827   DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2828   caller = f;
2829 }
2830 
2831 void ThawBase::finish_thaw(frame& f) {
2832   stackChunkOop chunk = _cont.tail();
2833 
2834   if (chunk->is_empty()) {
2835     // Only remove chunk from list if it can't be reused for another freeze
2836     if (seen_by_gc()) {
2837       _cont.set_tail(chunk->parent());
2838     } else {
2839       chunk->set_has_mixed_frames(false);
2840     }
2841     chunk->set_max_thawing_size(0);
2842   } else {
2843     chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2844   }
2845   assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
2846 
2847   if (!is_aligned(f.sp(), frame::frame_alignment)) {
2848     assert(f.is_interpreted_frame(), "");
2849     f.set_sp(align_down(f.sp(), frame::frame_alignment));
2850   }
2851   push_return_frame(f);
2852   chunk->fix_thawed_frame(f, SmallRegisterMap::instance()); // can only fix caller after push_return_frame (due to callee saved regs)
2853 
2854   assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
2855 
2856   log_develop_trace(continuations)("thawed %d frames", _frames);
2857 
2858   LogTarget(Trace, continuations) lt;
2859   if (lt.develop_is_enabled()) {
2860     LogStream ls(lt);
2861     ls.print_cr("top hframe after (thaw):");
2862     _cont.last_frame().print_value_on(&ls);
2863   }
2864 }
2865 
2866 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
2867   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
2868   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
2869 
2870   LogTarget(Trace, continuations) lt;
2871   if (lt.develop_is_enabled()) {
2872     LogStream ls(lt);
2873     ls.print_cr("push_return_frame");
2874     f.print_value_on(&ls);
2875   }
2876 
  assert(f.sp() - frame::metadata_words_at_bottom >= _top_stack_address, "overwrote past thawing space"
    " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(f.sp() - frame::metadata_words_at_bottom), p2i(_top_stack_address));
2879   ContinuationHelper::Frame::patch_pc(f, f.raw_pc()); // in case we want to deopt the frame in a full transition, this is checked.
2880   ContinuationHelper::push_pd(f);
2881 
2882   assert(ContinuationHelper::Frame::assert_frame_laid_out(f), "");
2883 }
2884 
2885 // returns new top sp
2886 // called after preparations (stack overflow check and making room)
2887 template<typename ConfigT>
2888 static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind) {
2889   assert(thread == JavaThread::current(), "Must be current thread");
2890 
2891   CONT_JFR_ONLY(EventContinuationThaw event;)
2892 
2893   log_develop_trace(continuations)("~~~~ thaw kind: %d sp: " INTPTR_FORMAT, kind, p2i(thread->last_continuation()->entry_sp()));
2894 
2895   ContinuationEntry* entry = thread->last_continuation();
2896   assert(entry != nullptr, "");
2897   oop oopCont = entry->cont_oop(thread);
2898 
2899   assert(!jdk_internal_vm_Continuation::done(oopCont), "");
2900   assert(oopCont == get_continuation(thread), "");
2901   verify_continuation(oopCont);
2902 
2903   assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
2904 
2905   ContinuationWrapper cont(thread, oopCont);
2906   log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
2907 
2908 #ifdef ASSERT
2909   set_anchor_to_entry(thread, cont.entry());
2910   log_frames(thread);
2911   clear_anchor(thread);
2912 #endif
2913 
2914   DEBUG_ONLY(bool preempted = cont.tail()->preempted();)
2915   Thaw<ConfigT> thw(thread, cont);
2916   intptr_t* const sp = thw.thaw(kind);
2917   assert(is_aligned(sp, frame::frame_alignment), "");
2918   DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp, preempted);)
2919 
2920   CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
2921 
2922   verify_continuation(cont.continuation());
2923   log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
2924 
2925   return sp;
2926 }
2927 
2928 #ifdef ASSERT
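// Debug-only stress aid: force-deoptimize every nmethod found on the stack,
// skipping the continuation native intrinsics.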
2929 static void do_deopt_after_thaw(JavaThread* thread) {
2931   StackFrameStream fst(thread, true, false);
2932   fst.register_map()->set_include_argument_oops(false);
2933   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
2934   for (; !fst.is_done(); fst.next()) {
2935     if (fst.current()->cb()->is_nmethod()) {
2936       nmethod* nm = fst.current()->cb()->as_nmethod();
2937       if (!nm->method()->is_continuation_native_intrinsic()) {
2938         nm->make_deoptimized();
2939       }
2940     }
2941   }
2942 }
2943 
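// Closure used by do_verify_after_thaw below: flags any visited stack slot whose
// value is neither null nor a valid-looking oop, recording the offending slot
// address for reporting.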
2944 class ThawVerifyOopsClosure: public OopClosure {
2945   intptr_t* _p;
2946   outputStream* _st;
2947   bool is_good_oop(oop o) {
2948     return dbg_is_safe(o, -1) && dbg_is_safe(o->klass(), -1) && oopDesc::is_oop(o) && o->klass()->is_klass();
2949   }
2950 public:
2951   ThawVerifyOopsClosure(outputStream* st) : _p(nullptr), _st(st) {}
2952   intptr_t* p() { return _p; }
2953   void reset() { _p = nullptr; }
2954 
2955   virtual void do_oop(oop* p) {
2956     oop o = *p;
2957     if (o == nullptr || is_good_oop(o)) {
2958       return;
2959     }
2960     _p = (intptr_t*)p;
2961     _st->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(*p), p2i(p));
2962   }
2963   virtual void do_oop(narrowOop* p) {
2964     oop o = RawAccess<>::oop_load(p);
2965     if (o == nullptr || is_good_oop(o)) {
2966       return;
2967     }
2968     _p = (intptr_t*)p;
2969     _st->print_cr("*** (narrow) non-oop %x found at " PTR_FORMAT, (int)(*p), p2i(p));
2970   }
2971 };
2972 
2973 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st) {
2974   assert(thread->has_last_Java_frame(), "");
2975 
2976   ResourceMark rm;
2977   ThawVerifyOopsClosure cl(st);
2978   NMethodToOopClosure cf(&cl, false);
2979 
2980   StackFrameStream fst(thread, true, false);
2981   fst.register_map()->set_include_argument_oops(false);
2982   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
2983   for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) {
2984     if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) {
2985       st->print_cr(">>> do_verify_after_thaw deopt");
2986       fst.current()->deoptimize(nullptr);
2987       fst.current()->print_on(st);
2988     }
2989 
2990     fst.current()->oops_do(&cl, &cf, fst.register_map());
2991     if (cl.p() != nullptr) {
2992       frame fr = *fst.current();
2993       st->print_cr("Failed for frame barriers: %d",chunk->requires_barriers());
2994       fr.print_on(st);
2995       if (!fr.is_interpreted_frame()) {
2996         st->print_cr("size: %d argsize: %d",
2997                      ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
2998                      ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
2999       }
3000       VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
3001       if (reg != nullptr) {
3002         st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
3003       }
3004       cl.reset();
3005       DEBUG_ONLY(thread->print_frame_layout();)
3006       if (chunk != nullptr) {
3007         chunk->print_on(true, st);
3008       }
3009       return false;
3010     }
3011   }
3012   return true;
3013 }
3014 
3015 static void log_frames(JavaThread* thread) {
  static const int show_entry_callers = 3;
3017   LogTarget(Trace, continuations) lt;
3018   if (!lt.develop_is_enabled()) {
3019     return;
3020   }
3021   LogStream ls(lt);
3022 
3023   ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
3024   if (!thread->has_last_Java_frame()) {
3025     ls.print_cr("NO ANCHOR!");
3026   }
3027 
3028   RegisterMap map(thread,
3029                   RegisterMap::UpdateMap::include,
3030                   RegisterMap::ProcessFrames::include,
3031                   RegisterMap::WalkContinuation::skip);
3032   map.set_include_argument_oops(false);
3033 
  if (false) { // developer toggle: change to true for simple per-frame printing instead of the FrameValues layout below
3035     for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
3036       f.print_on(&ls);
3037     }
3038   } else {
3039     map.set_skip_missing(true);
3040     ResetNoHandleMark rnhm;
3041     ResourceMark rm;
3042     HandleMark hm(Thread::current());
3043     FrameValues values;
3044 
3045     int i = 0;
3046     int post_entry = -1;
3047     for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
3048       f.describe(values, i, &map, i == 0);
3049       if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
3050         post_entry++;
3051       if (post_entry >= show_entry_callers)
3052         break;
3053     }
3054     values.print_on(thread, &ls);
3055   }
3056 
3057   ls.print_cr("======= end frames =========");
3058 }
3059 
3060 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted) {
3061   intptr_t* sp0 = sp;
3063 
3064   if (preempted && sp0 == cont.entrySP()) {
3065     // Still preempted (monitor not acquired) so no frames were thawed.
3066     assert(cont.tail()->preempted(), "");
3067     set_anchor(thread, cont.entrySP(), cont.entryPC());
3068   } else {
3069     set_anchor(thread, sp0);
3070   }
3071 
3072   log_frames(thread);
3073   if (LoomVerifyAfterThaw) {
3074     assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
3075   }
3076   assert(ContinuationEntry::assert_entry_frame_laid_out(thread), "");
3077   clear_anchor(thread);
3078 
3079   LogTarget(Trace, continuations) lt;
3080   if (lt.develop_is_enabled()) {
3081     LogStream ls(lt);
3082     ls.print_cr("Jumping to frame (thaw):");
3083     frame(sp).print_value_on(&ls);
3084   }
3085 }
3086 #endif // ASSERT
3087 
3088 #include CPU_HEADER_INLINE(continuationFreezeThaw)
3089 
3090 #ifdef ASSERT
3091 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
3092   ResourceMark rm;
3093   FrameValues values;
3094   assert(f.get_cb() != nullptr, "");
3095   RegisterMap map(f.is_heap_frame() ?
3096                     nullptr :
3097                     JavaThread::current(),
3098                   RegisterMap::UpdateMap::include,
3099                   RegisterMap::ProcessFrames::skip,
3100                   RegisterMap::WalkContinuation::skip);
3101   map.set_include_argument_oops(false);
3102   map.set_skip_missing(true);
3103   if (callee_complete) {
3104     frame::update_map_with_saved_link(&map, ContinuationHelper::Frame::callee_link_address(f));
3105   }
3106   const_cast<frame&>(f).describe(values, 0, &map, true);
3107   values.print_on(static_cast<JavaThread*>(nullptr), st);
3108 }
3109 #endif
3110 
3111 static address thaw_entry   = nullptr;
3112 static address freeze_entry = nullptr;
3113 static address freeze_preempt_entry = nullptr;
3114 
3115 address Continuation::thaw_entry() {
3116   return ::thaw_entry;
3117 }
3118 
3119 address Continuation::freeze_entry() {
3120   return ::freeze_entry;
3121 }
3122 
3123 address Continuation::freeze_preempt_entry() {
3124   return ::freeze_preempt_entry;
3125 }
3126 
3127 class ConfigResolve {
3128 public:
3129   static void resolve() { resolve_compressed(); }
3130 
3131   static void resolve_compressed() {
3132     UseCompressedOops ? resolve_gc<true>()
3133                       : resolve_gc<false>();
3134   }
3135 
3136 private:
3137   template <bool use_compressed>
3138   static void resolve_gc() {
3139     BarrierSet* bs = BarrierSet::barrier_set();
3140     assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set");
3141     switch (bs->kind()) {
3142 #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
3143       case BarrierSet::bs_name: {                                       \
3144         resolve<use_compressed, typename BarrierSet::GetType<BarrierSet::bs_name>::type>(); \
3145       }                                                                 \
3146         break;
3147       FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
3148 #undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
3149 
3150     default:
3151       fatal("BarrierSet resolving not implemented");
3152     };
3153   }
3154 
3155   template <bool use_compressed, typename BarrierSetT>
3156   static void resolve() {
3157     typedef Config<use_compressed ? oop_kind::NARROW : oop_kind::WIDE, BarrierSetT> SelectedConfigT;
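    // Illustrative instantiation (assuming, e.g., G1 with compressed oops): this
    // selects Config<oop_kind::NARROW, G1BarrierSet>, baking the oop size and GC
    // barriers into the freeze/thaw entry points registered below.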
3158 
3159     freeze_entry = (address)freeze<SelectedConfigT>;
3160     freeze_preempt_entry = (address)SelectedConfigT::freeze_preempt;
3161 
3162     // If we wanted, we could templatize by kind and have three different thaw entries
3163     thaw_entry   = (address)thaw<SelectedConfigT>;
3164   }
3165 };
3166 
3167 void Continuation::init() {
3168   ConfigResolve::resolve();
3169 }