/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.inline.hpp"
#include "code/nmethod.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "oops/access.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/continuationHelper.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ZGC
#include "gc/z/zStackChunkGCData.inline.hpp"
#endif

#include <type_traits>

/*
 * This file contains the implementation of continuation freezing (yield) and thawing (run).
 *
 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
 * would likely call these operations many thousands of times per second, on every core.
 *
 * Freeze might be called every time the application performs any I/O operation, every time it
 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
 * multiple times in each of those cases, as it is called by the return barrier, which may be
 * invoked on method return.
 *
 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
 * example, every effort is made to avoid Java-VM transitions as much as possible.
 *
 * On the fast path, all frames are known to be compiled, and the chunk requires no barriers,
 * so the frames are simply copied and the bottom-most one is patched.
 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
 * and absolute pointers, and barriers are invoked.
 */
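
/*
 * A minimal sketch of the fast path described above, with hypothetical names
 * (the real logic lives in FreezeBase::freeze_fast_copy below):
 *
 *   intptr_t* from = cont_stack_top;          // top of the frames being frozen
 *   intptr_t* to   = chunk_top;               // destination inside the stack chunk
 *   copy_words(from, to, cont_size_words);    // one bulk copy, no per-frame work
 *   patch_return_address(bottom_ret_slot, chunk->pc()); // stitch to the chunk's previous top frame
 *   chunk->set_sp(chunk_new_sp);
 */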

/************************************************

Thread-stack layout on freeze/thaw.
See corresponding stack-chunk layout in instanceStackChunkKlass.hpp

            +----------------------------+
            |      .                     |
            |      .                     |
            |      .                     |
            |   carrier frames           |
            |                            |
            |----------------------------|
            |                            |
            |    Continuation.run        |
            |                            |
            |============================|
            |    enterSpecial frame      |
            |  pc                        |
            |  rbp                       |
            |  -----                     |
        ^   |  int argsize               | = ContinuationEntry
        |   |  oopDesc* cont             |
        |   |  oopDesc* chunk            |
        |   |  ContinuationEntry* parent |
        |   |  ...                       |
        |   |============================| <------ JavaThread::_cont_entry = entry->sp()
        |   |  ? alignment word ?        |
        |   |----------------------------| <--\
        |   |                            |    |
        |   |  ? caller stack args ?     |    |   argsize (might not be 2-word aligned) words
Address |   |                            |    |   Caller is still in the chunk.
        |   |----------------------------|    |
        |   |  pc (? return barrier ?)   |    |  This pc contains the return barrier when the bottom-most frame
        |   |  rbp                       |    |  isn't the last one in the continuation.
        |   |                            |    |
        |   |    frame                   |    |
        |   |                            |    |
            +----------------------------|     \__ Continuation frames to be frozen/thawed
            |                            |     /
            |    frame                   |    |
            |                            |    |
            |----------------------------|    |
            |                            |    |
            |    frame                   |    |
            |                            |    |
            |----------------------------| <--/
            |                            |
            |    doYield/safepoint stub  | When preempting forcefully, we could have a safepoint stub
            |                            | instead of a doYield stub
            |============================| <- the sp passed to freeze
            |                            |
            |  Native freeze/thaw frames |
            |      .                     |
            |      .                     |
            |      .                     |
            +----------------------------+

************************************************/

static const bool TEST_THAW_ONE_CHUNK_FRAME = false; // force thawing frames one-at-a-time for testing

#define CONT_JFR false // emit low-level JFR events that count slow/fast path for continuation performance debugging only
#if CONT_JFR
  #define CONT_JFR_ONLY(code) code
#else
  #define CONT_JFR_ONLY(code)
#endif
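
// Example: CONT_JFR_ONLY(_jfr_info.record_size_copied(size);) compiles to the
// enclosed statement only when CONT_JFR is true, and to nothing otherwise.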

// TODO: See AbstractAssembler::generate_stack_overflow_check,
// Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
// when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.

// Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk

// Used just to annotate cold/hot branches
#define LIKELY(condition)   (condition)
#define UNLIKELY(condition) (condition)
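
// On GCC/Clang these could plausibly be branch-prediction hints instead; a
// sketch of what that might look like (not what is compiled here):
//   #define LIKELY(condition)   __builtin_expect(!!(condition), 1)
//   #define UNLIKELY(condition) __builtin_expect(!!(condition), 0)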

// debugging functions
#ifdef ASSERT
extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue

static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }

static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
static void log_frames(JavaThread* thread);
static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);

#define assert_pfl(p, ...) \
do {                                           \
  if (!(p)) {                                  \
    JavaThread* t = JavaThread::active();      \
    if (t->has_last_Java_frame()) {            \
      tty->print_cr("assert(" #p ") failed:"); \
      t->print_frame_layout();                 \
    }                                          \
  }                                            \
  vmassert(p, __VA_ARGS__);                    \
} while(0)

#else
static void verify_continuation(oop continuation) { }
#define assert_pfl(p, ...)
#endif

static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);

static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier);
template<typename ConfigT> static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind);


// Entry point to freeze. Transitions are handled manually.
// Called from gen_continuation_yield() in sharedRuntime_<cpu>.cpp through Continuation::freeze_entry().
template<typename ConfigT>
static JRT_BLOCK_ENTRY(int, freeze(JavaThread* current, intptr_t* sp))
  assert(sp == current->frame_anchor()->last_Java_sp(), "");

  if (current->raw_cont_fastpath() > current->last_continuation()->entry_sp() || current->raw_cont_fastpath() < sp) {
    current->set_cont_fastpath(nullptr);
  }

  return checked_cast<int>(ConfigT::freeze(current, sp));
JRT_END

JRT_LEAF(int, Continuation::prepare_thaw(JavaThread* thread, bool return_barrier))
  return prepare_thaw_internal(thread, return_barrier);
JRT_END

template<typename ConfigT>
static JRT_LEAF(intptr_t*, thaw(JavaThread* thread, int kind))
  // TODO: JRT_LEAF and NoHandleMark are problematic for JFR events.
  // vFrameStreamCommon allocates Handles in RegisterMap for continuations.
  // Also the preemption case with JVMTI events enabled might safepoint, so
  // undo the NoSafepointVerifier here and rely on handling by ContinuationWrapper.
  // JRT_ENTRY instead?
  ResetNoHandleMark rnhm;
  DEBUG_ONLY(PauseNoSafepointVerifier pnsv(&__nsv);)

  // we might modify the code cache via BarrierSetNMethod::nmethod_entry_barrier
  MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));
  return ConfigT::thaw(thread, (Continuation::thaw_kind)kind);
JRT_END

JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
  JavaThread* thread = JavaThread::thread_from_jni_environment(env);
  return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
}
JVM_END

///////////

enum class oop_kind { NARROW, WIDE };
template <oop_kind oops, typename BarrierSetT>
class Config {
public:
  typedef Config<oops, BarrierSetT> SelfT;
  using OopT = std::conditional_t<oops == oop_kind::NARROW, narrowOop, oop>;

  static freeze_result freeze(JavaThread* thread, intptr_t* const sp) {
    freeze_result res = freeze_internal<SelfT, false>(thread, sp);
    JFR_ONLY(assert((res == freeze_ok) || (res == thread->last_freeze_fail_result()), "freeze failure not set"));
    return res;
  }

  static freeze_result freeze_preempt(JavaThread* thread, intptr_t* const sp) {
    return freeze_internal<SelfT, true>(thread, sp);
  }

  static intptr_t* thaw(JavaThread* thread, Continuation::thaw_kind kind) {
    return thaw_internal<SelfT>(thread, kind);
  }
};

#ifdef _WINDOWS
static void map_stack_pages(JavaThread* thread, size_t size, address sp) {
  address new_sp = sp - size;
  address watermark = thread->stack_overflow_state()->shadow_zone_growth_watermark();

  if (new_sp < watermark) {
    size_t page_size = os::vm_page_size();
    address last_touched_page = watermark - StackOverflow::stack_shadow_zone_size();
    size_t pages_to_touch = align_up(watermark - new_sp, page_size) / page_size;
    while (pages_to_touch-- > 0) {
      last_touched_page -= page_size;
      *last_touched_page = 0;
    }
    thread->stack_overflow_state()->set_shadow_zone_growth_watermark(new_sp);
  }
}
#endif

static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
  const size_t page_size = os::vm_page_size();
  if (size > page_size) {
    if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
      return false;
    }
    WINDOWS_ONLY(map_stack_pages(thread, size, sp));
  }
  return true;
}
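
// Illustrative use of stack_overflow_check (a sketch with hypothetical names,
// not an actual call site in this section): before copying size_in_bytes of
// frame data below sp, bail out to a safer path if the copy would reach into
// the shadow zone:
//
//   if (!stack_overflow_check(thread, size_in_bytes, sp)) {
//     return 0; // caller handles this, e.g. by throwing StackOverflowError
//   }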

#ifdef ASSERT
static oop get_continuation(JavaThread* thread) {
  assert(thread != nullptr, "");
  assert(thread->threadObj() != nullptr, "");
  return java_lang_Thread::continuation(thread->threadObj());
}
#endif // ASSERT

inline void clear_anchor(JavaThread* thread) {
  thread->frame_anchor()->clear();
}

static void set_anchor(JavaThread* thread, intptr_t* sp, address pc) {
  assert(pc != nullptr, "");

  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(sp);
  anchor->set_last_Java_pc(pc);
  ContinuationHelper::set_anchor_pd(anchor, sp);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

static void set_anchor(JavaThread* thread, intptr_t* sp) {
  address pc = ContinuationHelper::return_address_at(
           sp - frame::sender_sp_ret_address_offset());
  set_anchor(thread, sp, pc);
}

static void set_anchor_to_entry(JavaThread* thread, ContinuationEntry* entry) {
  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(entry->entry_sp());
  anchor->set_last_Java_pc(entry->entry_pc());
  ContinuationHelper::set_anchor_to_entry_pd(anchor, entry);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

#if CONT_JFR
class FreezeThawJfrInfo : public StackObj {
  short _e_size;
  short _e_num_interpreted_frames;
 public:

  FreezeThawJfrInfo() : _e_size(0), _e_num_interpreted_frames(0) {}
  inline void record_interpreted_frame() { _e_num_interpreted_frames++; }
  inline void record_size_copied(int size) { _e_size += size << LogBytesPerWord; }
  template<typename Event> void post_jfr_event(Event *e, oop continuation, JavaThread* jt);
};

template<typename Event> void FreezeThawJfrInfo::post_jfr_event(Event* e, oop continuation, JavaThread* jt) {
  if (e->should_commit()) {
    log_develop_trace(continuations)("JFR event: iframes: %d size: %d", _e_num_interpreted_frames, _e_size);
    e->set_carrierThread(JFR_JVM_THREAD_ID(jt));
    e->set_continuationClass(continuation->klass());
    e->set_interpretedFrames(_e_num_interpreted_frames);
    e->set_size(_e_size);
    e->commit();
  }
}
#endif // CONT_JFR

/////////////// FREEZE ////

class FreezeBase : public StackObj {
protected:
  JavaThread* const _thread;
  ContinuationWrapper& _cont;
  bool _barriers; // only set when we allocate a chunk

  intptr_t* _bottom_address;

  // Used for preemption only
  const bool _preempt;
  frame _last_frame;

  // Used to support freezing with held monitors
  int _monitors_in_lockstack;

  int _freeze_size; // total size of all frames plus metadata in words.
  int _total_align_size;

  intptr_t* _cont_stack_top;
  intptr_t* _cont_stack_bottom;

  CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)

#ifdef ASSERT
  intptr_t* _orig_chunk_sp;
  int _fast_freeze_size;
  bool _empty;
#endif

  JvmtiSampledObjectAllocEventCollector* _jvmti_event_collector;

  NOT_PRODUCT(int _frames;)
  DEBUG_ONLY(intptr_t* _last_write;)

  inline FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempt);

public:
  NOINLINE freeze_result freeze_slow();
  void freeze_fast_existing_chunk();

  CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
  void set_jvmti_event_collector(JvmtiSampledObjectAllocEventCollector* jsoaec) { _jvmti_event_collector = jsoaec; }

  inline int size_if_fast_freeze_available();

  inline frame& last_frame() { return _last_frame; }

#ifdef ASSERT
  bool check_valid_fast_path();
#endif

protected:
  inline void init_rest();
  void throw_stack_overflow_on_humongous_chunk();

  // fast path
  inline void copy_to_chunk(intptr_t* from, intptr_t* to, int size);
  inline void unwind_frames();
  inline void patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp);

  // slow path
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) = 0;

  int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }

private:
  // slow path
  frame freeze_start_frame();
  frame freeze_start_frame_on_preempt();
  NOINLINE freeze_result recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top);
  inline frame freeze_start_frame_yield_stub();
  template<typename FKind>
  inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
  inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
  inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
  freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
  void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
  NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
  NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
  NOINLINE void finish_freeze(const frame& f, const frame& top);

  void freeze_lockstack(stackChunkOop chunk);

  inline bool stack_overflow();

  static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
                                                                        : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
  template<typename FKind> static inline frame sender(const frame& f);
  template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
  inline void set_top_frame_metadata_pd(const frame& hf);
  inline void patch_pd(frame& callee, const frame& caller);
  void adjust_interpreted_frame_unextended_sp(frame& f);
  static inline void prepare_freeze_interpreted_top_frame(frame& f);
  static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);

protected:
  void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
  bool freeze_fast_new_chunk(stackChunkOop chunk);
};

template <typename ConfigT>
class Freeze : public FreezeBase {
private:
  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

public:
  inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
    : FreezeBase(thread, cont, frame_sp, preempt) {}

  freeze_result try_freeze_fast();

protected:
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) override { return allocate_chunk(stack_size, argsize_md); }
};
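
// High-level flow (a sketch with hypothetical driver code; the real driver is
// freeze_internal, declared near the top of this file): construct a Freeze
// object, try the copy-only fast path when available, and fall back to the
// recursive slow path:
//
//   Freeze<ConfigT> freeze(current, cont, sp, false /* preempt */);
//   freeze_result res = (fast_path_available) ? freeze.try_freeze_fast()
//                                             : freeze.freeze_slow();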

FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt) :
    _thread(thread), _cont(cont), _barriers(false), _preempt(preempt), _last_frame(false /* no initialization */) {
  DEBUG_ONLY(_jvmti_event_collector = nullptr;)

  assert(_thread != nullptr, "");
  assert(_thread->last_continuation()->entry_sp() == _cont.entrySP(), "");

  DEBUG_ONLY(_cont.entry()->verify_cookie();)

  assert(!Interpreter::contains(_cont.entryPC()), "");

  _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
#ifdef _LP64
  if (((intptr_t)_bottom_address & 0xf) != 0) {
    _bottom_address--;
  }
  assert(is_aligned(_bottom_address, frame::frame_alignment), "");
#endif

  log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
                p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
  assert(_bottom_address != nullptr, "");
  assert(_bottom_address <= _cont.entrySP(), "");
  DEBUG_ONLY(_last_write = nullptr;)

  assert(_cont.chunk_invariant(), "");
  assert(!Interpreter::contains(_cont.entryPC()), "");
#if !defined(PPC64) || defined(ZERO)
  static const int doYield_stub_frame_size = frame::metadata_words;
#else
  static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
#endif
  // With preemption, doYield() might not have been resolved yet
  assert(_preempt || SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");

  if (preempt) {
    _last_frame = _thread->last_frame();
  }

  // properties of the continuation on the stack; all sizes are in words
  _cont_stack_top    = frame_sp + (!preempt ? doYield_stub_frame_size : 0); // we don't freeze the doYield stub frame
  _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
      - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw

  log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  assert(cont_size() > 0, "");

  if (LockingMode != LM_LIGHTWEIGHT) {
    _monitors_in_lockstack = 0;
  } else {
    _monitors_in_lockstack = _thread->lock_stack().monitor_count();
  }
}

void FreezeBase::init_rest() { // we want to postpone some initialization until after chunk handling
  _freeze_size = 0;
  _total_align_size = 0;
  NOT_PRODUCT(_frames = 0;)
}

void FreezeBase::freeze_lockstack(stackChunkOop chunk) {
  assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "no room for lockstack");

  _thread->lock_stack().move_to_address((oop*)chunk->start_address());
  chunk->set_lockstack_size(checked_cast<uint8_t>(_monitors_in_lockstack));
  chunk->set_has_lockstack(true);
}

void FreezeBase::copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
  stackChunkOop chunk = _cont.tail();
  chunk->copy_from_stack_to_chunk(from, to, size);
  CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)

#ifdef ASSERT
  if (_last_write != nullptr) {
    assert(_last_write == to + size, "Missed a spot: _last_write: " INTPTR_FORMAT " to+size: " INTPTR_FORMAT
        " stack_size: %d _last_write offset: " PTR_FORMAT " to+size: " PTR_FORMAT, p2i(_last_write), p2i(to+size),
        chunk->stack_size(), _last_write-chunk->start_address(), to+size-chunk->start_address());
    _last_write = to;
  }
#endif
}

// Called _after_ the last possible safepoint during the freeze operation (chunk allocation)
void FreezeBase::unwind_frames() {
  ContinuationEntry* entry = _cont.entry();
  entry->flush_stack_processing(_thread);
  set_anchor_to_entry(_thread, entry);
}

template <typename ConfigT>
freeze_result Freeze<ConfigT>::try_freeze_fast() {
  assert(_thread->thread_state() == _thread_in_vm, "");
  assert(_thread->cont_fastpath(), "");

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size == 0, "");

  stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words + _monitors_in_lockstack, _cont.argsize() + frame::metadata_words_at_top);
  if (freeze_fast_new_chunk(chunk)) {
    return freeze_ok;
  }
  if (_thread->has_pending_exception()) {
    return freeze_exception;
  }

  // TODO R REMOVE when deopt change is fixed
  assert(!_thread->cont_fastpath() || _barriers, "");
  log_develop_trace(continuations)("-- RETRYING SLOW --");
  return freeze_slow();
}

// Returns size needed if the continuation fits, otherwise 0.
int FreezeBase::size_if_fast_freeze_available() {
  stackChunkOop chunk = _cont.tail();
  if (chunk == nullptr || chunk->is_gc_mode() || chunk->requires_barriers() || chunk->has_mixed_frames()) {
    log_develop_trace(continuations)("chunk available %s", chunk == nullptr ? "no chunk" : "chunk requires barriers");
    return 0;
  }

  int total_size_needed = cont_size();
  const int chunk_sp = chunk->sp();

  // argsize can be nonzero if we have a caller, but the caller could be in a non-empty parent chunk,
  // so we subtract it only if we overlap with the caller, i.e. the current chunk isn't empty.
  // Consider leaving the chunk's argsize set when emptying it and removing the following branch,
  // although that would require changing stackChunkOopDesc::is_empty
  if (!chunk->is_empty()) {
    total_size_needed -= _cont.argsize() + frame::metadata_words_at_top;
  }

  total_size_needed += _monitors_in_lockstack;

  int chunk_free_room = chunk_sp - frame::metadata_words_at_bottom;
  bool available = chunk_free_room >= total_size_needed;
  log_develop_trace(continuations)("chunk available: %s size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    available ? "yes" : "no" , total_size_needed, _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  return available ? total_size_needed : 0;
}
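
// Worked example for the computation above (illustrative numbers, with no
// monitors in the lockstack): freezing cont_size() == 100 words into a
// non-empty chunk with chunk->sp() == 120, _cont.argsize() == 4 and
// frame::metadata_words_at_top == 2 gives total_size_needed = 100 - (4 + 2) = 94;
// with frame::metadata_words_at_bottom == 2 the free room is 120 - 2 = 118 >= 94,
// so the fast path is available and 94 is returned.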

void FreezeBase::freeze_fast_existing_chunk() {
  stackChunkOop chunk = _cont.tail();

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size > 0, "");

  if (!chunk->is_empty()) { // we are copying into a non-empty chunk
    DEBUG_ONLY(_empty = false;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (chunk->sp_address()
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
             "unexpected saved return address");
    }
#endif

    // the chunk's sp before the freeze, adjusted to point beyond the stack-passed arguments in the topmost frame
    // we overlap; we'll overwrite the chunk's top frame's callee arguments
    const int chunk_start_sp = chunk->sp() + _cont.argsize() + frame::metadata_words_at_top;
    assert(chunk_start_sp <= chunk->stack_size(), "sp not pointing into stack");

    // increase max_size by what we're freezing minus the overlap
    chunk->set_max_thawing_size(chunk->max_thawing_size() + cont_size() - _cont.argsize() - frame::metadata_words_at_top);

    intptr_t* const bottom_sp = _cont_stack_bottom - _cont.argsize() - frame::metadata_words_at_top;
    assert(bottom_sp == _bottom_address, "");
    // Because the chunk isn't empty, we know there's a caller in the chunk, therefore the bottom-most frame
    // should have a return barrier (installed back when we thawed it).
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (bottom_sp
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot)
             == StubRoutines::cont_returnBarrier(),
             "should be the continuation return barrier");
    }
#endif
    // We copy the fp from the chunk back to the stack because it contains some caller data,
    // including, possibly, an oop that might have gone stale since we thawed.
    patch_stack_pd(bottom_sp, chunk->sp_address());
    // we don't patch the return pc at this time, so as not to make the stack unwalkable for async walks

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  } else { // the chunk is empty
    const int chunk_start_sp = chunk->stack_size();

    DEBUG_ONLY(_empty = true;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

    chunk->set_max_thawing_size(cont_size());
    chunk->set_bottom(chunk_start_sp - _cont.argsize() - frame::metadata_words_at_top);
    chunk->set_sp(chunk->bottom());

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  }
}
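
// Worked example for the overlap above (illustrative numbers): with
// chunk->sp() == 50, _cont.argsize() == 4 and frame::metadata_words_at_top == 2,
// chunk_start_sp == 56; the copy starts 6 words above the chunk's current top
// frame and overwrites that frame's (identical) stack arguments and metadata
// rather than storing them twice.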

bool FreezeBase::freeze_fast_new_chunk(stackChunkOop chunk) {
  DEBUG_ONLY(_empty = true;)

  // Install new chunk
  _cont.set_tail(chunk);

  if (UNLIKELY(chunk == nullptr || !_thread->cont_fastpath() || _barriers)) { // OOME/probably humongous
    log_develop_trace(continuations)("Retrying slow. Barriers: %d", _barriers);
    return false;
  }

  chunk->set_max_thawing_size(cont_size());

  // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
  // They'll then be stored twice: in the chunk and in the parent chunk's top frame
  const int chunk_start_sp = cont_size() + frame::metadata_words + _monitors_in_lockstack;
  assert(chunk_start_sp == chunk->stack_size(), "");

  DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

  freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA true));

  return true;
}

void FreezeBase::freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated)) {
  assert(chunk != nullptr, "");
  assert(!chunk->has_mixed_frames(), "");
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  assert(!chunk->requires_barriers(), "");
  assert(chunk == _cont.tail(), "");

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation on the stack, or a consistent chunk.
  unwind_frames();

  log_develop_trace(continuations)("freeze_fast start: chunk " INTPTR_FORMAT " size: %d orig sp: %d argsize: %d",
    p2i((oopDesc*)chunk), chunk->stack_size(), chunk_start_sp, _cont.argsize());
  assert(chunk_start_sp <= chunk->stack_size(), "");
  assert(chunk_start_sp >= cont_size(), "no room in the chunk");

  const int chunk_new_sp = chunk_start_sp - cont_size(); // the chunk's new sp, after freeze
  assert(!(_fast_freeze_size > 0) || (_orig_chunk_sp - (chunk->start_address() + chunk_new_sp)) == (_fast_freeze_size - _monitors_in_lockstack), "");

  intptr_t* chunk_top = chunk->start_address() + chunk_new_sp;
#ifdef ASSERT
  if (!_empty) {
    intptr_t* retaddr_slot = (_orig_chunk_sp
                              - frame::sender_sp_ret_address_offset());
    assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
           "unexpected saved return address");
  }
#endif

  log_develop_trace(continuations)("freeze_fast start: " INTPTR_FORMAT " sp: %d chunk_top: " INTPTR_FORMAT,
                              p2i(chunk->start_address()), chunk_new_sp, p2i(chunk_top));
  intptr_t* from = _cont_stack_top - frame::metadata_words_at_bottom;
  intptr_t* to   = chunk_top - frame::metadata_words_at_bottom;
  copy_to_chunk(from, to, cont_size() + frame::metadata_words_at_bottom);
  // Because we're not patched yet, the chunk is now in a bad state

  // patch return pc of the bottom-most frozen frame (now in the chunk)
  // with the actual caller's return address
  intptr_t* chunk_bottom_retaddr_slot = (chunk_top + cont_size()
                                         - _cont.argsize()
                                         - frame::metadata_words_at_top
                                         - frame::sender_sp_ret_address_offset());
#ifdef ASSERT
  if (!_empty) {
    assert(ContinuationHelper::return_address_at(chunk_bottom_retaddr_slot)
           == StubRoutines::cont_returnBarrier(),
           "should be the continuation return barrier");
  }
#endif
  ContinuationHelper::patch_return_address_at(chunk_bottom_retaddr_slot,
                                              chunk->pc());

  // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
  chunk->set_sp(chunk_new_sp);

  // set chunk->pc to the return address of the topmost frame in the chunk
  if (_preempt) {
    // On aarch64/riscv64, the return pc of the top frame won't necessarily be at sp[-1].
    // Also, on x64, if the top frame is the native wrapper frame, sp[-1] will not
    // be the pc we used when creating the oopmap. Get the top frame's last pc from
    // the anchor instead.
    address last_pc = _last_frame.pc();
    ContinuationHelper::patch_return_address_at(chunk_top - frame::sender_sp_ret_address_offset(), last_pc);
    chunk->set_pc(last_pc);
  } else {
    chunk->set_pc(ContinuationHelper::return_address_at(
                  _cont_stack_top - frame::sender_sp_ret_address_offset()));
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  _cont.write();

  log_develop_trace(continuations)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    chunk->print_on(true, &ls);
  }

  // Verification
  assert(_cont.chunk_invariant(), "");
  chunk->verify();

#if CONT_JFR
  EventContinuationFreezeFast e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(chunk));
    DEBUG_ONLY(e.set_allocate(chunk_is_allocated);)
    e.set_size(cont_size() << LogBytesPerWord);
    e.commit();
  }
#endif
}

NOINLINE freeze_result FreezeBase::freeze_slow() {
#ifdef ASSERT
  ResourceMark rm;
#endif

  log_develop_trace(continuations)("freeze_slow  #" INTPTR_FORMAT, _cont.hash());
  assert(_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");

#if CONT_JFR
  EventContinuationFreezeSlow e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(_cont.continuation()));
    e.commit();
  }
#endif

  init_rest();

  HandleMark hm(Thread::current());

  frame f = freeze_start_frame();

  LogTarget(Debug, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    f.print_on(&ls);
  }

  frame caller; // the frozen caller in the chunk
  freeze_result res = recurse_freeze(f, caller, 0, false, true);

  if (res == freeze_ok) {
    finish_freeze(f, caller);
    _cont.write();
  }

  return res;
}

frame FreezeBase::freeze_start_frame() {
  if (LIKELY(!_preempt)) {
    return freeze_start_frame_yield_stub();
  } else {
    return freeze_start_frame_on_preempt();
  }
}

frame FreezeBase::freeze_start_frame_yield_stub() {
  frame f = _thread->last_frame();
  assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
  f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
  return f;
}

frame FreezeBase::freeze_start_frame_on_preempt() {
  assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should already be initialized");
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
  return _last_frame;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
  assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
  assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
         || ((top && _preempt) == f.is_native_frame()), "");

  if (stack_overflow()) {
    return freeze_exception;
  }

  if (f.is_compiled_frame()) {
    if (UNLIKELY(f.oop_map() == nullptr)) {
      // special native frame
      return freeze_pinned_native;
    }
    return recurse_freeze_compiled_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (f.is_interpreted_frame()) {
    assert(!f.interpreter_frame_method()->is_native() || (top && _preempt), "");
    return recurse_freeze_interpreted_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (top && _preempt) {
    assert(f.is_native_frame() || f.is_runtime_frame(), "");
    return f.is_native_frame() ? recurse_freeze_native_frame(f, caller) : recurse_freeze_stub_frame(f, caller);
  } else {
    // Frame can't be frozen. Most likely the call_stub or upcall_stub,
    // which indicates there are further native frames up the stack.
    return freeze_pinned_native;
  }
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
template<typename FKind>
inline freeze_result FreezeBase::recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize) {
  assert(FKind::is_instance(f), "");

  assert(fsize > 0, "");
  assert(argsize >= 0, "");
  _freeze_size += fsize;
  NOT_PRODUCT(_frames++;)

  assert(FKind::frame_bottom(f) <= _bottom_address, "");

  // We don't use FKind::frame_bottom(f) == _bottom_address because on x64 there's sometimes an extra word between
  // enterSpecial and an interpreted frame
  if (FKind::frame_bottom(f) >= _bottom_address - 1) {
    return finalize_freeze(f, caller, argsize); // recursion end
  } else {
    frame senderf = sender<FKind>(f);
    assert(FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
    freeze_result result = recurse_freeze(senderf, caller, argsize, FKind::interpreted, false); // recursive call
    return result;
  }
}

inline void FreezeBase::before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("======== FREEZING FRAME interpreted: %d bottom: %d", f.is_interpreted_frame(), is_bottom_frame);
    ls.print_cr("fsize: %d argsize: %d", fsize, argsize);
    f.print_value_on(&ls);
  }
  assert(caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
}

inline void FreezeBase::after_freeze_java_frame(const frame& hf, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    DEBUG_ONLY(hf.print_value_on(&ls);)
    assert(hf.is_heap_frame(), "should be");
    DEBUG_ONLY(print_frame_layout(hf, false, &ls);)
    if (is_bottom_frame) {
      ls.print_cr("bottom h-frame:");
      hf.print_on(&ls);
    }
  }
}

// The parameter argsize_md includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, int argsize_md) {
  int argsize = argsize_md - frame::metadata_words_at_top;
  assert(callee.is_interpreted_frame()
    || ContinuationHelper::Frame::is_stub(callee.cb())
    || callee.cb()->as_nmethod()->is_osr_method()
    || argsize == _cont.argsize(), "argsize: %d cont.argsize: %d", argsize, _cont.argsize());
  log_develop_trace(continuations)("bottom: " INTPTR_FORMAT " count %d size: %d argsize: %d",
    p2i(_bottom_address), _frames, _freeze_size << LogBytesPerWord, argsize);

  LogTarget(Trace, continuations) lt;

#ifdef ASSERT
  bool empty = _cont.is_empty();
  log_develop_trace(continuations)("empty: %d", empty);
#endif

  stackChunkOop chunk = _cont.tail();

  assert(chunk == nullptr || (chunk->max_thawing_size() == 0) == chunk->is_empty(), "");

  _freeze_size += frame::metadata_words; // for top frame's metadata

  int overlap = 0; // the args overlap the caller -- if there is one in this chunk and it is of the same kind
  int unextended_sp = -1;
  if (chunk != nullptr) {
    if (!chunk->is_empty()) {
      StackChunkFrameStream<ChunkFrames::Mixed> last(chunk);
      unextended_sp = chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp());
      bool top_interpreted = Interpreter::contains(chunk->pc());
      if (callee.is_interpreted_frame() == top_interpreted) {
        overlap = argsize_md;
      }
    } else {
      unextended_sp = chunk->stack_size() - frame::metadata_words_at_top;
    }
  }

  log_develop_trace(continuations)("finalize _size: %d overlap: %d unextended_sp: %d", _freeze_size, overlap, unextended_sp);

  _freeze_size -= overlap;
  assert(_freeze_size >= 0, "");

  assert(chunk == nullptr || chunk->is_empty()
          || unextended_sp == chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp()), "");
  assert(chunk != nullptr || unextended_sp < _freeze_size, "");

  _freeze_size += _monitors_in_lockstack;

  // _barriers can be set to true by an allocation in freeze_fast, in which case the chunk is available
  bool allocated_old_in_freeze_fast = _barriers;
  assert(!allocated_old_in_freeze_fast || (unextended_sp >= _freeze_size && chunk->is_empty()),
    "Chunk allocated in freeze_fast is of insufficient size "
    "unextended_sp: %d size: %d is_empty: %d", unextended_sp, _freeze_size, chunk->is_empty());
  assert(!allocated_old_in_freeze_fast || (!UseZGC && !UseG1GC), "Unexpected allocation");

  DEBUG_ONLY(bool empty_chunk = true);
  if (unextended_sp < _freeze_size || chunk->is_gc_mode() || (!allocated_old_in_freeze_fast && chunk->requires_barriers())) {
    // ALLOCATE NEW CHUNK

    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      if (chunk == nullptr) {
        ls.print_cr("no chunk");
      } else {
        ls.print_cr("chunk barriers: %d _size: %d free size: %d",
          chunk->requires_barriers(), _freeze_size, chunk->sp() - frame::metadata_words);
        chunk->print_on(&ls);
      }
    }

    _freeze_size += overlap; // we're allocating a new chunk, so no overlap
    // overlap = 0;

    chunk = allocate_chunk_slow(_freeze_size, argsize_md);
    if (chunk == nullptr) {
      return freeze_exception;
    }

    // Install new chunk
    _cont.set_tail(chunk);
    assert(chunk->is_empty(), "");
  } else {
    // REUSE EXISTING CHUNK
    log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
    if (chunk->is_empty()) {
      int sp = chunk->stack_size() - argsize_md;
      chunk->set_sp(sp);
      chunk->set_bottom(sp);
      _freeze_size += overlap;
      assert(chunk->max_thawing_size() == 0, "");
    } DEBUG_ONLY(else empty_chunk = false;)
  }
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  chunk->set_has_mixed_frames(true);

  assert(chunk->requires_barriers() == _barriers, "");
  assert(!_barriers || chunk->is_empty(), "");

  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");

  if (_preempt) {
    frame f = _thread->last_frame();
    if (f.is_interpreted_frame()) {
      // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
      // We need it so that on resume we can restore the sp to the right place, since
      // thawing might add an alignment word to the expression stack (see finish_thaw()).
      // We do it now that we know freezing will be successful.
      prepare_freeze_interpreted_top_frame(f);
    }
  }

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation or a consistent chunk.
  unwind_frames();

  chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);

  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top chunk:");
    chunk->print_on(&ls);
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  // The topmost existing frame in the chunk; or an empty frame if the chunk is empty
  caller = StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame();

  DEBUG_ONLY(_last_write = caller.unextended_sp() + (empty_chunk ? argsize_md : overlap);)

  assert(chunk->is_in_chunk(_last_write - _freeze_size),
    "last_write-size: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(_last_write-_freeze_size), p2i(chunk->start_address()));
#ifdef ASSERT
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top hframe before (freeze):");
    assert(caller.is_heap_frame(), "should be");
    caller.print_on(&ls);
  }

  assert(!empty || Continuation::is_continuation_entry_frame(callee, nullptr), "");

  frame entry = sender(callee);

  assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
  assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
#endif

  return freeze_ok_bottom;
}
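
// Worked example for the overlap accounting above (illustrative numbers):
// freezing a compiled callee with argsize_md == 6 into a non-empty chunk whose
// top frame is also compiled means the 6 words of stack args plus metadata are
// shared with that frame, so overlap == 6 and _freeze_size shrinks by 6. Had
// the chunk's top frame been interpreted instead, the kinds wouldn't match,
// overlap would stay 0, and the callee's args would be frozen in full.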

// After freezing a frame, we may need to adjust some values related to the caller frame.
void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
  if (is_bottom_frame) {
    // If we're the bottom frame, we need to replace the return barrier with the real
    // caller's pc.
    address last_pc = caller.pc();
    assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
    ContinuationHelper::Frame::patch_pc(caller, last_pc);
  } else {
    assert(!caller.is_empty(), "");
  }

  patch_pd(hf, caller);

  if (f.is_interpreted_frame()) {
    assert(hf.is_heap_frame(), "should be");
    ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
  }

#ifdef ASSERT
  if (hf.is_compiled_frame()) {
    if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
      log_develop_trace(continuations)("Freezing deoptimized frame");
      assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
      assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
    }
  }
#endif
}

#ifdef ASSERT
static void verify_frame_top(const frame& f, intptr_t* top) {
  ResourceMark rm;
  InterpreterOopMap mask;
  f.interpreted_frame_oop_map(&mask);
  assert(top <= ContinuationHelper::InterpretedFrame::frame_top(f, &mask),
         "frame_top: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT,
           p2i(top), p2i(ContinuationHelper::InterpretedFrame::frame_top(f, &mask)));
}
#endif // ASSERT

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, frame& caller,
                                                                    int callee_argsize /* incl. metadata */,
                                                                    bool callee_interpreted) {
  adjust_interpreted_frame_unextended_sp(f);

  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
  const int fsize = pointer_delta_as_int(stack_frame_bottom, stack_frame_top);

  DEBUG_ONLY(verify_frame_top(f, stack_frame_top));

  Method* frame_method = ContinuationHelper::Frame::frame_method(f);
  // including metadata between f and its args
  const int argsize = ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top;

  log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d",
    frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize, callee_interpreted);
  // we'd rather not yield inside methods annotated with @JvmtiMountTransition
  assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::InterpretedFrame>(f, caller, fsize, argsize);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  bool is_bottom_frame = result == freeze_ok_bottom;
  assert(!caller.is_empty() || is_bottom_frame, "");

  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, is_bottom_frame);)

  frame hf = new_heap_frame<ContinuationHelper::InterpretedFrame>(f, caller);
  _total_align_size += frame::align_wiggle; // add alignment room for internal interpreted frame alignment on AArch64/PPC64

  intptr_t* heap_frame_top = ContinuationHelper::InterpretedFrame::frame_top(hf, callee_argsize, callee_interpreted);
  intptr_t* heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
  assert(heap_frame_bottom == heap_frame_top + fsize, "");

  // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
  // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
  assert(!is_bottom_frame || !caller.is_interpreted_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");

  relativize_interpreted_frame_metadata(f, hf);

  patch(f, hf, caller, is_bottom_frame);

  CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;

  // Mark frame_method's GC epoch for class redefinition on_stack calculation.
  frame_method->record_gc_epoch();

  return freeze_ok;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
                                                        int callee_argsize /* incl. metadata */,
                                                        bool callee_interpreted) {
  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
  // including metadata between f and its stackargs
  const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
  const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);

  log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
                             ContinuationHelper::Frame::frame_method(f) != nullptr ?
                             ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
                             _freeze_size, fsize, argsize);
  // we'd rather not yield inside methods annotated with @JvmtiMountTransition
  assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  bool is_bottom_frame = result == freeze_ok_bottom;
  assert(!caller.is_empty() || is_bottom_frame, "");

  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)

  frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);

  intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);

  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
  assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");

  if (caller.is_interpreted_frame()) {
    // When thawing the frame we might need to add alignment (see Thaw::align)
    _total_align_size += frame::align_wiggle;
  }

  patch(f, hf, caller, is_bottom_frame);

  assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");

  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;
  return freeze_ok;
}
1255 
1256 NOINLINE freeze_result FreezeBase::recurse_freeze_stub_frame(frame& f, frame& caller) {
1257   DEBUG_ONLY(frame fsender = sender(f);)
1258   assert(fsender.is_compiled_frame(), "sender should be compiled frame");
1259 
1260   intptr_t* const stack_frame_top = ContinuationHelper::StubFrame::frame_top(f);
1261   const int fsize = f.cb()->frame_size();
1262 
1263   log_develop_trace(continuations)("recurse_freeze_stub_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1264     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1265 
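  // Stub frames have no stack arguments, so we pass 0 for argsize.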
1266   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::StubFrame>(f, caller, fsize, 0);
1267   if (UNLIKELY(result > freeze_ok_bottom)) {
1268     return result;
1269   }
1270 
1271   assert(result == freeze_ok, "should have caller");
1272   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, false /*is_bottom_frame*/);)
1273 
1274   frame hf = new_heap_frame<ContinuationHelper::StubFrame>(f, caller);
1275   intptr_t* heap_frame_top = ContinuationHelper::StubFrame::frame_top(hf);
1276 
1277   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1278 
1279   patch(f, hf, caller, false /*is_bottom_frame*/);
1280 
1281   DEBUG_ONLY(after_freeze_java_frame(hf, false /*is_bottom_frame*/);)
1282 
1283   caller = hf;
1284   return freeze_ok;
1285 }
1286 
1287 NOINLINE freeze_result FreezeBase::recurse_freeze_native_frame(frame& f, frame& caller) {
1288   if (!f.cb()->as_nmethod()->method()->is_object_wait0()) {
1289     assert(f.cb()->as_nmethod()->method()->is_synchronized(), "");
    // Synchronized native method case. Unlike the interpreter native wrapper, the compiled
    // native wrapper tries to acquire the monitor after marshalling the arguments from the
    // caller into the native convention. This is so that we have a valid oopMap in case we
    // have to block in the slow path. But freezing here would require freezing those registers
    // too, and then fixing them back up on thaw in case they hold oops. To avoid complicating
    // things, and given that this would be a rare case anyway, just pin the vthread to the carrier.
1296     return freeze_pinned_native;
1297   }
1298 
1299   intptr_t* const stack_frame_top = ContinuationHelper::NativeFrame::frame_top(f);
  // There are no stack args, but argsize must still include the metadata
1301   const int argsize = frame::metadata_words_at_top;
1302   const int fsize = f.cb()->frame_size() + argsize;
1303 
1304   log_develop_trace(continuations)("recurse_freeze_native_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1305     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1306 
1307   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::NativeFrame>(f, caller, fsize, argsize);
1308   if (UNLIKELY(result > freeze_ok_bottom)) {
1309     return result;
1310   }
1311 
1312   assert(result == freeze_ok, "should have caller frame");
1313   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, false /* is_bottom_frame */);)
1314 
1315   frame hf = new_heap_frame<ContinuationHelper::NativeFrame>(f, caller);
1316   intptr_t* heap_frame_top = ContinuationHelper::NativeFrame::frame_top(hf);
1317 
1318   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1319 
1320   if (caller.is_interpreted_frame()) {
1321     // When thawing the frame we might need to add alignment (see Thaw::align)
1322     _total_align_size += frame::align_wiggle;
1323   }
1324 
1325   patch(f, hf, caller, false /* is_bottom_frame */);
1326 
1327   DEBUG_ONLY(after_freeze_java_frame(hf, false /* is_bottom_frame */);)
1328 
1329   caller = hf;
1330   return freeze_ok;
1331 }
1332 
1333 NOINLINE void FreezeBase::finish_freeze(const frame& f, const frame& top) {
1334   stackChunkOop chunk = _cont.tail();
1335 
1336   LogTarget(Trace, continuations) lt;
1337   if (lt.develop_is_enabled()) {
1338     LogStream ls(lt);
1339     assert(top.is_heap_frame(), "should be");
1340     top.print_on(&ls);
1341   }
1342 
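  // Publish the new top frame: record its platform-dependent metadata and point the
  // chunk's sp and pc at it.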
1343   set_top_frame_metadata_pd(top);
1344 
1345   chunk->set_sp(chunk->to_offset(top.sp()));
1346   chunk->set_pc(top.pc());
1347 
1348   chunk->set_max_thawing_size(chunk->max_thawing_size() + _total_align_size);
1349 
1350   assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "clash with lockstack");
1351 
1352   // At this point the chunk is consistent
1353 
1354   if (UNLIKELY(_barriers)) {
1355     log_develop_trace(continuations)("do barriers on old chunk");
1356     // Serial and Parallel GC can allocate objects directly into the old generation.
1357     // Then we want to relativize the derived pointers eagerly so that
1358     // old chunks are all in GC mode.
    assert(!UseG1GC, "G1 cannot deal with allocating outside of eden");
    assert(!UseZGC, "ZGC cannot deal with allocating chunks visible to marking");
1361     if (UseShenandoahGC) {
1362       _cont.tail()->relativize_derived_pointers_concurrently();
1363     } else {
1364       ContinuationGCSupport::transform_stack_chunk(_cont.tail());
1365     }
1366     // For objects in the old generation we must maintain the remembered set
1367     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>();
1368   }
1369 
1370   log_develop_trace(continuations)("finish_freeze: has_mixed_frames: %d", chunk->has_mixed_frames());
1371   if (lt.develop_is_enabled()) {
1372     LogStream ls(lt);
1373     chunk->print_on(true, &ls);
1374   }
1375 
1376   if (lt.develop_is_enabled()) {
1377     LogStream ls(lt);
1378     ls.print_cr("top hframe after (freeze):");
1379     assert(_cont.last_frame().is_heap_frame(), "should be");
1380     _cont.last_frame().print_on(&ls);
1381     DEBUG_ONLY(print_frame_layout(top, false, &ls);)
1382   }
1383 
1384   assert(_cont.chunk_invariant(), "");
1385 }
1386 
1387 inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive native code
1388   JavaThread* t = !_preempt ? _thread : JavaThread::current();
1389   assert(t == JavaThread::current(), "");
1390   if (os::current_stack_pointer() < t->stack_overflow_state()->shadow_zone_safe_limit()) {
1391     if (!_preempt) {
1392       ContinuationWrapper::SafepointOp so(t, _cont); // could also call _cont.done() instead
1393       Exceptions::_throw_msg(t, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Stack overflow while freezing");
1394     }
1395     return true;
1396   }
1397   return false;
1398 }
1399 
1400 class StackChunkAllocator : public MemAllocator {
1401   const size_t                                 _stack_size;
1402   int                                          _argsize_md;
1403   ContinuationWrapper&                         _continuation_wrapper;
1404   JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector;
1405   mutable bool                                 _took_slow_path;
1406 
1407   // Does the minimal amount of initialization needed for a TLAB allocation.
1408   // We don't need to do a full initialization, as such an allocation need not be immediately walkable.
1409   virtual oop initialize(HeapWord* mem) const override {
1410     assert(_stack_size > 0, "");
1411     assert(_stack_size <= max_jint, "");
1412     assert(_word_size > _stack_size, "");
1413 
1414     // zero out fields (but not the stack)
1415     const size_t hs = oopDesc::header_size();
1416     if (oopDesc::has_klass_gap()) {
1417       oopDesc::set_klass_gap(mem, 0);
1418     }
1419     Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);
1420 
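    // sp == bottom marks the chunk as empty; the argsize_md words between bottom and
    // the end of the stack are reserved for the bottom frame's stack arguments and metadata.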
1421     int bottom = (int)_stack_size - _argsize_md;
1422 
1423     jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
1424     jdk_internal_vm_StackChunk::set_bottom(mem, bottom);
1425     jdk_internal_vm_StackChunk::set_sp(mem, bottom);
1426 
1427     return finish(mem);
1428   }
1429 
1430   stackChunkOop allocate_fast() const {
1431     if (!UseTLAB) {
1432       return nullptr;
1433     }
1434 
1435     HeapWord* const mem = MemAllocator::mem_allocate_inside_tlab_fast();
1436     if (mem == nullptr) {
1437       return nullptr;
1438     }
1439 
1440     oop obj = initialize(mem);
1441     return stackChunkOopDesc::cast(obj);
1442   }
1443 
1444 public:
1445   StackChunkAllocator(Klass* klass,
1446                       size_t word_size,
1447                       Thread* thread,
1448                       size_t stack_size,
1449                       int argsize_md,
1450                       ContinuationWrapper& continuation_wrapper,
1451                       JvmtiSampledObjectAllocEventCollector* jvmti_event_collector)
1452     : MemAllocator(klass, word_size, thread),
1453       _stack_size(stack_size),
1454       _argsize_md(argsize_md),
1455       _continuation_wrapper(continuation_wrapper),
1456       _jvmti_event_collector(jvmti_event_collector),
1457       _took_slow_path(false) {}
1458 
  // Provides its own specialized allocation which skips instrumentation
  // if the memory can be allocated without going to a slow-path.
1461   stackChunkOop allocate() const {
1462     // First try to allocate without any slow-paths or instrumentation.
1463     stackChunkOop obj = allocate_fast();
1464     if (obj != nullptr) {
1465       return obj;
1466     }
1467 
1468     // Now try full-blown allocation with all expensive operations,
1469     // including potentially safepoint operations.
1470     _took_slow_path = true;
1471 
1472     // Protect unhandled Loom oops
1473     ContinuationWrapper::SafepointOp so(_thread, _continuation_wrapper);
1474 
1475     // Can safepoint
1476     _jvmti_event_collector->start();
1477 
1478     // Can safepoint
1479     return stackChunkOopDesc::cast(MemAllocator::allocate());
1480   }
1481 
1482   bool took_slow_path() const {
1483     return _took_slow_path;
1484   }
1485 };
1486 
1487 template <typename ConfigT>
1488 stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size, int argsize_md) {
1489   log_develop_trace(continuations)("allocate_chunk allocating new chunk");
1490 
1491   InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass());
1492   size_t size_in_words = klass->instance_size(stack_size);
1493 
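  // The chunk would be too large for the GC to allocate as a regular object
  // (e.g. humongous in G1). Unless we are preempting, report a StackOverflowError.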
1494   if (CollectedHeap::stack_chunk_max_size() > 0 && size_in_words >= CollectedHeap::stack_chunk_max_size()) {
1495     if (!_preempt) {
1496       throw_stack_overflow_on_humongous_chunk();
1497     }
1498     return nullptr;
1499   }
1500 
1501   JavaThread* current = _preempt ? JavaThread::current() : _thread;
1502   assert(current == JavaThread::current(), "should be current");
1503 
1504   // Allocate the chunk.
1505   //
  // This might safepoint while allocating, but all safepointing due to
  // instrumentation has been deferred. This property is important for
1508   // some GCs, as this ensures that the allocated object is in the young
1509   // generation / newly allocated memory.
1510   StackChunkAllocator allocator(klass, size_in_words, current, stack_size, argsize_md, _cont, _jvmti_event_collector);
1511   stackChunkOop chunk = allocator.allocate();
1512 
1513   if (chunk == nullptr) {
1514     return nullptr; // OOME
1515   }
1516 
1517   // assert that chunk is properly initialized
1518   assert(chunk->stack_size() == (int)stack_size, "");
1519   assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size);
1520   assert(chunk->sp() == chunk->bottom(), "");
1521   assert((intptr_t)chunk->start_address() % 8 == 0, "");
1522   assert(chunk->max_thawing_size() == 0, "");
1523   assert(chunk->pc() == nullptr, "");
1524   assert(chunk->is_empty(), "");
1525   assert(chunk->flags() == 0, "");
1526   assert(chunk->is_gc_mode() == false, "");
1527   assert(chunk->lockstack_size() == 0, "");
1528 
1529   // fields are uninitialized
1530   chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk());
1531   chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation());
1532 
1533 #if INCLUDE_ZGC
1534   if (UseZGC) {
1535     ZStackChunkGCData::initialize(chunk);
1536     assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation");
1537     _barriers = false;
1538   } else
1539 #endif
1540 #if INCLUDE_SHENANDOAHGC
1541   if (UseShenandoahGC) {
1542     _barriers = chunk->requires_barriers();
1543   } else
1544 #endif
1545   {
1546     if (!allocator.took_slow_path()) {
1547       // Guaranteed to be in young gen / newly allocated memory
1548       assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation");
1549       _barriers = false;
1550     } else {
1551       // Some GCs could put direct allocations in old gen for slow-path
1552       // allocations; need to explicitly check if that was the case.
1553       _barriers = chunk->requires_barriers();
1554     }
1555   }
1556 
1557   if (_barriers) {
1558     log_develop_trace(continuations)("allocation requires barriers");
1559   }
1560 
1561   assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1562 
1563   return chunk;
1564 }
1565 
1566 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1567   ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1568   Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1569 }
1570 
1571 #if INCLUDE_JVMTI
1572 static int num_java_frames(ContinuationWrapper& cont) {
1573   ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1574   int count = 0;
1575   for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1576     count += chunk->num_java_frames();
1577   }
1578   return count;
1579 }
1580 
1581 static void invalidate_jvmti_stack(JavaThread* thread) {
1582   if (thread->is_interp_only_mode()) {
1583     JvmtiThreadState *state = thread->jvmti_thread_state();
1584     if (state != nullptr)
1585       state->invalidate_cur_stack_depth();
1586   }
1587 }
1588 
1589 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1590   if (JvmtiExport::can_post_frame_pop()) {
1591     int num_frames = num_java_frames(cont);
1592 
1593     ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1594     JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1595   }
1596   invalidate_jvmti_stack(thread);
1597 }
1598 
1599 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top) {
1600   assert(current->vthread() != nullptr, "must be");
1601 
1602   HandleMarkCleaner hm(current);
1603   Handle vth(current, current->vthread());
1604 
1605   ContinuationWrapper::SafepointOp so(current, cont);
1606 
  // Since we might safepoint, set the anchor so that the stack can be walked.
1608   set_anchor(current, top.sp());
1609 
1610   JRT_BLOCK
1611     JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
1612 
1613     if (current->pending_contended_entered_event()) {
1614       JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1615       current->set_contended_entered_monitor(nullptr);
1616     }
1617   JRT_BLOCK_END
1618 
1619   clear_anchor(current);
1620 }
1621 #endif // INCLUDE_JVMTI
1622 
1623 #ifdef ASSERT
1624 static bool monitors_on_stack(JavaThread* thread) {
1625   ContinuationEntry* ce = thread->last_continuation();
1626   RegisterMap map(thread,
1627                   RegisterMap::UpdateMap::include,
1628                   RegisterMap::ProcessFrames::include,
1629                   RegisterMap::WalkContinuation::skip);
1630   map.set_include_argument_oops(false);
1631   for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
1632     if ((f.is_interpreted_frame() && ContinuationHelper::InterpretedFrame::is_owning_locks(f)) ||
1633         (f.is_compiled_frame() && ContinuationHelper::CompiledFrame::is_owning_locks(map.thread(), &map, f)) ||
1634         (f.is_native_frame() && ContinuationHelper::NativeFrame::is_owning_locks(map.thread(), f))) {
1635       return true;
1636     }
1637   }
1638   return false;
1639 }
1640 
// There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1642 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1643 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1644 bool FreezeBase::check_valid_fast_path() {
1645   ContinuationEntry* ce = _thread->last_continuation();
1646   RegisterMap map(_thread,
1647                   RegisterMap::UpdateMap::skip,
1648                   RegisterMap::ProcessFrames::skip,
1649                   RegisterMap::WalkContinuation::skip);
1650   map.set_include_argument_oops(false);
1651   bool is_top_frame = true;
1652   for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1653     if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1654       return false;
1655     }
1656   }
1657   return true;
1658 }
1659 #endif // ASSERT
1660 
1661 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1662   verify_continuation(cont.continuation());
1663   assert(!cont.is_empty(), "");
1664 
1665   log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1666   return freeze_ok;
1667 }
1668 
1669 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1670   if (UNLIKELY(res != freeze_ok)) {
1671     JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1672     verify_continuation(cont.continuation());
1673     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1674     return res;
1675   }
1676 
1677   JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1678   return freeze_epilog(cont);
1679 }
1680 
1681 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1682   if (UNLIKELY(res != freeze_ok)) {
1683     verify_continuation(cont.continuation());
1684     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1685     return res;
1686   }
1687 
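  // Success: divert the return from the preempted frame into the preempt stub, and
  // mark the chunk so that thaw knows to resume the preempted operation.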
1688   patch_return_pc_with_preempt_stub(old_last_frame);
1689   cont.tail()->set_preempted(true);
1690 
1691   return freeze_epilog(cont);
1692 }
1693 
1694 template<typename ConfigT, bool preempt>
1695 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1696   assert(!current->has_pending_exception(), "");
1697 
1698 #ifdef ASSERT
  log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1700   log_frames(current);
1701 #endif
1702 
1703   CONT_JFR_ONLY(EventContinuationFreeze event;)
1704 
1705   ContinuationEntry* entry = current->last_continuation();
1706 
1707   oop oopCont = entry->cont_oop(current);
1708   assert(oopCont == current->last_continuation()->cont_oop(current), "");
1709   assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1710 
1711   verify_continuation(oopCont);
1712   ContinuationWrapper cont(current, oopCont);
1713   log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1714 
1715   assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1716 
1717   assert(LockingMode != LM_LEGACY || (monitors_on_stack(current) == ((current->held_monitor_count() - current->jni_monitor_count()) > 0)),
1718          "Held monitor count and locks on stack invariant: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1719   assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
1720          "Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1721 
1722   if (entry->is_pinned() || current->held_monitor_count() > 0) {
1723     log_develop_debug(continuations)("PINNED due to critical section/hold monitor");
1724     verify_continuation(cont.continuation());
1725     freeze_result res = entry->is_pinned() ? freeze_pinned_cs : freeze_pinned_monitor;
1726     if (!preempt) {
1727       JFR_ONLY(current->set_last_freeze_fail_result(res);)
1728     }
1729     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1730     // Avoid Thread.yield() loops without safepoint polls.
1731     if (SafepointMechanism::should_process(current) && !preempt) {
1732       cont.done(); // allow safepoint
1733       ThreadInVMfromJava tivmfj(current);
1734     }
1735     return res;
1736   }
1737 
1738   Freeze<ConfigT> freeze(current, cont, sp, preempt);
1739 
1740   assert(!current->cont_fastpath() || freeze.check_valid_fast_path(), "");
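  // The fast path copies the stack with a simple memcpy; it is only valid while every
  // frame in the continuation is compiled and not deoptimized (tracked by cont_fastpath).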
1741   bool fast = UseContinuationFastPath && current->cont_fastpath();
1742   if (fast && freeze.size_if_fast_freeze_available() > 0) {
1743     freeze.freeze_fast_existing_chunk();
1744     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1745     return !preempt ? freeze_epilog(cont) : preempt_epilog(cont, freeze_ok, freeze.last_frame());
1746   }
1747 
1748   if (preempt) {
1749     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1750     freeze.set_jvmti_event_collector(&jsoaec);
1751 
1752     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1753 
1754     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1755     preempt_epilog(cont, res, freeze.last_frame());
1756     return res;
1757   }
1758 
1759   log_develop_trace(continuations)("chunk unavailable; transitioning to VM");
1760   assert(current == JavaThread::current(), "must be current thread");
1761   JRT_BLOCK
1762     // delays a possible JvmtiSampledObjectAllocEventCollector in alloc_chunk
1763     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1764     freeze.set_jvmti_event_collector(&jsoaec);
1765 
1766     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1767 
1768     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1769     freeze_epilog(current, cont, res);
1770     cont.done(); // allow safepoint in the transition back to Java
1771     return res;
1772   JRT_BLOCK_END
1773 }
1774 
1775 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) {
1776   ContinuationEntry* entry = thread->last_continuation();
1777   if (entry == nullptr) {
1778     return freeze_ok;
1779   }
1780   if (entry->is_pinned()) {
1781     return freeze_pinned_cs;
1782   } else if (thread->held_monitor_count() > 0) {
1783     return freeze_pinned_monitor;
1784   }
1785 
1786   RegisterMap map(thread,
1787                   RegisterMap::UpdateMap::include,
1788                   RegisterMap::ProcessFrames::skip,
1789                   RegisterMap::WalkContinuation::skip);
1790   map.set_include_argument_oops(false);
1791   frame f = thread->last_frame();
1792 
1793   if (!safepoint) {
1794     f = f.sender(&map); // this is the yield frame
1795   } else { // safepoint yield
1796 #if (defined(X86) || defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
1797     f.set_fp(f.real_fp()); // Instead of this, maybe in ContinuationWrapper::set_last_frame always use the real_fp?
1798 #else
1799     Unimplemented();
1800 #endif
1801     if (!Interpreter::contains(f.pc())) {
1802       assert(ContinuationHelper::Frame::is_stub(f.cb()), "must be");
1803       assert(f.oop_map() != nullptr, "must be");
1804       f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
1805     }
1806   }
1807 
1808   while (true) {
1809     if ((f.is_interpreted_frame() && f.interpreter_frame_method()->is_native()) || f.is_native_frame()) {
1810       return freeze_pinned_native;
1811     }
1812 
1813     f = f.sender(&map);
1814     if (!Continuation::is_frame_in_continuation(entry, f)) {
1815       oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop(thread));
1816       if (scope == cont_scope) {
1817         break;
1818       }
1819       intx monitor_count = entry->parent_held_monitor_count();
1820       entry = entry->parent();
1821       if (entry == nullptr) {
1822         break;
1823       }
1824       if (entry->is_pinned()) {
1825         return freeze_pinned_cs;
1826       } else if (monitor_count > 0) {
1827         return freeze_pinned_monitor;
1828       }
1829     }
1830   }
1831   return freeze_ok;
1832 }
1833 
1834 /////////////// THAW ////
1835 
1836 static int thaw_size(stackChunkOop chunk) {
1837   int size = chunk->max_thawing_size();
1838   size += frame::metadata_words; // For the top pc+fp in push_return_frame or top = stack_sp - frame::metadata_words in thaw_fast
1839   size += 2*frame::align_wiggle; // in case of alignments at the top and bottom
1840   return size;
1841 }
1842 
1843 // make room on the stack for thaw
1844 // returns the size in bytes, or 0 on failure
1845 static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier) {
1846   log_develop_trace(continuations)("~~~~ prepare_thaw return_barrier: %d", return_barrier);
1847 
1848   assert(thread == JavaThread::current(), "");
1849 
1850   ContinuationEntry* ce = thread->last_continuation();
1851   assert(ce != nullptr, "");
1852   oop continuation = ce->cont_oop(thread);
1853   assert(continuation == get_continuation(thread), "");
1854   verify_continuation(continuation);
1855 
1856   stackChunkOop chunk = jdk_internal_vm_Continuation::tail(continuation);
1857   assert(chunk != nullptr, "");
1858 
1859   // The tail can be empty because it might still be available for another freeze.
1860   // However, here we want to thaw, so we get rid of it (it will be GCed).
1861   if (UNLIKELY(chunk->is_empty())) {
1862     chunk = chunk->parent();
1863     assert(chunk != nullptr, "");
1864     assert(!chunk->is_empty(), "");
1865     jdk_internal_vm_Continuation::set_tail(continuation, chunk);
1866   }
1867 
1868   // Verification
1869   chunk->verify();
1870   assert(chunk->max_thawing_size() > 0, "chunk invariant violated; expected to not be empty");
1871 
1872   // Only make space for the last chunk because we only thaw from the last chunk
1873   int size = thaw_size(chunk) << LogBytesPerWord;
1874 
1875   const address bottom = (address)thread->last_continuation()->entry_sp();
1876   // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
1877   // for the Java frames in the check below.
1878   if (!stack_overflow_check(thread, size + 300, bottom)) {
1879     return 0;
1880   }
1881 
1882   log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
1883                               p2i(bottom), p2i(bottom - size), size);
1884   return size;
1885 }
1886 
1887 class ThawBase : public StackObj {
1888 protected:
1889   JavaThread* _thread;
1890   ContinuationWrapper& _cont;
1891   CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
1892 
1893   intptr_t* _fastpath;
1894   bool _barriers;
1895   bool _preempted_case;
1896   intptr_t* _top_unextended_sp_before_thaw;
1897   int _align_size;
1898   DEBUG_ONLY(intptr_t* _top_stack_address);
1899 
1900   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1901 
1902   NOT_PRODUCT(int _frames;)
1903 
1904 protected:
1905   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1906       _thread(thread), _cont(cont),
1907       _fastpath(nullptr) {
1908     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1909     assert (cont.tail() != nullptr, "no last chunk");
1910     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1911   }
1912 
1913   void clear_chunk(stackChunkOop chunk);
1914   template<bool check_stub>
1915   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1916   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1917 
1918   void thaw_lockstack(stackChunkOop chunk);
1919 
1920   // fast path
1921   inline void prefetch_chunk_pd(void* start, int size_words);
1922   void patch_return(intptr_t* sp, bool is_last);
1923 
1924   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1925   inline intptr_t* push_cleanup_continuation();
1926   void throw_interrupted_exception(JavaThread* current, frame& top);
1927 
1928   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1929   void finish_thaw(frame& f);
1930 
1931 private:
1932   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1933   void finalize_thaw(frame& entry, int argsize);
1934 
1935   inline bool seen_by_gc();
1936 
1937   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1938   inline void after_thaw_java_frame(const frame& f, bool bottom);
1939   inline void patch(frame& f, const frame& caller, bool bottom);
1940   void clear_bitmap_bits(address start, address end);
1941 
1942   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1943   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1944   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1945   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1946 
1947   void push_return_frame(frame& f);
1948   inline frame new_entry_frame();
1949   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
1950   inline void patch_pd(frame& f, const frame& sender);
1951   inline void patch_pd(frame& f, intptr_t* caller_sp);
1952   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
1953 
1954   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
1955 
1956   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
1957 
1958  public:
1959   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
1960 };
1961 
1962 template <typename ConfigT>
1963 class Thaw : public ThawBase {
1964 public:
1965   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
1966 
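  // Fast thaw copies frames back with a simple memcpy. It requires that no GC barriers
  // are needed, the thread permits the fast path, the chunk has none of the slow-path
  // conditions (e.g. mixed frames), and frame pointers need not be preserved.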
1967   inline bool can_thaw_fast(stackChunkOop chunk) {
1968     return    !_barriers
1969            &&  _thread->cont_fastpath_thread_state()
1970            && !chunk->has_thaw_slowpath_condition()
1971            && !PreserveFramePointer;
1972   }
1973 
1974   inline intptr_t* thaw(Continuation::thaw_kind kind);
1975   template<bool check_stub = false>
1976   NOINLINE intptr_t* thaw_fast(stackChunkOop chunk);
1977   NOINLINE intptr_t* thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind);
1978   inline void patch_caller_links(intptr_t* sp, intptr_t* bottom);
1979 };
1980 
1981 template <typename ConfigT>
1982 inline intptr_t* Thaw<ConfigT>::thaw(Continuation::thaw_kind kind) {
1983   verify_continuation(_cont.continuation());
1984   assert(!jdk_internal_vm_Continuation::done(_cont.continuation()), "");
1985   assert(!_cont.is_empty(), "");
1986 
1987   stackChunkOop chunk = _cont.tail();
1988   assert(chunk != nullptr, "guaranteed by prepare_thaw");
1989   assert(!chunk->is_empty(), "guaranteed by prepare_thaw");
1990 
1991   _barriers = chunk->requires_barriers();
1992   return (LIKELY(can_thaw_fast(chunk))) ? thaw_fast(chunk)
1993                                         : thaw_slow(chunk, kind);
1994 }
1995 
1996 class ReconstructedStack : public StackObj {
1997   intptr_t* _base;  // _cont.entrySP(); // top of the entry frame
1998   int _thaw_size;
1999   int _argsize;
2000 public:
2001   ReconstructedStack(intptr_t* base, int thaw_size, int argsize)
2002   : _base(base), _thaw_size(thaw_size - (argsize == 0 ? frame::metadata_words_at_top : 0)), _argsize(argsize) {
    // The only possible source of misalignment is stack-passed arguments, because compiled frames are 16-byte aligned.
2004     assert(argsize != 0 || (_base - _thaw_size) == ContinuationHelper::frame_align_pointer(_base - _thaw_size), "");
2005     // We're at most one alignment word away from entrySP
2006     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2007   }
2008 
2009   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2010 
2011   // top and bottom stack pointers
2012   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2013   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2014 
2015   // several operations operate on the totality of the stack being reconstructed,
2016   // including the metadata words
2017   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2018   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2019 };
2020 
2021 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2022   chunk->set_sp(chunk->bottom());
2023   chunk->set_max_thawing_size(0);
2024 }
2025 
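// Removes the top compiled frame from the chunk (together with the stub frame on top
// of it, in the preempted case) and returns its size in words, including the
// caller-overlap area (stack arguments plus metadata).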
2026 template<bool check_stub>
2027 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2028   bool empty = false;
2029   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2030   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2031   assert(chunk_sp == f.sp(), "");
2032   assert(chunk_sp == f.unextended_sp(), "");
2033 
2034   int frame_size = f.cb()->frame_size();
2035   argsize = f.stack_argsize();
2036 
2037   assert(!f.is_stub() || check_stub, "");
2038   if (check_stub && f.is_stub()) {
    // If we don't thaw the top compiled frame as well, then after restoring the saved
    // registers back in Java we would hit the return barrier to thaw one more frame,
    // effectively overwriting the restored registers during that call.
2042     f.next(SmallRegisterMap::instance(), true /* stop */);
2043     assert(!f.is_done(), "");
2044 
2045     f.get_cb();
2046     assert(f.is_compiled(), "");
2047     frame_size += f.cb()->frame_size();
2048     argsize = f.stack_argsize();
2049 
2050     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2051       // The caller of the runtime stub when the continuation is preempted is not at a
2052       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2053       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2054       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2055     }
2056   }
2057 
2058   f.next(SmallRegisterMap::instance(), true /* stop */);
2059   empty = f.is_done();
2060   assert(!empty || argsize == chunk->argsize(), "");
2061 
2062   if (empty) {
2063     clear_chunk(chunk);
2064   } else {
2065     chunk->set_sp(chunk->sp() + frame_size);
2066     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2067     // We set chunk->pc to the return pc into the next frame
2068     chunk->set_pc(f.pc());
2069 #ifdef ASSERT
2070     {
2071       intptr_t* retaddr_slot = (chunk_sp
2072                                 + frame_size
2073                                 - frame::sender_sp_ret_address_offset());
2074       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2075              "unexpected pc");
2076     }
2077 #endif
2078   }
2079   assert(empty == chunk->is_empty(), "");
  // Returns the size required to store the frame on the stack; because it is a
  // compiled frame, this must include a copy of the arguments passed by the caller
2082   return frame_size + argsize + frame::metadata_words_at_top;
2083 }
2084 
2085 void ThawBase::thaw_lockstack(stackChunkOop chunk) {
2086   int lockStackSize = chunk->lockstack_size();
2087   assert(lockStackSize > 0 && lockStackSize <= LockStack::CAPACITY, "");
2088 
2089   oop tmp_lockstack[LockStack::CAPACITY];
2090   chunk->transfer_lockstack(tmp_lockstack, _barriers);
2091   _thread->lock_stack().move_from_address(tmp_lockstack, lockStackSize);
2092 
2093   chunk->set_lockstack_size(0);
2094   chunk->set_has_lockstack(false);
2095 }
2096 
2097 void ThawBase::copy_from_chunk(intptr_t* from, intptr_t* to, int size) {
2098   assert(to >= _top_stack_address, "overwrote past thawing space"
2099     " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(to), p2i(_top_stack_address));
2100   assert(to + size <= _cont.entrySP(), "overwrote past thawing space");
2101   _cont.tail()->copy_from_chunk_to_stack(from, to, size);
2102   CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)
2103 }
2104 
2105 void ThawBase::patch_return(intptr_t* sp, bool is_last) {
2106   log_develop_trace(continuations)("thaw_fast patching -- sp: " INTPTR_FORMAT, p2i(sp));
2107 
2108   address pc = !is_last ? StubRoutines::cont_returnBarrier() : _cont.entryPC();
2109   ContinuationHelper::patch_return_address_at(
2110     sp - frame::sender_sp_ret_address_offset(),
2111     pc);
2112 }
2113 
2114 template <typename ConfigT>
2115 template<bool check_stub>
2116 NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) {
2117   assert(chunk == _cont.tail(), "");
2118   assert(!chunk->has_mixed_frames(), "");
2119   assert(!chunk->requires_barriers(), "");
2120   assert(!chunk->has_bitmap(), "");
2121   assert(!_thread->is_interp_only_mode(), "");
2122 
2123   LogTarget(Trace, continuations) lt;
2124   if (lt.develop_is_enabled()) {
2125     LogStream ls(lt);
2126     ls.print_cr("thaw_fast");
2127     chunk->print_on(true, &ls);
2128   }
2129 
  // Below this threshold we thaw the whole chunk; above it we thaw just one frame.
2131   static const int threshold = 500; // words
2132 
2133   const int full_chunk_size = chunk->stack_size() - chunk->sp(); // this initial size could be reduced if it's a partial thaw
2134   int argsize, thaw_size;
2135 
2136   intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();
2137 
2138   bool partial, empty;
2139   if (LIKELY(!TEST_THAW_ONE_CHUNK_FRAME && (full_chunk_size < threshold))) {
2140     prefetch_chunk_pd(chunk->start_address(), full_chunk_size); // prefetch anticipating memcpy starting at highest address
2141 
2142     partial = false;
2143     argsize = chunk->argsize(); // must be called *before* clearing the chunk
2144     clear_chunk(chunk);
2145     thaw_size = full_chunk_size;
2146     empty = true;
2147   } else { // thaw a single frame
2148     partial = true;
2149     thaw_size = remove_top_compiled_frame_from_chunk<check_stub>(chunk, argsize);
2150     empty = chunk->is_empty();
2151   }
2152 
  // Are we thawing the last frame(s) in the continuation?
2154   const bool is_last = empty && chunk->parent() == nullptr;
2155   assert(!is_last || argsize == 0, "");
2156 
2157   log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT,
2158                               partial, is_last, empty, thaw_size, argsize, p2i(_cont.entrySP()));
2159 
2160   ReconstructedStack rs(_cont.entrySP(), thaw_size, argsize);
2161 
2162   // also copy metadata words at frame bottom
2163   copy_from_chunk(chunk_sp - frame::metadata_words_at_bottom, rs.top(), rs.total_size());
2164 
2165   // update the ContinuationEntry
2166   _cont.set_argsize(argsize);
2167   log_develop_trace(continuations)("setting entry argsize: %d", _cont.argsize());
2168   assert(rs.bottom_sp() == _cont.entry()->bottom_sender_sp(), "");
2169 
2170   // install the return barrier if not last frame, or the entry's pc if last
2171   patch_return(rs.bottom_sp(), is_last);
2172 
2173   // insert the back links from callee to caller frames
2174   patch_caller_links(rs.top(), rs.top() + rs.total_size());
2175 
2176   assert(is_last == _cont.is_empty(), "");
2177   assert(_cont.chunk_invariant(), "");
2178 
2179 #if CONT_JFR
2180   EventContinuationThawFast e;
2181   if (e.should_commit()) {
2182     e.set_id(cast_from_oop<u8>(chunk));
2183     e.set_size(thaw_size << LogBytesPerWord);
2184     e.set_full(!partial);
2185     e.commit();
2186   }
2187 #endif
2188 
2189 #ifdef ASSERT
2190   set_anchor(_thread, rs.sp());
2191   log_frames(_thread);
2192   if (LoomDeoptAfterThaw) {
2193     do_deopt_after_thaw(_thread);
2194   }
2195   clear_anchor(_thread);
2196 #endif
2197 
2198   return rs.sp();
2199 }
2200 
2201 inline bool ThawBase::seen_by_gc() {
2202   return _barriers || _cont.tail()->is_gc_mode();
2203 }
2204 
2205 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2206 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2207   if (UseZGC || UseShenandoahGC) {
2208     chunk->relativize_derived_pointers_concurrently();
2209   }
2210 #endif
2211 }
2212 
2213 template <typename ConfigT>
2214 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2215   Continuation::preempt_kind preempt_kind;
2216   bool retry_fast_path = false;
2217 
2218   _preempted_case = chunk->preempted();
2219   if (_preempted_case) {
2220     ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2221     if (waiter != nullptr) {
2222       // Mounted again after preemption. Resume the pending monitor operation,
2223       // which will be either a monitorenter or Object.wait() call.
2224       ObjectMonitor* mon = waiter->monitor();
2225       preempt_kind = waiter->is_wait() ? Continuation::freeze_on_wait : Continuation::freeze_on_monitorenter;
2226 
2227       bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2228       assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2229       if (!mon_acquired) {
2230         // Failed to acquire monitor. Return to enterSpecial to unmount again.
2231         return push_cleanup_continuation();
2232       }
2233       chunk = _cont.tail();  // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2234     } else {
      // Preemption cancelled in the monitorenter case. We actually acquired
      // the monitor after freezing all the frames, so there is nothing to do.
2237       preempt_kind = Continuation::freeze_on_monitorenter;
2238     }
2239     // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2240     relativize_chunk_concurrently(chunk);
2241     chunk->set_preempted(false);
2242     retry_fast_path = true;
2243   } else {
2244     relativize_chunk_concurrently(chunk);
2245   }
2246 
2247   // On first thaw after freeze restore oops to the lockstack if any.
2248   assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2249   if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2250     thaw_lockstack(chunk);
2251     retry_fast_path = true;
2252   }
2253 
2254   // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2255   // and FLAG_PREEMPTED flags from the stackChunk.
2256   if (retry_fast_path && can_thaw_fast(chunk)) {
2257     intptr_t* sp = thaw_fast<true>(chunk);
2258     if (_preempted_case) {
2259       return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2260     }
2261     return sp;
2262   }
2263 
2264   LogTarget(Trace, continuations) lt;
2265   if (lt.develop_is_enabled()) {
2266     LogStream ls(lt);
2267     ls.print_cr("thaw slow return_barrier: %d " INTPTR_FORMAT, kind, p2i(chunk));
2268     chunk->print_on(true, &ls);
2269   }
2270 
2271 #if CONT_JFR
2272   EventContinuationThawSlow e;
2273   if (e.should_commit()) {
2274     e.set_id(cast_from_oop<u8>(_cont.continuation()));
2275     e.commit();
2276   }
2277 #endif
2278 
2279   DEBUG_ONLY(_frames = 0;)
2280   _align_size = 0;
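  // An initial (top) thaw materializes two frames (the top frame and its caller);
  // a return-barrier thaw only needs one.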
2281   int num_frames = kind == Continuation::thaw_top ? 2 : 1;
2282 
2283   _stream = StackChunkFrameStream<ChunkFrames::Mixed>(chunk);
2284   _top_unextended_sp_before_thaw = _stream.unextended_sp();
2285 
2286   frame heap_frame = _stream.to_frame();
2287   if (lt.develop_is_enabled()) {
2288     LogStream ls(lt);
2289     ls.print_cr("top hframe before (thaw):");
2290     assert(heap_frame.is_heap_frame(), "should have created a relative frame");
2291     heap_frame.print_value_on(&ls);
2292   }
2293 
2294   frame caller; // the thawed caller on the stack
2295   recurse_thaw(heap_frame, caller, num_frames, _preempted_case);
2296   finish_thaw(caller); // caller is now the topmost thawed frame
2297   _cont.write();
2298 
2299   assert(_cont.chunk_invariant(), "");
2300 
2301   JVMTI_ONLY(invalidate_jvmti_stack(_thread));
2302 
2303   _thread->set_cont_fastpath(_fastpath);
2304 
2305   intptr_t* sp = caller.sp();
2306 
2307   if (_preempted_case) {
2308     return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2309   }
2310   return sp;
2311 }
2312 
2313 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2314   log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2315   assert(!_cont.is_empty(), "no more frames");
2316   assert(num_frames > 0, "");
2317   assert(!heap_frame.is_empty(), "");
2318 
2319   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2320     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2321   } else if (!heap_frame.is_interpreted_frame()) {
2322     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2323   } else {
2324     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2325   }
2326 }
2327 
2328 template<typename FKind>
2329 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2330   assert(num_frames > 0, "");
2331 
2332   DEBUG_ONLY(_frames++;)
2333 
2334   int argsize = _stream.stack_argsize();
2335 
2336   _stream.next(SmallRegisterMap::instance());
2337   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2338 
2339   // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2340   // as it makes detecting that situation and adjusting unextended_sp tricky
2341   if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2342     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2343     num_frames++;
2344   }
2345 
2346   if (num_frames == 1 || _stream.is_done()) { // end recursion
2347     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2348     return true; // bottom
2349   } else { // recurse
2350     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2351     return false;
2352   }
2353 }
2354 
2355 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2356   stackChunkOop chunk = _cont.tail();
2357 
2358   if (!_stream.is_done()) {
2359     assert(_stream.sp() >= chunk->sp_address(), "");
2360     chunk->set_sp(chunk->to_offset(_stream.sp()));
2361     chunk->set_pc(_stream.pc());
2362   } else {
2363     chunk->set_sp(chunk->bottom());
2364     chunk->set_pc(nullptr);
2365   }
2366   assert(_stream.is_done() == chunk->is_empty(), "");
2367 
2368   int total_thawed = pointer_delta_as_int(_stream.unextended_sp(), _top_unextended_sp_before_thaw);
2369   chunk->set_max_thawing_size(chunk->max_thawing_size() - total_thawed);
2370 
2371   _cont.set_argsize(argsize);
2372   entry = new_entry_frame();
2373 
2374   assert(entry.sp() == _cont.entrySP(), "");
2375   assert(Continuation::is_continuation_enterSpecial(entry), "");
2376   assert(_cont.is_entry_frame(entry), "");
2377 }
2378 
2379 inline void ThawBase::before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) {
2380   LogTarget(Trace, continuations) lt;
2381   if (lt.develop_is_enabled()) {
2382     LogStream ls(lt);
2383     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2384     assert(hf.is_heap_frame(), "should be");
2385     hf.print_value_on(&ls);
2386   }
2387   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2388 }
2389 
2390 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2391 #ifdef ASSERT
2392   LogTarget(Trace, continuations) lt;
2393   if (lt.develop_is_enabled()) {
2394     LogStream ls(lt);
2395     ls.print_cr("thawed frame:");
2396     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2397   }
2398 #endif
2399 }
2400 
2401 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
2402   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2403   if (bottom) {
2404     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2405                                                                  : StubRoutines::cont_returnBarrier());
2406   } else {
    // The caller might have been deoptimized during thaw, but we've overwritten the
    // return address when copying f from the heap; if the caller is not deoptimized,
    // the pc is unchanged.
2409     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
2410   }
2411 
2412   patch_pd(f, caller);
2413 
2414   if (f.is_interpreted_frame()) {
2415     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2416   }
2417 
2418   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2419   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2420 }
2421 
2422 void ThawBase::clear_bitmap_bits(address start, address end) {
2423   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2424   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2425 
2426   // we need to clear the bits that correspond to arguments as they reside in the caller frame
2427   // or they will keep objects that are otherwise unreachable alive.
2428 
2429   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2430   // `end` could be at an odd number of stack slots from `start`, i.e might not be oop aligned.
2431   // If that's the case the bit range corresponding to the last stack slot should not have bits set
2432   // anyways and we assert that before returning.
2433   address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2434   log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2435   stackChunkOop chunk = _cont.tail();
2436   chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2437   assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2438 }
2439 
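// Finishes work postponed at preemption time: completes the JVMTI VTMS transition,
// fixes the fp of a fast-thawed stub/native frame, and delivers a pending
// InterruptedException for Object.wait() if needed.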
2440 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2441   assert(preempt_kind == Continuation::freeze_on_wait || preempt_kind == Continuation::freeze_on_monitorenter, "");
2442   frame top(sp);
2443   assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2444 
2445 #if INCLUDE_JVMTI
2446   // Finish the VTMS transition.
2447   assert(_thread->is_in_VTMS_transition(), "must be");
2448   bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2449   if (is_vthread) {
2450     if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
2451       jvmti_mount_end(_thread, _cont, top);
2452     } else {
2453       _thread->set_is_in_VTMS_transition(false);
2454       java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
2455     }
2456   }
2457 #endif
2458 
2459   if (fast_case) {
    // If we thawed in the slow path, the runtime stub/native wrapper frame already
    // has the correct fp (see ThawBase::new_stack_frame). On the fast path, though,
    // we copied the original fp at the time of freeze, which now has to be fixed.
2463     assert(top.is_runtime_frame() || top.is_native_frame(), "");
2464     int fsize = top.cb()->frame_size();
2465     patch_pd(top, sp + fsize);
2466   }
2467 
2468   if (preempt_kind == Continuation::freeze_on_wait) {
    // Check now if we need to throw an InterruptedException.
2470     if (_thread->pending_interrupted_exception()) {
2471       throw_interrupted_exception(_thread, top);
2472       _thread->set_pending_interrupted_exception(false);
2473     }
2474   } else if (top.is_runtime_frame()) {
2475     // The continuation might now run on a different platform thread than the previous time so
2476     // we need to adjust the current thread saved in the stub frame before restoring registers.
2477     JavaThread** thread_addr = frame::saved_thread_address(top);
2478     if (thread_addr != nullptr) *thread_addr = _thread;
2479   }
2480   return sp;
2481 }
2482 
2483 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2484   ContinuationWrapper::SafepointOp so(current, _cont);
  // Since we might safepoint, set the anchor so that the stack can be walked.
2486   set_anchor(current, top.sp());
2487   JRT_BLOCK
2488     THROW(vmSymbols::java_lang_InterruptedException());
2489   JRT_BLOCK_END
2490   clear_anchor(current);
2491 }
2492 
2493 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
2494   assert(hf.is_interpreted_frame(), "");
2495 
2496   if (UNLIKELY(seen_by_gc())) {
2497     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2498   }
2499 
2500   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2501 
2502   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2503 
  _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2505 
2506   frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2507 
2508   intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2509   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2510   intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2511   intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2512 
2513   assert(hf.is_heap_frame(), "should be");
2514   assert(!f.is_heap_frame(), "should not be");
2515 
2516   const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2517   assert((stack_frame_bottom == stack_frame_top + fsize), "");
2518 
2519   // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
2520   // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
2521   copy_from_chunk(heap_frame_top, stack_frame_top, fsize);
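
     // Sketch of the copied region (illustrative; stack grows down, word-sized slots).
     // The copy spans [heap_frame_top, heap_frame_bottom) in the chunk and lands at
     // [stack_frame_top, stack_frame_bottom) on the stack:
     //
     //   frame_bottom -> +------------------------------+
     //                   | locals                       |  <- locals[0] sits one word
     //                   +------------------------------+     below frame_bottom
     //                   | fixed frame (fp, bcp, ...)   |
     //                   | incl. any alignment padding  |
     //   frame_top    -> +------------------------------+  <- sp + metadata_words_at_top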
2522 
2523   // Make sure the relativized locals pointer is already set.
2524   assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2525 
2526   derelativize_interpreted_frame_metadata(hf, f);
2527   patch(f, caller, is_bottom_frame);
2528 
2529   assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2530   assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2531 
2532   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2533 
2534   maybe_set_fastpath(f.sp());
2535 
2536   Method* m = hf.interpreter_frame_method();
2537   assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2538   const int locals = m->max_locals();
2539 
2540   if (!is_bottom_frame) {
2541     // can only fix the caller once this frame is thawed (due to callee-saved regs)
2542     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2543   } else if (_cont.tail()->has_bitmap() && locals > 0) {
2544     assert(hf.is_heap_frame(), "should be");
2545     address start = (address)(heap_frame_bottom - locals);
2546     address end = (address)heap_frame_bottom;
2547     clear_bitmap_bits(start, end);
2548   }
2549 
2550   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2551   caller = f;
2552 }
2553 
2554 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2555   assert(hf.is_compiled_frame(), "");
2556   assert(_preempted_case || !stub_caller, "stub caller only expected in the preempted case");
2557 
2558   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2559     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2560   }
2561 
2562   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2563 
2564   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2565 
2566   assert(caller.sp() == caller.unextended_sp(), "");
2567 
2568   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2569     _align_size += frame::align_wiggle; // added whether or not we actually aligned, to match recurse_freeze_compiled_frame which adds it unconditionally
2570   }
2571 
2572   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2573   // yet laid out in the stack, and so the original_pc is not stored in it.
2574   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2575   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2576   intptr_t* const stack_frame_top = f.sp();
2577   intptr_t* const heap_frame_top = hf.unextended_sp();
2578 
2579   const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2580   int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
2581   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2582 
2583   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2584   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2585   // copy metadata, except the metadata at the top of the (unextended) entry frame
2586   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2587 
2588   // If we're the bottom-most thawed frame, we're writing to within one word of entrySP
2589   // (we might have one padding word for alignment)
2590   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2591   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz == _cont.entrySP()), "");
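
     // Worked example with illustrative numbers: for a bottom frame whose caller
     // passed two stack arguments,
     //
     //   fsize = CompiledFrame::size(hf) + added_argsize    // frame body + 2 arg slots
     //   sz    = fsize + frame::metadata_words_at_bottom    // metadata below sp
     //                 + frame::metadata_words_at_top       // included since added_argsize != 0
     //
     // whereas a bottom frame with no stack arguments skips the top metadata words,
     // which belong to the (unextended) entry frame.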
2592 
2593   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2594 
2595   patch(f, caller, is_bottom_frame);
2596 
2597   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2598   assert(!f.is_deoptimized_frame(), "");
2599   if (hf.is_deoptimized_frame()) {
2600     maybe_set_fastpath(f.sp());
2601   } else if (_thread->is_interp_only_mode()
2602               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2603     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2604     // cannot rely on nmethod patching for deopt.
2605     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2606 
2607     log_develop_trace(continuations)("Deoptimizing thawed frame");
2608     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2609 
2610     f.deoptimize(nullptr); // passing a null thread simply avoids an assertion in deoptimize that we're not set up to satisfy
2611     assert(f.is_deoptimized_frame(), "");
2612     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2613     maybe_set_fastpath(f.sp());
2614   }
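
     // Note: after the explicit f.deoptimize(nullptr) above, the deoptimized state is
     // encoded entirely in the pc. push_return_frame() asserts the equivalent invariant
     // for compiled frames (simplified):
     //
     //   f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc())
     //                            == (f.pc() != f.raw_pc())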
2615 
2616   if (!is_bottom_frame) {
2617     // can only fix the caller once this frame is thawed (due to callee-saved regs); this happens on the stack
2618     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2619   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2620     address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2621     int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2622     int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2623     clear_bitmap_bits(start, start + argsize_in_bytes);
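         // e.g. with 2 stack argument slots and VMRegImpl::stack_slot_size == 4
         // (illustrative numbers), this clears the bitmap for the 8 bytes of stack
         // arguments sitting just above the frame body in the chunk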
2624   }
2625 
2626   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2627   caller = f;
2628 }
2629 
2630 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2631   DEBUG_ONLY(_frames++;)
2632 
2633   if (UNLIKELY(seen_by_gc())) {
2634     // Process the stub's caller here since we might need the full map.
2635     RegisterMap map(nullptr,
2636                     RegisterMap::UpdateMap::include,
2637                     RegisterMap::ProcessFrames::skip,
2638                     RegisterMap::WalkContinuation::skip);
2639     map.set_include_argument_oops(false);
2640     _stream.next(&map);
2641     assert(!_stream.is_done(), "");
2642     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2643   } else {
2644     _stream.next(SmallRegisterMap::instance());
2645     assert(!_stream.is_done(), "");
2646   }
2647 
2648   recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2649 
2650   assert(caller.is_compiled_frame(), "");
2651   assert(caller.sp() == caller.unextended_sp(), "");
2652 
2653   DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2654 
2655   frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2656   intptr_t* stack_frame_top = f.sp();
2657   intptr_t* heap_frame_top = hf.sp();
2658   int fsize = ContinuationHelper::StubFrame::size(hf);
2659 
2660   copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2661                   fsize + frame::metadata_words);
2662 
2663   patch(f, caller, false /*is_bottom_frame*/);
2664 
2665   // can only fix the caller once this frame is thawed (due to callee-saved regs)
2666   RegisterMap map(nullptr,
2667                   RegisterMap::UpdateMap::include,
2668                   RegisterMap::ProcessFrames::skip,
2669                   RegisterMap::WalkContinuation::skip);
2670   map.set_include_argument_oops(false);
2671   f.oop_map()->update_register_map(&f, &map);
2672   ContinuationHelper::update_register_map_with_callee(caller, &map);
2673   _cont.tail()->fix_thawed_frame(caller, &map);
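
     // Note that unlike the other thaw paths, SmallRegisterMap is not enough here: the
     // stub may have spilled callee-saved registers (possibly holding oops) into its
     // frame, so the caller is fixed using the full map assembled above.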
2674 
2675   DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2676   caller = f;
2677 }
2678 
2679 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2680   assert(hf.is_native_frame(), "");
2681   assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2682 
2683   if (UNLIKELY(seen_by_gc())) {
2684     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2685   }
2686 
2687   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2688   assert(!is_bottom_frame, "");
2689 
2690   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2691 
2692   assert(caller.sp() == caller.unextended_sp(), "");
2693 
2694   if (caller.is_interpreted_frame()) {
2695     _align_size += frame::align_wiggle; // added whether or not we actually aligned, to match recurse_freeze_native_frame which adds it unconditionally
2696   }
2697 
2698   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2699   // yet laid out in the stack, and so the original_pc is not stored in it. Unlike the compiled case there is no
2700   // deoptimization state to recover: native wrapper frames are never deoptimized (asserted below).
2701   frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2702   intptr_t* const stack_frame_top = f.sp();
2703   intptr_t* const heap_frame_top = hf.unextended_sp();
2704 
2705   int fsize = ContinuationHelper::NativeFrame::size(hf);
2706   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2707 
2708   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2709   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2710   int sz = fsize + frame::metadata_words_at_bottom;
2711 
2712   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2713 
2714   patch(f, caller, false /* bottom */);
2715 
2716   // Native wrapper frames are never deoptimized, so both views must agree (see comment above).
2717   assert(!f.is_deoptimized_frame(), "");
2718   assert(!hf.is_deoptimized_frame(), "");
2719   assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2720 
2721   // can only fix the caller once this frame is thawed (due to callee-saved regs); this happens on the stack
2722   _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2723 
2724   DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2725   caller = f;
2726 }
2727 
2728 void ThawBase::finish_thaw(frame& f) {
2729   stackChunkOop chunk = _cont.tail();
2730 
2731   if (chunk->is_empty()) {
2732     // Only remove chunk from list if it can't be reused for another freeze
2733     if (seen_by_gc()) {
2734       _cont.set_tail(chunk->parent());
2735     } else {
2736       chunk->set_has_mixed_frames(false);
2737     }
2738     chunk->set_max_thawing_size(0);
2739   } else {
2740     chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2741   }
2742   assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
2743 
2744   if (!is_aligned(f.sp(), frame::frame_alignment)) {
2745     assert(f.is_interpreted_frame(), "");
2746     f.set_sp(align_down(f.sp(), frame::frame_alignment));
2747   }
2748   push_return_frame(f);
2749   chunk->fix_thawed_frame(f, SmallRegisterMap::instance()); // can only fix the caller after push_return_frame (due to callee-saved regs)
2750 
2751   assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
2752 
2753   log_develop_trace(continuations)("thawed %d frames", _frames);
2754 
2755   LogTarget(Trace, continuations) lt;
2756   if (lt.develop_is_enabled()) {
2757     LogStream ls(lt);
2758     ls.print_cr("top hframe after (thaw):");
2759     _cont.last_frame().print_value_on(&ls);
2760   }
2761 }
2762 
2763 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
2764   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
2765   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
2766 
2767   LogTarget(Trace, continuations) lt;
2768   if (lt.develop_is_enabled()) {
2769     LogStream ls(lt);
2770     ls.print_cr("push_return_frame");
2771     f.print_value_on(&ls);
2772   }
2773 
2774   assert(f.sp() - frame::metadata_words_at_bottom >= _top_stack_address, "overwrote past thawing space"
2775     " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(f.sp() - frame::metadata_words_at_bottom), p2i(_top_stack_address));
2776   ContinuationHelper::Frame::patch_pc(f, f.raw_pc()); // write the raw pc: if we later deopt the frame in a full transition, this is checked
2777   ContinuationHelper::push_pd(f);
2778 
2779   assert(ContinuationHelper::Frame::assert_frame_laid_out(f), "");
2780 }
2781 
2782 // returns new top sp
2783 // called after preparations (stack overflow check and making room)
2784 template<typename ConfigT>
2785 static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind) {
2786   assert(thread == JavaThread::current(), "Must be current thread");
2787 
2788   CONT_JFR_ONLY(EventContinuationThaw event;)
2789 
2790   log_develop_trace(continuations)("~~~~ thaw kind: %d sp: " INTPTR_FORMAT, kind, p2i(thread->last_continuation()->entry_sp()));
2791 
2792   ContinuationEntry* entry = thread->last_continuation();
2793   assert(entry != nullptr, "");
2794   oop oopCont = entry->cont_oop(thread);
2795 
2796   assert(!jdk_internal_vm_Continuation::done(oopCont), "");
2797   assert(oopCont == get_continuation(thread), "");
2798   verify_continuation(oopCont);
2799 
2800   assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
2801 
2802   ContinuationWrapper cont(thread, oopCont);
2803   log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
2804 
2805 #ifdef ASSERT
2806   set_anchor_to_entry(thread, cont.entry());
2807   log_frames(thread);
2808   clear_anchor(thread);
2809 #endif
2810 
2811   DEBUG_ONLY(bool preempted = cont.tail()->preempted();)
2812   Thaw<ConfigT> thw(thread, cont);
2813   intptr_t* const sp = thw.thaw(kind);
2814   assert(is_aligned(sp, frame::frame_alignment), "");
2815   DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp, preempted);)
2816 
2817   CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
2818 
2819   verify_continuation(cont.continuation());
2820   log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
2821 
2822   return sp;
2823 }
2824 
2825 #ifdef ASSERT
2826 static void do_deopt_after_thaw(JavaThread* thread) {
2828   StackFrameStream fst(thread, true, false);
2829   fst.register_map()->set_include_argument_oops(false);
2830   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
2831   for (; !fst.is_done(); fst.next()) {
2832     if (fst.current()->cb()->is_nmethod()) {
2833       nmethod* nm = fst.current()->cb()->as_nmethod();
2834       if (!nm->method()->is_continuation_native_intrinsic()) {
2835         nm->make_deoptimized();
2836       }
2837     }
2838   }
2839 }
2840 
2841 class ThawVerifyOopsClosure: public OopClosure {
2842   intptr_t* _p;
2843   outputStream* _st;
2844   bool is_good_oop(oop o) {
2845     return dbg_is_safe(o, -1) && dbg_is_safe(o->klass(), -1) && oopDesc::is_oop(o) && o->klass()->is_klass();
2846   }
2847 public:
2848   ThawVerifyOopsClosure(outputStream* st) : _p(nullptr), _st(st) {}
2849   intptr_t* p() { return _p; }
2850   void reset() { _p = nullptr; }
2851 
2852   virtual void do_oop(oop* p) {
2853     oop o = *p;
2854     if (o == nullptr || is_good_oop(o)) {
2855       return;
2856     }
2857     _p = (intptr_t*)p;
2858     _st->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(*p), p2i(p));
2859   }
2860   virtual void do_oop(narrowOop* p) {
2861     oop o = RawAccess<>::oop_load(p);
2862     if (o == nullptr || is_good_oop(o)) {
2863       return;
2864     }
2865     _p = (intptr_t*)p;
2866     _st->print_cr("*** (narrow) non-oop %x found at " PTR_FORMAT, (int)(*p), p2i(p));
2867   }
2868 };
2869 
2870 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st) {
2871   assert(thread->has_last_Java_frame(), "");
2872 
2873   ResourceMark rm;
2874   ThawVerifyOopsClosure cl(st);
2875   NMethodToOopClosure cf(&cl, false);
2876 
2877   StackFrameStream fst(thread, true, false);
2878   fst.register_map()->set_include_argument_oops(false);
2879   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
2880   for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) {
2881     if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) {
2882       st->print_cr(">>> do_verify_after_thaw deopt");
2883       fst.current()->deoptimize(nullptr);
2884       fst.current()->print_on(st);
2885     }
2886 
2887     fst.current()->oops_do(&cl, &cf, fst.register_map());
2888     if (cl.p() != nullptr) {
2889       frame fr = *fst.current();
2890       st->print_cr("Failed for frame barriers: %d",chunk->requires_barriers());
2891       fr.print_on(st);
2892       if (!fr.is_interpreted_frame()) {
2893         st->print_cr("size: %d argsize: %d",
2894                      ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
2895                      ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
2896       }
2897       VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
2898       if (reg != nullptr) {
2899         st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
2900       }
2901       cl.reset();
2902       DEBUG_ONLY(thread->print_frame_layout();)
2903       if (chunk != nullptr) {
2904         chunk->print_on(true, st);
2905       }
2906       return false;
2907     }
2908   }
2909   return true;
2910 }
2911 
2912 static void log_frames(JavaThread* thread) {
2913   static const int show_entry_callers = 3;
2914   LogTarget(Trace, continuations) lt;
2915   if (!lt.develop_is_enabled()) {
2916     return;
2917   }
2918   LogStream ls(lt);
2919 
2920   ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
2921   if (!thread->has_last_Java_frame()) {
2922     ls.print_cr("NO ANCHOR!");
2923   }
2924 
2925   RegisterMap map(thread,
2926                   RegisterMap::UpdateMap::include,
2927                   RegisterMap::ProcessFrames::include,
2928                   RegisterMap::WalkContinuation::skip);
2929   map.set_include_argument_oops(false);
2930 
2931   if (false) { // flip to true to print raw frames instead of the annotated FrameValues view below
2932     for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
2933       f.print_on(&ls);
2934     }
2935   } else {
2936     map.set_skip_missing(true);
2937     ResetNoHandleMark rnhm;
2938     ResourceMark rm;
2939     HandleMark hm(Thread::current());
2940     FrameValues values;
2941 
2942     int i = 0;
2943     int post_entry = -1;
2944     for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
2945       f.describe(values, i, &map, i == 0);
2946       if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
2947         post_entry++;
2948       if (post_entry >= show_entry_callers)
2949         break;
2950     }
2951     values.print_on(thread, &ls);
2952   }
2953 
2954   ls.print_cr("======= end frames =========");
2955 }
2956 
2957 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted) {
2958   intptr_t* sp0 = sp;
2960 
2961   if (preempted && sp0 == cont.entrySP()) {
2962     // Still preempted (monitor not acquired), so no frames were thawed.
2963     assert(cont.tail()->preempted(), "");
2964     set_anchor(thread, cont.entrySP(), cont.entryPC());
2965   } else {
2966     set_anchor(thread, sp0);
2967   }
2968 
2969   log_frames(thread);
2970   if (LoomVerifyAfterThaw) {
2971     assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
2972   }
2973   assert(ContinuationEntry::assert_entry_frame_laid_out(thread), "");
2974   clear_anchor(thread);
2975 
2976   LogTarget(Trace, continuations) lt;
2977   if (lt.develop_is_enabled()) {
2978     LogStream ls(lt);
2979     ls.print_cr("Jumping to frame (thaw):");
2980     frame(sp).print_value_on(&ls);
2981   }
2982 }
2983 #endif // ASSERT
2984 
2985 #include CPU_HEADER_INLINE(continuationFreezeThaw)
2986 
2987 #ifdef ASSERT
2988 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
2989   ResourceMark rm;
2990   FrameValues values;
2991   assert(f.get_cb() != nullptr, "");
2992   RegisterMap map(f.is_heap_frame() ?
2993                     nullptr :
2994                     JavaThread::current(),
2995                   RegisterMap::UpdateMap::include,
2996                   RegisterMap::ProcessFrames::skip,
2997                   RegisterMap::WalkContinuation::skip);
2998   map.set_include_argument_oops(false);
2999   map.set_skip_missing(true);
3000   if (callee_complete) {
3001     frame::update_map_with_saved_link(&map, ContinuationHelper::Frame::callee_link_address(f));
3002   }
3003   const_cast<frame&>(f).describe(values, 0, &map, true);
3004   values.print_on(static_cast<JavaThread*>(nullptr), st);
3005 }
3006 #endif
3007 
3008 static address thaw_entry   = nullptr;
3009 static address freeze_entry = nullptr;
3010 static address freeze_preempt_entry = nullptr;
3011 
3012 address Continuation::thaw_entry() {
3013   return ::thaw_entry;
3014 }
3015 
3016 address Continuation::freeze_entry() {
3017   return ::freeze_entry;
3018 }
3019 
3020 address Continuation::freeze_preempt_entry() {
3021   return ::freeze_preempt_entry;
3022 }
3023 
3024 class ConfigResolve {
3025 public:
3026   static void resolve() { resolve_compressed(); }
3027 
3028   static void resolve_compressed() {
3029     UseCompressedOops ? resolve_gc<true>()
3030                       : resolve_gc<false>();
3031   }
3032 
3033 private:
3034   template <bool use_compressed>
3035   static void resolve_gc() {
3036     BarrierSet* bs = BarrierSet::barrier_set();
3037     assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set");
3038     switch (bs->kind()) {
3039 #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
3040       case BarrierSet::bs_name: {                                       \
3041         resolve<use_compressed, typename BarrierSet::GetType<BarrierSet::bs_name>::type>(); \
3042       }                                                                 \
3043         break;
3044       FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
3045 #undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
3046 
3047     default:
3048       fatal("BarrierSet resolving not implemented");
3049     }
3050   }
3051 
3052   template <bool use_compressed, typename BarrierSetT>
3053   static void resolve() {
3054     typedef Config<use_compressed ? oop_kind::NARROW : oop_kind::WIDE, BarrierSetT> SelectedConfigT;
3055 
3056     freeze_entry = (address)freeze<SelectedConfigT>;
3057     freeze_preempt_entry = (address)SelectedConfigT::freeze_preempt;
3058 
3059     // If we wanted, we could templatize by kind and have three different thaw entries
3060     thaw_entry   = (address)thaw<SelectedConfigT>;
3061   }
3062 };
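
     // Illustrative expansion (assuming, say, G1 with compressed oops): the two nested
     // dispatches above stamp out a single specialized configuration at VM init time,
     // equivalent to
     //
     //   using SelectedConfigT = Config<oop_kind::NARROW, G1BarrierSet>;
     //   freeze_entry         = (address)freeze<SelectedConfigT>;
     //   freeze_preempt_entry = (address)SelectedConfigT::freeze_preempt;
     //   thaw_entry           = (address)thaw<SelectedConfigT>;
     //
     // so the hot freeze/thaw paths never test UseCompressedOops or the barrier set
     // kind at runtime.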
3063 
3064 void Continuation::init() {
3065   ConfigResolve::resolve();
3066 }