/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.inline.hpp"
#include "code/nmethod.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "cppstdlib/type_traits.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "oops/access.inline.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/continuationHelper.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/mountUnmountDisabler.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ZGC
#include "gc/z/zStackChunkGCData.inline.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

/*
 * This file contains the implementation of continuation freezing (yield) and thawing (run).
 *
 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
 * would likely call these operations many thousands of times per second, on every core.
 *
 * Freeze might be called every time the application performs any I/O operation, every time it
 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
 * multiple times in each of those cases, as it is called by the return barrier, which may be
 * invoked on method return.
 *
 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
 * example, every effort is made to avoid Java-VM transitions as much as possible.
 *
 * On the fast path, all frames are known to be compiled, and the chunk requires no barriers,
 * so frames are simply copied and the bottom-most one is patched.
 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
 * and absolute pointers, and barriers are invoked.
 */
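
/*
 * Rough call-path sketch (informal; the names are the entry points and helpers
 * defined later in this file):
 *
 *   freeze (yield): gen_continuation_yield() stub (sharedRuntime_<cpu>.cpp)
 *     -> freeze<ConfigT>(current, sp)                  [JRT_BLOCK_ENTRY]
 *       -> ConfigT::freeze -> freeze_internal<ConfigT, false>
 *         -> Freeze::try_freeze_fast or FreezeBase::freeze_slow
 *
 *   thaw (run): Continuation.run / the return barrier
 *     -> Continuation::prepare_thaw -> prepare_thaw_internal
 *     -> thaw<ConfigT>(thread, kind)                   [JRT_LEAF]
 *       -> ConfigT::thaw -> thaw_internal<ConfigT>
 */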

/************************************************

Thread-stack layout on freeze/thaw.
See corresponding stack-chunk layout in instanceStackChunkKlass.hpp

            +----------------------------+
            |      .                     |
            |      .                     |
            |      .                     |
            |   carrier frames           |
            |                            |
            |----------------------------|
            |                            |
            |    Continuation.run        |
            |                            |
            |============================|
            |    enterSpecial frame      |
            |  pc                        |
            |  rbp                       |
            |  -----                     |
        ^   |  int argsize               | = ContinuationEntry
        |   |  oopDesc* cont             |
        |   |  oopDesc* chunk            |
        |   |  ContinuationEntry* parent |
        |   |  ...                       |
        |   |============================| <------ JavaThread::_cont_entry = entry->sp()
        |   |  ? alignment word ?        |
        |   |----------------------------| <--\
        |   |                            |    |
        |   |  ? caller stack args ?     |    |   argsize (might not be 2-word aligned) words
Address |   |                            |    |   Caller is still in the chunk.
        |   |----------------------------|    |
        |   |  pc (? return barrier ?)   |    |  This pc contains the return barrier when the bottom-most frame
        |   |  rbp                       |    |  isn't the last one in the continuation.
        |   |                            |    |
        |   |    frame                   |    |
        |   |                            |    |
            +----------------------------|     \__ Continuation frames to be frozen/thawed
            |                            |     /
            |    frame                   |    |
            |                            |    |
            |----------------------------|    |
            |                            |    |
            |    frame                   |    |
            |                            |    |
            |----------------------------| <--/
            |                            |
            |    doYield/safepoint stub  | When preempting forcefully, we could have a safepoint stub
            |                            | instead of a doYield stub
            |============================| <- the sp passed to freeze
            |                            |
            |  Native freeze/thaw frames |
            |      .                     |
            |      .                     |
            |      .                     |
            +----------------------------+

************************************************/

static const bool TEST_THAW_ONE_CHUNK_FRAME = false; // force thawing frames one-at-a-time for testing

#define CONT_JFR false // emit low-level JFR events that count slow/fast path for continuation performance debugging only
#if CONT_JFR
  #define CONT_JFR_ONLY(code) code
#else
  #define CONT_JFR_ONLY(code)
#endif

// TODO: See AbstractAssembler::generate_stack_overflow_check,
// Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
// when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.

// Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk

// Used just to annotate cold/hot branches
#define LIKELY(condition)   (condition)
#define UNLIKELY(condition) (condition)

// debugging functions
#ifdef ASSERT
extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue

static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }

static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
static void log_frames(JavaThread* thread);
static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
static void verify_frame_kind(frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr = nullptr, const char** code_name_ptr = nullptr, int* bci_ptr = nullptr, stackChunkOop chunk = nullptr);

#define assert_pfl(p, ...) \
do {                                           \
  if (!(p)) {                                  \
    JavaThread* t = JavaThread::active();      \
    if (t->has_last_Java_frame()) {            \
      tty->print_cr("assert(" #p ") failed:"); \
      t->print_frame_layout();                 \
    }                                          \
  }                                            \
  vmassert(p, __VA_ARGS__);                    \
} while(0)

#else
static void verify_continuation(oop continuation) { }
#define assert_pfl(p, ...)
#endif

static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);

static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier);
template<typename ConfigT> static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind);


// Entry point to freeze. Transitions are handled manually
// Called from gen_continuation_yield() in sharedRuntime_<cpu>.cpp through Continuation::freeze_entry();
template<typename ConfigT>
static JRT_BLOCK_ENTRY(int, freeze(JavaThread* current, intptr_t* sp))
  assert(sp == current->frame_anchor()->last_Java_sp(), "");

  if (current->raw_cont_fastpath() > current->last_continuation()->entry_sp() || current->raw_cont_fastpath() < sp) {
    current->set_cont_fastpath(nullptr);
  }

  return checked_cast<int>(ConfigT::freeze(current, sp));
JRT_END

JRT_LEAF(int, Continuation::prepare_thaw(JavaThread* thread, bool return_barrier))
  return prepare_thaw_internal(thread, return_barrier);
JRT_END

template<typename ConfigT>
static JRT_LEAF(intptr_t*, thaw(JavaThread* thread, int kind))
  // TODO: JRT_LEAF and NoHandleMark is problematic for JFR events.
  // vFrameStreamCommon allocates Handles in RegisterMap for continuations.
  // Also the preemption case with JVMTI events enabled might safepoint so
  // undo the NoSafepointVerifier here and rely on handling by ContinuationWrapper.
  // JRT_ENTRY instead?
  ResetNoHandleMark rnhm;
  DEBUG_ONLY(PauseNoSafepointVerifier pnsv(&__nsv);)

  // we might modify the code cache via BarrierSetNMethod::nmethod_entry_barrier
  MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));
  return ConfigT::thaw(thread, (Continuation::thaw_kind)kind);
JRT_END

JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
  JavaThread* thread = JavaThread::thread_from_jni_environment(env);
  return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
}
JVM_END

///////////

enum class oop_kind { NARROW, WIDE };
template <oop_kind oops, typename BarrierSetT>
class Config {
public:
  typedef Config<oops, BarrierSetT> SelfT;
  using OopT = std::conditional_t<oops == oop_kind::NARROW, narrowOop, oop>;

  static freeze_result freeze(JavaThread* thread, intptr_t* const sp) {
    freeze_result res = freeze_internal<SelfT, false>(thread, sp);
    JFR_ONLY(assert((res == freeze_ok) || (res == thread->last_freeze_fail_result()), "freeze failure not set"));
    return res;
  }

  static freeze_result freeze_preempt(JavaThread* thread, intptr_t* const sp) {
    return freeze_internal<SelfT, true>(thread, sp);
  }

  static intptr_t* thaw(JavaThread* thread, Continuation::thaw_kind kind) {
    return thaw_internal<SelfT>(thread, kind);
  }
};

#ifdef _WINDOWS
static void map_stack_pages(JavaThread* thread, size_t size, address sp) {
  address new_sp = sp - size;
  address watermark = thread->stack_overflow_state()->shadow_zone_growth_watermark();

  if (new_sp < watermark) {
    size_t page_size = os::vm_page_size();
    address last_touched_page = watermark - StackOverflow::stack_shadow_zone_size();
    size_t pages_to_touch = align_up(watermark - new_sp, page_size) / page_size;
    while (pages_to_touch-- > 0) {
      last_touched_page -= page_size;
      *last_touched_page = 0;
    }
    thread->stack_overflow_state()->set_shadow_zone_growth_watermark(new_sp);
  }
}
#endif
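
// Worked example for map_stack_pages() above (hypothetical numbers): with 4K
// pages, if new_sp lands three pages below the current
// shadow_zone_growth_watermark, pages_to_touch is 3 and the loop writes one
// zero byte per page, walking downward, before the watermark is lowered to
// new_sp so that a later call skips the already-mapped range.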

static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
  const size_t page_size = os::vm_page_size();
  if (size > page_size) {
    if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
      return false;
    }
    WINDOWS_ONLY(map_stack_pages(thread, size, sp));
  }
  return true;
}

#ifdef ASSERT
static oop get_continuation(JavaThread* thread) {
  assert(thread != nullptr, "");
  assert(thread->threadObj() != nullptr, "");
  return java_lang_Thread::continuation(thread->threadObj());
}
#endif // ASSERT

inline void clear_anchor(JavaThread* thread) {
  thread->frame_anchor()->clear();
}

static void set_anchor(JavaThread* thread, intptr_t* sp, address pc) {
  assert(pc != nullptr, "");

  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(sp);
  anchor->set_last_Java_pc(pc);
  ContinuationHelper::set_anchor_pd(anchor, sp);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

static void set_anchor(JavaThread* thread, intptr_t* sp) {
  address pc = ContinuationHelper::return_address_at(
           sp - frame::sender_sp_ret_address_offset());
  set_anchor(thread, sp, pc);
}

static void set_anchor_to_entry(JavaThread* thread, ContinuationEntry* entry) {
  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(entry->entry_sp());
  anchor->set_last_Java_pc(entry->entry_pc());
  ContinuationHelper::set_anchor_to_entry_pd(anchor, entry);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

#if CONT_JFR
class FreezeThawJfrInfo : public StackObj {
  short _e_size;
  short _e_num_interpreted_frames;
 public:

  FreezeThawJfrInfo() : _e_size(0), _e_num_interpreted_frames(0) {}
  inline void record_interpreted_frame() { _e_num_interpreted_frames++; }
  inline void record_size_copied(int size) { _e_size += size << LogBytesPerWord; }
  template<typename Event> void post_jfr_event(Event *e, oop continuation, JavaThread* jt);
};

template<typename Event> void FreezeThawJfrInfo::post_jfr_event(Event* e, oop continuation, JavaThread* jt) {
  if (e->should_commit()) {
    log_develop_trace(continuations)("JFR event: iframes: %d size: %d", _e_num_interpreted_frames, _e_size);
    e->set_carrierThread(JFR_JVM_THREAD_ID(jt));
    e->set_continuationClass(continuation->klass());
    e->set_interpretedFrames(_e_num_interpreted_frames);
    e->set_size(_e_size);
    e->commit();
  }
}
#endif // CONT_JFR

/////////////// FREEZE ////

class FreezeBase : public StackObj {
protected:
  JavaThread* const _thread;
  ContinuationWrapper& _cont;
  bool _barriers; // only set when we allocate a chunk

  intptr_t* _bottom_address;

  // Used for preemption only
  const bool _preempt;
  frame _last_frame;

  // Used to support freezing with held monitors
  int _monitors_in_lockstack;

  int _freeze_size; // total size of all frames plus metadata in words.
  int _total_align_size;

  intptr_t* _cont_stack_top;
  intptr_t* _cont_stack_bottom;

  CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)

#ifdef ASSERT
  intptr_t* _orig_chunk_sp;
  int _fast_freeze_size;
  bool _empty;
#endif

  JvmtiSampledObjectAllocEventCollector* _jvmti_event_collector;

  NOT_PRODUCT(int _frames;)
  DEBUG_ONLY(intptr_t* _last_write;)

  inline FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempt);

public:
  NOINLINE freeze_result freeze_slow();
  void freeze_fast_existing_chunk();

  CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
  void set_jvmti_event_collector(JvmtiSampledObjectAllocEventCollector* jsoaec) { _jvmti_event_collector = jsoaec; }

  inline int size_if_fast_freeze_available();

  inline frame& last_frame() { return _last_frame; }

#ifdef ASSERT
  bool check_valid_fast_path();
#endif

protected:
  inline void init_rest();
  void throw_stack_overflow_on_humongous_chunk();

  // fast path
  inline void copy_to_chunk(intptr_t* from, intptr_t* to, int size);
  inline void unwind_frames();
  inline void patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp);

  // slow path
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) = 0;

  int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }

private:
  // slow path
  frame freeze_start_frame();
  frame freeze_start_frame_on_preempt();
  NOINLINE freeze_result recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top);
  inline frame freeze_start_frame_yield_stub();
  template<typename FKind>
  inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
  inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
  inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
  freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
  void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
  NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
  NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
  NOINLINE void finish_freeze(const frame& f, const frame& top);

  void freeze_lockstack(stackChunkOop chunk);

  inline bool stack_overflow();

  static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
                                                                        : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
  template<typename FKind> static inline frame sender(const frame& f);
  template<typename FKind> frame new_heap_frame(frame& f, frame& caller, int size_adjust = 0);
  inline void set_top_frame_metadata_pd(const frame& hf);
  inline void patch_pd(frame& callee, const frame& caller, bool is_bottom_frame);
  inline void patch_pd_unused(intptr_t* sp);
  void adjust_interpreted_frame_unextended_sp(frame& f);
  inline void prepare_freeze_interpreted_top_frame(frame& f);
  static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);

protected:
  void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
  bool freeze_fast_new_chunk(stackChunkOop chunk);
};

template <typename ConfigT>
class Freeze : public FreezeBase {
private:
  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

public:
  inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
    : FreezeBase(thread, cont, frame_sp, preempt) {}

  freeze_result try_freeze_fast();

protected:
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) override { return allocate_chunk(stack_size, argsize_md); }
};

FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt) :
    _thread(thread), _cont(cont), _barriers(false), _preempt(preempt), _last_frame(false /* no initialization */) {
  DEBUG_ONLY(_jvmti_event_collector = nullptr;)

  assert(_thread != nullptr, "");
  assert(_thread->last_continuation()->entry_sp() == _cont.entrySP(), "");

  DEBUG_ONLY(_cont.entry()->verify_cookie();)

  assert(!Interpreter::contains(_cont.entryPC()), "");

  _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
#ifdef _LP64
  if (((intptr_t)_bottom_address & 0xf) != 0) {
    _bottom_address--;
  }
  assert(is_aligned(_bottom_address, frame::frame_alignment), "");
#endif

  log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
                p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
  assert(_bottom_address != nullptr, "");
  assert(_bottom_address <= _cont.entrySP(), "");
  DEBUG_ONLY(_last_write = nullptr;)

  assert(_cont.chunk_invariant(), "");
  assert(!Interpreter::contains(_cont.entryPC()), "");
#if !defined(PPC64) || defined(ZERO)
  static const int doYield_stub_frame_size = frame::metadata_words;
#else
  static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
#endif
  // With preemption, doYield() might not have been resolved yet
  assert(_preempt || SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");

  if (preempt) {
    _last_frame = _thread->last_frame();
  }

  // properties of the continuation on the stack; all sizes are in words
  _cont_stack_top    = frame_sp + (!preempt ? doYield_stub_frame_size : 0); // we don't freeze the doYield stub frame
  _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
      - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw

  log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  assert(cont_size() > 0, "");

  _monitors_in_lockstack = _thread->lock_stack().monitor_count();
}

void FreezeBase::init_rest() { // we want to postpone some initialization until after chunk handling
  _freeze_size = 0;
  _total_align_size = 0;
  NOT_PRODUCT(_frames = 0;)
}

void FreezeBase::freeze_lockstack(stackChunkOop chunk) {
  assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "no room for lockstack");

  _thread->lock_stack().move_to_address((oop*)chunk->start_address());
  chunk->set_lockstack_size(checked_cast<uint8_t>(_monitors_in_lockstack));
  chunk->set_has_lockstack(true);
}

void FreezeBase::copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
  stackChunkOop chunk = _cont.tail();
  chunk->copy_from_stack_to_chunk(from, to, size);
  CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)

#ifdef ASSERT
  if (_last_write != nullptr) {
    assert(_last_write == to + size, "Missed a spot: _last_write: " INTPTR_FORMAT " to+size: " INTPTR_FORMAT
        " stack_size: %d _last_write offset: " PTR_FORMAT " to+size: " PTR_FORMAT, p2i(_last_write), p2i(to+size),
        chunk->stack_size(), _last_write-chunk->start_address(), to+size-chunk->start_address());
    _last_write = to;
  }
#endif
}

static void assert_frames_in_continuation_are_safe(JavaThread* thread) {
#ifdef ASSERT
  StackWatermark* watermark = StackWatermarkSet::get(thread, StackWatermarkKind::gc);
  if (watermark == nullptr) {
    return;
  }
  ContinuationEntry* ce = thread->last_continuation();
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip);
  map.set_include_argument_oops(false);
  for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
    watermark->assert_is_frame_safe(f);
  }
#endif // ASSERT
}

// Called _after_ the last possible safepoint during the freeze operation (chunk allocation)
void FreezeBase::unwind_frames() {
  ContinuationEntry* entry = _cont.entry();
  entry->flush_stack_processing(_thread);
  assert_frames_in_continuation_are_safe(_thread);
  JFR_ONLY(Jfr::check_and_process_sample_request(_thread);)
  set_anchor_to_entry(_thread, entry);
}

template <typename ConfigT>
freeze_result Freeze<ConfigT>::try_freeze_fast() {
  assert(_thread->thread_state() == _thread_in_vm, "");
  assert(_thread->cont_fastpath(), "");

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size == 0, "");

  stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words + _monitors_in_lockstack, _cont.argsize() + frame::metadata_words_at_top);
  if (freeze_fast_new_chunk(chunk)) {
    return freeze_ok;
  }
  if (_thread->has_pending_exception()) {
    return freeze_exception;
  }

  // TODO R REMOVE when deopt change is fixed
  assert(!_thread->cont_fastpath() || _barriers, "");
  log_develop_trace(continuations)("-- RETRYING SLOW --");
  return freeze_slow();
}

// Returns size needed if the continuation fits, otherwise 0.
int FreezeBase::size_if_fast_freeze_available() {
  stackChunkOop chunk = _cont.tail();
  if (chunk == nullptr || chunk->is_gc_mode() || chunk->requires_barriers() || chunk->has_mixed_frames()) {
    log_develop_trace(continuations)("chunk not available: %s", chunk == nullptr ? "no chunk" : "chunk is in gc mode, requires barriers, or has mixed frames");
    return 0;
  }

  int total_size_needed = cont_size();
  const int chunk_sp = chunk->sp();

  // argsize can be nonzero if we have a caller, but the caller could be in a non-empty parent chunk,
  // so we subtract it only if we overlap with the caller, i.e. the current chunk isn't empty.
  // Consider leaving the chunk's argsize set when emptying it and removing the following branch,
  // although that would require changing stackChunkOopDesc::is_empty
  if (!chunk->is_empty()) {
    total_size_needed -= _cont.argsize() + frame::metadata_words_at_top;
  }

  total_size_needed += _monitors_in_lockstack;

  int chunk_free_room = chunk_sp - frame::metadata_words_at_bottom;
  bool available = chunk_free_room >= total_size_needed;
  log_develop_trace(continuations)("chunk available: %s size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    available ? "yes" : "no" , total_size_needed, _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  return available ? total_size_needed : 0;
}
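
// Worked example for size_if_fast_freeze_available() (hypothetical numbers, in
// words): cont_size() = 100, _cont.argsize() = 2, frame::metadata_words_at_top = 2,
// and one monitor in the lock-stack. Freezing into a non-empty chunk overlaps
// the caller's argument area, so total_size_needed = 100 - (2 + 2) + 1 = 97,
// and the fast path is available if chunk->sp() - frame::metadata_words_at_bottom >= 97.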

void FreezeBase::freeze_fast_existing_chunk() {
  stackChunkOop chunk = _cont.tail();

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size > 0, "");

  if (!chunk->is_empty()) { // we are copying into a non-empty chunk
    DEBUG_ONLY(_empty = false;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (chunk->sp_address()
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
             "unexpected saved return address");
    }
#endif

    // the chunk's sp before the freeze, adjusted to point beyond the stack-passed arguments in the topmost frame
    // we overlap; we'll overwrite the chunk's top frame's callee arguments
    const int chunk_start_sp = chunk->sp() + _cont.argsize() + frame::metadata_words_at_top;
    assert(chunk_start_sp <= chunk->stack_size(), "sp not pointing into stack");

    // increase max_size by what we're freezing minus the overlap
    chunk->set_max_thawing_size(chunk->max_thawing_size() + cont_size() - _cont.argsize() - frame::metadata_words_at_top);

    intptr_t* const bottom_sp = _cont_stack_bottom - _cont.argsize() - frame::metadata_words_at_top;
    assert(bottom_sp == _bottom_address, "");
    // Because the chunk isn't empty, we know there's a caller in the chunk, therefore the bottom-most frame
    // should have a return barrier (installed back when we thawed it).
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (bottom_sp
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot)
             == StubRoutines::cont_returnBarrier(),
             "should be the continuation return barrier");
    }
#endif
    // We copy the fp from the chunk back to the stack because it contains some caller data,
    // including, possibly, an oop that might have gone stale since we thawed.
    patch_stack_pd(bottom_sp, chunk->sp_address());
    // we don't patch the return pc at this time, so as not to make the stack unwalkable for async walks

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  } else { // the chunk is empty
    const int chunk_start_sp = chunk->stack_size();

    DEBUG_ONLY(_empty = true;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

    chunk->set_max_thawing_size(cont_size());
    chunk->set_bottom(chunk_start_sp - _cont.argsize() - frame::metadata_words_at_top);
    chunk->set_sp(chunk->bottom());

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  }
}
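
// Continuing the example above (hypothetical numbers, in words): in the
// non-empty case with chunk->sp() = 50, _cont.argsize() = 2 and
// frame::metadata_words_at_top = 2, chunk_start_sp = 50 + 2 + 2 = 54, so the
// copy starts high enough to overwrite the callee-argument area shared with
// the chunk's current top frame, and max_thawing_size grows by
// cont_size() - 4 rather than by the full cont_size().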

bool FreezeBase::freeze_fast_new_chunk(stackChunkOop chunk) {
  DEBUG_ONLY(_empty = true;)

  // Install new chunk
  _cont.set_tail(chunk);

  if (UNLIKELY(chunk == nullptr || !_thread->cont_fastpath() || _barriers)) { // OOME/probably humongous
    log_develop_trace(continuations)("Retrying slow. Barriers: %d", _barriers);
    return false;
  }

  chunk->set_max_thawing_size(cont_size());

  // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
  // They'll then be stored twice: in the chunk and in the parent chunk's top frame
  const int chunk_start_sp = cont_size() + frame::metadata_words + _monitors_in_lockstack;
  assert(chunk_start_sp == chunk->stack_size(), "");

  DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

  freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA true));

  return true;
}

void FreezeBase::freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated)) {
  assert(chunk != nullptr, "");
  assert(!chunk->has_mixed_frames(), "");
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  assert(!chunk->requires_barriers(), "");
  assert(chunk == _cont.tail(), "");

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation on the stack, or a consistent chunk.
  unwind_frames();

  log_develop_trace(continuations)("freeze_fast start: chunk " INTPTR_FORMAT " size: %d orig sp: %d argsize: %d",
    p2i((oopDesc*)chunk), chunk->stack_size(), chunk_start_sp, _cont.argsize());
  assert(chunk_start_sp <= chunk->stack_size(), "");
  assert(chunk_start_sp >= cont_size(), "no room in the chunk");

  const int chunk_new_sp = chunk_start_sp - cont_size(); // the chunk's new sp, after freeze
  assert(!(_fast_freeze_size > 0) || (_orig_chunk_sp - (chunk->start_address() + chunk_new_sp)) == (_fast_freeze_size - _monitors_in_lockstack), "");

  intptr_t* chunk_top = chunk->start_address() + chunk_new_sp;
#ifdef ASSERT
  if (!_empty) {
    intptr_t* retaddr_slot = (_orig_chunk_sp
                              - frame::sender_sp_ret_address_offset());
    assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
           "unexpected saved return address");
  }
#endif

  log_develop_trace(continuations)("freeze_fast start: " INTPTR_FORMAT " sp: %d chunk_top: " INTPTR_FORMAT,
                              p2i(chunk->start_address()), chunk_new_sp, p2i(chunk_top));

  int adjust = frame::metadata_words_at_bottom;
#if INCLUDE_ASAN && defined(AARCH64)
  // Reading at offset frame::metadata_words_at_bottom from _cont_stack_top
  // will access memory at the callee frame, which in preemption cases will
  // be the VM native method being called. The Arm 64-bit ABI doesn't specify
  // a location where the frame record (returnpc+fp) has to be stored within
  // a stack frame, and GCC currently chooses to save it at the top of the
  // frame (lowest address). ASan treats this memory access in the callee as
  // an overflow access to one of the locals stored in that frame. For these
  // preemption cases we don't need to read these words anyway, so we avoid it.
  if (_preempt) {
    adjust = 0;
  }
#endif
  intptr_t* from = _cont_stack_top - adjust;
  intptr_t* to   = chunk_top - adjust;
  copy_to_chunk(from, to, cont_size() + adjust);
  // Because we're not patched yet, the chunk is now in a bad state

  // patch return pc of the bottom-most frozen frame (now in the chunk)
  // with the actual caller's return address
  intptr_t* chunk_bottom_retaddr_slot = (chunk_top + cont_size()
                                         - _cont.argsize()
                                         - frame::metadata_words_at_top
                                         - frame::sender_sp_ret_address_offset());
#ifdef ASSERT
  if (!_empty) {
    assert(ContinuationHelper::return_address_at(chunk_bottom_retaddr_slot)
           == StubRoutines::cont_returnBarrier(),
           "should be the continuation return barrier");
  }
#endif
  ContinuationHelper::patch_return_address_at(chunk_bottom_retaddr_slot,
                                              chunk->pc());

  // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
  chunk->set_sp(chunk_new_sp);

  // set chunk->pc to the return address of the topmost frame in the chunk
  if (_preempt) {
    // On aarch64/riscv64, the return pc of the top frame won't necessarily be at sp[-1].
    // Also, on x64, if the top frame is the native wrapper frame, sp[-1] will not
    // be the pc we used when creating the oopmap. Get the top's frame last pc from
    // the anchor instead.
    address last_pc = _last_frame.pc();
    ContinuationHelper::patch_return_address_at(chunk_top - frame::sender_sp_ret_address_offset(), last_pc);
    chunk->set_pc(last_pc);
    // For stub/native frames the fp is not used while frozen, and will be constructed
    // again when thawing the frame (see ThawBase::handle_preempted_continuation). We
    // patch it with a special bad address to help with debugging, particularly when
    // inspecting frames and identifying invalid accesses.
    patch_pd_unused(chunk_top);
  } else {
    chunk->set_pc(ContinuationHelper::return_address_at(
                  _cont_stack_top - frame::sender_sp_ret_address_offset()));
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  _cont.write();

  log_develop_trace(continuations)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    chunk->print_on(true, &ls);
  }

  // Verification
  assert(_cont.chunk_invariant(), "");
  chunk->verify();

#if CONT_JFR
  EventContinuationFreezeFast e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(chunk));
    DEBUG_ONLY(e.set_allocate(chunk_is_allocated);)
    e.set_size(cont_size() << LogBytesPerWord);
    e.commit();
  }
#endif
}

NOINLINE freeze_result FreezeBase::freeze_slow() {
#ifdef ASSERT
  ResourceMark rm;
#endif

  log_develop_trace(continuations)("freeze_slow  #" INTPTR_FORMAT, _cont.hash());
  assert(_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");

#if CONT_JFR
  EventContinuationFreezeSlow e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(_cont.continuation()));
    e.commit();
  }
#endif

  init_rest();

  HandleMark hm(Thread::current());

  frame f = freeze_start_frame();

  LogTarget(Debug, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    f.print_on(&ls);
  }

  frame caller; // the frozen caller in the chunk
  freeze_result res = recurse_freeze(f, caller, 0, false, true);

  if (res == freeze_ok) {
    finish_freeze(f, caller);
    _cont.write();
  }

  return res;
}

frame FreezeBase::freeze_start_frame() {
  if (LIKELY(!_preempt)) {
    return freeze_start_frame_yield_stub();
  } else {
    return freeze_start_frame_on_preempt();
  }
}

frame FreezeBase::freeze_start_frame_yield_stub() {
  frame f = _thread->last_frame();
  assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
  f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
  return f;
}

frame FreezeBase::freeze_start_frame_on_preempt() {
  assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
  return _last_frame;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
  assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
  assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
         || ((top && _preempt) == f.is_native_frame()), "");

  if (stack_overflow()) {
    return freeze_exception;
  }

  if (f.is_compiled_frame()) {
    if (UNLIKELY(f.oop_map() == nullptr)) {
      // special native frame
      return freeze_pinned_native;
    }
    return recurse_freeze_compiled_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (f.is_interpreted_frame()) {
    assert(!f.interpreter_frame_method()->is_native() || (top && _preempt), "");
    return recurse_freeze_interpreted_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (top && _preempt) {
    assert(f.is_native_frame() || f.is_runtime_frame(), "");
    return f.is_native_frame() ? recurse_freeze_native_frame(f, caller) : recurse_freeze_stub_frame(f, caller);
  } else {
    // Frame can't be frozen. Most likely this is the call_stub or upcall_stub,
    // which indicates there are further native frames up the stack.
    return freeze_pinned_native;
  }
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
template<typename FKind>
inline freeze_result FreezeBase::recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize) {
  assert(FKind::is_instance(f), "");

  assert(fsize > 0, "");
  assert(argsize >= 0, "");
  _freeze_size += fsize;
  NOT_PRODUCT(_frames++;)

  assert(FKind::frame_bottom(f) <= _bottom_address, "");

  // We don't use FKind::frame_bottom(f) == _bottom_address because on x64 there's sometimes an extra word between
  // enterSpecial and an interpreted frame
  if (FKind::frame_bottom(f) >= _bottom_address - 1) {
    return finalize_freeze(f, caller, argsize); // recursion end
  } else {
    frame senderf = sender<FKind>(f);
    assert(FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
    freeze_result result = recurse_freeze(senderf, caller, argsize, FKind::interpreted, false); // recursive call
    return result;
  }
}

inline void FreezeBase::before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("======== FREEZING FRAME interpreted: %d bottom: %d", f.is_interpreted_frame(), is_bottom_frame);
    ls.print_cr("fsize: %d argsize: %d", fsize, argsize);
    f.print_value_on(&ls);
  }
  assert(caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
}

inline void FreezeBase::after_freeze_java_frame(const frame& hf, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    DEBUG_ONLY(hf.print_value_on(&ls);)
    assert(hf.is_heap_frame(), "should be");
    DEBUG_ONLY(print_frame_layout(hf, false, &ls);)
    if (is_bottom_frame) {
      ls.print_cr("bottom h-frame:");
      hf.print_on(&ls);
    }
  }
}

// The parameter argsize_md includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, int argsize_md) {
  int argsize = argsize_md - frame::metadata_words_at_top;
  assert(callee.is_interpreted_frame()
    || ContinuationHelper::Frame::is_stub(callee.cb())
    || callee.cb()->as_nmethod()->is_osr_method()
    || argsize == _cont.argsize(), "argsize: %d cont.argsize: %d", argsize, _cont.argsize());
  log_develop_trace(continuations)("bottom: " INTPTR_FORMAT " count %d size: %d argsize: %d",
    p2i(_bottom_address), _frames, _freeze_size << LogBytesPerWord, argsize);

  LogTarget(Trace, continuations) lt;

#ifdef ASSERT
  bool empty = _cont.is_empty();
  log_develop_trace(continuations)("empty: %d", empty);
#endif

  stackChunkOop chunk = _cont.tail();

  assert(chunk == nullptr || (chunk->max_thawing_size() == 0) == chunk->is_empty(), "");

  _freeze_size += frame::metadata_words; // for top frame's metadata

  int overlap = 0; // the args overlap the caller -- if there is one in this chunk and is of the same kind
  int unextended_sp = -1;
  if (chunk != nullptr) {
    if (!chunk->is_empty()) {
      StackChunkFrameStream<ChunkFrames::Mixed> last(chunk);
      unextended_sp = chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp());
      bool top_interpreted = Interpreter::contains(chunk->pc());
      if (callee.is_interpreted_frame() == top_interpreted) {
        overlap = argsize_md;
      }
    } else {
      unextended_sp = chunk->stack_size() - frame::metadata_words_at_top;
    }
  }

  log_develop_trace(continuations)("finalize _size: %d overlap: %d unextended_sp: %d", _freeze_size, overlap, unextended_sp);

  _freeze_size -= overlap;
  assert(_freeze_size >= 0, "");

  assert(chunk == nullptr || chunk->is_empty()
          || unextended_sp == chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp()), "");
  assert(chunk != nullptr || unextended_sp < _freeze_size, "");

  _freeze_size += _monitors_in_lockstack;

  // _barriers can be set to true by an allocation in freeze_fast, in which case the chunk is available
  bool allocated_old_in_freeze_fast = _barriers;
  assert(!allocated_old_in_freeze_fast || (unextended_sp >= _freeze_size && chunk->is_empty()),
    "Chunk allocated in freeze_fast is of insufficient size "
    "unextended_sp: %d size: %d is_empty: %d", unextended_sp, _freeze_size, chunk->is_empty());
  assert(!allocated_old_in_freeze_fast || (!UseZGC && !UseG1GC), "Unexpected allocation");

  DEBUG_ONLY(bool empty_chunk = true);
  if (unextended_sp < _freeze_size || chunk->is_gc_mode() || (!allocated_old_in_freeze_fast && chunk->requires_barriers())) {
    // ALLOCATE NEW CHUNK

    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      if (chunk == nullptr) {
        ls.print_cr("no chunk");
      } else {
        ls.print_cr("chunk barriers: %d _size: %d free size: %d",
          chunk->requires_barriers(), _freeze_size, chunk->sp() - frame::metadata_words);
        chunk->print_on(&ls);
      }
    }

    _freeze_size += overlap; // we're allocating a new chunk, so no overlap
    // overlap = 0;

    chunk = allocate_chunk_slow(_freeze_size, argsize_md);
    if (chunk == nullptr) {
      return freeze_exception;
    }

    // Install new chunk
    _cont.set_tail(chunk);
    assert(chunk->is_empty(), "");
  } else {
    // REUSE EXISTING CHUNK
    log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
    if (chunk->is_empty()) {
      int sp = chunk->stack_size() - argsize_md;
      chunk->set_sp(sp);
      chunk->set_bottom(sp);
      _freeze_size += overlap;
      assert(chunk->max_thawing_size() == 0, "");
    } DEBUG_ONLY(else empty_chunk = false;)
  }
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  chunk->set_has_mixed_frames(true);

  assert(chunk->requires_barriers() == _barriers, "");
  assert(!_barriers || chunk->is_empty(), "");

  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");

  if (_preempt) {
    frame top_frame = _thread->last_frame();
    if (top_frame.is_interpreted_frame()) {
      // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
      // We need it so that on resume we can restore the sp to the right place, since
      // thawing might add an alignment word to the expression stack (see finish_thaw()).
      // We do it now that we know freezing will be successful.
      prepare_freeze_interpreted_top_frame(top_frame);
    }

    // Do this now so should_process_args_at_top() is set before calling finish_freeze
    // in case we might need to apply GC barriers to frames in this stackChunk.
    if (_thread->at_preemptable_init()) {
      assert(top_frame.is_interpreted_frame(), "only InterpreterRuntime::_new/resolve_from_cache allowed");
      chunk->set_at_klass_init(true);
      methodHandle m(_thread, top_frame.interpreter_frame_method());
      Bytecode_invoke call = Bytecode_invoke_check(m, top_frame.interpreter_frame_bci());
      assert(!call.is_valid() || call.is_invokestatic(), "only invokestatic allowed");
      if (call.is_invokestatic() && call.size_of_parameters() > 0) {
        assert(top_frame.interpreter_frame_expression_stack_size() > 0, "should have parameters in exp stack");
        chunk->set_has_args_at_top(true);
      }
    }
  }

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation or a consistent chunk.
  unwind_frames();

  chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);

  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top chunk:");
    chunk->print_on(&ls);
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  // The topmost existing frame in the chunk; or an empty frame if the chunk is empty
  caller = StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame();

  DEBUG_ONLY(_last_write = caller.unextended_sp() + (empty_chunk ? argsize_md : overlap);)

  assert(chunk->is_in_chunk(_last_write - _freeze_size),
    "last_write-size: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(_last_write-_freeze_size), p2i(chunk->start_address()));
#ifdef ASSERT
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top hframe before (freeze):");
    assert(caller.is_heap_frame(), "should be");
    caller.print_on(&ls);
  }

  assert(!empty || Continuation::is_continuation_entry_frame(callee, nullptr), "");

  frame entry = sender(callee);

  assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
  assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
#endif

  return freeze_ok_bottom;
}
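
// Recap of the overlap rule in finalize_freeze(): the bottom-most frozen frame
// shares its (incoming) stack-argument area with the chunk's current top frame
// only when the two are of the same kind, i.e. callee.is_interpreted_frame()
// matches Interpreter::contains(chunk->pc()); otherwise overlap stays 0 and
// the argument area is frozen as part of the bottom frame itself.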

// After freezing a frame we may need to adjust some values related to the caller frame.
1173 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1174   if (is_bottom_frame) {
1175     // If we're the bottom frame, we need to replace the return barrier with the real
1176     // caller's pc.
1177     address last_pc = caller.pc();
1178     assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1179     ContinuationHelper::Frame::patch_pc(caller, last_pc);
1180   } else {
1181     assert(!caller.is_empty(), "");
1182   }
1183 
1184   patch_pd(hf, caller, is_bottom_frame);
1185 
1186   if (f.is_interpreted_frame()) {
1187     assert(hf.is_heap_frame(), "should be");
1188     ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1189   }
1190 
1191 #ifdef ASSERT
1192   if (hf.is_compiled_frame()) {
1193     if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1194       log_develop_trace(continuations)("Freezing deoptimized frame");
1195       assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1196       assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1197     }
1198   }
1199 #endif
1200 }
1201 
1202 #ifdef ASSERT
1203 static void verify_frame_top(const frame& f, intptr_t* top) {
1204   ResourceMark rm;
1205   InterpreterOopMap mask;
1206   f.interpreted_frame_oop_map(&mask);
1207   assert(top <= ContinuationHelper::InterpretedFrame::frame_top(f, &mask),
1208          "frame_top: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT,
1209            p2i(top), p2i(ContinuationHelper::InterpretedFrame::frame_top(f, &mask)));
1210 }
1211 #endif // ASSERT
1212 
1213 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1214 // See also StackChunkFrameStream<frame_kind>::frame_size()
1215 NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, frame& caller,
1216                                                                     int callee_argsize /* incl. metadata */,
1217                                                                     bool callee_interpreted) {
1218   adjust_interpreted_frame_unextended_sp(f);
1219 
1220   // The frame's top never includes the stack arguments to the callee
1221   intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted);
1222   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
1223   const int fsize = pointer_delta_as_int(stack_frame_bottom, stack_frame_top);
1224 
1225   DEBUG_ONLY(verify_frame_top(f, stack_frame_top));
1226 
1227   Method* frame_method = ContinuationHelper::Frame::frame_method(f);
1228   // including metadata between f and its args
1229   const int argsize = ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top;
1230 
1231   log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d",
1232     frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize, callee_interpreted);
1233   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1234   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1235 
1236   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::InterpretedFrame>(f, caller, fsize, argsize);
1237   if (UNLIKELY(result > freeze_ok_bottom)) {
1238     return result;
1239   }
1240 
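  // freeze_ok_bottom means we just froze the bottom-most frame of the continuation,
  // so its caller is the continuation entry rather than another frozen frame.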
1241   bool is_bottom_frame = result == freeze_ok_bottom;
1242   assert(!caller.is_empty() || is_bottom_frame, "");
1243 
1244   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, is_bottom_frame);)
1245 
1246   frame hf = new_heap_frame<ContinuationHelper::InterpretedFrame>(f, caller);
1247   _total_align_size += frame::align_wiggle; // add alignment room for internal interpreted frame alignment on AArch64/PPC64
1248 
1249   intptr_t* heap_frame_top = ContinuationHelper::InterpretedFrame::frame_top(hf, callee_argsize, callee_interpreted);
1250   intptr_t* heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
1251   assert(heap_frame_bottom == heap_frame_top + fsize, "");
1252 
1253   // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
1254   // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
1255   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1256   assert(!is_bottom_frame || !caller.is_interpreted_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1257 
1258   relativize_interpreted_frame_metadata(f, hf);
1259 
1260   patch(f, hf, caller, is_bottom_frame);
1261 
1262   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1263   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1264   caller = hf;
1265 
1266   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1267   frame_method->record_gc_epoch();
1268 
1269   return freeze_ok;
1270 }
1271 
1272 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1273 // See also StackChunkFrameStream<frame_kind>::frame_size()
1274 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1275                                                         int callee_argsize /* incl. metadata */,
1276                                                         bool callee_interpreted) {
1277   // The frame's top never includes the stack arguments to the callee
1278   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1279   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1280   // including metadata between f and its stackargs
1281   int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
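  // fsize spans the frame itself plus its incoming stack arguments, which overlap
  // with the caller's frame (see the caller/callee overlap note above).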
1282   int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1283 
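  // A frame that was augmented on entry (stack repair, e.g. for scalarized
  // inline-type calling conventions) is larger than the nmethod's static
  // frame_size(); was_augmented_on_entry() reports the actual size in real_frame_size.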
1284   int real_frame_size = 0;
1285   bool augmented = f.was_augmented_on_entry(real_frame_size);
1286   if (augmented) {
    // The args reside inside the frame so clear argsize. If the caller is compiled,
    // this will cause the stack arguments passed by the caller to be frozen when
    // freezing the caller frame itself. If the caller is interpreted this will have
    // the effect of discarding the arg area created in the i2c stub.
1291     argsize = 0;
1292     fsize = real_frame_size - (callee_interpreted ? 0 : callee_argsize);
1293 #ifdef ASSERT
1294     nmethod* nm = f.cb()->as_nmethod();
1295     Method* method = nm->method();
1296     address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);
1297     CodeBlob* caller_cb = CodeCache::find_blob_fast(return_pc);
1298     assert(nm->is_compiled_by_c2() || (caller_cb->is_nmethod() && caller_cb->as_nmethod()->is_compiled_by_c2()), "caller or callee should be c2 compiled");
1299     assert((!caller_cb->is_nmethod() && nm->is_compiled_by_c2()) ||
1300            (nm->compiler_type() != caller_cb->as_nmethod()->compiler_type()) ||
1301            (nm->is_compiled_by_c2() && !method->is_static() && method->method_holder()->is_inline_klass()),
1302            "frame should not be extended");
1303 #endif
1304   }
1305 
1306   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d augmented: %d",
1307                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1308                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1309                              _freeze_size, fsize, argsize, augmented);
1310   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1311   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1312 
1313   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1314   if (UNLIKELY(result > freeze_ok_bottom)) {
1315     return result;
1316   }
1317 
1318   bool is_bottom_frame = result == freeze_ok_bottom;
1319   assert(!caller.is_empty() || is_bottom_frame, "");
1320   assert(!is_bottom_frame || !augmented, "thaw extended frame without caller?");
1321 
1322   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1323 
1324   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller, augmented ? real_frame_size - f.cb()->as_nmethod()->frame_size() : 0);
1325 
1326   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1327 
1328   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1329   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1330 
1331   if (caller.is_interpreted_frame()) {
1332     // When thawing the frame we might need to add alignment (see Thaw::align)
1333     _total_align_size += frame::align_wiggle;
1334   }
1335 
1336   patch(f, hf, caller, is_bottom_frame);
1337 
1338   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1339 
1340   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1341   caller = hf;
1342   return freeze_ok;
1343 }
1344 
1345 NOINLINE freeze_result FreezeBase::recurse_freeze_stub_frame(frame& f, frame& caller) {
1346   DEBUG_ONLY(frame fsender = sender(f);)
1347   assert(fsender.is_compiled_frame(), "sender should be compiled frame");
1348 
1349   intptr_t* const stack_frame_top = ContinuationHelper::StubFrame::frame_top(f);
1350   const int fsize = f.cb()->frame_size();
1351 
1352   log_develop_trace(continuations)("recurse_freeze_stub_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1353     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1354 
1355   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::StubFrame>(f, caller, fsize, 0);
1356   if (UNLIKELY(result > freeze_ok_bottom)) {
1357     return result;
1358   }
1359 
1360   assert(result == freeze_ok, "should have caller");
1361   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, false /*is_bottom_frame*/);)
1362 
1363   frame hf = new_heap_frame<ContinuationHelper::StubFrame>(f, caller);
1364   intptr_t* heap_frame_top = ContinuationHelper::StubFrame::frame_top(hf);
1365 
1366   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1367 
1368   patch(f, hf, caller, false /*is_bottom_frame*/);
1369 
1370   DEBUG_ONLY(after_freeze_java_frame(hf, false /*is_bottom_frame*/);)
1371 
1372   caller = hf;
1373   return freeze_ok;
1374 }
1375 
1376 NOINLINE freeze_result FreezeBase::recurse_freeze_native_frame(frame& f, frame& caller) {
1377   if (!f.cb()->as_nmethod()->method()->is_object_wait0()) {
1378     assert(f.cb()->as_nmethod()->method()->is_synchronized(), "");
    // Synchronized native method case. Unlike the interpreter native wrapper, the compiled
    // native wrapper tries to acquire the monitor after marshalling the arguments from the
    // caller into the native convention. This is so that we have a valid oopMap in case we
    // have to block in the slow path. But freezing here would require freezing those registers
    // too, and then fixing them back up on thaw in case they contain oops. To avoid complicating
    // things, and given that this would be a rare case anyway, just pin the vthread to the carrier.
1385     return freeze_pinned_native;
1386   }
1387 
1388   intptr_t* const stack_frame_top = ContinuationHelper::NativeFrame::frame_top(f);
1389   // There are no stackargs but argsize must include the metadata
1390   const int argsize = frame::metadata_words_at_top;
1391   const int fsize = f.cb()->frame_size() + argsize;
1392 
1393   log_develop_trace(continuations)("recurse_freeze_native_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1394     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1395 
1396   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::NativeFrame>(f, caller, fsize, argsize);
1397   if (UNLIKELY(result > freeze_ok_bottom)) {
1398     return result;
1399   }
1400 
1401   assert(result == freeze_ok, "should have caller frame");
1402   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, false /* is_bottom_frame */);)
1403 
1404   frame hf = new_heap_frame<ContinuationHelper::NativeFrame>(f, caller);
1405   intptr_t* heap_frame_top = ContinuationHelper::NativeFrame::frame_top(hf);
1406 
1407   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1408 
1409   if (caller.is_interpreted_frame()) {
1410     // When thawing the frame we might need to add alignment (see Thaw::align)
1411     _total_align_size += frame::align_wiggle;
1412   }
1413 
1414   patch(f, hf, caller, false /* is_bottom_frame */);
1415 
1416   DEBUG_ONLY(after_freeze_java_frame(hf, false /* is_bottom_frame */);)
1417 
1418   caller = hf;
1419   return freeze_ok;
1420 }
1421 
1422 NOINLINE void FreezeBase::finish_freeze(const frame& f, const frame& top) {
1423   stackChunkOop chunk = _cont.tail();
1424 
1425   LogTarget(Trace, continuations) lt;
1426   if (lt.develop_is_enabled()) {
1427     LogStream ls(lt);
1428     assert(top.is_heap_frame(), "should be");
1429     top.print_on(&ls);
1430   }
1431 
1432   set_top_frame_metadata_pd(top);
1433 
1434   chunk->set_sp(chunk->to_offset(top.sp()));
1435   chunk->set_pc(top.pc());
1436 
1437   chunk->set_max_thawing_size(chunk->max_thawing_size() + _total_align_size);
1438 
1439   assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "clash with lockstack");
1440 
1441   // At this point the chunk is consistent
1442 
1443   if (UNLIKELY(_barriers)) {
1444     log_develop_trace(continuations)("do barriers on old chunk");
    // Serial and Parallel GC can allocate objects directly into the old generation.
    // In that case we want to relativize the derived pointers eagerly so that
    // old chunks are all in GC mode.
    assert(!UseG1GC, "G1 cannot deal with allocating outside of eden");
    assert(!UseZGC, "ZGC cannot deal with allocating chunks visible to marking");
1450     if (UseShenandoahGC) {
1451       _cont.tail()->relativize_derived_pointers_concurrently();
1452     } else {
1453       ContinuationGCSupport::transform_stack_chunk(_cont.tail());
1454     }
1455     // For objects in the old generation we must maintain the remembered set
1456     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>();
1457   }
1458 
1459   log_develop_trace(continuations)("finish_freeze: has_mixed_frames: %d", chunk->has_mixed_frames());
1460   if (lt.develop_is_enabled()) {
1461     LogStream ls(lt);
1462     chunk->print_on(true, &ls);
1463   }
1464 
1465   if (lt.develop_is_enabled()) {
1466     LogStream ls(lt);
1467     ls.print_cr("top hframe after (freeze):");
1468     assert(_cont.last_frame().is_heap_frame(), "should be");
1469     _cont.last_frame().print_on(&ls);
1470     DEBUG_ONLY(print_frame_layout(top, false, &ls);)
1471   }
1472 
1473   assert(_cont.chunk_invariant(), "");
1474 }
1475 
1476 inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive native code
1477   JavaThread* t = !_preempt ? _thread : JavaThread::current();
1478   assert(t == JavaThread::current(), "");
1479   if (os::current_stack_pointer() < t->stack_overflow_state()->shadow_zone_safe_limit()) {
1480     if (!_preempt) {
1481       ContinuationWrapper::SafepointOp so(t, _cont); // could also call _cont.done() instead
1482       Exceptions::_throw_msg(t, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Stack overflow while freezing");
1483     }
1484     return true;
1485   }
1486   return false;
1487 }
1488 
1489 class StackChunkAllocator : public MemAllocator {
1490   const size_t                                 _stack_size;
1491   int                                          _argsize_md;
1492   ContinuationWrapper&                         _continuation_wrapper;
1493   JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector;
1494   mutable bool                                 _took_slow_path;
1495 
1496   // Does the minimal amount of initialization needed for a TLAB allocation.
1497   // We don't need to do a full initialization, as such an allocation need not be immediately walkable.
1498   virtual oop initialize(HeapWord* mem) const override {
1499     assert(_stack_size > 0, "");
1500     assert(_stack_size <= max_jint, "");
1501     assert(_word_size > _stack_size, "");
1502 
1503     // zero out fields (but not the stack)
1504     const size_t hs = oopDesc::header_size();
1505     if (oopDesc::has_klass_gap()) {
1506       oopDesc::set_klass_gap(mem, 0);
1507     }
1508     Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);
1509 
1510     int bottom = (int)_stack_size - _argsize_md;
1511 
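    // The chunk starts out empty: sp == bottom, with argsize_md words reserved
    // between bottom and the end of the stack for the bottom frame's
    // argument/metadata overlap.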
1512     jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
1513     jdk_internal_vm_StackChunk::set_bottom(mem, bottom);
1514     jdk_internal_vm_StackChunk::set_sp(mem, bottom);
1515 
1516     return finish(mem);
1517   }
1518 
1519   stackChunkOop allocate_fast() const {
1520     if (!UseTLAB) {
1521       return nullptr;
1522     }
1523 
1524     HeapWord* const mem = MemAllocator::mem_allocate_inside_tlab_fast();
1525     if (mem == nullptr) {
1526       return nullptr;
1527     }
1528 
1529     oop obj = initialize(mem);
1530     return stackChunkOopDesc::cast(obj);
1531   }
1532 
1533 public:
1534   StackChunkAllocator(Klass* klass,
1535                       size_t word_size,
1536                       Thread* thread,
1537                       size_t stack_size,
1538                       int argsize_md,
1539                       ContinuationWrapper& continuation_wrapper,
1540                       JvmtiSampledObjectAllocEventCollector* jvmti_event_collector)
1541     : MemAllocator(klass, word_size, thread),
1542       _stack_size(stack_size),
1543       _argsize_md(argsize_md),
1544       _continuation_wrapper(continuation_wrapper),
1545       _jvmti_event_collector(jvmti_event_collector),
1546       _took_slow_path(false) {}
1547 
  // Provides its own specialized allocation which skips instrumentation
  // if the memory can be allocated without going to a slow path.
1550   stackChunkOop allocate() const {
1551     // First try to allocate without any slow-paths or instrumentation.
1552     stackChunkOop obj = allocate_fast();
1553     if (obj != nullptr) {
1554       return obj;
1555     }
1556 
1557     // Now try full-blown allocation with all expensive operations,
1558     // including potentially safepoint operations.
1559     _took_slow_path = true;
1560 
1561     // Protect unhandled Loom oops
1562     ContinuationWrapper::SafepointOp so(_thread, _continuation_wrapper);
1563 
1564     // Can safepoint
1565     _jvmti_event_collector->start();
1566 
1567     // Can safepoint
1568     return stackChunkOopDesc::cast(MemAllocator::allocate());
1569   }
1570 
1571   bool took_slow_path() const {
1572     return _took_slow_path;
1573   }
1574 };
1575 
1576 template <typename ConfigT>
1577 stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size, int argsize_md) {
1578   log_develop_trace(continuations)("allocate_chunk allocating new chunk");
1579 
1580   InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass());
1581   size_t size_in_words = klass->instance_size(stack_size);
1582 
1583   if (CollectedHeap::stack_chunk_max_size() > 0 && size_in_words >= CollectedHeap::stack_chunk_max_size()) {
1584     if (!_preempt) {
1585       throw_stack_overflow_on_humongous_chunk();
1586     }
1587     return nullptr;
1588   }
1589 
1590   JavaThread* current = _preempt ? JavaThread::current() : _thread;
1591   assert(current == JavaThread::current(), "should be current");
1592 
1593   // Allocate the chunk.
1594   //
  // This might safepoint while allocating, but all safepointing due to
  // instrumentation has been deferred. This property is important for
  // some GCs, as it ensures that the allocated object is in the young
  // generation / newly allocated memory.
1599   StackChunkAllocator allocator(klass, size_in_words, current, stack_size, argsize_md, _cont, _jvmti_event_collector);
1600   stackChunkOop chunk = allocator.allocate();
1601 
1602   if (chunk == nullptr) {
1603     return nullptr; // OOME
1604   }
1605 
1606   // assert that chunk is properly initialized
1607   assert(chunk->stack_size() == (int)stack_size, "");
1608   assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size);
1609   assert(chunk->sp() == chunk->bottom(), "");
1610   assert((intptr_t)chunk->start_address() % 8 == 0, "");
1611   assert(chunk->max_thawing_size() == 0, "");
1612   assert(chunk->pc() == nullptr, "");
1613   assert(chunk->is_empty(), "");
1614   assert(chunk->flags() == 0, "");
1615   assert(chunk->is_gc_mode() == false, "");
1616   assert(chunk->lockstack_size() == 0, "");
1617 
1618   // fields are uninitialized
1619   chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk());
1620   chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation());
1621 
1622 #if INCLUDE_ZGC
1623   if (UseZGC) {
1624     ZStackChunkGCData::initialize(chunk);
1625     assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation");
1626     _barriers = false;
1627   } else
1628 #endif
1629 #if INCLUDE_SHENANDOAHGC
1630   if (UseShenandoahGC) {
1631     _barriers = chunk->requires_barriers();
1632   } else
1633 #endif
1634   {
1635     if (!allocator.took_slow_path()) {
1636       // Guaranteed to be in young gen / newly allocated memory
1637       assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation");
1638       _barriers = false;
1639     } else {
1640       // Some GCs could put direct allocations in old gen for slow-path
1641       // allocations; need to explicitly check if that was the case.
1642       _barriers = chunk->requires_barriers();
1643     }
1644   }
1645 
1646   if (_barriers) {
1647     log_develop_trace(continuations)("allocation requires barriers");
1648   }
1649 
1650   assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1651 
1652   return chunk;
1653 }
1654 
1655 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1656   ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1657   Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1658 }
1659 
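// RAII helper that publishes the given top frame in the thread's anchor, making
// the stack walkable for the duration of the scope (see its use in jvmti_mount_end).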
1660 class AnchorMark : public StackObj {
1661   JavaThread* _current;
1662   frame& _top_frame;
1663   intptr_t* _last_sp_from_frame;
1664   bool _is_interpreted;
1665 
1666  public:
1667   AnchorMark(JavaThread* current, frame& f) : _current(current), _top_frame(f), _is_interpreted(false) {
1668     intptr_t* sp = anchor_mark_set_pd();
1669     set_anchor(_current, sp);
1670   }
1671   ~AnchorMark() {
1672     clear_anchor(_current);
1673     anchor_mark_clear_pd();
1674   }
1675   inline intptr_t* anchor_mark_set_pd();
1676   inline void anchor_mark_clear_pd();
1677 };
1678 
1679 #if INCLUDE_JVMTI
1680 static int num_java_frames(ContinuationWrapper& cont) {
1681   ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1682   int count = 0;
1683   for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1684     count += chunk->num_java_frames();
1685   }
1686   return count;
1687 }
1688 
1689 static void invalidate_jvmti_stack(JavaThread* thread) {
1690   JvmtiThreadState *state = thread->jvmti_thread_state();
1691   if (state != nullptr) {
1692     state->invalidate_cur_stack_depth();
1693   }
1694 }
1695 
1696 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1697   if (!cont.entry()->is_virtual_thread()) {
1698     if (JvmtiExport::has_frame_pops(thread)) {
1699       int num_frames = num_java_frames(cont);
1700 
1701       ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1702       JvmtiExport::continuation_yield_cleanup(thread, num_frames);
1703     }
1704     invalidate_jvmti_stack(thread);
1705   }
1706 }
1707 
1708 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top, Continuation::preempt_kind pk) {
1709   assert(current->vthread() != nullptr, "must be");
1710 
1711   HandleMarkCleaner hm(current);  // Cleanup all handles (including so._conth) before returning to Java.
1712   Handle vth(current, current->vthread());
1713   ContinuationWrapper::SafepointOp so(current, cont);
1714   AnchorMark am(current, top);  // Set anchor so that the stack is walkable.
1715 
1716   JRT_BLOCK
1717     MountUnmountDisabler::end_transition(current, vth(), true /*is_mount*/, false /*is_thread_start*/);
1718 
1719     if (current->pending_contended_entered_event()) {
1720       // No monitor JVMTI events for ObjectLocker case.
1721       if (pk != Continuation::object_locker) {
1722         JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1723       }
1724       current->set_contended_entered_monitor(nullptr);
1725     }
1726   JRT_BLOCK_END
1727 }
1728 #endif // INCLUDE_JVMTI
1729 
1730 #ifdef ASSERT
// There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1732 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1733 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1734 bool FreezeBase::check_valid_fast_path() {
1735   ContinuationEntry* ce = _thread->last_continuation();
1736   RegisterMap map(_thread,
1737                   RegisterMap::UpdateMap::skip,
1738                   RegisterMap::ProcessFrames::skip,
1739                   RegisterMap::WalkContinuation::skip);
1740   map.set_include_argument_oops(false);
1741   bool is_top_frame = true;
1742   for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1743     if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1744       return false;
1745     }
1746   }
1747   return true;
1748 }
1749 
1750 static void verify_frame_kind(frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr, const char** code_name_ptr, int* bci_ptr, stackChunkOop chunk) {
1751   Method* m;
1752   const char* code_name;
1753   int bci;
1754   if (preempt_kind == Continuation::monitorenter) {
1755     assert(top.is_interpreted_frame() || top.is_runtime_frame(), "unexpected %sframe",
1756       top.is_compiled_frame() ? "compiled " : top.is_native_frame() ? "native " : "");
1757     bool at_sync_method;
1758     if (top.is_interpreted_frame()) {
1759       m = top.interpreter_frame_method();
1760       assert(!m->is_native() || m->is_synchronized(), "invalid method %s", m->external_name());
1761       address bcp = top.interpreter_frame_bcp();
1762       assert(bcp != 0 || m->is_native(), "");
1763       at_sync_method = m->is_synchronized() && (bcp == 0 || bcp == m->code_base());
1764       // bcp is advanced on monitorenter before making the VM call, adjust for that.
1765       bool at_sync_bytecode = bcp > m->code_base() && Bytecode(m, bcp - 1).code() == Bytecodes::Code::_monitorenter;
1766       assert(at_sync_method || at_sync_bytecode, "");
1767       bci = at_sync_method ? -1 : top.interpreter_frame_bci();
1768     } else {
1769       JavaThread* current = JavaThread::current();
1770       ResourceMark rm(current);
1771       CodeBlob* cb = top.cb();
1772       RegisterMap reg_map(current,
1773                   RegisterMap::UpdateMap::skip,
1774                   RegisterMap::ProcessFrames::skip,
1775                   RegisterMap::WalkContinuation::include);
1776       if (top.is_heap_frame()) {
1777         assert(chunk != nullptr, "");
1778         reg_map.set_stack_chunk(chunk);
1779         top = chunk->relativize(top);
1780         top.set_frame_index(0);
1781       }
1782       frame fr = top.sender(&reg_map);
1783       vframe*  vf  = vframe::new_vframe(&fr, &reg_map, current);
1784       compiledVFrame* cvf = compiledVFrame::cast(vf);
1785       m = cvf->method();
1786       bci = cvf->scope()->bci();
1787       at_sync_method = bci == SynchronizationEntryBCI;
1788       assert(!at_sync_method || m->is_synchronized(), "bci is %d but method %s is not synchronized", bci, m->external_name());
1789       bool is_c1_monitorenter = false, is_c2_monitorenter = false;
1790       COMPILER1_PRESENT(is_c1_monitorenter = cb == Runtime1::blob_for(StubId::c1_monitorenter_id) ||
1791                                              cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id);)
1792       COMPILER2_PRESENT(is_c2_monitorenter = cb == CodeCache::find_blob(OptoRuntime::complete_monitor_locking_Java());)
1793       assert(is_c1_monitorenter || is_c2_monitorenter, "wrong runtime stub frame");
1794     }
1795     code_name = at_sync_method ? "synchronized method" : "monitorenter";
1796   } else if (preempt_kind == Continuation::object_wait) {
1797     assert(top.is_interpreted_frame() || top.is_native_frame(), "");
1798     m  = top.is_interpreted_frame() ? top.interpreter_frame_method() : top.cb()->as_nmethod()->method();
1799     assert(m->is_object_wait0(), "");
1800     bci = 0;
1801     code_name = "";
1802   } else {
1803     assert(preempt_kind == Continuation::object_locker, "invalid preempt kind");
1804     assert(top.is_interpreted_frame(), "");
1805     m = top.interpreter_frame_method();
1806     Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
1807     Bytecodes::Code code = current_bytecode.code();
    assert(code == Bytecodes::Code::_new || code == Bytecodes::Code::_invokestatic ||
           code == Bytecodes::Code::_getstatic || code == Bytecodes::Code::_putstatic, "invalid bytecode");
1810     bci = top.interpreter_frame_bci();
1811     code_name = Bytecodes::name(current_bytecode.code());
1812   }
1813   assert(bci >= 0 || m->is_synchronized(), "invalid bci:%d at method %s", bci, m->external_name());
1814 
1815   if (m_ptr != nullptr) {
1816     *m_ptr = m;
1817     *code_name_ptr = code_name;
1818     *bci_ptr = bci;
1819   }
1820 }
1821 
1822 static void log_preempt_after_freeze(const ContinuationWrapper& cont) {
1823   JavaThread* current = cont.thread();
1824   int64_t tid = current->monitor_owner_id();
1825 
1826   StackChunkFrameStream<ChunkFrames::Mixed> sfs(cont.tail());
1827   frame top_frame = sfs.to_frame();
1828   bool at_init = current->at_preemptable_init();
1829   bool at_enter = current->current_pending_monitor() != nullptr;
1830   bool at_wait = current->current_waiting_monitor() != nullptr;
1831   assert((at_enter && !at_wait) || (!at_enter && at_wait), "");
1832   Continuation::preempt_kind pk = at_init ? Continuation::object_locker : at_enter ? Continuation::monitorenter : Continuation::object_wait;
1833 
1834   Method* m = nullptr;
1835   const char* code_name = nullptr;
1836   int bci = InvalidFrameStateBci;
1837   verify_frame_kind(top_frame, pk, &m, &code_name, &bci, cont.tail());
1838   assert(m != nullptr && code_name != nullptr && bci != InvalidFrameStateBci, "should be set");
1839 
1840   ResourceMark rm(current);
1841   if (bci < 0) {
1842     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " while synchronizing on %smethod %s", tid, m->is_native() ? "native " : "", m->external_name());
1843   } else if (m->is_object_wait0()) {
1844     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at native method %s", tid, m->external_name());
1845   } else {
1846     Klass* k = current->preempt_init_klass();
1847     assert(k != nullptr || !at_init, "");
1848     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at %s(bci:%d) in method %s %s%s", tid, code_name, bci,
1849             m->external_name(), at_init ? "trying to initialize klass " : "", at_init ? k->external_name() : "");
1850   }
1851 }
1852 #endif // ASSERT
1853 
1854 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1855   verify_continuation(cont.continuation());
1856   assert(!cont.is_empty(), "");
1857 
1858   log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1859   return freeze_ok;
1860 }
1861 
1862 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1863   if (UNLIKELY(res != freeze_ok)) {
1864     JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1865     verify_continuation(cont.continuation());
1866     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1867     return res;
1868   }
1869 
1870   JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1871   return freeze_epilog(cont);
1872 }
1873 
1874 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1875   if (UNLIKELY(res != freeze_ok)) {
1876     verify_continuation(cont.continuation());
1877     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1878     return res;
1879   }
1880 
1881   // Set up things so that on return to Java we jump to preempt stub.
1882   patch_return_pc_with_preempt_stub(old_last_frame);
1883   cont.tail()->set_preempted(true);
1884   DEBUG_ONLY(log_preempt_after_freeze(cont);)
1885   return freeze_epilog(cont);
1886 }
1887 
1888 template<typename ConfigT, bool preempt>
1889 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1890   assert(!current->has_pending_exception(), "");
1891 
1892 #ifdef ASSERT
1893   log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT "JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1894   log_frames(current);
1895 #endif
1896 
1897   CONT_JFR_ONLY(EventContinuationFreeze event;)
1898 
1899   ContinuationEntry* entry = current->last_continuation();
1900 
1901   oop oopCont = entry->cont_oop(current);
1902   assert(oopCont == current->last_continuation()->cont_oop(current), "");
1903   assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1904 
1905   verify_continuation(oopCont);
1906   ContinuationWrapper cont(current, oopCont);
1907   log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1908 
1909   assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1910 
1911   if (entry->is_pinned()) {
1912     log_develop_debug(continuations)("PINNED due to critical section");
1913     verify_continuation(cont.continuation());
1914     const freeze_result res = freeze_pinned_cs;
1915     if (!preempt) {
1916       JFR_ONLY(current->set_last_freeze_fail_result(res);)
1917     }
1918     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1919     // Avoid Thread.yield() loops without safepoint polls.
1920     if (SafepointMechanism::should_process(current) && !preempt) {
1921       cont.done(); // allow safepoint
1922       ThreadInVMfromJava tivmfj(current);
1923     }
1924     return res;
1925   }
1926 
1927   Freeze<ConfigT> freeze(current, cont, sp, preempt);
1928 
1929   assert(!current->cont_fastpath() || freeze.check_valid_fast_path(), "");
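  // The fast path requires every frame in the continuation to be compiled and
  // non-deoptimized (see check_valid_fast_path above), so the stack can be
  // copied into the chunk wholesale instead of frame by frame.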
1930   bool fast = UseContinuationFastPath && current->cont_fastpath();
1931   if (fast && freeze.size_if_fast_freeze_available() > 0) {
1932     freeze.freeze_fast_existing_chunk();
1933     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1934     return !preempt ? freeze_epilog(cont) : preempt_epilog(cont, freeze_ok, freeze.last_frame());
1935   }
1936 
1937   if (preempt) {
1938     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1939     freeze.set_jvmti_event_collector(&jsoaec);
1940 
1941     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1942 
1943     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1944     preempt_epilog(cont, res, freeze.last_frame());
1945     return res;
1946   }
1947 
1948   log_develop_trace(continuations)("chunk unavailable; transitioning to VM");
1949   assert(current == JavaThread::current(), "must be current thread");
1950   JRT_BLOCK
1951     // delays a possible JvmtiSampledObjectAllocEventCollector in alloc_chunk
1952     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1953     freeze.set_jvmti_event_collector(&jsoaec);
1954 
1955     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1956 
1957     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1958     freeze_epilog(current, cont, res);
1959     cont.done(); // allow safepoint in the transition back to Java
1960     return res;
1961   JRT_BLOCK_END
1962 }
1963 
1964 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) {
1965   ContinuationEntry* entry = thread->last_continuation();
1966   if (entry == nullptr) {
1967     return freeze_ok;
1968   }
1969   if (entry->is_pinned()) {
1970     return freeze_pinned_cs;
1971   }
1972 
1973   RegisterMap map(thread,
1974                   RegisterMap::UpdateMap::include,
1975                   RegisterMap::ProcessFrames::skip,
1976                   RegisterMap::WalkContinuation::skip);
1977   map.set_include_argument_oops(false);
1978   frame f = thread->last_frame();
1979 
1980   if (!safepoint) {
1981     f = f.sender(&map); // this is the yield frame
1982   } else { // safepoint yield
1983 #if (defined(X86) || defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
1984     f.set_fp(f.real_fp()); // Instead of this, maybe in ContinuationWrapper::set_last_frame always use the real_fp?
1985 #else
1986     Unimplemented();
1987 #endif
1988     if (!Interpreter::contains(f.pc())) {
1989       assert(ContinuationHelper::Frame::is_stub(f.cb()), "must be");
1990       assert(f.oop_map() != nullptr, "must be");
1991       f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
1992     }
1993   }
1994 
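  // Walk all frames belonging to continuations on this scope chain; a native
  // frame (or an interpreted frame running a native method) anywhere pins it.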
1995   while (true) {
1996     if ((f.is_interpreted_frame() && f.interpreter_frame_method()->is_native()) || f.is_native_frame()) {
1997       return freeze_pinned_native;
1998     }
1999 
2000     f = f.sender(&map);
2001     if (!Continuation::is_frame_in_continuation(entry, f)) {
2002       oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop(thread));
2003       if (scope == cont_scope) {
2004         break;
2005       }
2006       entry = entry->parent();
2007       if (entry == nullptr) {
2008         break;
2009       }
2010       if (entry->is_pinned()) {
2011         return freeze_pinned_cs;
2012       }
2013     }
2014   }
2015   return freeze_ok;
2016 }
2017 
2018 /////////////// THAW ////
2019 
2020 static int thaw_size(stackChunkOop chunk) {
2021   int size = chunk->max_thawing_size();
2022   size += frame::metadata_words; // For the top pc+fp in push_return_frame or top = stack_sp - frame::metadata_words in thaw_fast
2023   size += 2*frame::align_wiggle; // in case of alignments at the top and bottom
2024   return size;
2025 }
2026 
2027 // make room on the stack for thaw
2028 // returns the size in bytes, or 0 on failure
2029 static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier) {
2030   log_develop_trace(continuations)("~~~~ prepare_thaw return_barrier: %d", return_barrier);
2031 
2032   assert(thread == JavaThread::current(), "");
2033 
2034   ContinuationEntry* ce = thread->last_continuation();
2035   assert(ce != nullptr, "");
2036   oop continuation = ce->cont_oop(thread);
2037   assert(continuation == get_continuation(thread), "");
2038   verify_continuation(continuation);
2039 
2040   stackChunkOop chunk = jdk_internal_vm_Continuation::tail(continuation);
2041   assert(chunk != nullptr, "");
2042 
2043   // The tail can be empty because it might still be available for another freeze.
2044   // However, here we want to thaw, so we get rid of it (it will be GCed).
2045   if (UNLIKELY(chunk->is_empty())) {
2046     chunk = chunk->parent();
2047     assert(chunk != nullptr, "");
2048     assert(!chunk->is_empty(), "");
2049     jdk_internal_vm_Continuation::set_tail(continuation, chunk);
2050   }
2051 
2052   // Verification
2053   chunk->verify();
2054   assert(chunk->max_thawing_size() > 0, "chunk invariant violated; expected to not be empty");
2055 
2056   // Only make space for the last chunk because we only thaw from the last chunk
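  // thaw_size() is in words; shift to convert to a size in bytes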
2057   int size = thaw_size(chunk) << LogBytesPerWord;
2058 
2059   const address bottom = (address)thread->last_continuation()->entry_sp();
2060   // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
2061   // for the Java frames in the check below.
2062   if (!stack_overflow_check(thread, size + 300, bottom)) {
2063     return 0;
2064   }
2065 
2066   log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
2067                               p2i(bottom), p2i(bottom - size), size);
2068   return size;
2069 }
2070 
2071 class ThawBase : public StackObj {
2072 protected:
2073   JavaThread* _thread;
2074   ContinuationWrapper& _cont;
2075   CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
2076 
2077   intptr_t* _fastpath;
2078   bool _barriers;
2079   bool _preempted_case;
2080   bool _process_args_at_top;
2081   intptr_t* _top_unextended_sp_before_thaw;
2082   int _align_size;
2083   DEBUG_ONLY(intptr_t* _top_stack_address);
2084 
2085   // Only used for preemption on ObjectLocker
2086   ObjectMonitor* _init_lock;
2087 
2088   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
2089 
2090   NOT_PRODUCT(int _frames;)
2091 
2092 protected:
2093   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
2094       _thread(thread), _cont(cont),
2095       _fastpath(nullptr) {
2096     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
    assert(cont.tail() != nullptr, "no last chunk");
2098     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
2099   }
2100 
2101   void clear_chunk(stackChunkOop chunk);
2102   template<bool check_stub>
2103   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
2104   int remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& scfs, int &argsize);
2105   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
2106 
2107   void thaw_lockstack(stackChunkOop chunk);
2108 
2109   // fast path
2110   inline void prefetch_chunk_pd(void* start, int size_words);
2111   void patch_return(intptr_t* sp, bool is_last);
2112 
2113   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
2114   inline intptr_t* push_cleanup_continuation();
2115   inline intptr_t* push_preempt_adapter();
2116   intptr_t* redo_vmcall(JavaThread* current, frame& top);
2117   void throw_interrupted_exception(JavaThread* current, frame& top);
2118 
2119   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
2120   void finish_thaw(frame& f);
2121 
2122 private:
2123   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2124   void finalize_thaw(frame& entry, int argsize);
2125 
2126   inline bool seen_by_gc();
2127 
2128   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2129   inline void after_thaw_java_frame(const frame& f, bool bottom);
2130   inline void patch(frame& f, const frame& caller, bool bottom, bool augmented = false);
2131   void clear_bitmap_bits(address start, address end);
2132 
2133   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
2134   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2135   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2136   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2137 
2138   void push_return_frame(const frame& f);
2139   inline frame new_entry_frame();
2140   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust = 0);
2141   inline void patch_pd(frame& f, const frame& sender);
2142   inline void patch_pd(frame& f, intptr_t* caller_sp);
2143   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2144 
2145   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2146 
2147   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2148 
2149  public:
2150   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2151 };
2152 
2153 template <typename ConfigT>
2154 class Thaw : public ThawBase {
2155 public:
2156   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2157 
2158   inline bool can_thaw_fast(stackChunkOop chunk) {
2159     return    !_barriers
2160            &&  _thread->cont_fastpath_thread_state()
2161            && !chunk->has_thaw_slowpath_condition()
2162            && !PreserveFramePointer;
2163   }
2164 
2165   inline intptr_t* thaw(Continuation::thaw_kind kind);
2166   template<bool check_stub = false>
2167   NOINLINE intptr_t* thaw_fast(stackChunkOop chunk);
2168   NOINLINE intptr_t* thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind);
2169   inline void patch_caller_links(intptr_t* sp, intptr_t* bottom);
2170 };
2171 
2172 template <typename ConfigT>
2173 inline intptr_t* Thaw<ConfigT>::thaw(Continuation::thaw_kind kind) {
2174   verify_continuation(_cont.continuation());
2175   assert(!jdk_internal_vm_Continuation::done(_cont.continuation()), "");
2176   assert(!_cont.is_empty(), "");
2177 
2178   stackChunkOop chunk = _cont.tail();
2179   assert(chunk != nullptr, "guaranteed by prepare_thaw");
2180   assert(!chunk->is_empty(), "guaranteed by prepare_thaw");
2181 
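  // Chunks that require GC barriers (e.g. because they were not newly
  // allocated in the young generation) cannot use the fast path.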
2182   _barriers = chunk->requires_barriers();
2183   return (LIKELY(can_thaw_fast(chunk))) ? thaw_fast(chunk)
2184                                         : thaw_slow(chunk, kind);
2185 }
2186 
2187 class ReconstructedStack : public StackObj {
2188   intptr_t* _base;  // _cont.entrySP(); // top of the entry frame
2189   int _thaw_size;
2190   int _argsize;
2191 public:
2192   ReconstructedStack(intptr_t* base, int thaw_size, int argsize)
2193   : _base(base), _thaw_size(thaw_size - (argsize == 0 ? frame::metadata_words_at_top : 0)), _argsize(argsize) {
    // The only possible source of misalignment is stack-passed arguments because compiled frames are 16-byte aligned.
2195     assert(argsize != 0 || (_base - _thaw_size) == ContinuationHelper::frame_align_pointer(_base - _thaw_size), "");
2196     // We're at most one alignment word away from entrySP
2197     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2198   }
2199 
2200   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2201 
2202   // top and bottom stack pointers
2203   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2204   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2205 
2206   // several operations operate on the totality of the stack being reconstructed,
2207   // including the metadata words
2208   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2209   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2210 };
2211 
2212 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2213   chunk->set_sp(chunk->bottom());
2214   chunk->set_max_thawing_size(0);
2215 }
2216 
2217 int ThawBase::remove_scalarized_frames(StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, int &argsize) {
2218   intptr_t* top = f.sp();
2219 
2220   while (f.cb()->as_nmethod()->needs_stack_repair()) {
2221     f.next(SmallRegisterMap::instance_no_args(), false /* stop */);
2222   }
2223   assert(!f.is_done(), "");
2224   assert(f.is_compiled(), "");
2225 
2226   intptr_t* bottom = f.sp() + f.cb()->frame_size();
2227   argsize = f.stack_argsize();
2228   return bottom - top;
2229 }
2230 
2231 template<bool check_stub>
2232 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2233   bool empty = false;
2234   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2235   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2236   assert(chunk_sp == f.sp(), "");
2237   assert(chunk_sp == f.unextended_sp(), "");
2238 
2239   int frame_size = f.cb()->frame_size();
2240   argsize = f.stack_argsize();
2241 
2242   assert(!f.is_stub() || check_stub, "");
2243   if (check_stub && f.is_stub()) {
    // If we don't thaw the top compiled frame too, then after restoring the saved
    // registers back in Java we would hit the return barrier to thaw one more
    // frame, effectively overwriting the restored registers during that call.
2247     f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2248     assert(!f.is_done(), "");
2249 
2250     f.get_cb();
2251     assert(f.is_compiled(), "");
2252     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2253       // The caller of the runtime stub when the continuation is preempted is not at a
2254       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2255       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2256       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2257     }
2258 
2259     if (f.cb()->as_nmethod()->needs_stack_repair()) {
2260       frame_size += remove_scalarized_frames(f, argsize);
2261     } else {
2262       frame_size += f.cb()->frame_size();
2263       argsize = f.stack_argsize();
2264     }
2265   } else if (f.cb()->as_nmethod()->needs_stack_repair()) {
2266     frame_size = remove_scalarized_frames(f, argsize);
2267   }
2268 
2269   f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2270   empty = f.is_done();
2271   assert(!empty || argsize == chunk->argsize(), "");
2272 
2273   if (empty) {
2274     clear_chunk(chunk);
2275   } else {
2276     chunk->set_sp(chunk->sp() + frame_size);
2277     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2278     // We set chunk->pc to the return pc into the next frame
2279     chunk->set_pc(f.pc());
2280 #ifdef ASSERT
2281     {
2282       intptr_t* retaddr_slot = (chunk_sp
2283                                 + frame_size
2284                                 - frame::sender_sp_ret_address_offset());
2285       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2286              "unexpected pc");
2287     }
2288 #endif
2289   }
2290   assert(empty == chunk->is_empty(), "");
2291   // returns the size required to store the frame on stack, and because it is a
2292   // compiled frame, it must include a copy of the arguments passed by the caller
2293   return frame_size + argsize + frame::metadata_words_at_top;
2294 }
2295 
2296 void ThawBase::thaw_lockstack(stackChunkOop chunk) {
2297   int lockStackSize = chunk->lockstack_size();
2298   assert(lockStackSize > 0 && lockStackSize <= LockStack::CAPACITY, "");
2299 
2300   oop tmp_lockstack[LockStack::CAPACITY];
2301   chunk->transfer_lockstack(tmp_lockstack, _barriers);
2302   _thread->lock_stack().move_from_address(tmp_lockstack, lockStackSize);
2303 
2304   chunk->set_lockstack_size(0);
2305   chunk->set_has_lockstack(false);
2306 }
2307 
2308 void ThawBase::copy_from_chunk(intptr_t* from, intptr_t* to, int size) {
2309   assert(to >= _top_stack_address, "overwrote past thawing space"
2310     " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(to), p2i(_top_stack_address));
2311   assert(to + size <= _cont.entrySP(), "overwrote past thawing space");
2312   _cont.tail()->copy_from_chunk_to_stack(from, to, size);
2313   CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)
2314 }
2315 
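// Patch the return address at the bottom of the thawed frames: the return
// barrier stub if more frames remain in the continuation, or the continuation
// entry's pc if these are the last ones.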
2316 void ThawBase::patch_return(intptr_t* sp, bool is_last) {
2317   log_develop_trace(continuations)("thaw_fast patching -- sp: " INTPTR_FORMAT, p2i(sp));
2318 
2319   address pc = !is_last ? StubRoutines::cont_returnBarrier() : _cont.entryPC();
2320   ContinuationHelper::patch_return_address_at(
2321     sp - frame::sender_sp_ret_address_offset(),
2322     pc);
2323 }
2324 
2325 template <typename ConfigT>
2326 template<bool check_stub>
2327 NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) {
2328   assert(chunk == _cont.tail(), "");
2329   assert(!chunk->has_mixed_frames(), "");
2330   assert(!chunk->requires_barriers(), "");
2331   assert(!chunk->has_bitmap(), "");
2332   assert(!_thread->is_interp_only_mode(), "");
2333 
2334   LogTarget(Trace, continuations) lt;
2335   if (lt.develop_is_enabled()) {
2336     LogStream ls(lt);
2337     ls.print_cr("thaw_fast");
2338     chunk->print_on(true, &ls);
2339   }
2340 
  // Below this threshold we thaw the whole chunk; above it we thaw just one frame.
2342   static const int threshold = 500; // words
2343 
2344   const int full_chunk_size = chunk->stack_size() - chunk->sp(); // this initial size could be reduced if it's a partial thaw
2345   int argsize, thaw_size;
2346 
2347   intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();
2348 
2349   bool partial, empty;
2350   if (LIKELY(!TEST_THAW_ONE_CHUNK_FRAME && (full_chunk_size < threshold))) {
2351     prefetch_chunk_pd(chunk->start_address(), full_chunk_size); // prefetch anticipating memcpy starting at highest address
2352 
2353     partial = false;
2354     argsize = chunk->argsize(); // must be called *before* clearing the chunk
2355     clear_chunk(chunk);
2356     thaw_size = full_chunk_size;
2357     empty = true;
2358   } else { // thaw a single frame
2359     partial = true;
2360     thaw_size = remove_top_compiled_frame_from_chunk<check_stub>(chunk, argsize);
2361     empty = chunk->is_empty();
2362   }
2363 
  // Are we thawing the last frame(s) in the continuation?
2365   const bool is_last = empty && chunk->parent() == nullptr;
2366   assert(!is_last || argsize == 0, "");
2367 
2368   log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT,
2369                               partial, is_last, empty, thaw_size, argsize, p2i(_cont.entrySP()));
2370 
2371   ReconstructedStack rs(_cont.entrySP(), thaw_size, argsize);
2372 
2373   // also copy metadata words at frame bottom
2374   copy_from_chunk(chunk_sp - frame::metadata_words_at_bottom, rs.top(), rs.total_size());
2375 
2376   // update the ContinuationEntry
2377   _cont.set_argsize(argsize);
2378   log_develop_trace(continuations)("setting entry argsize: %d", _cont.argsize());
2379   assert(rs.bottom_sp() == _cont.entry()->bottom_sender_sp(), "");
2380 
2381   // install the return barrier if not last frame, or the entry's pc if last
2382   patch_return(rs.bottom_sp(), is_last);
2383 
2384   // insert the back links from callee to caller frames
2385   patch_caller_links(rs.top(), rs.top() + rs.total_size());
2386 
2387   assert(is_last == _cont.is_empty(), "");
2388   assert(_cont.chunk_invariant(), "");
2389 
2390 #if CONT_JFR
2391   EventContinuationThawFast e;
2392   if (e.should_commit()) {
2393     e.set_id(cast_from_oop<u8>(chunk));
2394     e.set_size(thaw_size << LogBytesPerWord);
2395     e.set_full(!partial);
2396     e.commit();
2397   }
2398 #endif
2399 
2400 #ifdef ASSERT
2401   if (LoomDeoptAfterThaw) {
2402     frame top(rs.sp());
2403     AnchorMark am(_thread, top);
2404     log_frames(_thread);
2405     do_deopt_after_thaw(_thread);
2406   }
2407 #endif
2408 
2409   return rs.sp();
2410 }
2411 
2412 inline bool ThawBase::seen_by_gc() {
2413   return _barriers || _cont.tail()->is_gc_mode();
2414 }
2415 
2416 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2417 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2418   if (UseZGC || UseShenandoahGC) {
2419     chunk->relativize_derived_pointers_concurrently();
2420   }
2421 #endif
2422 }
2423 
2424 template <typename ConfigT>
2425 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2426   Continuation::preempt_kind preempt_kind;
2427   bool retry_fast_path = false;
2428 
2429   _process_args_at_top = false;
2430   _preempted_case = chunk->preempted();
2431   if (_preempted_case) {
2432     ObjectMonitor* mon = nullptr;
2433     ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2434     if (waiter != nullptr) {
2435       // Mounted again after preemption. Resume the pending monitor operation,
2436       // which will be either a monitorenter or Object.wait() call.
2437       mon = waiter->monitor();
2438       preempt_kind = waiter->is_wait() ? Continuation::object_wait : Continuation::monitorenter;
2439 
2440       bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2441       assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2442       if (!mon_acquired) {
2443         // Failed to acquire monitor. Return to enterSpecial to unmount again.
2444         log_develop_trace(continuations, preempt)("Failed to acquire monitor, unmounting again");
2445         return push_cleanup_continuation();
2446       }
2447       chunk = _cont.tail();  // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2448       JVMTI_ONLY(assert(_thread->contended_entered_monitor() == nullptr || _thread->contended_entered_monitor() == mon, ""));
2449     } else {
      // Preemption was cancelled in the monitorenter or ObjectLocker case. We
      // actually acquired the monitor after freezing all frames, so there is no
      // need to call resume_operation. If this is the ObjectLocker case
      // we already released the monitor at ~ObjectLocker, so _init_lock
      // will be set to nullptr below since there is no monitor to release.
2455       preempt_kind = Continuation::monitorenter;
2456     }
2457 
2458     // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2459     relativize_chunk_concurrently(chunk);
2460 
2461     if (chunk->at_klass_init()) {
2462       preempt_kind = Continuation::object_locker;
2463       chunk->set_at_klass_init(false);
2464       _process_args_at_top = chunk->has_args_at_top();
2465       if (_process_args_at_top) {
2466         // Only needed for the top frame which will be thawed.
2467         chunk->set_has_args_at_top(false);
2468       }
2469       assert(waiter == nullptr || mon != nullptr, "should have a monitor");
      _init_lock = mon;  // remember monitor since we will need it in handle_preempted_continuation()
2471     }
2472     chunk->set_preempted(false);
2473     retry_fast_path = true;
2474   } else {
2475     relativize_chunk_concurrently(chunk);
2476   }
2477 
2478   // On the first thaw after freeze, restore oops to the lockstack, if any.
2479   assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2480   if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2481     thaw_lockstack(chunk);
2482     retry_fast_path = true;
2483   }
2484 
2485   // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2486   // and FLAG_PREEMPTED flags from the stackChunk.
2487   if (retry_fast_path && can_thaw_fast(chunk)) {
2488     intptr_t* sp = thaw_fast<true>(chunk);
2489     if (_preempted_case) {
2490       return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2491     }
2492     return sp;
2493   }
2494 
2495   LogTarget(Trace, continuations) lt;
2496   if (lt.develop_is_enabled()) {
2497     LogStream ls(lt);
2498     ls.print_cr("thaw slow return_barrier: %d " INTPTR_FORMAT, kind, p2i(chunk));
2499     chunk->print_on(true, &ls);
2500   }
2501 
2502 #if CONT_JFR
2503   EventContinuationThawSlow e;
2504   if (e.should_commit()) {
2505     e.set_id(cast_from_oop<u8>(_cont.continuation()));
2506     e.commit();
2507   }
2508 #endif
2509 
2510   DEBUG_ONLY(_frames = 0;)
2511   _align_size = 0;
2512   int num_frames = kind == Continuation::thaw_top ? 2 : 1;
2513 
2514   _stream = StackChunkFrameStream<ChunkFrames::Mixed>(chunk);
2515   _top_unextended_sp_before_thaw = _stream.unextended_sp();
2516 
2517   frame heap_frame = _stream.to_frame();
2518   if (lt.develop_is_enabled()) {
2519     LogStream ls(lt);
2520     ls.print_cr("top hframe before (thaw):");
2521     assert(heap_frame.is_heap_frame(), "should have created a relative frame");
2522     heap_frame.print_value_on(&ls);
2523   }
2524 
2525   frame caller; // the thawed caller on the stack
2526   recurse_thaw(heap_frame, caller, num_frames, _preempted_case);
2527   finish_thaw(caller); // caller is now the topmost thawed frame
2528   _cont.write();
2529 
2530   assert(_cont.chunk_invariant(), "");
2531 
2532   JVMTI_ONLY(if (!_cont.entry()->is_virtual_thread()) invalidate_jvmti_stack(_thread));
2533 
2534   _thread->set_cont_fastpath(_fastpath);
2535 
2536   intptr_t* sp = caller.sp();
2537 
2538   if (_preempted_case) {
2539     return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2540   }
2541   return sp;
2542 }
2543 
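     // Dispatch on the kind of the top heap frame. Native and runtime stub frames can
     // only be at the top in the preempted case, and they always thaw their compiled
     // caller with them (hence the hardcoded num_frames of 2).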
2544 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2545   log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2546   assert(!_cont.is_empty(), "no more frames");
2547   assert(num_frames > 0, "");
2548   assert(!heap_frame.is_empty(), "");
2549 
2550   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2551     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2552   } else if (!heap_frame.is_interpreted_frame()) {
2553     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2554   } else {
2555     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
2556   }
2557 }
2558 
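     // Common recursion step for interpreted and compiled frames. Returns true if the
     // current frame is the bottom-most frame to thaw, in which case `caller` has been
     // set to the enterSpecial entry frame by finalize_thaw().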
2559 template<typename FKind>
2560 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2561   assert(num_frames > 0, "");
2562 
2563   DEBUG_ONLY(_frames++;)
2564 
2565   int argsize = _stream.stack_argsize();
2566   CodeBlob* cb = _stream.cb();
2567 
2568   _stream.next(SmallRegisterMap::instance_no_args());
2569   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2570 
2571   // We never leave a compiled caller of an interpreted frame as the top frame in the chunk
2572   // as it makes detecting that situation and adjusting unextended_sp tricky. We also always
2573   // thaw the caller of a frame that needs_stack_repair, as it would otherwise complicate things:
2574   // - Regardless of whether the frame was extended or not, we would need to copy the right arg
2575   //   size if it's greater than the one given by the normal method signature (non-scalarized).
2576   // - If the frame was indeed extended, leaving its caller as the top frame would complicate walking
2577   //   the chunk (we need unextended_sp, but we only have sp).
2578   if (num_frames == 1 && !_stream.is_done() && ((FKind::interpreted && _stream.is_compiled()) || (FKind::compiled && cb->as_nmethod_or_null()->needs_stack_repair()))) {
2579     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2580     num_frames++;
2581   }
2582 
2583   if (num_frames == 1 || _stream.is_done()) { // end recursion
2584     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2585     return true; // bottom
2586   } else { // recurse
2587     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2588     return false;
2589   }
2590 }
2591 
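     // Ends the recursion: updates the chunk's sp/pc and max_thawing_size to account
     // for the frames being thawed, records the bottom frame's stack argsize, and
     // constructs a frame object for the enterSpecial entry frame into `entry`.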
2592 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2593   stackChunkOop chunk = _cont.tail();
2594 
2595   if (!_stream.is_done()) {
2596     assert(_stream.sp() >= chunk->sp_address(), "");
2597     chunk->set_sp(chunk->to_offset(_stream.sp()));
2598     chunk->set_pc(_stream.pc());
2599   } else {
2600     chunk->set_sp(chunk->bottom());
2601     chunk->set_pc(nullptr);
2602   }
2603   assert(_stream.is_done() == chunk->is_empty(), "");
2604 
2605   int total_thawed = pointer_delta_as_int(_stream.unextended_sp(), _top_unextended_sp_before_thaw);
2606   chunk->set_max_thawing_size(chunk->max_thawing_size() - total_thawed);
2607 
2608   _cont.set_argsize(argsize);
2609   entry = new_entry_frame();
2610 
2611   assert(entry.sp() == _cont.entrySP(), "");
2612   assert(Continuation::is_continuation_enterSpecial(entry), "");
2613   assert(_cont.is_entry_frame(entry), "");
2614 }
2615 
2616 inline void ThawBase::before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) {
2617   LogTarget(Trace, continuations) lt;
2618   if (lt.develop_is_enabled()) {
2619     LogStream ls(lt);
2620     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2621     assert(hf.is_heap_frame(), "should be");
2622     hf.print_value_on(&ls);
2623   }
2624   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(caller));
2625 }
2626 
2627 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2628 #ifdef ASSERT
2629   LogTarget(Trace, continuations) lt;
2630   if (lt.develop_is_enabled()) {
2631     LogStream ls(lt);
2632     ls.print_cr("thawed frame:");
2633     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2634   }
2635 #endif
2636 }
2637 
2638 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom, bool augmented) {
2639   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2640   if (bottom) {
2641     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2642                                                                  : StubRoutines::cont_returnBarrier());
2643   } else if (caller.is_compiled_frame()) {
2644     // The caller might have been deoptimized during thaw, but we've overwritten the return address when copying f from the heap.
2645     // If the caller is not deoptimized, the pc is unchanged.
2646     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc(), augmented /*callee_augmented*/);
2647   }
2648 
2649   patch_pd(f, caller);
2650 
2651   if (f.is_interpreted_frame()) {
2652     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2653   }
2654 
2655   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2656   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2657 }
2658 
2659 void ThawBase::clear_bitmap_bits(address start, address end) {
2660   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2661   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2662 
2663   // We need to clear the bits that correspond to arguments as they reside in the caller frame,
2664   // or they will keep objects that are otherwise unreachable alive.
2665 
2666   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2667   // `end` could be at an odd number of stack slots from `start`, i.e. it might not be oop aligned.
2668   // If that's the case, the bit range corresponding to the last stack slot should not have bits set
2669   // anyway, and we assert that before returning.
2670   address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2671   log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2672   stackChunkOop chunk = _cont.tail();
2673   chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2674   assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2675 }
2676 
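     // Called at the end of a thaw when the continuation had been preempted. Finishes
     // the VTMS transition, fixes the fp of the top frame in the fast case, and does
     // the preempt-kind specific work: throwing a pending InterruptedException after
     // Object.wait, patching the saved thread pointer in a runtime stub frame after
     // monitorenter, or redoing the original VM call in the object_locker case.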
2677 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2678   frame top(sp);
2679   assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2680   DEBUG_ONLY(verify_frame_kind(top, preempt_kind);)
2681   NOT_PRODUCT(int64_t tid = _thread->monitor_owner_id();)
2682 
2683   // Finish the VTMS transition.
2684   assert(_thread->is_in_vthread_transition(), "must be");
2685   bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2686   if (is_vthread) {
2687 #if INCLUDE_JVMTI
2688     if (MountUnmountDisabler::notify_jvmti_events()) {
2689       jvmti_mount_end(_thread, _cont, top, preempt_kind);
2690     } else
2691 #endif
2692     { // Faster version of MountUnmountDisabler::end_transition() to avoid
2693       // unnecessary extra instructions from jvmti_mount_end().
2694       java_lang_Thread::set_is_in_vthread_transition(_thread->vthread(), false);
2695       _thread->set_is_in_vthread_transition(false);
2696     }
2697   }
2698 
2699   if (fast_case) {
2700     // If we thawed in the slow path, the runtime stub/native wrapper frame already
2701     // has the correct fp (see ThawBase::new_stack_frame). On the fast path though,
2702     // we copied the fp patched during freeze, which now has to be fixed.
2703     assert(top.is_runtime_frame() || top.is_native_frame(), "");
2704     int fsize = top.cb()->frame_size();
2705     patch_pd(top, sp + fsize);
2706   }
2707 
2708   if (preempt_kind == Continuation::object_wait) {
2709     // Check now if we need to throw an InterruptedException.
2710     bool throw_ie = _thread->pending_interrupted_exception();
2711     if (throw_ie) {
2712       throw_interrupted_exception(_thread, top);
2713       _thread->set_pending_interrupted_exception(false);
2714     }
2715     log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on Object.wait%s", tid, throw_ie ? " (throwing IE)" : "");
2716   } else if (preempt_kind == Continuation::monitorenter) {
2717     if (top.is_runtime_frame()) {
2718       // The continuation might now run on a different platform thread than the previous time, so
2719       // we need to adjust the current thread saved in the stub frame before restoring registers.
2720       JavaThread** thread_addr = frame::saved_thread_address(top);
2721       if (thread_addr != nullptr) *thread_addr = _thread;
2722     }
2723     log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on monitorenter", tid);
2724   } else {
2725     // We need to redo the original call into the VM. First though, we need
2726     // to exit the monitor we just acquired (except in the preemption-cancelled
2727     // case, where it was already released).
2728     assert(preempt_kind == Continuation::object_locker, "");
2729     if (_init_lock != nullptr) _init_lock->exit(_thread);
2730     sp = redo_vmcall(_thread, top);
2731   }
2732   return sp;
2733 }
2734 
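     // In the object_locker case the original call into the VM never completed, so we
     // re-execute it here: InterpreterRuntime::_new or resolve_from_cache, depending
     // on the bytecode at the top interpreted frame. The thread can be preempted
     // again inside that call, in which case we return through the preempt stub.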
2735 intptr_t* ThawBase::redo_vmcall(JavaThread* current, frame& top) {
2736   assert(!current->preempting(), "");
2737   NOT_PRODUCT(int64_t tid = current->monitor_owner_id();)
2738   intptr_t* sp = top.sp();
2739 
2740   {
2741     HandleMarkCleaner hmc(current);  // Clean up all handles (including so._conth) before returning to Java.
2742     ContinuationWrapper::SafepointOp so(current, _cont);
2743     AnchorMark am(current, top);    // Set the anchor so that the stack is walkable.
2744 
2745     Method* m = top.interpreter_frame_method();
2746     Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
2747     Bytecodes::Code code = current_bytecode.code();
2748     log_develop_trace(continuations, preempt)("Redoing InterpreterRuntime::%s for " INT64_FORMAT, code == Bytecodes::Code::_new ? "_new" : "resolve_from_cache", tid);
2749 
2750     // These InterpreterRuntime entry points use JRT_ENTRY which uses a HandleMarkCleaner.
2751     // Create a HandleMark to avoid destroying so._conth.
2752     HandleMark hm(current);
2753     DEBUG_ONLY(JavaThread::AtRedoVMCall apvmc(current);)
2754     if (code == Bytecodes::Code::_new) {
2755       InterpreterRuntime::_new(current, m->constants(), current_bytecode.get_index_u2(code));
2756     } else {
2757       InterpreterRuntime::resolve_from_cache(current, code);
2758     }
2759   }
2760 
2761   if (current->preempting()) {
2762     // Preempted again, so we just arrange to return to the preempt stub to unmount.
2763     sp = push_preempt_adapter();
2764     current->set_preempt_alternate_return(nullptr);
2765     bool cancelled = current->preemption_cancelled();
2766     if (cancelled) {
2767       // Since preemption was cancelled, the thread will call thaw again from the preempt
2768       // stub. These retries could happen several times due to contention on the init_lock,
2769       // so just let the vthread unmount to give other vthreads a chance to run.
2770       current->set_preemption_cancelled(false);
2771       oop vthread = current->vthread();
2772       assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2773       java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::YIELDING);
2774 #if INCLUDE_JVMTI
2775       if (current->contended_entered_monitor() != nullptr) {
2776         current->set_contended_entered_monitor(nullptr);
2777       }
2778 #endif
2779     }
2780     log_develop_trace(continuations, preempt)("Preempted " INT64_FORMAT " again%s", tid, cancelled ? " (preemption cancelled, setting state to YIELDING)" : "");
2781   } else {
2782     log_develop_trace(continuations, preempt)("Call successful, resuming " INT64_FORMAT, tid);
2783   }
2784   return sp;
2785 }
2786 
2787 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2788   HandleMarkCleaner hm(current);  // Clean up all handles (including so._conth) before returning to Java.
2789   ContinuationWrapper::SafepointOp so(current, _cont);
2790   AnchorMark am(current, top);  // Set the anchor so that the stack is walkable.
2791   JRT_BLOCK
2792     THROW(vmSymbols::java_lang_InterruptedException());
2793   JRT_BLOCK_END
2794 }
2795 
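     // Thaws one interpreted frame: copies it from the chunk (including any alignment
     // padding frozen along with it), derelativizes the interpreter metadata pointers
     // and patches the frame into the stack. `is_top` only matters in the
     // object_locker case, where outgoing args at the top still need GC processing.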
2796 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top) {
2797   assert(hf.is_interpreted_frame(), "");
2798 
2799   if (UNLIKELY(seen_by_gc())) {
2800     if (is_top && _process_args_at_top) {
2801       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_with_args());
2802     } else {
2803       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2804     }
2805   }
2806 
2807   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2808 
2809   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2810 
2811   _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2812 
2813   frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2814 
2815   intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2816   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2817   intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2818   intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2819 
2820   assert(hf.is_heap_frame(), "should be");
2821   assert(!f.is_heap_frame(), "should not be");
2822 
2823   const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2824   assert((stack_frame_bottom == stack_frame_top + fsize), "");
2825 
2826   // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
2827   // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
2828   copy_from_chunk(heap_frame_top, stack_frame_top, fsize);
2829 
2830   // Make sure the relativized locals are already set.
2831   assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2832 
2833   derelativize_interpreted_frame_metadata(hf, f);
2834   patch(f, caller, is_bottom_frame);
2835 
2836   assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2837   assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2838 
2839   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2840 
2841   maybe_set_fastpath(f.sp());
2842 
2843   Method* m = hf.interpreter_frame_method();
2844   assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2845   const int locals = m->max_locals();
2846 
2847   if (!is_bottom_frame) {
2848     // can only fix caller once this frame is thawed (due to callee saved regs)
2849     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2850   } else if (_cont.tail()->has_bitmap() && locals > 0) {
2851     assert(hf.is_heap_frame(), "should be");
2852     address start = (address)(heap_frame_bottom - locals);
2853     address end = (address)heap_frame_bottom;
2854     clear_bitmap_bits(start, end);
2855   }
2856 
2857   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2858   caller = f;
2859 }
2860 
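     // Thaws one compiled frame. `stub_caller` is true when the callee was a runtime
     // stub, in which case the GC barriers were already run with a full register map.
     // Also handles frames that were extended on entry (see was_augmented_on_entry)
     // and deoptimizes the thawed frame when necessary.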
2861 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2862   assert(hf.is_compiled_frame(), "");
2863   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2864 
2865   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2866     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2867   }
2868 
2869   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2870 
2871   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2872 
2873   assert(caller.sp() == caller.unextended_sp(), "");
2874 
2875   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2876     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2877   }
2878 
2879   int fsize = 0;
2880   int added_argsize = 0;
2881   bool augmented = hf.was_augmented_on_entry(fsize);
2882   if (!augmented) {
2883     added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2884     fsize += added_argsize;
2885   }
2886   assert(!is_bottom_frame || !augmented, "");
2887
2889   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2890   // yet laid out in the stack, and so the original_pc is not stored in it.
2891   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2892   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame, augmented ? fsize - hf.cb()->frame_size() : 0);
2893   assert(f.cb()->frame_size() == (int)(caller.sp() - f.sp()), "");
2894 
2895   intptr_t* const stack_frame_top = f.sp();
2896   intptr_t* const heap_frame_top = hf.unextended_sp();
2897   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2898   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2899   // copy metadata, except the metadata at the top of the (unextended) entry frame
2900   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2901 
2902   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2903   // (we might have one padding word for alignment)
2904   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2905   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz == _cont.entrySP()), "");
2906 
2907   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2908 
2909   patch(f, caller, is_bottom_frame, augmented);
2910 
2911   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2912   assert(!f.is_deoptimized_frame(), "");
2913   if (hf.is_deoptimized_frame()) {
2914     maybe_set_fastpath(f.sp());
2915   } else if (_thread->is_interp_only_mode()
2916               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2917     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2918     // cannot rely on nmethod patching for deopt.
2919     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2920 
2921     log_develop_trace(continuations)("Deoptimizing thawed frame");
2922     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2923 
2924     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2925     assert(f.is_deoptimized_frame(), "");
2926     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2927     maybe_set_fastpath(f.sp());
2928   }
2929 
2930   if (!is_bottom_frame) {
2931     // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2932     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2933   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2934     address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2935     int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2936     int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2937     clear_bitmap_bits(start, start + argsize_in_bytes);
2938   }
2939 
2940   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2941   caller = f;
2942 }
2943 
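     // Thaws the runtime stub frame at the top of the chunk (preempted case only).
     // The stub's compiled caller is thawed first; a full register map is used so
     // that callee-saved registers spilled by the stub are processed correctly.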
2944 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2945   DEBUG_ONLY(_frames++;)
2946 
2947   if (UNLIKELY(seen_by_gc())) {
2948     // Process the stub's caller here since we might need the full map.
2949     RegisterMap map(nullptr,
2950                     RegisterMap::UpdateMap::include,
2951                     RegisterMap::ProcessFrames::skip,
2952                     RegisterMap::WalkContinuation::skip);
2953     map.set_include_argument_oops(false);
2954     _stream.next(&map);
2955     assert(!_stream.is_done(), "");
2956     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2957   } else {
2958     _stream.next(SmallRegisterMap::instance_no_args());
2959     assert(!_stream.is_done(), "");
2960   }
2961 
2962   recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2963 
2964   assert(caller.is_compiled_frame(), "");
2965   assert(caller.sp() == caller.unextended_sp(), "");
2966 
2967   DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2968 
2969   frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2970   intptr_t* stack_frame_top = f.sp();
2971   intptr_t* heap_frame_top = hf.sp();
2972   int fsize = ContinuationHelper::StubFrame::size(hf);
2973 
2974   copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2975                   fsize + frame::metadata_words);
2976 
2977   patch(f, caller, false /*is_bottom_frame*/);
2978 
2979   // can only fix caller once this frame is thawed (due to callee saved regs)
2980   RegisterMap map(nullptr,
2981                   RegisterMap::UpdateMap::include,
2982                   RegisterMap::ProcessFrames::skip,
2983                   RegisterMap::WalkContinuation::skip);
2984   map.set_include_argument_oops(false);
2985   f.oop_map()->update_register_map(&f, &map);
2986   ContinuationHelper::update_register_map_with_callee(caller, &map);
2987   _cont.tail()->fix_thawed_frame(caller, &map);
2988 
2989   DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2990   caller = f;
2991 }
2992 
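     // Thaws the native wrapper frame at the top of the chunk. Only reached in the
     // preempted case, for Object.wait0 (see the asserts below).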
2993 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2994   assert(hf.is_native_frame(), "");
2995   assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2996 
2997   if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2998     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2999   }
3000 
3001   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
3002   assert(!is_bottom_frame, "");
3003 
3004   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
3005 
3006   assert(caller.sp() == caller.unextended_sp(), "");
3007 
3008   if (caller.is_interpreted_frame()) {
3009     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame
3010   }
3011 
3012   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
3013   // yet laid out in the stack, and so the original_pc is not stored in it.
3014   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
3015   frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
3016   intptr_t* const stack_frame_top = f.sp();
3017   intptr_t* const heap_frame_top = hf.unextended_sp();
3018 
3019   int fsize = ContinuationHelper::NativeFrame::size(hf);
3020   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
3021 
3022   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
3023   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
3024   int sz = fsize + frame::metadata_words_at_bottom;
3025 
3026   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
3027 
3028   patch(f, caller, false /* bottom */);
3029 
3030   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
3031   assert(!f.is_deoptimized_frame(), "");
3032   assert(!hf.is_deoptimized_frame(), "");
3033   assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
3034 
3035   // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
3036   _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
3037 
3038   DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
3039   caller = f;
3040 }
3041 
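     // Runs once the recursion has completed: updates or unlinks the now possibly
     // empty chunk, re-aligns the top frame's sp if needed, pushes the return frame
     // and fixes up the topmost thawed frame.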
3042 void ThawBase::finish_thaw(frame& f) {
3043   stackChunkOop chunk = _cont.tail();
3044 
3045   if (chunk->is_empty()) {
3046     // Only remove chunk from list if it can't be reused for another freeze
3047     if (seen_by_gc()) {
3048       _cont.set_tail(chunk->parent());
3049     } else {
3050       chunk->set_has_mixed_frames(false);
3051     }
3052     chunk->set_max_thawing_size(0);
3053   } else {
3054     chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
3055   }
3056   assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
3057 
3058   if (!is_aligned(f.sp(), frame::frame_alignment)) {
3059     assert(f.is_interpreted_frame(), "");
3060     f.set_sp(align_down(f.sp(), frame::frame_alignment));
3061   }
3062   push_return_frame(f);
3063   // can only fix caller after push_return_frame (due to callee saved regs)
3064   if (_process_args_at_top) {
3065     chunk->fix_thawed_frame(f, SmallRegisterMap::instance_with_args());
3066   } else {
3067     chunk->fix_thawed_frame(f, SmallRegisterMap::instance_no_args());
3068   }
3069 
3070   assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
3071 
3072   log_develop_trace(continuations)("thawed %d frames", _frames);
3073 
3074   LogTarget(Trace, continuations) lt;
3075   if (lt.develop_is_enabled()) {
3076     LogStream ls(lt);
3077     ls.print_cr("top hframe after (thaw):");
3078     _cont.last_frame().print_value_on(&ls);
3079   }
3080 }
3081 
3082 void ThawBase::push_return_frame(const frame& f) { // see generate_cont_thaw
3083   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
3084   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
3085 
3086   LogTarget(Trace, continuations) lt;
3087   if (lt.develop_is_enabled()) {
3088     LogStream ls(lt);
3089     ls.print_cr("push_return_frame");
3090     f.print_value_on(&ls);
3091   }
3092 
3093   assert(f.sp() - frame::metadata_words_at_bottom >= _top_stack_address, "overwrote past thawing space"
3094     " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(f.sp() - frame::metadata_words), p2i(_top_stack_address));
3095   ContinuationHelper::Frame::patch_pc(f, f.raw_pc()); // in case we want to deopt the frame in a full transition, this is checked.
3096   ContinuationHelper::push_pd(f);
3097 
3098   assert(ContinuationHelper::Frame::assert_frame_laid_out(f), "");
3099 }
3100 
3101 // returns new top sp
3102 // called after preparations (stack overflow check and making room)
3103 template<typename ConfigT>
3104 static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind) {
3105   assert(thread == JavaThread::current(), "Must be current thread");
3106 
3107   CONT_JFR_ONLY(EventContinuationThaw event;)
3108 
3109   log_develop_trace(continuations)("~~~~ thaw kind: %d sp: " INTPTR_FORMAT, kind, p2i(thread->last_continuation()->entry_sp()));
3110 
3111   ContinuationEntry* entry = thread->last_continuation();
3112   assert(entry != nullptr, "");
3113   oop oopCont = entry->cont_oop(thread);
3114 
3115   assert(!jdk_internal_vm_Continuation::done(oopCont), "");
3116   assert(oopCont == get_continuation(thread), "");
3117   verify_continuation(oopCont);
3118 
3119   assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
3120 
3121   ContinuationWrapper cont(thread, oopCont);
3122   log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
3123 
3124 #ifdef ASSERT
3125   set_anchor_to_entry(thread, cont.entry());
3126   log_frames(thread);
3127   clear_anchor(thread);
3128 #endif
3129 
3130   Thaw<ConfigT> thw(thread, cont);
3131   intptr_t* const sp = thw.thaw(kind);
3132   assert(is_aligned(sp, frame::frame_alignment), "");
3133   DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp);)
3134 
3135   CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
3136 
3137   verify_continuation(cont.continuation());
3138   log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
3139 
3140   return sp;
3141 }
3142 
3143 #ifdef ASSERT
3144 static void do_deopt_after_thaw(JavaThread* thread) {
3145   int i = 0;
3146   StackFrameStream fst(thread, true, false);
3147   fst.register_map()->set_include_argument_oops(false);
3148   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3149   for (; !fst.is_done(); fst.next()) {
3150     if (fst.current()->cb()->is_nmethod()) {
3151       nmethod* nm = fst.current()->cb()->as_nmethod();
3152       if (!nm->method()->is_continuation_native_intrinsic()) {
3153         nm->make_deoptimized();
3154       }
3155     }
3156   }
3157 }
3158 
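     // Debug-only closure used by do_verify_after_thaw below: remembers the first
     // slot whose value is neither null nor a valid oop and prints its location.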
3159 class ThawVerifyOopsClosure: public OopClosure {
3160   intptr_t* _p;
3161   outputStream* _st;
3162   bool is_good_oop(oop o) {
3163     return dbg_is_safe(o, -1) && dbg_is_safe(o->klass(), -1) && oopDesc::is_oop(o) && o->klass()->is_klass();
3164   }
3165 public:
3166   ThawVerifyOopsClosure(outputStream* st) : _p(nullptr), _st(st) {}
3167   intptr_t* p() { return _p; }
3168   void reset() { _p = nullptr; }
3169 
3170   virtual void do_oop(oop* p) {
3171     oop o = *p;
3172     if (o == nullptr || is_good_oop(o)) {
3173       return;
3174     }
3175     _p = (intptr_t*)p;
3176     _st->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(*p), p2i(p));
3177   }
3178   virtual void do_oop(narrowOop* p) {
3179     oop o = RawAccess<>::oop_load(p);
3180     if (o == nullptr || is_good_oop(o)) {
3181       return;
3182     }
3183     _p = (intptr_t*)p;
3184     _st->print_cr("*** (narrow) non-oop %x found at " PTR_FORMAT, (int)(*p), p2i(p));
3185   }
3186 };
3187 
3188 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st) {
3189   assert(thread->has_last_Java_frame(), "");
3190 
3191   ResourceMark rm;
3192   ThawVerifyOopsClosure cl(st);
3193   NMethodToOopClosure cf(&cl, false);
3194 
3195   StackFrameStream fst(thread, true, false);
3196   fst.register_map()->set_include_argument_oops(false);
3197   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3198   for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) {
3199     if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) {
3200       st->print_cr(">>> do_verify_after_thaw deopt");
3201       fst.current()->deoptimize(nullptr);
3202       fst.current()->print_on(st);
3203     }
3204 
3205     fst.current()->oops_do(&cl, &cf, fst.register_map());
3206     if (cl.p() != nullptr) {
3207       frame fr = *fst.current();
3208       st->print_cr("Failed for frame barriers: %d",chunk->requires_barriers());
3209       fr.print_on(st);
3210       if (!fr.is_interpreted_frame()) {
3211         st->print_cr("size: %d argsize: %d",
3212                      ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
3213                      ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
3214       }
3215       VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
3216       if (reg != nullptr) {
3217         st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
3218       }
3219       cl.reset();
3220       DEBUG_ONLY(thread->print_frame_layout();)
3221       if (chunk != nullptr) {
3222         chunk->print_on(true, st);
3223       }
3224       return false;
3225     }
3226   }
3227   return true;
3228 }
3229 
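     // Debug-only: logs the physical frames of the thread, stopping a few frames
     // past the continuation entry (see show_entry_callers).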
3230 static void log_frames(JavaThread* thread) {
3231   const static int show_entry_callers = 3;
3232   LogTarget(Trace, continuations) lt;
3233   if (!lt.develop_is_enabled()) {
3234     return;
3235   }
3236   LogStream ls(lt);
3237 
3238   ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
3239   if (!thread->has_last_Java_frame()) {
3240     ls.print_cr("NO ANCHOR!");
3241   }
3242 
3243   RegisterMap map(thread,
3244                   RegisterMap::UpdateMap::include,
3245                   RegisterMap::ProcessFrames::include,
3246                   RegisterMap::WalkContinuation::skip);
3247   map.set_include_argument_oops(false);
3248 
3249   if (false) {
3250     for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
3251       f.print_on(&ls);
3252     }
3253   } else {
3254     map.set_skip_missing(true);
3255     ResetNoHandleMark rnhm;
3256     ResourceMark rm;
3257     HandleMark hm(Thread::current());
3258     FrameValues values;
3259 
3260     int i = 0;
3261     int post_entry = -1;
3262     for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
3263       f.describe(values, i, &map, i == 0);
3264       if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
3265         post_entry++;
3266       if (post_entry >= show_entry_callers)
3267         break;
3268     }
3269     values.print_on(thread, &ls);
3270   }
3271 
3272   ls.print_cr("======= end frames =========");
3273 }
3274 
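     // Debug-only: sets a temporary anchor so the freshly thawed frames are walkable,
     // logs (and optionally verifies) them, then clears the anchor again.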
3275 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp) {
3276   intptr_t* sp0 = sp;
3277   address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
3278 
3279   bool preempted = false;
3280   stackChunkOop tail = cont.tail();
3281   if (tail != nullptr && tail->preempted()) {
3282     // Still preempted (monitor not acquired) so no frames were thawed.
3283     set_anchor(thread, cont.entrySP(), cont.entryPC());
3284     preempted = true;
3285   } else {
3286     set_anchor(thread, sp0);
3287   }
3288 
3289   log_frames(thread);
3290   if (LoomVerifyAfterThaw) {
3291     assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
3292   }
3293   assert(preempted || ContinuationEntry::assert_entry_frame_laid_out(thread), "");
3294   clear_anchor(thread);
3295 
3296   LogTarget(Trace, continuations) lt;
3297   if (lt.develop_is_enabled()) {
3298     LogStream ls(lt);
3299     ls.print_cr("Jumping to frame (thaw):");
3300     frame(sp).print_value_on(&ls);
3301   }
3302 }
3303 #endif // ASSERT
3304 
3305 #include CPU_HEADER_INLINE(continuationFreezeThaw)
3306 
3307 #ifdef ASSERT
3308 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
3309   ResourceMark rm;
3310   FrameValues values;
3311   assert(f.get_cb() != nullptr, "");
3312   RegisterMap map(f.is_heap_frame() ?
3313                     nullptr :
3314                     JavaThread::current(),
3315                   RegisterMap::UpdateMap::include,
3316                   RegisterMap::ProcessFrames::skip,
3317                   RegisterMap::WalkContinuation::skip);
3318   map.set_include_argument_oops(false);
3319   map.set_skip_missing(true);
3320   if (callee_complete) {
3321     frame::update_map_with_saved_link(&map, ContinuationHelper::Frame::callee_link_address(f));
3322   }
3323   const_cast<frame&>(f).describe(values, 0, &map, true);
3324   values.print_on(static_cast<JavaThread*>(nullptr), st);
3325 }
3326 #endif
3327 
3328 static address thaw_entry   = nullptr;
3329 static address freeze_entry = nullptr;
3330 static address freeze_preempt_entry = nullptr;
3331 
3332 address Continuation::thaw_entry() {
3333   return ::thaw_entry;
3334 }
3335 
3336 address Continuation::freeze_entry() {
3337   return ::freeze_entry;
3338 }
3339 
3340 address Continuation::freeze_preempt_entry() {
3341   return ::freeze_preempt_entry;
3342 }
3343 
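     // Resolves the freeze/thaw entry points once, at VM init, by instantiating the
     // templates for the active compressed-oops mode and BarrierSet type.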
3344 class ConfigResolve {
3345 public:
3346   static void resolve() { resolve_compressed(); }
3347 
3348   static void resolve_compressed() {
3349     UseCompressedOops ? resolve_gc<true>()
3350                       : resolve_gc<false>();
3351   }
3352 
3353 private:
3354   template <bool use_compressed>
3355   static void resolve_gc() {
3356     BarrierSet* bs = BarrierSet::barrier_set();
3357     assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set");
3358     switch (bs->kind()) {
3359 #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
3360       case BarrierSet::bs_name: {                                       \
3361         resolve<use_compressed, typename BarrierSet::GetType<BarrierSet::bs_name>::type>(); \
3362       }                                                                 \
3363         break;
3364       FOR_EACH_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
3365 #undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
3366 
3367     default:
3368       fatal("BarrierSet resolving not implemented");
3369     };
3370   }
3371 
3372   template <bool use_compressed, typename BarrierSetT>
3373   static void resolve() {
3374     typedef Config<use_compressed ? oop_kind::NARROW : oop_kind::WIDE, BarrierSetT> SelectedConfigT;
3375 
3376     freeze_entry = (address)freeze<SelectedConfigT>;
3377     freeze_preempt_entry = (address)SelectedConfigT::freeze_preempt;
3378 
3379     // If we wanted, we could templatize by kind and have three different thaw entries
3380     thaw_entry   = (address)thaw<SelectedConfigT>;
3381   }
3382 };
3383 
3384 void Continuation::init() {
3385   ConfigResolve::resolve();
3386 }