/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.inline.hpp"
#include "code/nmethod.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "oops/access.inline.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/continuationHelper.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ZGC
#include "gc/z/zStackChunkGCData.inline.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#include <type_traits>

/*
 * This file contains the implementation of continuation freezing (yield) and thawing (run).
 *
 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
 * would likely call these operations many thousands of times per second, on every core.
 *
 * Freeze might be called every time the application performs any I/O operation, every time it
 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
 * multiple times in each of those cases, as it is called by the return barrier, which may be
 * invoked on method return.
 *
 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
 * example, every effort is made to avoid Java-VM transitions as much as possible.
 *
 * On the fast path, all frames are known to be compiled and the chunk requires no barriers,
 * so the frames are simply copied and the bottom-most one is patched.
 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
 * and absolute pointers, and barriers are invoked.
 */
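
// For illustration only (grounded in the comment above, not an exhaustive
// list): freeze is typically reached when a virtual thread blocks, e.g. on
// I/O or a j.u.c. lock, and thaw is reached when it resumes, either directly
// or via the return barrier installed under the bottom-most thawed frame.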

/************************************************

Thread-stack layout on freeze/thaw.
See corresponding stack-chunk layout in instanceStackChunkKlass.hpp

            +----------------------------+
            |      .                     |
            |      .                     |
            |      .                     |
            |   carrier frames           |
            |                            |
            |----------------------------|
            |                            |
            |    Continuation.run        |
            |                            |
            |============================|
            |    enterSpecial frame      |
            |  pc                        |
            |  rbp                       |
            |  -----                     |
        ^   |  int argsize               | = ContinuationEntry
        |   |  oopDesc* cont             |
        |   |  oopDesc* chunk            |
        |   |  ContinuationEntry* parent |
        |   |  ...                       |
        |   |============================| <------ JavaThread::_cont_entry = entry->sp()
        |   |  ? alignment word ?        |
        |   |----------------------------| <--\
        |   |                            |    |
        |   |  ? caller stack args ?     |    |   argsize (might not be 2-word aligned) words
Address |   |                            |    |   Caller is still in the chunk.
        |   |----------------------------|    |
        |   |  pc (? return barrier ?)   |    |  This pc contains the return barrier when the bottom-most frame
        |   |  rbp                       |    |  isn't the last one in the continuation.
        |   |                            |    |
        |   |    frame                   |    |
        |   |                            |    |
            +----------------------------|     \__ Continuation frames to be frozen/thawed
            |                            |     /
            |    frame                   |    |
            |                            |    |
            |----------------------------|    |
            |                            |    |
            |    frame                   |    |
            |                            |    |
            |----------------------------| <--/
            |                            |
            |    doYield/safepoint stub  | When preempting forcefully, we could have a safepoint stub
            |                            | instead of a doYield stub
            |============================| <- the sp passed to freeze
            |                            |
            |  Native freeze/thaw frames |
            |      .                     |
            |      .                     |
            |      .                     |
            +----------------------------+

************************************************/

static const bool TEST_THAW_ONE_CHUNK_FRAME = false; // force thawing frames one-at-a-time for testing

#define CONT_JFR false // if true, emit low-level JFR events counting slow/fast paths; for continuation performance debugging only
#if CONT_JFR
  #define CONT_JFR_ONLY(code) code
#else
  #define CONT_JFR_ONLY(code)
#endif

// TODO: See AbstractAssembler::generate_stack_overflow_check,
// Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
// when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.

// Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk

// Used just to annotate cold/hot branches
#define LIKELY(condition)   (condition)
#define UNLIKELY(condition) (condition)
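
// These are deliberately no-ops and serve purely as annotations. On GCC/Clang
// they could plausibly be defined as branch hints instead, e.g. (an
// illustrative sketch, not what this file does):
//   #define LIKELY(condition)   (__builtin_expect(!!(condition), 1))
//   #define UNLIKELY(condition) (__builtin_expect(!!(condition), 0))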

// debugging functions
#ifdef ASSERT
extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue

static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }

static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
static void log_frames(JavaThread* thread, bool dolog = false);
static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr = nullptr, const char** code_name_ptr = nullptr, int* bci_ptr = nullptr);

#define assert_pfl(p, ...) \
do {                                           \
  if (!(p)) {                                  \
    JavaThread* t = JavaThread::active();      \
    if (t->has_last_Java_frame()) {            \
      tty->print_cr("assert(" #p ") failed:"); \
      t->print_frame_layout();                 \
    }                                          \
  }                                            \
  vmassert(p, __VA_ARGS__);                    \
} while(0)

#else
static void verify_continuation(oop continuation) { }
#define assert_pfl(p, ...)
#endif

static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);

static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier);
template<typename ConfigT> static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind);


// Entry point to freeze. Transitions are handled manually.
// Called from gen_continuation_yield() in sharedRuntime_<cpu>.cpp through Continuation::freeze_entry().
template<typename ConfigT>
static JRT_BLOCK_ENTRY(int, freeze(JavaThread* current, intptr_t* sp))
  assert(sp == current->frame_anchor()->last_Java_sp(), "");
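
  // A hedged reading of the check below: the cont_fastpath watermark is only
  // meaningful while it points into the current continuation's stack segment,
  // i.e. between sp and the continuation entry; otherwise it is stale and is
  // cleared.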
  if (current->raw_cont_fastpath() > current->last_continuation()->entry_sp() || current->raw_cont_fastpath() < sp) {
    current->set_cont_fastpath(nullptr);
  }

  return checked_cast<int>(ConfigT::freeze(current, sp));
JRT_END

JRT_LEAF(int, Continuation::prepare_thaw(JavaThread* thread, bool return_barrier))
  return prepare_thaw_internal(thread, return_barrier);
JRT_END

template<typename ConfigT>
static JRT_LEAF(intptr_t*, thaw(JavaThread* thread, int kind))
  // TODO: JRT_LEAF and NoHandleMark is problematic for JFR events.
  // vFrameStreamCommon allocates Handles in RegisterMap for continuations.
  // Also the preemption case with JVMTI events enabled might safepoint so
  // undo the NoSafepointVerifier here and rely on handling by ContinuationWrapper.
  // JRT_ENTRY instead?
  ResetNoHandleMark rnhm;
  DEBUG_ONLY(PauseNoSafepointVerifier pnsv(&__nsv);)

  // we might modify the code cache via BarrierSetNMethod::nmethod_entry_barrier
  MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));
  return ConfigT::thaw(thread, (Continuation::thaw_kind)kind);
JRT_END

JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
  JavaThread* thread = JavaThread::thread_from_jni_environment(env);
  return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
}
JVM_END

///////////

enum class oop_kind { NARROW, WIDE };
template <oop_kind oops, typename BarrierSetT>
class Config {
public:
  typedef Config<oops, BarrierSetT> SelfT;
  using OopT = std::conditional_t<oops == oop_kind::NARROW, narrowOop, oop>;

  static freeze_result freeze(JavaThread* thread, intptr_t* const sp) {
    freeze_result res = freeze_internal<SelfT, false>(thread, sp);
    JFR_ONLY(assert((res == freeze_ok) || (res == thread->last_freeze_fail_result()), "freeze failure not set"));
    return res;
  }

  static freeze_result freeze_preempt(JavaThread* thread, intptr_t* const sp) {
    return freeze_internal<SelfT, true>(thread, sp);
  }

  static intptr_t* thaw(JavaThread* thread, Continuation::thaw_kind kind) {
    return thaw_internal<SelfT>(thread, kind);
  }
};
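
// A Config instantiation binds the oop representation and the GC barrier set
// at compile time, avoiding per-oop dispatch during freeze/thaw. For example
// (illustrative only; the actual selection is made elsewhere, based on
// UseCompressedOops and the active BarrierSet):
//   Config<oop_kind::NARROW, G1BarrierSet>::freeze(thread, sp);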

#ifdef _WINDOWS
static void map_stack_pages(JavaThread* thread, size_t size, address sp) {
  address new_sp = sp - size;
  address watermark = thread->stack_overflow_state()->shadow_zone_growth_watermark();

  if (new_sp < watermark) {
    size_t page_size = os::vm_page_size();
    address last_touched_page = watermark - StackOverflow::stack_shadow_zone_size();
    size_t pages_to_touch = align_up(watermark - new_sp, page_size) / page_size;
    while (pages_to_touch-- > 0) {
      last_touched_page -= page_size;
      *last_touched_page = 0;
    }
    thread->stack_overflow_state()->set_shadow_zone_growth_watermark(new_sp);
  }
}
#endif
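
// Checks that copying `size` bytes below `sp` stays above the shadow zone's
// safe limit; on Windows the pages are also touched first (see map_stack_pages
// above). Copies of at most one page are assumed to be covered by the normal
// stack banging.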
static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
  const size_t page_size = os::vm_page_size();
  if (size > page_size) {
    if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
      return false;
    }
    WINDOWS_ONLY(map_stack_pages(thread, size, sp));
  }
  return true;
}

#ifdef ASSERT
static oop get_continuation(JavaThread* thread) {
  assert(thread != nullptr, "");
  assert(thread->threadObj() != nullptr, "");
  return java_lang_Thread::continuation(thread->threadObj());
}
#endif // ASSERT

inline void clear_anchor(JavaThread* thread) {
  thread->frame_anchor()->clear();
}

static void set_anchor(JavaThread* thread, intptr_t* sp, address pc) {
  assert(pc != nullptr, "");

  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(sp);
  anchor->set_last_Java_pc(pc);
  ContinuationHelper::set_anchor_pd(anchor, sp);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

static void set_anchor(JavaThread* thread, intptr_t* sp) {
  address pc = ContinuationHelper::return_address_at(
                 sp - frame::sender_sp_ret_address_offset());
  set_anchor(thread, sp, pc);
}

static void set_anchor_to_entry(JavaThread* thread, ContinuationEntry* entry) {
  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(entry->entry_sp());
  anchor->set_last_Java_pc(entry->entry_pc());
  ContinuationHelper::set_anchor_to_entry_pd(anchor, entry);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

#if CONT_JFR
class FreezeThawJfrInfo : public StackObj {
  short _e_size;
  short _e_num_interpreted_frames;
 public:

  FreezeThawJfrInfo() : _e_size(0), _e_num_interpreted_frames(0) {}
  inline void record_interpreted_frame() { _e_num_interpreted_frames++; }
  inline void record_size_copied(int size) { _e_size += size << LogBytesPerWord; }
  template<typename Event> void post_jfr_event(Event *e, oop continuation, JavaThread* jt);
};

template<typename Event> void FreezeThawJfrInfo::post_jfr_event(Event* e, oop continuation, JavaThread* jt) {
  if (e->should_commit()) {
    log_develop_trace(continuations)("JFR event: iframes: %d size: %d", _e_num_interpreted_frames, _e_size);
    e->set_carrierThread(JFR_JVM_THREAD_ID(jt));
    e->set_continuationClass(continuation->klass());
    e->set_interpretedFrames(_e_num_interpreted_frames);
    e->set_size(_e_size);
    e->commit();
  }
}
#endif // CONT_JFR

/////////////// FREEZE ////

class FreezeBase : public StackObj {
protected:
  JavaThread* const _thread;
  ContinuationWrapper& _cont;
  bool _barriers; // only set when we allocate a chunk

  intptr_t* _bottom_address;

  // Used for preemption only
  const bool _preempt;
  frame _last_frame;

  // Used to support freezing with held monitors
  int _monitors_in_lockstack;

  int _freeze_size; // total size of all frames plus metadata in words.
  int _total_align_size;

  intptr_t* _cont_stack_top;
  intptr_t* _cont_stack_bottom;

  CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)

#ifdef ASSERT
  intptr_t* _orig_chunk_sp;
  int _fast_freeze_size;
  bool _empty;
#endif

  JvmtiSampledObjectAllocEventCollector* _jvmti_event_collector;

  NOT_PRODUCT(int _frames;)
  DEBUG_ONLY(intptr_t* _last_write;)

  inline FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempt);

public:
  NOINLINE freeze_result freeze_slow();
  void freeze_fast_existing_chunk();

  CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
  void set_jvmti_event_collector(JvmtiSampledObjectAllocEventCollector* jsoaec) { _jvmti_event_collector = jsoaec; }

  inline int size_if_fast_freeze_available();

  inline frame& last_frame() { return _last_frame; }

#ifdef ASSERT
  bool check_valid_fast_path();
#endif

protected:
  inline void init_rest();
  void throw_stack_overflow_on_humongous_chunk();

  // fast path
  inline void copy_to_chunk(intptr_t* from, intptr_t* to, int size);
  inline void unwind_frames();
  inline void patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp);

  // slow path
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) = 0;

  int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }

private:
  // slow path
  frame freeze_start_frame();
  frame freeze_start_frame_on_preempt();
  NOINLINE freeze_result recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top);
  inline frame freeze_start_frame_yield_stub();
  template<typename FKind>
  inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
  inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
  inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
  freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
  void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
  NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
  NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
  NOINLINE void finish_freeze(const frame& f, const frame& top);

  void freeze_lockstack(stackChunkOop chunk);

  inline bool stack_overflow();

  static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
                                                                        : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
  template<typename FKind> static inline frame sender(const frame& f);
  template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
  inline void set_top_frame_metadata_pd(const frame& hf);
  inline void patch_pd(frame& callee, const frame& caller);
  void adjust_interpreted_frame_unextended_sp(frame& f);
  static inline void prepare_freeze_interpreted_top_frame(frame& f);
  static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);

protected:
  void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
  bool freeze_fast_new_chunk(stackChunkOop chunk);
};

template <typename ConfigT>
class Freeze : public FreezeBase {
private:
  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

public:
  inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
    : FreezeBase(thread, cont, frame_sp, preempt) {}

  freeze_result try_freeze_fast();

protected:
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) override { return allocate_chunk(stack_size, argsize_md); }
};

FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt) :
    _thread(thread), _cont(cont), _barriers(false), _preempt(preempt), _last_frame(false /* no initialization */) {
  DEBUG_ONLY(_jvmti_event_collector = nullptr;)

  assert(_thread != nullptr, "");
  assert(_thread->last_continuation()->entry_sp() == _cont.entrySP(), "");

  DEBUG_ONLY(_cont.entry()->verify_cookie();)

  assert(!Interpreter::contains(_cont.entryPC()), "");

  _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
#ifdef _LP64
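  // Keep the bottom 16-byte aligned: drop at most one word so that
  // _bottom_address lands on a frame::frame_alignment boundary.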
  if (((intptr_t)_bottom_address & 0xf) != 0) {
    _bottom_address--;
  }
  assert(is_aligned(_bottom_address, frame::frame_alignment), "");
#endif

  log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
                p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
  assert(_bottom_address != nullptr, "");
  assert(_bottom_address <= _cont.entrySP(), "");
  DEBUG_ONLY(_last_write = nullptr;)

  assert(_cont.chunk_invariant(), "");
  assert(!Interpreter::contains(_cont.entryPC()), "");
#if !defined(PPC64) || defined(ZERO)
  static const int do_yield_frame_size = frame::metadata_words;
#else
  static const int do_yield_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
#endif
  // With preemption doYield() might not have been resolved yet
  assert(_preempt || ContinuationEntry::do_yield_nmethod()->frame_size() == do_yield_frame_size, "");

  if (preempt) {
    _last_frame = _thread->last_frame();
  }

  // properties of the continuation on the stack; all sizes are in words
  _cont_stack_top    = frame_sp + (!preempt ? do_yield_frame_size : 0); // we don't freeze the doYield stub frame
  _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
      - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw

  log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  assert(cont_size() > 0, "");

  if (LockingMode != LM_LIGHTWEIGHT) {
    _monitors_in_lockstack = 0;
  } else {
    _monitors_in_lockstack = _thread->lock_stack().monitor_count();
  }
}

void FreezeBase::init_rest() { // we want to postpone some initialization until after chunk handling
  _freeze_size = 0;
  _total_align_size = 0;
  NOT_PRODUCT(_frames = 0;)
}
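
// Moves the oops of monitors held by this thread from its lock stack to the
// start of the chunk's stack area, and records their count in the chunk.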
void FreezeBase::freeze_lockstack(stackChunkOop chunk) {
  assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "no room for lockstack");

  _thread->lock_stack().move_to_address((oop*)chunk->start_address());
  chunk->set_lockstack_size(checked_cast<uint8_t>(_monitors_in_lockstack));
  chunk->set_has_lockstack(true);
}

void FreezeBase::copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
  stackChunkOop chunk = _cont.tail();
  chunk->copy_from_stack_to_chunk(from, to, size);
  CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)

#ifdef ASSERT
  if (_last_write != nullptr) {
    assert(_last_write == to + size, "Missed a spot: _last_write: " INTPTR_FORMAT " to+size: " INTPTR_FORMAT
        " stack_size: %d _last_write offset: " PTR_FORMAT " to+size: " PTR_FORMAT, p2i(_last_write), p2i(to+size),
        chunk->stack_size(), _last_write-chunk->start_address(), to+size-chunk->start_address());
    _last_write = to;
  }
#endif
}

static void assert_frames_in_continuation_are_safe(JavaThread* thread) {
#ifdef ASSERT
  StackWatermark* watermark = StackWatermarkSet::get(thread, StackWatermarkKind::gc);
  if (watermark == nullptr) {
    return;
  }
  ContinuationEntry* ce = thread->last_continuation();
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip);
  map.set_include_argument_oops(false);
  for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
    watermark->assert_is_frame_safe(f);
  }
#endif // ASSERT
}

#ifdef ASSERT
static bool monitors_on_stack(JavaThread* thread) {
  assert_frames_in_continuation_are_safe(thread);
  ContinuationEntry* ce = thread->last_continuation();
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip);
  map.set_include_argument_oops(false);
  for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
    if ((f.is_interpreted_frame() && ContinuationHelper::InterpretedFrame::is_owning_locks(f)) ||
        (f.is_compiled_frame() && ContinuationHelper::CompiledFrame::is_owning_locks(map.thread(), &map, f)) ||
        (f.is_native_frame() && ContinuationHelper::NativeFrame::is_owning_locks(map.thread(), f))) {
      return true;
    }
  }
  return false;
}
#endif // ASSERT

// Called _after_ the last possible safepoint during the freeze operation (chunk allocation)
void FreezeBase::unwind_frames() {
  ContinuationEntry* entry = _cont.entry();
  entry->flush_stack_processing(_thread);
  assert_frames_in_continuation_are_safe(_thread);
  JFR_ONLY(Jfr::check_and_process_sample_request(_thread);)
  assert(LockingMode != LM_LEGACY || !monitors_on_stack(_thread), "unexpected monitors on stack");
  set_anchor_to_entry(_thread, entry);
}

template <typename ConfigT>
freeze_result Freeze<ConfigT>::try_freeze_fast() {
  assert(_thread->thread_state() == _thread_in_vm, "");
  assert(_thread->cont_fastpath(), "");

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size == 0, "");

  stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words + _monitors_in_lockstack, _cont.argsize() + frame::metadata_words_at_top);
  if (freeze_fast_new_chunk(chunk)) {
    return freeze_ok;
  }
  if (_thread->has_pending_exception()) {
    return freeze_exception;
  }

  // TODO R REMOVE when deopt change is fixed
  assert(!_thread->cont_fastpath() || _barriers, "");
  log_develop_trace(continuations)("-- RETRYING SLOW --");
  return freeze_slow();
}

// Returns size needed if the continuation fits, otherwise 0.
int FreezeBase::size_if_fast_freeze_available() {
  stackChunkOop chunk = _cont.tail();
  if (chunk == nullptr || chunk->is_gc_mode() || chunk->requires_barriers() || chunk->has_mixed_frames()) {
    log_develop_trace(continuations)("chunk available %s", chunk == nullptr ? "no chunk" : "chunk requires barriers");
    return 0;
  }

  int total_size_needed = cont_size();
  const int chunk_sp = chunk->sp();

  // argsize can be nonzero if we have a caller, but the caller could be in a non-empty parent chunk,
  // so we subtract it only if we overlap with the caller, i.e. the current chunk isn't empty.
  // Consider leaving the chunk's argsize set when emptying it and removing the following branch,
  // although that would require changing stackChunkOopDesc::is_empty
  if (!chunk->is_empty()) {
    total_size_needed -= _cont.argsize() + frame::metadata_words_at_top;
  }

  total_size_needed += _monitors_in_lockstack;

  int chunk_free_room = chunk_sp - frame::metadata_words_at_bottom;
  bool available = chunk_free_room >= total_size_needed;
  log_develop_trace(continuations)("chunk available: %s size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
    available ? "yes" : "no", total_size_needed, _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  return available ? total_size_needed : 0;
}
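
// A worked example for size_if_fast_freeze_available (numbers assumed, for
// illustration only): freezing 100 words into a non-empty chunk whose top
// frame shares argsize = 2 words with our bottom frame needs
// 100 - (2 + frame::metadata_words_at_top) words of room below the chunk's sp,
// plus one word per monitor held in the lock stack.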

void FreezeBase::freeze_fast_existing_chunk() {
  stackChunkOop chunk = _cont.tail();

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size > 0, "");

  if (!chunk->is_empty()) { // we are copying into a non-empty chunk
    DEBUG_ONLY(_empty = false;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (chunk->sp_address()
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
             "unexpected saved return address");
    }
#endif

    // the chunk's sp before the freeze, adjusted to point beyond the stack-passed arguments in the topmost frame
    // we overlap; we'll overwrite the chunk's top frame's callee arguments
    const int chunk_start_sp = chunk->sp() + _cont.argsize() + frame::metadata_words_at_top;
    assert(chunk_start_sp <= chunk->stack_size(), "sp not pointing into stack");

    // increase max_size by what we're freezing minus the overlap
    chunk->set_max_thawing_size(chunk->max_thawing_size() + cont_size() - _cont.argsize() - frame::metadata_words_at_top);

    intptr_t* const bottom_sp = _cont_stack_bottom - _cont.argsize() - frame::metadata_words_at_top;
    assert(bottom_sp == _bottom_address, "");
    // Because the chunk isn't empty, we know there's a caller in the chunk, therefore the bottom-most frame
    // should have a return barrier (installed back when we thawed it).
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (bottom_sp
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot)
             == StubRoutines::cont_returnBarrier(),
             "should be the continuation return barrier");
    }
#endif
    // We copy the fp from the chunk back to the stack because it contains some caller data,
    // including, possibly, an oop that might have gone stale since we thawed.
    patch_stack_pd(bottom_sp, chunk->sp_address());
    // we don't patch the return pc at this time, so as not to make the stack unwalkable for async walks

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  } else { // the chunk is empty
    const int chunk_start_sp = chunk->stack_size();

    DEBUG_ONLY(_empty = true;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

    chunk->set_max_thawing_size(cont_size());
    chunk->set_bottom(chunk_start_sp - _cont.argsize() - frame::metadata_words_at_top);
    chunk->set_sp(chunk->bottom());

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  }
}

bool FreezeBase::freeze_fast_new_chunk(stackChunkOop chunk) {
  DEBUG_ONLY(_empty = true;)

  // Install new chunk
  _cont.set_tail(chunk);

  if (UNLIKELY(chunk == nullptr || !_thread->cont_fastpath() || _barriers)) { // OOME/probably humongous
    log_develop_trace(continuations)("Retrying slow. Barriers: %d", _barriers);
    return false;
  }

  chunk->set_max_thawing_size(cont_size());

  // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
  // They'll then be stored twice: in the chunk and in the parent chunk's top frame
  const int chunk_start_sp = cont_size() + frame::metadata_words + _monitors_in_lockstack;
  assert(chunk_start_sp == chunk->stack_size(), "");

  DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

  freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA true));

  return true;
}

void FreezeBase::freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated)) {
  assert(chunk != nullptr, "");
  assert(!chunk->has_mixed_frames(), "");
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  assert(!chunk->requires_barriers(), "");
  assert(chunk == _cont.tail(), "");

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation on the stack, or a consistent chunk.
  unwind_frames();

  log_develop_trace(continuations)("freeze_fast start: chunk " INTPTR_FORMAT " size: %d orig sp: %d argsize: %d",
    p2i((oopDesc*)chunk), chunk->stack_size(), chunk_start_sp, _cont.argsize());
  assert(chunk_start_sp <= chunk->stack_size(), "");
  assert(chunk_start_sp >= cont_size(), "no room in the chunk");

  const int chunk_new_sp = chunk_start_sp - cont_size(); // the chunk's new sp, after freeze
  assert(!(_fast_freeze_size > 0) || (_orig_chunk_sp - (chunk->start_address() + chunk_new_sp)) == (_fast_freeze_size - _monitors_in_lockstack), "");

  intptr_t* chunk_top = chunk->start_address() + chunk_new_sp;
#ifdef ASSERT
  if (!_empty) {
    intptr_t* retaddr_slot = (_orig_chunk_sp
                              - frame::sender_sp_ret_address_offset());
    assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
           "unexpected saved return address");
  }
#endif

  log_develop_trace(continuations)("freeze_fast start: " INTPTR_FORMAT " sp: %d chunk_top: " INTPTR_FORMAT,
                              p2i(chunk->start_address()), chunk_new_sp, p2i(chunk_top));
  intptr_t* from = _cont_stack_top - frame::metadata_words_at_bottom;
  intptr_t* to   = chunk_top - frame::metadata_words_at_bottom;
  copy_to_chunk(from, to, cont_size() + frame::metadata_words_at_bottom);
  // Because we're not patched yet, the chunk is now in a bad state

  // patch return pc of the bottom-most frozen frame (now in the chunk)
  // with the actual caller's return address
  intptr_t* chunk_bottom_retaddr_slot = (chunk_top + cont_size()
                                         - _cont.argsize()
                                         - frame::metadata_words_at_top
                                         - frame::sender_sp_ret_address_offset());
#ifdef ASSERT
  if (!_empty) {
    assert(ContinuationHelper::return_address_at(chunk_bottom_retaddr_slot)
           == StubRoutines::cont_returnBarrier(),
           "should be the continuation return barrier");
  }
#endif
  ContinuationHelper::patch_return_address_at(chunk_bottom_retaddr_slot,
                                              chunk->pc());

  // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
  chunk->set_sp(chunk_new_sp);

  // set chunk->pc to the return address of the topmost frame in the chunk
  if (_preempt) {
    // On aarch64/riscv64, the return pc of the top frame won't necessarily be at sp[-1].
    // Also, on x64, if the top frame is the native wrapper frame, sp[-1] will not
    // be the pc we used when creating the oopmap. Get the top's frame last pc from
    // the anchor instead.
    address last_pc = _last_frame.pc();
    ContinuationHelper::patch_return_address_at(chunk_top - frame::sender_sp_ret_address_offset(), last_pc);
    chunk->set_pc(last_pc);
  } else {
    chunk->set_pc(ContinuationHelper::return_address_at(
                  _cont_stack_top - frame::sender_sp_ret_address_offset()));
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  _cont.write();

  log_develop_trace(continuations)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    chunk->print_on(true, &ls);
  }

  // Verification
  assert(_cont.chunk_invariant(), "");
  chunk->verify();

#if CONT_JFR
  EventContinuationFreezeFast e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(chunk));
    DEBUG_ONLY(e.set_allocate(chunk_is_allocated);)
    e.set_size(cont_size() << LogBytesPerWord);
    e.commit();
  }
#endif
}

NOINLINE freeze_result FreezeBase::freeze_slow() {
#ifdef ASSERT
  ResourceMark rm;
#endif

  log_develop_trace(continuations)("freeze_slow  #" INTPTR_FORMAT, _cont.hash());
  assert(_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");

#if CONT_JFR
  EventContinuationFreezeSlow e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(_cont.continuation()));
    e.commit();
  }
#endif

  init_rest();

  HandleMark hm(Thread::current());

  frame f = freeze_start_frame();

  LogTarget(Debug, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    f.print_on(&ls);
  }

  frame caller; // the frozen caller in the chunk
  freeze_result res = recurse_freeze(f, caller, 0, false, true);

  if (res == freeze_ok) {
    finish_freeze(f, caller);
    _cont.write();
  }

  return res;
}

frame FreezeBase::freeze_start_frame() {
  if (LIKELY(!_preempt)) {
    return freeze_start_frame_yield_stub();
  } else {
    return freeze_start_frame_on_preempt();
  }
}

frame FreezeBase::freeze_start_frame_yield_stub() {
  frame f = _thread->last_frame();
  assert(ContinuationEntry::do_yield_nmethod()->contains(f.pc()), "must be");
  f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
  return f;
}

frame FreezeBase::freeze_start_frame_on_preempt() {
  assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
  return _last_frame;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
  assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
  assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
         || ((top && _preempt) == f.is_native_frame()), "");

  if (stack_overflow()) {
    return freeze_exception;
  }

  if (f.is_compiled_frame()) {
    if (UNLIKELY(f.oop_map() == nullptr)) {
      // special native frame
      return freeze_pinned_native;
    }
    return recurse_freeze_compiled_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (f.is_interpreted_frame()) {
    assert(!f.interpreter_frame_method()->is_native() || (top && _preempt), "");
    return recurse_freeze_interpreted_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (top && _preempt) {
    assert(f.is_native_frame() || f.is_runtime_frame(), "");
    return f.is_native_frame() ? recurse_freeze_native_frame(f, caller) : recurse_freeze_stub_frame(f, caller);
  } else {
    // Frame can't be frozen. Most likely the call_stub or upcall_stub,
    // which indicates there are further native frames up the stack.
    return freeze_pinned_native;
  }
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
template<typename FKind>
inline freeze_result FreezeBase::recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize) {
  assert(FKind::is_instance(f), "");

  assert(fsize > 0, "");
  assert(argsize >= 0, "");
  _freeze_size += fsize;
  NOT_PRODUCT(_frames++;)

  assert(FKind::frame_bottom(f) <= _bottom_address, "");

  // We don't use FKind::frame_bottom(f) == _bottom_address because on x64 there's sometimes an extra word between
  // enterSpecial and an interpreted frame
  if (FKind::frame_bottom(f) >= _bottom_address - 1) {
    return finalize_freeze(f, caller, argsize); // recursion end
  } else {
    frame senderf = sender<FKind>(f);
    assert(FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
    freeze_result result = recurse_freeze(senderf, caller, argsize, FKind::interpreted, false); // recursive call
    return result;
  }
}

inline void FreezeBase::before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("======== FREEZING FRAME interpreted: %d bottom: %d", f.is_interpreted_frame(), is_bottom_frame);
    ls.print_cr("fsize: %d argsize: %d", fsize, argsize);
    f.print_value_on(&ls);
  }
  assert(caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
}

inline void FreezeBase::after_freeze_java_frame(const frame& hf, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    DEBUG_ONLY(hf.print_value_on(&ls);)
    assert(hf.is_heap_frame(), "should be");
    DEBUG_ONLY(print_frame_layout(hf, false, &ls);)
    if (is_bottom_frame) {
      ls.print_cr("bottom h-frame:");
      hf.print_on(&ls);
    }
  }
}

// The parameter argsize_md includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, int argsize_md) {
  int argsize = argsize_md - frame::metadata_words_at_top;
  assert(callee.is_interpreted_frame()
    || ContinuationHelper::Frame::is_stub(callee.cb())
    || callee.cb()->as_nmethod()->is_osr_method()
    || argsize == _cont.argsize(), "argsize: %d cont.argsize: %d", argsize, _cont.argsize());
  log_develop_trace(continuations)("bottom: " INTPTR_FORMAT " count %d size: %d argsize: %d",
    p2i(_bottom_address), _frames, _freeze_size << LogBytesPerWord, argsize);

  LogTarget(Trace, continuations) lt;

#ifdef ASSERT
  bool empty = _cont.is_empty();
  log_develop_trace(continuations)("empty: %d", empty);
#endif

  stackChunkOop chunk = _cont.tail();

  assert(chunk == nullptr || (chunk->max_thawing_size() == 0) == chunk->is_empty(), "");

  _freeze_size += frame::metadata_words; // for top frame's metadata

  int overlap = 0; // the args overlap the caller -- if there is one in this chunk and is of the same kind
  int unextended_sp = -1;
  if (chunk != nullptr) {
    if (!chunk->is_empty()) {
      StackChunkFrameStream<ChunkFrames::Mixed> last(chunk);
      unextended_sp = chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp());
      bool top_interpreted = Interpreter::contains(chunk->pc());
      if (callee.is_interpreted_frame() == top_interpreted) {
        overlap = argsize_md;
      }
    } else {
      unextended_sp = chunk->stack_size() - frame::metadata_words_at_top;
    }
  }

  log_develop_trace(continuations)("finalize _size: %d overlap: %d unextended_sp: %d", _freeze_size, overlap, unextended_sp);

  _freeze_size -= overlap;
  assert(_freeze_size >= 0, "");

  assert(chunk == nullptr || chunk->is_empty()
          || unextended_sp == chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp()), "");
  assert(chunk != nullptr || unextended_sp < _freeze_size, "");

  _freeze_size += _monitors_in_lockstack;

  // _barriers can be set to true by an allocation in freeze_fast, in which case the chunk is available
  bool allocated_old_in_freeze_fast = _barriers;
  assert(!allocated_old_in_freeze_fast || (unextended_sp >= _freeze_size && chunk->is_empty()),
    "Chunk allocated in freeze_fast is of insufficient size "
    "unextended_sp: %d size: %d is_empty: %d", unextended_sp, _freeze_size, chunk->is_empty());
  assert(!allocated_old_in_freeze_fast || (!UseZGC && !UseG1GC), "Unexpected allocation");

  DEBUG_ONLY(bool empty_chunk = true);
  if (unextended_sp < _freeze_size || chunk->is_gc_mode() || (!allocated_old_in_freeze_fast && chunk->requires_barriers())) {
    // ALLOCATE NEW CHUNK

    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      if (chunk == nullptr) {
        ls.print_cr("no chunk");
      } else {
        ls.print_cr("chunk barriers: %d _size: %d free size: %d",
          chunk->requires_barriers(), _freeze_size, chunk->sp() - frame::metadata_words);
        chunk->print_on(&ls);
      }
    }

    _freeze_size += overlap; // we're allocating a new chunk, so no overlap
    // overlap = 0;

    chunk = allocate_chunk_slow(_freeze_size, argsize_md);
    if (chunk == nullptr) {
      return freeze_exception;
    }

    // Install new chunk
    _cont.set_tail(chunk);
    assert(chunk->is_empty(), "");
  } else {
    // REUSE EXISTING CHUNK
    log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
    if (chunk->is_empty()) {
      int sp = chunk->stack_size() - argsize_md;
      chunk->set_sp(sp);
      chunk->set_bottom(sp);
      _freeze_size += overlap;
      assert(chunk->max_thawing_size() == 0, "");
    } DEBUG_ONLY(else empty_chunk = false;)
  }
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  chunk->set_has_mixed_frames(true);

  assert(chunk->requires_barriers() == _barriers, "");
  assert(!_barriers || chunk->is_empty(), "");

  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");

  if (_preempt) {
    frame top_frame = _thread->last_frame();
    if (top_frame.is_interpreted_frame()) {
      // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
      // We need it so that on resume we can restore the sp to the right place, since
      // thawing might add an alignment word to the expression stack (see finish_thaw()).
      // We do it now that we know freezing will be successful.
      prepare_freeze_interpreted_top_frame(top_frame);
    }

    // Do this now so should_process_args_at_top() is set before calling finish_freeze
    // in case we might need to apply GC barriers to frames in this stackChunk.
    if (_thread->at_preemptable_init()) {
      assert(top_frame.is_interpreted_frame(), "only InterpreterRuntime::_new/resolve_from_cache allowed");
      chunk->set_at_klass_init(true);
      Method* m = top_frame.interpreter_frame_method();
      Bytecode current_bytecode = Bytecode(m, top_frame.interpreter_frame_bcp());
      Bytecodes::Code code = current_bytecode.code();
      int exp_size = top_frame.interpreter_frame_expression_stack_size();
      if (code == Bytecodes::Code::_invokestatic && exp_size > 0) {
        chunk->set_has_args_at_top(true);
      }
    }
  }

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation or a consistent chunk.
  unwind_frames();

  chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);

  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top chunk:");
    chunk->print_on(&ls);
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  // The topmost existing frame in the chunk; or an empty frame if the chunk is empty
  caller = StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame();

  DEBUG_ONLY(_last_write = caller.unextended_sp() + (empty_chunk ? argsize_md : overlap);)

  assert(chunk->is_in_chunk(_last_write - _freeze_size),
    "last_write-size: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(_last_write-_freeze_size), p2i(chunk->start_address()));
#ifdef ASSERT
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top hframe before (freeze):");
    assert(caller.is_heap_frame(), "should be");
    caller.print_on(&ls);
  }

  assert(!empty || Continuation::is_continuation_entry_frame(callee, nullptr), "");

  frame entry = sender(callee);

  assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
  assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
#endif

  return freeze_ok_bottom;
}

// After freezing a frame, we may need to adjust some values related to the caller frame.
void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
  if (is_bottom_frame) {
    // If we're the bottom frame, we need to replace the return barrier with the real
    // caller's pc.
    address last_pc = caller.pc();
    assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
    ContinuationHelper::Frame::patch_pc(caller, last_pc);
  } else {
    assert(!caller.is_empty(), "");
  }

  patch_pd(hf, caller);

  if (f.is_interpreted_frame()) {
    assert(hf.is_heap_frame(), "should be");
    ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
  }

#ifdef ASSERT
  if (hf.is_compiled_frame()) {
    if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
      log_develop_trace(continuations)("Freezing deoptimized frame");
      assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
      assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
    }
  }
#endif
}
1205 
1206 #ifdef ASSERT
1207 static void verify_frame_top(const frame& f, intptr_t* top) {
1208   ResourceMark rm;
1209   InterpreterOopMap mask;
1210   f.interpreted_frame_oop_map(&mask);
1211   assert(top <= ContinuationHelper::InterpretedFrame::frame_top(f, &mask),
1212          "frame_top: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT,
1213            p2i(top), p2i(ContinuationHelper::InterpretedFrame::frame_top(f, &mask)));
1214 }
1215 #endif // ASSERT
1216 
1217 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1218 // See also StackChunkFrameStream<frame_kind>::frame_size()
1219 NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, frame& caller,
1220                                                                     int callee_argsize /* incl. metadata */,
1221                                                                     bool callee_interpreted) {
1222   adjust_interpreted_frame_unextended_sp(f);
1223 
1224   // The frame's top never includes the stack arguments to the callee
1225   intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted);
1226   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
1227   const int fsize = pointer_delta_as_int(stack_frame_bottom, stack_frame_top);
1228 
1229   DEBUG_ONLY(verify_frame_top(f, stack_frame_top));
1230 
1231   Method* frame_method = ContinuationHelper::Frame::frame_method(f);
1232   // including metadata between f and its args
1233   const int argsize = ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top;
1234 
1235   log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d",
1236     frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize, callee_interpreted);
1237   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1238   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1239 
1240   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::InterpretedFrame>(f, caller, fsize, argsize);
1241   if (UNLIKELY(result > freeze_ok_bottom)) {
1242     return result;
1243   }
1244 
1245   bool is_bottom_frame = result == freeze_ok_bottom;
1246   assert(!caller.is_empty() || is_bottom_frame, "");
1247 
1248   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, is_bottom_frame);)
1249 
1250   frame hf = new_heap_frame<ContinuationHelper::InterpretedFrame>(f, caller);
1251   _total_align_size += frame::align_wiggle; // add alignment room for internal interpreted frame alignment on AArch64/PPC64
1252 
1253   intptr_t* heap_frame_top = ContinuationHelper::InterpretedFrame::frame_top(hf, callee_argsize, callee_interpreted);
1254   intptr_t* heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
1255   assert(heap_frame_bottom == heap_frame_top + fsize, "");
1256 
1257   // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
1258   // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
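  // E.g., if the interpreter inserted one such padding word for alignment, that
  // word is copied into the chunk as part of the frame, so all fp-relative
  // offsets remain valid when the frame is thawed.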
1259   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1260   assert(!is_bottom_frame || !caller.is_interpreted_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1261 
1262   relativize_interpreted_frame_metadata(f, hf);
1263 
1264   patch(f, hf, caller, is_bottom_frame);
1265 
1266   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1267   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1268   caller = hf;
1269 
1270   // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1271   frame_method->record_gc_epoch();
1272 
1273   return freeze_ok;
1274 }
1275 
1276 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1277 // See also StackChunkFrameStream<frame_kind>::frame_size()
1278 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1279                                                         int callee_argsize /* incl. metadata */,
1280                                                         bool callee_interpreted) {
1281   // The frame's top never includes the stack arguments to the callee
1282   intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1283   intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
  // including metadata between f and its stack args
1285   const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1286   const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
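  // Illustrative numbers only: with stack_argsize(f) == 2 and
  // frame::metadata_words_at_top == 2, argsize == 4, and fsize covers the
  // frame body plus those 4 words of caller overlap.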
1287 
1288   log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
1289                              ContinuationHelper::Frame::frame_method(f) != nullptr ?
1290                              ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1291                              _freeze_size, fsize, argsize);
1292   // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1293   assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1294 
1295   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1296   if (UNLIKELY(result > freeze_ok_bottom)) {
1297     return result;
1298   }
1299 
1300   bool is_bottom_frame = result == freeze_ok_bottom;
1301   assert(!caller.is_empty() || is_bottom_frame, "");
1302 
1303   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1304 
1305   frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);
1306 
1307   intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1308 
1309   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1310   assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1311 
1312   if (caller.is_interpreted_frame()) {
1313     // When thawing the frame we might need to add alignment (see Thaw::align)
1314     _total_align_size += frame::align_wiggle;
1315   }
1316 
1317   patch(f, hf, caller, is_bottom_frame);
1318 
1319   assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1320 
1321   DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1322   caller = hf;
1323   return freeze_ok;
1324 }
1325 
1326 NOINLINE freeze_result FreezeBase::recurse_freeze_stub_frame(frame& f, frame& caller) {
1327   DEBUG_ONLY(frame fsender = sender(f);)
1328   assert(fsender.is_compiled_frame(), "sender should be compiled frame");
1329 
1330   intptr_t* const stack_frame_top = ContinuationHelper::StubFrame::frame_top(f);
1331   const int fsize = f.cb()->frame_size();
1332 
1333   log_develop_trace(continuations)("recurse_freeze_stub_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1334     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1335 
1336   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::StubFrame>(f, caller, fsize, 0);
1337   if (UNLIKELY(result > freeze_ok_bottom)) {
1338     return result;
1339   }
1340 
1341   assert(result == freeze_ok, "should have caller");
1342   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, false /*is_bottom_frame*/);)
1343 
1344   frame hf = new_heap_frame<ContinuationHelper::StubFrame>(f, caller);
1345   intptr_t* heap_frame_top = ContinuationHelper::StubFrame::frame_top(hf);
1346 
1347   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1348 
1349   patch(f, hf, caller, false /*is_bottom_frame*/);
1350 
1351   DEBUG_ONLY(after_freeze_java_frame(hf, false /*is_bottom_frame*/);)
1352 
1353   caller = hf;
1354   return freeze_ok;
1355 }
1356 
1357 NOINLINE freeze_result FreezeBase::recurse_freeze_native_frame(frame& f, frame& caller) {
1358   if (!f.cb()->as_nmethod()->method()->is_object_wait0()) {
1359     assert(f.cb()->as_nmethod()->method()->is_synchronized(), "");
    // Synchronized native method case. Unlike the interpreter native wrapper, the compiled
    // native wrapper tries to acquire the monitor after marshalling the arguments from the
    // caller into the native convention, so that we have a valid oopMap in case we have to
    // block in the slow path. Freezing here would thus also require freezing those registers,
    // and fixing them back up on thaw in case they contain oops. To avoid that complexity,
    // and given that this is a rare case anyway, just pin the vthread to the carrier.
1366     return freeze_pinned_native;
1367   }
1368 
1369   intptr_t* const stack_frame_top = ContinuationHelper::NativeFrame::frame_top(f);
  // There are no stack args, but argsize must include the metadata
1371   const int argsize = frame::metadata_words_at_top;
1372   const int fsize = f.cb()->frame_size() + argsize;
1373 
1374   log_develop_trace(continuations)("recurse_freeze_native_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1375     f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1376 
1377   freeze_result result = recurse_freeze_java_frame<ContinuationHelper::NativeFrame>(f, caller, fsize, argsize);
1378   if (UNLIKELY(result > freeze_ok_bottom)) {
1379     return result;
1380   }
1381 
1382   assert(result == freeze_ok, "should have caller frame");
1383   DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, false /* is_bottom_frame */);)
1384 
1385   frame hf = new_heap_frame<ContinuationHelper::NativeFrame>(f, caller);
1386   intptr_t* heap_frame_top = ContinuationHelper::NativeFrame::frame_top(hf);
1387 
1388   copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1389 
1390   if (caller.is_interpreted_frame()) {
1391     // When thawing the frame we might need to add alignment (see Thaw::align)
1392     _total_align_size += frame::align_wiggle;
1393   }
1394 
1395   patch(f, hf, caller, false /* is_bottom_frame */);
1396 
1397   DEBUG_ONLY(after_freeze_java_frame(hf, false /* is_bottom_frame */);)
1398 
1399   caller = hf;
1400   return freeze_ok;
1401 }
1402 
1403 NOINLINE void FreezeBase::finish_freeze(const frame& f, const frame& top) {
1404   stackChunkOop chunk = _cont.tail();
1405 
1406   LogTarget(Trace, continuations) lt;
1407   if (lt.develop_is_enabled()) {
1408     LogStream ls(lt);
1409     assert(top.is_heap_frame(), "should be");
1410     top.print_on(&ls);
1411   }
1412 
1413   set_top_frame_metadata_pd(top);
1414 
1415   chunk->set_sp(chunk->to_offset(top.sp()));
1416   chunk->set_pc(top.pc());
1417 
1418   chunk->set_max_thawing_size(chunk->max_thawing_size() + _total_align_size);
1419 
1420   assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "clash with lockstack");
1421 
1422   // At this point the chunk is consistent
1423 
1424   if (UNLIKELY(_barriers)) {
1425     log_develop_trace(continuations)("do barriers on old chunk");
    // Serial and Parallel GC can allocate objects directly into the old generation.
    // In that case we want to relativize the derived pointers eagerly so that
    // old chunks are always in GC mode.
    assert(!UseG1GC, "G1 cannot deal with allocating outside of eden");
    assert(!UseZGC, "ZGC cannot deal with allocating chunks visible to marking");
1431     if (UseShenandoahGC) {
1432       _cont.tail()->relativize_derived_pointers_concurrently();
1433     } else {
1434       ContinuationGCSupport::transform_stack_chunk(_cont.tail());
1435     }
1436     // For objects in the old generation we must maintain the remembered set
1437     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>();
1438   }
1439 
1440   log_develop_trace(continuations)("finish_freeze: has_mixed_frames: %d", chunk->has_mixed_frames());
1441   if (lt.develop_is_enabled()) {
1442     LogStream ls(lt);
1443     chunk->print_on(true, &ls);
1444   }
1445 
1446   if (lt.develop_is_enabled()) {
1447     LogStream ls(lt);
1448     ls.print_cr("top hframe after (freeze):");
1449     assert(_cont.last_frame().is_heap_frame(), "should be");
1450     _cont.last_frame().print_on(&ls);
1451     DEBUG_ONLY(print_frame_layout(top, false, &ls);)
1452   }
1453 
1454   assert(_cont.chunk_invariant(), "");
1455 }
1456 
1457 inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive native code
1458   JavaThread* t = !_preempt ? _thread : JavaThread::current();
1459   assert(t == JavaThread::current(), "");
1460   if (os::current_stack_pointer() < t->stack_overflow_state()->shadow_zone_safe_limit()) {
1461     if (!_preempt) {
1462       ContinuationWrapper::SafepointOp so(t, _cont); // could also call _cont.done() instead
1463       Exceptions::_throw_msg(t, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Stack overflow while freezing");
1464     }
1465     return true;
1466   }
1467   return false;
1468 }
1469 
1470 class StackChunkAllocator : public MemAllocator {
1471   const size_t                                 _stack_size;
1472   int                                          _argsize_md;
1473   ContinuationWrapper&                         _continuation_wrapper;
1474   JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector;
1475   mutable bool                                 _took_slow_path;
1476 
1477   // Does the minimal amount of initialization needed for a TLAB allocation.
1478   // We don't need to do a full initialization, as such an allocation need not be immediately walkable.
1479   virtual oop initialize(HeapWord* mem) const override {
1480     assert(_stack_size > 0, "");
1481     assert(_stack_size <= max_jint, "");
1482     assert(_word_size > _stack_size, "");
1483 
1484     // zero out fields (but not the stack)
1485     const size_t hs = oopDesc::header_size();
1486     if (oopDesc::has_klass_gap()) {
1487       oopDesc::set_klass_gap(mem, 0);
1488     }
1489     Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);
1490 
1491     int bottom = (int)_stack_size - _argsize_md;
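    // An empty chunk thus has sp == bottom == stack_size - argsize_md, with the
    // words in [bottom, stack_size) reserved for the bottom frame's stack
    // arguments plus metadata. E.g. (illustrative values), stack_size == 256 and
    // argsize_md == 3 give bottom == sp == 253.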
1492 
1493     jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
1494     jdk_internal_vm_StackChunk::set_bottom(mem, bottom);
1495     jdk_internal_vm_StackChunk::set_sp(mem, bottom);
1496 
1497     return finish(mem);
1498   }
1499 
1500   stackChunkOop allocate_fast() const {
1501     if (!UseTLAB) {
1502       return nullptr;
1503     }
1504 
1505     HeapWord* const mem = MemAllocator::mem_allocate_inside_tlab_fast();
1506     if (mem == nullptr) {
1507       return nullptr;
1508     }
1509 
1510     oop obj = initialize(mem);
1511     return stackChunkOopDesc::cast(obj);
1512   }
1513 
1514 public:
1515   StackChunkAllocator(Klass* klass,
1516                       size_t word_size,
1517                       Thread* thread,
1518                       size_t stack_size,
1519                       int argsize_md,
1520                       ContinuationWrapper& continuation_wrapper,
1521                       JvmtiSampledObjectAllocEventCollector* jvmti_event_collector)
1522     : MemAllocator(klass, word_size, thread),
1523       _stack_size(stack_size),
1524       _argsize_md(argsize_md),
1525       _continuation_wrapper(continuation_wrapper),
1526       _jvmti_event_collector(jvmti_event_collector),
1527       _took_slow_path(false) {}
1528 
  // Provides its own specialized allocation, which skips instrumentation
  // if the memory can be allocated without going to a slow path.
1531   stackChunkOop allocate() const {
1532     // First try to allocate without any slow-paths or instrumentation.
1533     stackChunkOop obj = allocate_fast();
1534     if (obj != nullptr) {
1535       return obj;
1536     }
1537 
1538     // Now try full-blown allocation with all expensive operations,
1539     // including potentially safepoint operations.
1540     _took_slow_path = true;
1541 
1542     // Protect unhandled Loom oops
1543     ContinuationWrapper::SafepointOp so(_thread, _continuation_wrapper);
1544 
1545     // Can safepoint
1546     _jvmti_event_collector->start();
1547 
1548     // Can safepoint
1549     return stackChunkOopDesc::cast(MemAllocator::allocate());
1550   }
1551 
1552   bool took_slow_path() const {
1553     return _took_slow_path;
1554   }
1555 };
1556 
1557 template <typename ConfigT>
1558 stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size, int argsize_md) {
1559   log_develop_trace(continuations)("allocate_chunk allocating new chunk");
1560 
1561   InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass());
1562   size_t size_in_words = klass->instance_size(stack_size);
1563 
1564   if (CollectedHeap::stack_chunk_max_size() > 0 && size_in_words >= CollectedHeap::stack_chunk_max_size()) {
1565     if (!_preempt) {
1566       throw_stack_overflow_on_humongous_chunk();
1567     }
1568     return nullptr;
1569   }
1570 
1571   JavaThread* current = _preempt ? JavaThread::current() : _thread;
1572   assert(current == JavaThread::current(), "should be current");
1573 
1574   // Allocate the chunk.
1575   //
1576   // This might safepoint while allocating, but all safepointing due to
  // instrumentation has been deferred. This property is important for
1578   // some GCs, as this ensures that the allocated object is in the young
1579   // generation / newly allocated memory.
1580   StackChunkAllocator allocator(klass, size_in_words, current, stack_size, argsize_md, _cont, _jvmti_event_collector);
1581   stackChunkOop chunk = allocator.allocate();
1582 
1583   if (chunk == nullptr) {
1584     return nullptr; // OOME
1585   }
1586 
1587   // assert that chunk is properly initialized
1588   assert(chunk->stack_size() == (int)stack_size, "");
1589   assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size);
1590   assert(chunk->sp() == chunk->bottom(), "");
1591   assert((intptr_t)chunk->start_address() % 8 == 0, "");
1592   assert(chunk->max_thawing_size() == 0, "");
1593   assert(chunk->pc() == nullptr, "");
1594   assert(chunk->is_empty(), "");
1595   assert(chunk->flags() == 0, "");
1596   assert(chunk->is_gc_mode() == false, "");
1597   assert(chunk->lockstack_size() == 0, "");
1598 
1599   // fields are uninitialized
1600   chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk());
1601   chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation());
1602 
1603 #if INCLUDE_ZGC
1604   if (UseZGC) {
1605     ZStackChunkGCData::initialize(chunk);
1606     assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation");
1607     _barriers = false;
1608   } else
1609 #endif
1610 #if INCLUDE_SHENANDOAHGC
1611   if (UseShenandoahGC) {
1612     _barriers = chunk->requires_barriers();
1613   } else
1614 #endif
1615   {
1616     if (!allocator.took_slow_path()) {
1617       // Guaranteed to be in young gen / newly allocated memory
1618       assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation");
1619       _barriers = false;
1620     } else {
1621       // Some GCs could put direct allocations in old gen for slow-path
1622       // allocations; need to explicitly check if that was the case.
1623       _barriers = chunk->requires_barriers();
1624     }
1625   }
1626 
1627   if (_barriers) {
1628     log_develop_trace(continuations)("allocation requires barriers");
1629   }
1630 
1631   assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1632 
1633   return chunk;
1634 }
1635 
1636 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1637   ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1638   Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1639 }
1640 
1641 class AnchorMark : public StackObj {
1642   JavaThread* _current;
1643   frame& _top_frame;
1644   intptr_t* _last_sp_from_frame;
1645   bool _is_interpreted;
1646 
1647  public:
1648   AnchorMark(JavaThread* current, frame& f) : _current(current), _top_frame(f), _is_interpreted(false) {
1649     intptr_t* sp = anchor_mark_set_pd();
1650     set_anchor(_current, sp);
1651   }
1652   ~AnchorMark() {
1653     clear_anchor(_current);
1654     anchor_mark_clear_pd();
1655   }
1656   inline intptr_t* anchor_mark_set_pd();
1657   inline void anchor_mark_clear_pd();
1658 };
1659 
1660 #if INCLUDE_JVMTI
1661 static int num_java_frames(ContinuationWrapper& cont) {
1662   ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1663   int count = 0;
1664   for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1665     count += chunk->num_java_frames();
1666   }
1667   return count;
1668 }
1669 
1670 static void invalidate_jvmti_stack(JavaThread* thread) {
1671   if (thread->is_interp_only_mode()) {
1672     JvmtiThreadState *state = thread->jvmti_thread_state();
    if (state != nullptr) {
      state->invalidate_cur_stack_depth();
    }
1675   }
1676 }
1677 
1678 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1679   if (JvmtiExport::can_post_frame_pop()) {
1680     int num_frames = num_java_frames(cont);
1681 
1682     ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1683     JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1684   }
1685   invalidate_jvmti_stack(thread);
1686 }
1687 
1688 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top, Continuation::preempt_kind pk) {
1689   assert(current->vthread() != nullptr, "must be");
1690 
1691   HandleMarkCleaner hm(current);  // Cleanup vth and so._conth Handles
1692   Handle vth(current, current->vthread());
1693   ContinuationWrapper::SafepointOp so(current, cont);
1694 
1695   AnchorMark am(current, top);  // Set anchor so that the stack is walkable.
1696 
1697   JRT_BLOCK
1698     JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
1699 
1700     if (current->pending_contended_entered_event()) {
1701       // No monitor JVMTI events for ObjectLocker case.
1702       if (pk != Continuation::object_locker) {
1703         JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1704       }
1705       current->set_contended_entered_monitor(nullptr);
1706     }
1707   JRT_BLOCK_END
1708 }
1709 #endif // INCLUDE_JVMTI
1710 
1711 #ifdef ASSERT
// There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1713 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1714 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1715 bool FreezeBase::check_valid_fast_path() {
1716   ContinuationEntry* ce = _thread->last_continuation();
1717   RegisterMap map(_thread,
1718                   RegisterMap::UpdateMap::skip,
1719                   RegisterMap::ProcessFrames::skip,
1720                   RegisterMap::WalkContinuation::skip);
1721   map.set_include_argument_oops(false);
1722   bool is_top_frame = true;
1723   for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1724     if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1725       return false;
1726     }
1727   }
1728   return true;
1729 }
1730 
1731 static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr, const char** code_name_ptr, int* bci_ptr) {
1732   JavaThread* current = JavaThread::current();
1733   ResourceMark rm(current);
1734 
1735   Method* m;
1736   const char* code_name;
1737   int bci;
1738   if (preempt_kind == Continuation::monitorenter) {
1739     assert(top.is_interpreted_frame() || top.is_runtime_frame(), "");
1740     bool at_sync_method;
1741     if (top.is_interpreted_frame()) {
1742       m = top.interpreter_frame_method();
1743       assert(!m->is_native() || m->is_synchronized(), "invalid method %s", m->external_name());
1744       address bcp = top.interpreter_frame_bcp();
1745       assert(bcp != 0 || m->is_native(), "");
1746       at_sync_method = m->is_synchronized() && (bcp == 0 || bcp == m->code_base());
      // bcp is advanced on monitorenter before making the VM call; adjust for that.
1748       bool at_sync_bytecode = bcp > m->code_base() && Bytecode(m, bcp - 1).code() == Bytecodes::Code::_monitorenter;
1749       assert(at_sync_method || at_sync_bytecode, "");
1750       bci = at_sync_method ? -1 : top.interpreter_frame_bci();
1751     } else {
1752       CodeBlob* cb = top.cb();
1753       RegisterMap reg_map(current,
1754                   RegisterMap::UpdateMap::skip,
1755                   RegisterMap::ProcessFrames::skip,
1756                   RegisterMap::WalkContinuation::skip);
1757       frame fr = top.sender(&reg_map);
1758       vframe*  vf  = vframe::new_vframe(&fr, &reg_map, current);
1759       compiledVFrame* cvf = compiledVFrame::cast(vf);
1760       m = cvf->method();
1761       bci = cvf->scope()->bci();
1762       at_sync_method = bci == SynchronizationEntryBCI;
1763       assert(!at_sync_method || m->is_synchronized(), "bci is %d but method %s is not synchronized", bci, m->external_name());
1764       bool is_c1_monitorenter = false, is_c2_monitorenter = false;
1765       COMPILER1_PRESENT(is_c1_monitorenter = cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
1766                                              cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id);)
1767       COMPILER2_PRESENT(is_c2_monitorenter = cb == CodeCache::find_blob(OptoRuntime::complete_monitor_locking_Java());)
1768       assert(is_c1_monitorenter || is_c2_monitorenter, "wrong runtime stub frame");
1769     }
1770     code_name = at_sync_method ? "synchronized method" : "monitorenter";
1771   } else if (preempt_kind == Continuation::object_wait) {
1772     assert(top.is_interpreted_frame() || top.is_native_frame(), "");
1773     m  = top.is_interpreted_frame() ? top.interpreter_frame_method() : top.cb()->as_nmethod()->method();
1774     assert(m->is_object_wait0(), "");
1775     bci = 0;
1776     code_name = "";
1777   } else {
1778     assert(preempt_kind == Continuation::object_locker, "invalid preempt kind");
1779     assert(top.is_interpreted_frame(), "");
1780     m = top.interpreter_frame_method();
1781     Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
1782     Bytecodes::Code code = current_bytecode.code();
    assert(code == Bytecodes::Code::_new || code == Bytecodes::Code::_invokestatic ||
           code == Bytecodes::Code::_getstatic || code == Bytecodes::Code::_putstatic, "invalid bytecode");
1785     bci = top.interpreter_frame_bci();
1786     code_name = Bytecodes::name(current_bytecode.code());
1787   }
1788   assert(bci >= 0 || m->is_synchronized(), "invalid bci:%d at method %s", bci, m->external_name());
1789 
1790   if (m_ptr != nullptr) {
1791     *m_ptr = m;
1792     *code_name_ptr = code_name;
1793     *bci_ptr = bci;
1794   }
1795 }
1796 
1797 static void log_preempt_after_freeze(ContinuationWrapper& cont) {
1798   JavaThread* current = cont.thread();
1799   StackChunkFrameStream<ChunkFrames::Mixed> sfs(cont.tail());
1800   frame top_frame = sfs.to_frame();
1801   bool at_init = current->at_preemptable_init();
1802   bool at_enter = current->current_pending_monitor() != nullptr;
1803   bool at_wait = current->current_waiting_monitor() != nullptr;
1804   assert((at_enter && !at_wait) || (!at_enter && at_wait), "");
1805   Continuation::preempt_kind pk = at_init ? Continuation::object_locker : at_enter ? Continuation::monitorenter : Continuation::object_wait;
1806 
1807   Method* m = nullptr;
1808   const char* code_name = nullptr;
1809   int bci = InvalidFrameStateBci;
1810   verify_frame_kind(top_frame, pk, &m, &code_name, &bci);
1811   assert(m != nullptr && code_name != nullptr && bci != InvalidFrameStateBci, "should be set");
1812 
1813   ResourceMark rm(current);
1814   if (bci < 0) {
1815     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " while synchronizing on %smethod %s", current->monitor_owner_id(), m->is_native() ? "native " : "", m->external_name());
1816   } else if (m->is_object_wait0()) {
1817     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at native method %s", current->monitor_owner_id(), m->external_name());
1818   } else {
1819     Klass* k = current->preempt_init_klass();
1820     assert(k != nullptr || !at_init, "");
1821     log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at %s(bci:%d) in method %s %s%s", current->monitor_owner_id(),
1822             code_name, bci, m->external_name(), at_init ? "trying to initialize klass " : "", at_init ? k->external_name() : "");
1823   }
1824 }
1825 #endif // ASSERT
1826 
1827 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1828   verify_continuation(cont.continuation());
1829   assert(!cont.is_empty(), "");
1830 
1831   log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1832   return freeze_ok;
1833 }
1834 
1835 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1836   if (UNLIKELY(res != freeze_ok)) {
1837     JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1838     verify_continuation(cont.continuation());
1839     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1840     return res;
1841   }
1842 
1843   JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1844   return freeze_epilog(cont);
1845 }
1846 
1847 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1848   if (UNLIKELY(res != freeze_ok)) {
1849     verify_continuation(cont.continuation());
1850     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1851     return res;
1852   }
1853 
1854   // Set up things so that on return to Java we jump to preempt stub.
1855   patch_return_pc_with_preempt_stub(old_last_frame);
1856   cont.tail()->set_preempted(true);
1857   DEBUG_ONLY(log_preempt_after_freeze(cont);)
1858   return freeze_epilog(cont);
1859 }
1860 
1861 template<typename ConfigT, bool preempt>
1862 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1863   assert(!current->has_pending_exception(), "");
1864 
1865 #ifdef ASSERT
  log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1867   log_frames(current, false);
1868 #endif
1869 
1870   CONT_JFR_ONLY(EventContinuationFreeze event;)
1871 
1872   ContinuationEntry* entry = current->last_continuation();
1873 
1874   oop oopCont = entry->cont_oop(current);
1875   assert(oopCont == current->last_continuation()->cont_oop(current), "");
1876   assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1877 
1878   verify_continuation(oopCont);
1879   ContinuationWrapper cont(current, oopCont);
1880   log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1881 
1882   assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1883 
1884   assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
1885          "Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1886 
1887   if (entry->is_pinned() || current->held_monitor_count() > 0) {
1888     log_develop_debug(continuations)("PINNED due to critical section/hold monitor");
1889     verify_continuation(cont.continuation());
1890     freeze_result res = entry->is_pinned() ? freeze_pinned_cs : freeze_pinned_monitor;
1891     if (!preempt) {
1892       JFR_ONLY(current->set_last_freeze_fail_result(res);)
1893     }
1894     log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1895     // Avoid Thread.yield() loops without safepoint polls.
1896     if (SafepointMechanism::should_process(current) && !preempt) {
1897       cont.done(); // allow safepoint
1898       ThreadInVMfromJava tivmfj(current);
1899     }
1900     return res;
1901   }
1902 
1903   Freeze<ConfigT> freeze(current, cont, sp, preempt);
1904 
1905   assert(!current->cont_fastpath() || freeze.check_valid_fast_path(), "");
1906   bool fast = UseContinuationFastPath && current->cont_fastpath();
1907   if (fast && freeze.size_if_fast_freeze_available() > 0) {
1908     freeze.freeze_fast_existing_chunk();
1909     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1910     return !preempt ? freeze_epilog(cont) : preempt_epilog(cont, freeze_ok, freeze.last_frame());
1911   }
1912 
1913   if (preempt) {
1914     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1915     freeze.set_jvmti_event_collector(&jsoaec);
1916 
1917     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1918 
1919     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1920     preempt_epilog(cont, res, freeze.last_frame());
1921     return res;
1922   }
1923 
1924   log_develop_trace(continuations)("chunk unavailable; transitioning to VM");
1925   assert(current == JavaThread::current(), "must be current thread");
1926   JRT_BLOCK
1927     // delays a possible JvmtiSampledObjectAllocEventCollector in alloc_chunk
1928     JvmtiSampledObjectAllocEventCollector jsoaec(false);
1929     freeze.set_jvmti_event_collector(&jsoaec);
1930 
1931     freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1932 
1933     CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1934     freeze_epilog(current, cont, res);
1935     cont.done(); // allow safepoint in the transition back to Java
1936     return res;
1937   JRT_BLOCK_END
1938 }
1939 
1940 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) {
1941   ContinuationEntry* entry = thread->last_continuation();
1942   if (entry == nullptr) {
1943     return freeze_ok;
1944   }
1945   if (entry->is_pinned()) {
1946     return freeze_pinned_cs;
1947   } else if (thread->held_monitor_count() > 0) {
1948     return freeze_pinned_monitor;
1949   }
1950 
1951   RegisterMap map(thread,
1952                   RegisterMap::UpdateMap::include,
1953                   RegisterMap::ProcessFrames::skip,
1954                   RegisterMap::WalkContinuation::skip);
1955   map.set_include_argument_oops(false);
1956   frame f = thread->last_frame();
1957 
1958   if (!safepoint) {
1959     f = f.sender(&map); // this is the yield frame
1960   } else { // safepoint yield
1961 #if (defined(X86) || defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
1962     f.set_fp(f.real_fp()); // Instead of this, maybe in ContinuationWrapper::set_last_frame always use the real_fp?
1963 #else
1964     Unimplemented();
1965 #endif
1966     if (!Interpreter::contains(f.pc())) {
1967       assert(ContinuationHelper::Frame::is_stub(f.cb()), "must be");
1968       assert(f.oop_map() != nullptr, "must be");
1969       f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
1970     }
1971   }
1972 
1973   while (true) {
1974     if ((f.is_interpreted_frame() && f.interpreter_frame_method()->is_native()) || f.is_native_frame()) {
1975       return freeze_pinned_native;
1976     }
1977 
1978     f = f.sender(&map);
1979     if (!Continuation::is_frame_in_continuation(entry, f)) {
1980       oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop(thread));
1981       if (scope == cont_scope) {
1982         break;
1983       }
1984       intx monitor_count = entry->parent_held_monitor_count();
1985       entry = entry->parent();
1986       if (entry == nullptr) {
1987         break;
1988       }
1989       if (entry->is_pinned()) {
1990         return freeze_pinned_cs;
1991       } else if (monitor_count > 0) {
1992         return freeze_pinned_monitor;
1993       }
1994     }
1995   }
1996   return freeze_ok;
1997 }
1998 
1999 /////////////// THAW ////
2000 
2001 static int thaw_size(stackChunkOop chunk) {
2002   int size = chunk->max_thawing_size();
2003   size += frame::metadata_words; // For the top pc+fp in push_return_frame or top = stack_sp - frame::metadata_words in thaw_fast
2004   size += 2*frame::align_wiggle; // in case of alignments at the top and bottom
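  // E.g. (illustrative): max_thawing_size() == 100 words gives a required size of
  // 100 + frame::metadata_words + 2 * frame::align_wiggle words; the caller
  // (prepare_thaw_internal) converts this word count to bytes.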
2005   return size;
2006 }
2007 
2008 // make room on the stack for thaw
2009 // returns the size in bytes, or 0 on failure
2010 static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier) {
2011   log_develop_trace(continuations)("~~~~ prepare_thaw return_barrier: %d", return_barrier);
2012 
2013   assert(thread == JavaThread::current(), "");
2014 
2015   ContinuationEntry* ce = thread->last_continuation();
2016   assert(ce != nullptr, "");
2017   oop continuation = ce->cont_oop(thread);
2018   assert(continuation == get_continuation(thread), "");
2019   verify_continuation(continuation);
2020 
2021   stackChunkOop chunk = jdk_internal_vm_Continuation::tail(continuation);
2022   assert(chunk != nullptr, "");
2023 
2024   // The tail can be empty because it might still be available for another freeze.
2025   // However, here we want to thaw, so we get rid of it (it will be GCed).
2026   if (UNLIKELY(chunk->is_empty())) {
2027     chunk = chunk->parent();
2028     assert(chunk != nullptr, "");
2029     assert(!chunk->is_empty(), "");
2030     jdk_internal_vm_Continuation::set_tail(continuation, chunk);
2031   }
2032 
2033   // Verification
2034   chunk->verify();
2035   assert(chunk->max_thawing_size() > 0, "chunk invariant violated; expected to not be empty");
2036 
2037   // Only make space for the last chunk because we only thaw from the last chunk
2038   int size = thaw_size(chunk) << LogBytesPerWord;
2039 
2040   const address bottom = (address)thread->last_continuation()->entry_sp();
2041   // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
2042   // for the Java frames in the check below.
2043   if (!stack_overflow_check(thread, size + 300, bottom)) {
2044     return 0;
2045   }
2046 
2047   log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
2048                               p2i(bottom), p2i(bottom - size), size);
2049   return size;
2050 }
2051 
2052 class ThawBase : public StackObj {
2053 protected:
2054   JavaThread* _thread;
2055   ContinuationWrapper& _cont;
2056   CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
2057 
2058   intptr_t* _fastpath;
2059   bool _barriers;
2060   bool _preempted_case;
2061   bool _process_args_at_top;
2062   intptr_t* _top_unextended_sp_before_thaw;
2063   int _align_size;
2064   DEBUG_ONLY(intptr_t* _top_stack_address);
2065 
2066   // Only used for some preemption cases.
2067   ObjectMonitor* _monitor;
2068 
2069   StackChunkFrameStream<ChunkFrames::Mixed> _stream;
2070 
2071   NOT_PRODUCT(int _frames;)
2072 
2073 protected:
2074   ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
2075       _thread(thread), _cont(cont),
2076       _fastpath(nullptr) {
2077     DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
    assert(cont.tail() != nullptr, "no last chunk");
2079     DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
2080   }
2081 
2082   void clear_chunk(stackChunkOop chunk);
2083   template<bool check_stub>
2084   int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
2085   void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
2086 
2087   void thaw_lockstack(stackChunkOop chunk);
2088 
2089   // fast path
2090   inline void prefetch_chunk_pd(void* start, int size_words);
2091   void patch_return(intptr_t* sp, bool is_last);
2092 
2093   intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
2094   inline intptr_t* push_cleanup_continuation();
2095   inline intptr_t* push_preempt_adapter();
2096   intptr_t* redo_vmcall(JavaThread* current, frame& top);
2097   void throw_interrupted_exception(JavaThread* current, frame& top);
2098 
2099   void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
2100   void finish_thaw(frame& f);
2101 
2102 private:
2103   template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2104   void finalize_thaw(frame& entry, int argsize);
2105 
2106   inline bool seen_by_gc();
2107 
2108   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2109   inline void after_thaw_java_frame(const frame& f, bool bottom);
2110   inline void patch(frame& f, const frame& caller, bool bottom);
2111   void clear_bitmap_bits(address start, address end);
2112 
2113   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
2114   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2115   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2116   void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2117 
2118   void push_return_frame(frame& f);
2119   inline frame new_entry_frame();
2120   template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
2121   inline void patch_pd(frame& f, const frame& sender);
2122   inline void patch_pd(frame& f, intptr_t* caller_sp);
2123   inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2124 
2125   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2126 
2127   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2128 
2129  public:
2130   CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2131 };
2132 
2133 template <typename ConfigT>
2134 class Thaw : public ThawBase {
2135 public:
2136   Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2137 
2138   inline bool can_thaw_fast(stackChunkOop chunk) {
2139     return    !_barriers
2140            &&  _thread->cont_fastpath_thread_state()
2141            && !chunk->has_thaw_slowpath_condition()
2142            && !PreserveFramePointer;
2143   }
2144 
2145   inline intptr_t* thaw(Continuation::thaw_kind kind);
2146   template<bool check_stub = false>
2147   NOINLINE intptr_t* thaw_fast(stackChunkOop chunk);
2148   NOINLINE intptr_t* thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind);
2149   inline void patch_caller_links(intptr_t* sp, intptr_t* bottom);
2150 };
2151 
2152 template <typename ConfigT>
2153 inline intptr_t* Thaw<ConfigT>::thaw(Continuation::thaw_kind kind) {
2154   verify_continuation(_cont.continuation());
2155   assert(!jdk_internal_vm_Continuation::done(_cont.continuation()), "");
2156   assert(!_cont.is_empty(), "");
2157 
2158   stackChunkOop chunk = _cont.tail();
2159   assert(chunk != nullptr, "guaranteed by prepare_thaw");
2160   assert(!chunk->is_empty(), "guaranteed by prepare_thaw");
2161 
2162   _barriers = chunk->requires_barriers();
2163   return (LIKELY(can_thaw_fast(chunk))) ? thaw_fast(chunk)
2164                                         : thaw_slow(chunk, kind);
2165 }
2166 
2167 class ReconstructedStack : public StackObj {
2168   intptr_t* _base;  // _cont.entrySP(); // top of the entry frame
2169   int _thaw_size;
2170   int _argsize;
2171 public:
2172   ReconstructedStack(intptr_t* base, int thaw_size, int argsize)
2173   : _base(base), _thaw_size(thaw_size - (argsize == 0 ? frame::metadata_words_at_top : 0)), _argsize(argsize) {
    // The only possible source of misalignment is stack-passed arguments, because compiled frames are 16-byte aligned.
2175     assert(argsize != 0 || (_base - _thaw_size) == ContinuationHelper::frame_align_pointer(_base - _thaw_size), "");
2176     // We're at most one alignment word away from entrySP
2177     assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2178   }
2179 
2180   int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2181 
2182   // top and bottom stack pointers
2183   intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2184   intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2185 
2186   // several operations operate on the totality of the stack being reconstructed,
2187   // including the metadata words
2188   intptr_t* top() const { return sp() - frame::metadata_words_at_bottom;  }
2189   int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2190 };
2191 
2192 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2193   chunk->set_sp(chunk->bottom());
2194   chunk->set_max_thawing_size(0);
2195 }
2196 
2197 template<bool check_stub>
2198 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2199   bool empty = false;
2200   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2201   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2202   assert(chunk_sp == f.sp(), "");
2203   assert(chunk_sp == f.unextended_sp(), "");
2204 
2205   int frame_size = f.cb()->frame_size();
2206   argsize = f.stack_argsize();
2207 
2208   assert(!f.is_stub() || check_stub, "");
2209   if (check_stub && f.is_stub()) {
    // If we don't also thaw the compiled frame under the stub, then after
    // restoring the saved registers back in Java we would hit the return
    // barrier to thaw one more frame, effectively overwriting the restored
    // registers during that call.
2213     f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2214     assert(!f.is_done(), "");
2215 
2216     f.get_cb();
2217     assert(f.is_compiled(), "");
2218     frame_size += f.cb()->frame_size();
2219     argsize = f.stack_argsize();
2220 
2221     if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2222       // The caller of the runtime stub when the continuation is preempted is not at a
2223       // Java call instruction, and so cannot rely on nmethod patching for deopt.
2224       log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2225       f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2226     }
2227   }
2228 
2229   f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2230   empty = f.is_done();
2231   assert(!empty || argsize == chunk->argsize(), "");
2232 
2233   if (empty) {
2234     clear_chunk(chunk);
2235   } else {
2236     chunk->set_sp(chunk->sp() + frame_size);
2237     chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2238     // We set chunk->pc to the return pc into the next frame
2239     chunk->set_pc(f.pc());
2240 #ifdef ASSERT
2241     {
2242       intptr_t* retaddr_slot = (chunk_sp
2243                                 + frame_size
2244                                 - frame::sender_sp_ret_address_offset());
2245       assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2246              "unexpected pc");
2247     }
2248 #endif
2249   }
2250   assert(empty == chunk->is_empty(), "");
2251   // returns the size required to store the frame on stack, and because it is a
2252   // compiled frame, it must include a copy of the arguments passed by the caller
2253   return frame_size + argsize + frame::metadata_words_at_top;
2254 }
2255 
2256 void ThawBase::thaw_lockstack(stackChunkOop chunk) {
2257   int lockStackSize = chunk->lockstack_size();
2258   assert(lockStackSize > 0 && lockStackSize <= LockStack::CAPACITY, "");
2259 
2260   oop tmp_lockstack[LockStack::CAPACITY];
2261   chunk->transfer_lockstack(tmp_lockstack, _barriers);
2262   _thread->lock_stack().move_from_address(tmp_lockstack, lockStackSize);
2263 
2264   chunk->set_lockstack_size(0);
2265   chunk->set_has_lockstack(false);
2266 }
2267 
2268 void ThawBase::copy_from_chunk(intptr_t* from, intptr_t* to, int size) {
2269   assert(to >= _top_stack_address, "overwrote past thawing space"
2270     " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(to), p2i(_top_stack_address));
2271   assert(to + size <= _cont.entrySP(), "overwrote past thawing space");
2272   _cont.tail()->copy_from_chunk_to_stack(from, to, size);
2273   CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)
2274 }
2275 
2276 void ThawBase::patch_return(intptr_t* sp, bool is_last) {
2277   log_develop_trace(continuations)("thaw_fast patching -- sp: " INTPTR_FORMAT, p2i(sp));
2278 
2279   address pc = !is_last ? StubRoutines::cont_returnBarrier() : _cont.entryPC();
2280   ContinuationHelper::patch_return_address_at(
2281     sp - frame::sender_sp_ret_address_offset(),
2282     pc);
2283 }
2284 
2285 template <typename ConfigT>
2286 template<bool check_stub>
2287 NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) {
2288   assert(chunk == _cont.tail(), "");
2289   assert(!chunk->has_mixed_frames(), "");
2290   assert(!chunk->requires_barriers(), "");
2291   assert(!chunk->has_bitmap(), "");
2292   assert(!_thread->is_interp_only_mode(), "");
2293 
2294   LogTarget(Trace, continuations) lt;
2295   if (lt.develop_is_enabled()) {
2296     LogStream ls(lt);
2297     ls.print_cr("thaw_fast");
2298     chunk->print_on(true, &ls);
2299   }
2300 
  // Below this threshold we thaw the whole chunk; above it we thaw just one frame at a time.
2302   static const int threshold = 500; // words
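  // E.g. (illustrative sizes): a 300-word chunk is thawed in a single copy,
  // while for an 800-word chunk we thaw one frame now and rely on the return
  // barrier to thaw the remaining frames on demand.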
2303 
2304   const int full_chunk_size = chunk->stack_size() - chunk->sp(); // this initial size could be reduced if it's a partial thaw
2305   int argsize, thaw_size;
2306 
2307   intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();
2308 
2309   bool partial, empty;
2310   if (LIKELY(!TEST_THAW_ONE_CHUNK_FRAME && (full_chunk_size < threshold))) {
2311     prefetch_chunk_pd(chunk->start_address(), full_chunk_size); // prefetch anticipating memcpy starting at highest address
2312 
2313     partial = false;
2314     argsize = chunk->argsize(); // must be called *before* clearing the chunk
2315     clear_chunk(chunk);
2316     thaw_size = full_chunk_size;
2317     empty = true;
2318   } else { // thaw a single frame
2319     partial = true;
2320     thaw_size = remove_top_compiled_frame_from_chunk<check_stub>(chunk, argsize);
2321     empty = chunk->is_empty();
2322   }
2323 
2324   // Are we thawing the last frame(s) in the continuation
2325   const bool is_last = empty && chunk->parent() == nullptr;
2326   assert(!is_last || argsize == 0, "");
2327 
2328   log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT,
2329                               partial, is_last, empty, thaw_size, argsize, p2i(_cont.entrySP()));
2330 
2331   ReconstructedStack rs(_cont.entrySP(), thaw_size, argsize);
2332 
2333   // also copy metadata words at frame bottom
2334   copy_from_chunk(chunk_sp - frame::metadata_words_at_bottom, rs.top(), rs.total_size());
2335 
2336   // update the ContinuationEntry
2337   _cont.set_argsize(argsize);
2338   log_develop_trace(continuations)("setting entry argsize: %d", _cont.argsize());
2339   assert(rs.bottom_sp() == _cont.entry()->bottom_sender_sp(), "");
2340 
2341   // install the return barrier if not last frame, or the entry's pc if last
2342   patch_return(rs.bottom_sp(), is_last);
2343 
2344   // insert the back links from callee to caller frames
2345   patch_caller_links(rs.top(), rs.top() + rs.total_size());
2346 
2347   assert(is_last == _cont.is_empty(), "");
2348   assert(_cont.chunk_invariant(), "");
2349 
2350 #if CONT_JFR
2351   EventContinuationThawFast e;
2352   if (e.should_commit()) {
2353     e.set_id(cast_from_oop<u8>(chunk));
2354     e.set_size(thaw_size << LogBytesPerWord);
2355     e.set_full(!partial);
2356     e.commit();
2357   }
2358 #endif
2359 
2360 #ifdef ASSERT
2361   set_anchor(_thread, rs.sp());
2362   log_frames(_thread);
2363   if (LoomDeoptAfterThaw) {
2364     do_deopt_after_thaw(_thread);
2365   }
2366   clear_anchor(_thread);
2367 #endif
2368 
2369   return rs.sp();
2370 }
2371 
2372 inline bool ThawBase::seen_by_gc() {
2373   return _barriers || _cont.tail()->is_gc_mode();
2374 }
2375 
2376 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2377 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2378   if (UseZGC || UseShenandoahGC) {
2379     chunk->relativize_derived_pointers_concurrently();
2380   }
2381 #endif
2382 }
2383 
2384 template <typename ConfigT>
2385 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2386   Continuation::preempt_kind preempt_kind;
2387   bool retry_fast_path = false;
2388 
2389   _process_args_at_top = false;
2390   _preempted_case = chunk->preempted();
2391   if (_preempted_case) {
2392     ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2393     if (waiter != nullptr) {
2394       // Mounted again after preemption. Resume the pending monitor operation,
2395       // which will be either a monitorenter or Object.wait() call.
2396       ObjectMonitor* mon = waiter->monitor();
2397       preempt_kind = waiter->is_wait() ? Continuation::object_wait : Continuation::monitorenter;
2398 
2399       bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2400       assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2401       if (!mon_acquired) {
2402         // Failed to acquire monitor. Return to enterSpecial to unmount again.
2403         log_trace(continuations, tracking)("Failed to acquire monitor, unmounting again");
2404         return push_cleanup_continuation();
2405       }
2406       _monitor = mon;        // remember monitor since we might need it on handle_preempted_continuation()
2407       chunk = _cont.tail();  // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2408     } else {
      // Preemption cancelled in the monitorenter case. We actually acquired
      // the monitor after freezing all frames, so there is nothing to do. In
      // the case of preemption on ObjectLocker during klass init, we already
      // released the monitor at ~ObjectLocker, so here we just set _monitor
      // to nullptr to record that there is no need to release it later.
2414       preempt_kind = Continuation::monitorenter;
2415       _monitor = nullptr;
2416     }
2417 
2418     // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2419     relativize_chunk_concurrently(chunk);
2420 
2421     if (chunk->at_klass_init()) {
2422       preempt_kind = Continuation::object_locker;
2423       chunk->set_at_klass_init(false);
2424       _process_args_at_top = chunk->has_args_at_top();
2425       if (_process_args_at_top) chunk->set_has_args_at_top(false);
2426     }
2427     chunk->set_preempted(false);
2428     retry_fast_path = true;
2429   } else {
2430     relativize_chunk_concurrently(chunk);
2431   }
2432 
2433   // On first thaw after freeze restore oops to the lockstack if any.
2434   assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2435   if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2436     thaw_lockstack(chunk);
2437     retry_fast_path = true;
2438   }
2439 
2440   // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2441   // and FLAG_PREEMPTED flags from the stackChunk.
2442   if (retry_fast_path && can_thaw_fast(chunk)) {
2443     intptr_t* sp = thaw_fast<true>(chunk);
2444     if (_preempted_case) {
2445       return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2446     }
2447     return sp;
2448   }
2449 
2450   LogTarget(Trace, continuations) lt;
2451   if (lt.develop_is_enabled()) {
2452     LogStream ls(lt);
2453     ls.print_cr("thaw slow return_barrier: %d " INTPTR_FORMAT, kind, p2i(chunk));
2454     chunk->print_on(true, &ls);
2455   }
2456 
2457 #if CONT_JFR
2458   EventContinuationThawSlow e;
2459   if (e.should_commit()) {
2460     e.set_id(cast_from_oop<u8>(_cont.continuation()));
2461     e.commit();
2462   }
2463 #endif
2464 
2465   DEBUG_ONLY(_frames = 0;)
2466   _align_size = 0;
2467   int num_frames = kind == Continuation::thaw_top ? 2 : 1;
2468 
2469   _stream = StackChunkFrameStream<ChunkFrames::Mixed>(chunk);
2470   _top_unextended_sp_before_thaw = _stream.unextended_sp();
2471 
2472   frame heap_frame = _stream.to_frame();
2473   if (lt.develop_is_enabled()) {
2474     LogStream ls(lt);
2475     ls.print_cr("top hframe before (thaw):");
2476     assert(heap_frame.is_heap_frame(), "should have created a relative frame");
2477     heap_frame.print_value_on(&ls);
2478   }
2479 
2480   frame caller; // the thawed caller on the stack
2481   recurse_thaw(heap_frame, caller, num_frames, _preempted_case);
2482   finish_thaw(caller); // caller is now the topmost thawed frame
2483   _cont.write();
2484 
2485   assert(_cont.chunk_invariant(), "");
2486 
2487   JVMTI_ONLY(invalidate_jvmti_stack(_thread));
2488 
2489   _thread->set_cont_fastpath(_fastpath);
2490 
2491   intptr_t* sp = caller.sp();
2492 
2493   if (_preempted_case) {
2494     return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2495   }
2496   return sp;
2497 }
2498 
2499 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2500   log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2501   assert(!_cont.is_empty(), "no more frames");
2502   assert(num_frames > 0, "");
2503   assert(!heap_frame.is_empty(), "");
2504 
2505   if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2506     heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2507   } else if (!heap_frame.is_interpreted_frame()) {
2508     recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2509   } else {
2510     recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
2511   }
2512 }
2513 
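     // Common step for thawing an interpreted or compiled Java frame: advances
     // _stream to the caller and decides whether this is the bottom-most frame
     // of this thaw (ending the recursion via finalize_thaw). Returns true iff
     // this frame is that bottom frame.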
2514 template<typename FKind>
2515 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2516   assert(num_frames > 0, "");
2517 
2518   DEBUG_ONLY(_frames++;)
2519 
2520   int argsize = _stream.stack_argsize();
2521 
2522   _stream.next(SmallRegisterMap::instance_no_args());
2523   assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2524 
2525   // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2526   // as it makes detecting that situation and adjusting unextended_sp tricky
2527   if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2528     log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2529     num_frames++;
2530   }
2531 
2532   if (num_frames == 1 || _stream.is_done()) { // end recursion
2533     finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2534     return true; // bottom
2535   } else { // recurse
2536     recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2537     return false;
2538   }
2539 }
2540 
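     // Reached the bottom-most frame to thaw: update the chunk to reflect the
     // frames being removed (sp/pc and max_thawing_size), record the bottom
     // frame's stack argument size in the continuation, and set 'entry' to the
     // enterSpecial frame that the thawed frames will connect to.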
2541 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2542   stackChunkOop chunk = _cont.tail();
2543 
2544   if (!_stream.is_done()) {
2545     assert(_stream.sp() >= chunk->sp_address(), "");
2546     chunk->set_sp(chunk->to_offset(_stream.sp()));
2547     chunk->set_pc(_stream.pc());
2548   } else {
2549     chunk->set_sp(chunk->bottom());
2550     chunk->set_pc(nullptr);
2551   }
2552   assert(_stream.is_done() == chunk->is_empty(), "");
2553 
2554   int total_thawed = pointer_delta_as_int(_stream.unextended_sp(), _top_unextended_sp_before_thaw);
2555   chunk->set_max_thawing_size(chunk->max_thawing_size() - total_thawed);
2556 
2557   _cont.set_argsize(argsize);
2558   entry = new_entry_frame();
2559 
2560   assert(entry.sp() == _cont.entrySP(), "");
2561   assert(Continuation::is_continuation_enterSpecial(entry), "");
2562   assert(_cont.is_entry_frame(entry), "");
2563 }
2564 
2565 inline void ThawBase::before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) {
2566   LogTarget(Trace, continuations) lt;
2567   if (lt.develop_is_enabled()) {
2568     LogStream ls(lt);
2569     ls.print_cr("======== THAWING FRAME: %d", num_frame);
2570     assert(hf.is_heap_frame(), "should be");
2571     hf.print_value_on(&ls);
2572   }
2573   assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(caller));
2574 }
2575 
2576 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2577 #ifdef ASSERT
2578   LogTarget(Trace, continuations) lt;
2579   if (lt.develop_is_enabled()) {
2580     LogStream ls(lt);
2581     ls.print_cr("thawed frame:");
2582     print_frame_layout(f, false, &ls); // f.print_on(&ls);
2583   }
2584 #endif
2585 }
2586 
2587 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
2588   assert(!bottom || caller.fp() == _cont.entryFP(), "");
2589   if (bottom) {
2590     ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2591                                                                  : StubRoutines::cont_returnBarrier());
2592   } else {
2593     // The caller might have been deoptimized during thaw, but we've overwritten the return address
2594     // when copying f from the heap, so patch it back with raw_pc(). If the caller is not deoptimized, the pc is unchanged.
2595     ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
2596   }
2597 
2598   patch_pd(f, caller);
2599 
2600   if (f.is_interpreted_frame()) {
2601     ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2602   }
2603 
2604   assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2605   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2606 }
2607 
2608 void ThawBase::clear_bitmap_bits(address start, address end) {
2609   assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2610   assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2611 
2612   // We need to clear the bits that correspond to arguments, as they reside in the caller frame;
2613   // otherwise they will keep objects that are otherwise unreachable alive.
2614 
2615   // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2616   // `end` could be at an odd number of stack slots from `start`, i.e. it might not be oop-aligned.
2617   // If that's the case, the bit range corresponding to the last stack slot should not have bits set
2618   // anyway, and we assert that before returning.
2619   address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2620   log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2621   stackChunkOop chunk = _cont.tail();
2622   chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2623   assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2624 }
2625 
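     // Completes a thaw triggered after a preemption: finishes the JVMTI VTMS
     // transition if needed, fixes the fp of a runtime-stub/native top frame
     // that was thawed through the fast path, and does the preempt-kind
     // specific work: throwing a pending InterruptedException for Object.wait,
     // patching the saved thread pointer for monitorenter, or redoing the
     // original VM call for the object_locker case.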
2626 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2627   frame top(sp);
2628   assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2629   DEBUG_ONLY(verify_frame_kind(top, preempt_kind);)
2630   NOT_PRODUCT(int64_t tid = _thread->monitor_owner_id();)
2631 
2632 #if INCLUDE_JVMTI
2633   // Finish the VTMS transition.
2634   assert(_thread->is_in_VTMS_transition(), "must be");
2635   bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2636   if (is_vthread) {
2637     if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
2638       jvmti_mount_end(_thread, _cont, top, preempt_kind);
2639     } else {
2640       _thread->set_is_in_VTMS_transition(false);
2641       java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
2642     }
2643   }
2644 #endif
2645 
2646   if (fast_case) {
2647     // If we thawed in the slow path the runtime stub/native wrapper frame already
2648     // has the correct fp (see ThawBase::new_stack_frame). On the fast path though,
2649     // we copied the original fp at the time of freeze which now will have to be fixed.
2650     assert(top.is_runtime_frame() || top.is_native_frame(), "");
2651     int fsize = top.cb()->frame_size();
2652     patch_pd(top, sp + fsize);
2653   }
2654 
2655   if (preempt_kind == Continuation::object_wait) {
2656     // Check now if we need to throw an InterruptedException.
2657     bool throw_ie = _thread->pending_interrupted_exception();
2658     if (throw_ie) {
2659       throw_interrupted_exception(_thread, top);
2660       _thread->set_pending_interrupted_exception(false);
2661     }
2662     log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on Object.wait%s", tid, throw_ie ? " (throwing IE)" : "");
2663   } else if (preempt_kind == Continuation::monitorenter) {
2664     if (top.is_runtime_frame()) {
2665       // The continuation might now run on a different platform thread than the previous time so
2666       // we need to adjust the current thread saved in the stub frame before restoring registers.
2667       JavaThread** thread_addr = frame::saved_thread_address(top);
2668       if (thread_addr != nullptr) *thread_addr = _thread;
2669     }
2670     log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on monitorenter", tid);
2671   } else {
2672     // We need to redo the original call into the VM. First though, we need
2673     // to exit the monitor we just acquired (except in the preemption-cancelled
2674     // case, where it was already released).
2675     assert(preempt_kind == Continuation::object_locker, "");
2676     if (_monitor != nullptr) _monitor->exit(_thread);
2677     sp = redo_vmcall(_thread, top);
2678   }
2679   return sp;
2680 }
2681 
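     // Redoes the interpreter VM call (InterpreterRuntime::_new or
     // resolve_from_cache) that the vthread was executing when it was
     // preempted in the object_locker case. If we get preempted again, the
     // returned sp routes execution through the preempt stub so the thread
     // unmounts once more.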
2682 intptr_t* ThawBase::redo_vmcall(JavaThread* current, frame& top) {
2683   assert(!current->preempting(), "");
2684   NOT_PRODUCT(int64_t tid = current->monitor_owner_id();)
2685   intptr_t* sp = top.sp();
2686 
2687   {
2688     HandleMarkCleaner hmc(current);  // Cleanup so._conth Handle
2689     ContinuationWrapper::SafepointOp so(current, _cont);
2690     AnchorMark am(current, top);    // Set the anchor so that the stack is walkable.
2691 
2692     Method* m = top.interpreter_frame_method();
2693     Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
2694     Bytecodes::Code code = current_bytecode.code();
2695     log_develop_trace(continuations, preempt)("Redoing InterpreterRuntime::%s for " INT64_FORMAT, code == Bytecodes::Code::_new ? "_new" : "resolve_from_cache", tid);
2696 
2697     // These InterpreterRuntime entry points use JRT_ENTRY which uses a HandleMarkCleaner.
2698     // Create a HandleMark to avoid destroying so._conth.
2699     HandleMark hm(current);
2700     if (code == Bytecodes::Code::_new) {
2701       InterpreterRuntime::_new(current, m->constants(), current_bytecode.get_index_u2(code));
2702     } else {
2703       InterpreterRuntime::resolve_from_cache(current, code);
2704     }
2705   }
2706 
2707   if (current->preempting()) {
2708     // Preempted again so we just arrange to return to preempt stub to unmount.
2709     sp = push_preempt_adapter();
2710     current->set_preempt_alternate_return(nullptr);
2711     bool cancelled = current->preemption_cancelled();
2712     if (cancelled) {
2713       // Instead of calling thaw again from the preempt stub, just unmount anyway with
2714       // state of YIELDING. This gives other vthreads a chance to run while
2715       // minimizing repeated loops of "thaw->redo_vmcall->try_preempt->preemption_cancelled->thaw..."
2716       // in case of multiple vthreads contending for the same init_lock().
2717       current->set_preemption_cancelled(false);
2718       oop vthread = current->vthread();
2719       assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2720       java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::YIELDING);
2721     }
2722     log_develop_trace(continuations, preempt)("Preempted " INT64_FORMAT " again%s", tid, cancelled ? " (preemption cancelled, setting state to YIELDING)" : "");
2723   } else {
2724     log_develop_trace(continuations, preempt)("Call successful, resuming " INT64_FORMAT, tid);
2725   }
2726   return sp;
2727 }
2728 
2729 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2730   HandleMarkCleaner hm(current);  // Cleanup so._conth Handle
2731   ContinuationWrapper::SafepointOp so(current, _cont);
2732   // Since we might safepoint, set the anchor so that the stack can be walked.
2733   set_anchor(current, top.sp());
2734   JRT_BLOCK
2735     THROW(vmSymbols::java_lang_InterruptedException());
2736   JRT_BLOCK_END
2737   clear_anchor(current);
2738 }
2739 
2740 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top) {
2741   assert(hf.is_interpreted_frame(), "");
2742 
2743   if (UNLIKELY(seen_by_gc())) {
2744     if (is_top && _process_args_at_top) {
2745       log_trace(continuations, tracking)("Processing arguments in recurse_thaw_interpreted_frame");
2746       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_with_args());
2747     } else {
2748       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2749     }
2750   }
2751 
2752   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2753 
2754   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2755 
2756   _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2757 
2758   frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2759 
2760   intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2761   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2762   intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2763   intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2764 
2765   assert(hf.is_heap_frame(), "should be");
2766   assert(!f.is_heap_frame(), "should not be");
2767 
2768   const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2769   assert((stack_frame_bottom == stack_frame_top + fsize), "");
2770 
2771   // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
2772   // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
2773   copy_from_chunk(heap_frame_top, stack_frame_top, fsize);
2774 
2775   // Make sure the relativized locals pointer is already set.
2776   assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2777 
2778   derelativize_interpreted_frame_metadata(hf, f);
2779   patch(f, caller, is_bottom_frame);
2780 
2781   assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2782   assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2783 
2784   CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2785 
2786   maybe_set_fastpath(f.sp());
2787 
2788   Method* m = hf.interpreter_frame_method();
2789   assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2790   const int locals = m->max_locals();
2791 
2792   if (!is_bottom_frame) {
2793     // can only fix caller once this frame is thawed (due to callee saved regs)
2794     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2795   } else if (_cont.tail()->has_bitmap() && locals > 0) {
2796     assert(hf.is_heap_frame(), "should be");
2797     address start = (address)(heap_frame_bottom - locals);
2798     address end = (address)heap_frame_bottom;
2799     clear_bitmap_bits(start, end);
2800   }
2801 
2802   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2803   caller = f;
2804 }
2805 
2806 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2807   assert(hf.is_compiled_frame(), "");
2808   assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2809 
2810   if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2811     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2812   }
2813 
2814   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2815 
2816   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2817 
2818   assert(caller.sp() == caller.unextended_sp(), "");
2819 
2820   if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2821     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2822   }
2823 
2824   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2825   // yet laid out in the stack, and so the original_pc is not stored in it.
2826   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2827   frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2828   intptr_t* const stack_frame_top = f.sp();
2829   intptr_t* const heap_frame_top = hf.unextended_sp();
2830 
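       // Stack arguments live in the caller's frame. They were frozen together
       // with this frame only when it is the bottom frame or its caller is
       // interpreted, so only then are they included in the copy.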
2831   const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2832   int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
2833   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2834 
2835   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2836   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2837   // copy metadata, except the metadata at the top of the (unextended) entry frame
2838   int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2839 
2840   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2841   // (we might have one padding word for alignment)
2842   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2843   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz == _cont.entrySP()), "");
2844 
2845   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2846 
2847   patch(f, caller, is_bottom_frame);
2848 
2849   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2850   assert(!f.is_deoptimized_frame(), "");
2851   if (hf.is_deoptimized_frame()) {
2852     maybe_set_fastpath(f.sp());
2853   } else if (_thread->is_interp_only_mode()
2854               || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2855     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2856     // cannot rely on nmethod patching for deopt.
2857     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2858 
2859     log_develop_trace(continuations)("Deoptimizing thawed frame");
2860     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2861 
2862     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2863     assert(f.is_deoptimized_frame(), "");
2864     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2865     maybe_set_fastpath(f.sp());
2866   }
2867 
2868   if (!is_bottom_frame) {
2869     // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2870     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2871   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2872     address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2873     int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2874     int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2875     clear_bitmap_bits(start, start + argsize_in_bytes);
2876   }
2877 
2878   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2879   caller = f;
2880 }
2881 
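     // Thaws a runtime stub frame (e.g. a safepoint stub) found at the top of
     // the chunk after a preemption. The compiled caller is thawed first, using
     // a full RegisterMap for the GC barriers since the stub's oop map may
     // describe oops held in registers. After the stub frame is copied, the
     // caller is fixed using that oop map.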
2882 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2883   DEBUG_ONLY(_frames++;)
2884 
2885   if (UNLIKELY(seen_by_gc())) {
2886     // Process the stub's caller here since we might need the full map.
2887     RegisterMap map(nullptr,
2888                     RegisterMap::UpdateMap::include,
2889                     RegisterMap::ProcessFrames::skip,
2890                     RegisterMap::WalkContinuation::skip);
2891     map.set_include_argument_oops(false);
2892     _stream.next(&map);
2893     assert(!_stream.is_done(), "");
2894     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2895   } else {
2896     _stream.next(SmallRegisterMap::instance_no_args());
2897     assert(!_stream.is_done(), "");
2898   }
2899 
2900   recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2901 
2902   assert(caller.is_compiled_frame(), "");
2903   assert(caller.sp() == caller.unextended_sp(), "");
2904 
2905   DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2906 
2907   frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2908   intptr_t* stack_frame_top = f.sp();
2909   intptr_t* heap_frame_top = hf.sp();
2910   int fsize = ContinuationHelper::StubFrame::size(hf);
2911 
2912   copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2913                   fsize + frame::metadata_words);
2914 
2915   patch(f, caller, false /*is_bottom_frame*/);
2916 
2917   // can only fix caller once this frame is thawed (due to callee saved regs)
2918   RegisterMap map(nullptr,
2919                   RegisterMap::UpdateMap::include,
2920                   RegisterMap::ProcessFrames::skip,
2921                   RegisterMap::WalkContinuation::skip);
2922   map.set_include_argument_oops(false);
2923   f.oop_map()->update_register_map(&f, &map);
2924   ContinuationHelper::update_register_map_with_callee(caller, &map);
2925   _cont.tail()->fix_thawed_frame(caller, &map);
2926 
2927   DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2928   caller = f;
2929 }
2930 
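     // Thaws the native wrapper frame of Object.wait0 that was left at the top
     // of the chunk when the vthread was preempted while waiting. It is never
     // the bottom frame of a thaw; its caller is always thawed as well.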
2931 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2932   assert(hf.is_native_frame(), "");
2933   assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2934 
2935   if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2936     _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2937   }
2938 
2939   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2940   assert(!is_bottom_frame, "");
2941 
2942   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2943 
2944   assert(caller.sp() == caller.unextended_sp(), "");
2945 
2946   if (caller.is_interpreted_frame()) {
2947     _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame
2948   }
2949 
2950   // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2951   // yet laid out in the stack, and so the original_pc is not stored in it.
2952   // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2953   frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2954   intptr_t* const stack_frame_top = f.sp();
2955   intptr_t* const heap_frame_top = hf.unextended_sp();
2956 
2957   int fsize = ContinuationHelper::NativeFrame::size(hf);
2958   assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2959 
2960   intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2961   intptr_t* to   = stack_frame_top - frame::metadata_words_at_bottom;
2962   int sz = fsize + frame::metadata_words_at_bottom;
2963 
2964   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2965 
2966   patch(f, caller, false /* bottom */);
2967 
2968   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2969   assert(!f.is_deoptimized_frame(), "");
2970   assert(!hf.is_deoptimized_frame(), "");
2971   assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2972 
2973   // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2974   _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2975 
2976   DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2977   caller = f;
2978 }
2979 
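     // Final fixups after the recursive thaw: detach or reset the chunk if it
     // is now empty, re-align the sp of an interpreted top frame, push the
     // return frame, and fix the oops and metadata of the topmost thawed frame.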
2980 void ThawBase::finish_thaw(frame& f) {
2981   stackChunkOop chunk = _cont.tail();
2982 
2983   if (chunk->is_empty()) {
2984     // Only remove chunk from list if it can't be reused for another freeze
2985     if (seen_by_gc()) {
2986       _cont.set_tail(chunk->parent());
2987     } else {
2988       chunk->set_has_mixed_frames(false);
2989     }
2990     chunk->set_max_thawing_size(0);
2991   } else {
2992     chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2993   }
2994   assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
2995 
2996   if (!is_aligned(f.sp(), frame::frame_alignment)) {
2997     assert(f.is_interpreted_frame(), "");
2998     f.set_sp(align_down(f.sp(), frame::frame_alignment));
2999   }
3000   push_return_frame(f);
3001   // can only fix caller after push_return_frame (due to callee saved regs)
3002   if (_process_args_at_top) {
3003     log_trace(continuations, tracking)("Processing arguments in finish_thaw");
3004     chunk->fix_thawed_frame(f, SmallRegisterMap::instance_with_args());
3005   } else {
3006     chunk->fix_thawed_frame(f, SmallRegisterMap::instance_no_args());
3007   }
3008 
3009   assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
3010 
3011   log_develop_trace(continuations)("thawed %d frames", _frames);
3012 
3013   LogTarget(Trace, continuations) lt;
3014   if (lt.develop_is_enabled()) {
3015     LogStream ls(lt);
3016     ls.print_cr("top hframe after (thaw):");
3017     _cont.last_frame().print_value_on(&ls);
3018   }
3019 }
3020 
3021 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
3022   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
3023   assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
3024 
3025   LogTarget(Trace, continuations) lt;
3026   if (lt.develop_is_enabled()) {
3027     LogStream ls(lt);
3028     ls.print_cr("push_return_frame");
3029     f.print_value_on(&ls);
3030   }
3031 
3032   assert(f.sp() - frame::metadata_words_at_bottom >= _top_stack_address, "overwrote past thawing space"
3033     " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(f.sp() - frame::metadata_words_at_bottom), p2i(_top_stack_address));
3034   ContinuationHelper::Frame::patch_pc(f, f.raw_pc()); // in case we want to deopt the frame in a full transition, this is checked.
3035   ContinuationHelper::push_pd(f);
3036 
3037   assert(ContinuationHelper::Frame::assert_frame_laid_out(f), "");
3038 }
3039 
3040 // returns new top sp
3041 // called after preparations (stack overflow check and making room)
3042 template<typename ConfigT>
3043 static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind) {
3044   assert(thread == JavaThread::current(), "Must be current thread");
3045 
3046   CONT_JFR_ONLY(EventContinuationThaw event;)
3047 
3048   log_develop_trace(continuations)("~~~~ thaw kind: %d sp: " INTPTR_FORMAT, kind, p2i(thread->last_continuation()->entry_sp()));
3049 
3050   ContinuationEntry* entry = thread->last_continuation();
3051   assert(entry != nullptr, "");
3052   oop oopCont = entry->cont_oop(thread);
3053 
3054   assert(!jdk_internal_vm_Continuation::done(oopCont), "");
3055   assert(oopCont == get_continuation(thread), "");
3056   verify_continuation(oopCont);
3057 
3058   assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
3059 
3060   ContinuationWrapper cont(thread, oopCont);
3061   log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
3062 
3063 #ifdef ASSERT
3064   set_anchor_to_entry(thread, cont.entry());
3065   log_frames(thread);
3066   clear_anchor(thread);
3067 #endif
3068 
3069   Thaw<ConfigT> thw(thread, cont);
3070   intptr_t* const sp = thw.thaw(kind);
3071   assert(is_aligned(sp, frame::frame_alignment), "");
3072   DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp);)
3073 
3074   CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
3075 
3076   verify_continuation(cont.continuation());
3077   log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
3078 
3079   return sp;
3080 }
3081 
3082 #ifdef ASSERT
3083 static void do_deopt_after_thaw(JavaThread* thread) {
3085   StackFrameStream fst(thread, true, false);
3086   fst.register_map()->set_include_argument_oops(false);
3087   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3088   for (; !fst.is_done(); fst.next()) {
3089     if (fst.current()->cb()->is_nmethod()) {
3090       nmethod* nm = fst.current()->cb()->as_nmethod();
3091       if (!nm->method()->is_continuation_native_intrinsic()) {
3092         nm->make_deoptimized();
3093       }
3094     }
3095   }
3096 }
3097 
3098 class ThawVerifyOopsClosure: public OopClosure {
3099   intptr_t* _p;
3100   outputStream* _st;
3101   bool is_good_oop(oop o) {
3102     return dbg_is_safe(o, -1) && dbg_is_safe(o->klass(), -1) && oopDesc::is_oop(o) && o->klass()->is_klass();
3103   }
3104 public:
3105   ThawVerifyOopsClosure(outputStream* st) : _p(nullptr), _st(st) {}
3106   intptr_t* p() { return _p; }
3107   void reset() { _p = nullptr; }
3108 
3109   virtual void do_oop(oop* p) {
3110     oop o = *p;
3111     if (o == nullptr || is_good_oop(o)) {
3112       return;
3113     }
3114     _p = (intptr_t*)p;
3115     _st->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(*p), p2i(p));
3116   }
3117   virtual void do_oop(narrowOop* p) {
3118     oop o = RawAccess<>::oop_load(p);
3119     if (o == nullptr || is_good_oop(o)) {
3120       return;
3121     }
3122     _p = (intptr_t*)p;
3123     _st->print_cr("*** (narrow) non-oop %x found at " PTR_FORMAT, (int)(*p), p2i(p));
3124   }
3125 };
3126 
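     // Walks the thawed frames up to the enterSpecial frame and verifies that
     // every location the oop maps report holds a valid oop (or null). Prints
     // diagnostics and returns false at the first bad value found.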
3127 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st) {
3128   assert(thread->has_last_Java_frame(), "");
3129 
3130   ResourceMark rm;
3131   ThawVerifyOopsClosure cl(st);
3132   NMethodToOopClosure cf(&cl, false);
3133 
3134   StackFrameStream fst(thread, true, false);
3135   fst.register_map()->set_include_argument_oops(false);
3136   ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3137   for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) {
3138     if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) {
3139       st->print_cr(">>> do_verify_after_thaw deopt");
3140       fst.current()->deoptimize(nullptr);
3141       fst.current()->print_on(st);
3142     }
3143 
3144     fst.current()->oops_do(&cl, &cf, fst.register_map());
3145     if (cl.p() != nullptr) {
3146       frame fr = *fst.current();
3147       st->print_cr("Failed for frame barriers: %d", chunk->requires_barriers());
3148       fr.print_on(st);
3149       if (!fr.is_interpreted_frame()) {
3150         st->print_cr("size: %d argsize: %d",
3151                      ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
3152                      ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
3153       }
3154       VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
3155       if (reg != nullptr) {
3156         st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
3157       }
3158       cl.reset();
3159       DEBUG_ONLY(thread->print_frame_layout();)
3160       if (chunk != nullptr) {
3161         chunk->print_on(true, st);
3162       }
3163       return false;
3164     }
3165   }
3166   return true;
3167 }
3168 
3169 static void log_frames(JavaThread* thread, bool dolog) {
3170   const static int show_entry_callers = 3;
3171   LogTarget(Trace, continuations, tracking) lt;
3172   if (!lt.develop_is_enabled() || !dolog) {
3173     return;
3174   }
3175   LogStream ls(lt);
3176 
3177   ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
3178   if (!thread->has_last_Java_frame()) {
3179     ls.print_cr("NO ANCHOR!");
3180   }
3181 
3182   RegisterMap map(thread,
3183                   RegisterMap::UpdateMap::include,
3184                   RegisterMap::ProcessFrames::include,
3185                   RegisterMap::WalkContinuation::skip);
3186   map.set_include_argument_oops(false);
3187 
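       // Debugging toggle: enable the first branch to print raw frames instead
       // of the annotated FrameValues layout.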
3188   if (false) {
3189     for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
3190       f.print_on(&ls);
3191     }
3192   } else {
3193     map.set_skip_missing(true);
3194     ResetNoHandleMark rnhm;
3195     ResourceMark rm;
3196     HandleMark hm(Thread::current());
3197     FrameValues values;
3198 
3199     int i = 0;
3200     int post_entry = -1;
3201     for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
3202       f.describe(values, i, &map, i == 0);
3203       if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
3204         post_entry++;
3205       if (post_entry >= show_entry_callers)
3206         break;
3207     }
3208     values.print_on(thread, &ls);
3209   }
3210 
3211   ls.print_cr("======= end frames =========");
3212 }
3213 
3214 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp) {
3215   intptr_t* sp0 = sp;
3217 
3218   bool preempted = false;
3219   stackChunkOop tail = cont.tail();
3220   if (tail != nullptr && tail->preempted()) {
3221     // Still preempted (monitor not acquired) so no frames were thawed.
3222     set_anchor(thread, cont.entrySP(), cont.entryPC());
3223     preempted = true;
3224   } else {
3225     set_anchor(thread, sp0);
3226   }
3227 
3228   log_frames(thread);
3229   if (LoomVerifyAfterThaw) {
3230     assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
3231   }
3232   assert(ContinuationEntry::assert_entry_frame_laid_out(thread, preempted), "");
3233   clear_anchor(thread);
3234 
3235   LogTarget(Trace, continuations) lt;
3236   if (lt.develop_is_enabled()) {
3237     LogStream ls(lt);
3238     ls.print_cr("Jumping to frame (thaw):");
3239     frame(sp).print_value_on(&ls);
3240   }
3241 }
3242 #endif // ASSERT
3243 
3244 #include CPU_HEADER_INLINE(continuationFreezeThaw)
3245 
3246 #ifdef ASSERT
3247 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
3248   ResourceMark rm;
3249   FrameValues values;
3250   assert(f.get_cb() != nullptr, "");
3251   RegisterMap map(f.is_heap_frame() ?
3252                     nullptr :
3253                     JavaThread::current(),
3254                   RegisterMap::UpdateMap::include,
3255                   RegisterMap::ProcessFrames::skip,
3256                   RegisterMap::WalkContinuation::skip);
3257   map.set_include_argument_oops(false);
3258   map.set_skip_missing(true);
3259   if (callee_complete) {
3260     frame::update_map_with_saved_link(&map, ContinuationHelper::Frame::callee_link_address(f));
3261   }
3262   const_cast<frame&>(f).describe(values, 0, &map, true);
3263   values.print_on(static_cast<JavaThread*>(nullptr), st);
3264 }
3265 #endif
3266 
3267 static address thaw_entry   = nullptr;
3268 static address freeze_entry = nullptr;
3269 static address freeze_preempt_entry = nullptr;
3270 
3271 address Continuation::thaw_entry() {
3272   return ::thaw_entry;
3273 }
3274 
3275 address Continuation::freeze_entry() {
3276   return ::freeze_entry;
3277 }
3278 
3279 address Continuation::freeze_preempt_entry() {
3280   return ::freeze_preempt_entry;
3281 }
3282 
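     // Resolves the freeze/thaw entry points for the current VM configuration,
     // selecting the Config instantiation that matches UseCompressedOops and
     // the concrete BarrierSet. Called once from Continuation::init().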
3283 class ConfigResolve {
3284 public:
3285   static void resolve() { resolve_compressed(); }
3286 
3287   static void resolve_compressed() {
3288     UseCompressedOops ? resolve_gc<true>()
3289                       : resolve_gc<false>();
3290   }
3291 
3292 private:
3293   template <bool use_compressed>
3294   static void resolve_gc() {
3295     BarrierSet* bs = BarrierSet::barrier_set();
3296     assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set");
3297     switch (bs->kind()) {
3298 #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
3299       case BarrierSet::bs_name: {                                       \
3300         resolve<use_compressed, typename BarrierSet::GetType<BarrierSet::bs_name>::type>(); \
3301       }                                                                 \
3302         break;
3303       FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
3304 #undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
3305 
3306     default:
3307       fatal("BarrierSet resolving not implemented");
3308     }
3309   }
3310 
3311   template <bool use_compressed, typename BarrierSetT>
3312   static void resolve() {
3313     typedef Config<use_compressed ? oop_kind::NARROW : oop_kind::WIDE, BarrierSetT> SelectedConfigT;
3314 
3315     freeze_entry = (address)freeze<SelectedConfigT>;
3316     freeze_preempt_entry = (address)SelectedConfigT::freeze_preempt;
3317 
3318     // If we wanted, we could templatize by kind and have three different thaw entries
3319     thaw_entry   = (address)thaw<SelectedConfigT>;
3320   }
3321 };
3322 
3323 void Continuation::init() {
3324   ConfigResolve::resolve();
3325 }