1 /*
   2  * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "gc/shared/gc_globals.hpp"
  28 #include "oops/instanceStackChunkKlass.hpp"
  29 #include "oops/oopsHierarchy.hpp"
  30 #include "oops/stackChunkOop.hpp"
  31 #include "classfile/javaClasses.inline.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/codeCache.inline.hpp"
  34 #include "code/compiledMethod.inline.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "code/vmreg.inline.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "compiler/oopMap.inline.hpp"
  39 #include "jfr/jfrEvents.hpp"
  40 #include "gc/shared/barrierSet.hpp"
  41 #include "gc/shared/memAllocator.hpp"
  42 #include "gc/shared/oopStorage.hpp"
  43 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  44 #include "interpreter/interpreter.hpp"
  45 #include "interpreter/linkResolver.hpp"
  46 #include "interpreter/oopMapCache.hpp"
  47 #include "logging/log.hpp"
  48 #include "logging/logStream.hpp"
  49 #include "metaprogramming/conditional.hpp"
  50 #include "oops/access.inline.hpp"
  51 #include "oops/instanceStackChunkKlass.inline.hpp"
  52 #include "oops/objArrayOop.inline.hpp"
  53 #include "oops/weakHandle.hpp"
  54 #include "oops/weakHandle.inline.hpp"
  55 #include "prims/jvmtiDeferredUpdates.hpp"
  56 #include "prims/jvmtiThreadState.hpp"
  57 #include "runtime/continuation.inline.hpp"
  58 #include "runtime/deoptimization.hpp"
  59 #include "runtime/frame.hpp"
  60 #include "runtime/frame.inline.hpp"
  61 #include "runtime/interfaceSupport.inline.hpp"
  62 #include "runtime/javaCalls.hpp"
  63 #include "runtime/jniHandles.inline.hpp"
  64 #include "runtime/keepStackGCProcessed.hpp"
  65 #include "runtime/orderAccess.hpp"
  66 #include "runtime/prefetch.inline.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stackFrameStream.inline.hpp"
  69 #include "runtime/stackWatermarkSet.inline.hpp"
  70 #include "runtime/stackOverflow.hpp"
  71 #include "runtime/vframe_hp.hpp"
  72 #include "utilities/copy.hpp"
  73 #include "utilities/debug.hpp"
  74 #include "utilities/exceptions.hpp"
  75 #include "utilities/macros.hpp"
  76 
  77 #define CONT_JFR false
  78 
  79 static const bool TEST_THAW_ONE_CHUNK_FRAME = false; // force thawing frames one-at-a-time from chunks for testing purposes
  80 
  81 #ifdef __has_include
  82 #  if __has_include(<valgrind/callgrind.h>)
  83 #    include <valgrind/callgrind.h>
  84 #  endif
  85 #endif
  86 #ifndef VG
  87 #define VG(X)
  88 #endif
  89 #ifdef __has_include
  90 #  if __has_include(<valgrind/memcheck.h>)
  91 #    include <valgrind/memcheck.h>
  92 #    undef VG
  93 #    define VG(x) x
  94 #  endif
  95 #endif
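// Illustrative only: when <valgrind/memcheck.h> is present, VG(x) expands to its argument, so
// memcheck client requests can stay in the code at no cost to normal builds, e.g. a hypothetical
// call site:
//   VG(VALGRIND_MAKE_MEM_DEFINED(addr, nbytes));
// When the header is absent, VG(x) expands to nothing.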
  96 
  97 #ifdef CALLGRIND_START_INSTRUMENTATION
  98   static int callgrind_counter = 1;
  99   // static void callgrind() {
 100   //   if (callgrind_counter != 0) {
 101   //     if (callgrind_counter > 20000) {
 102   //       tty->print_cr("Starting callgrind instrumentation");
 103   //       CALLGRIND_START_INSTRUMENTATION;
 104   //       callgrind_counter = 0;
 105   //     } else
 106   //       callgrind_counter++;
 107   //   }
 108   // }
 109 #else
 110   // static void callgrind() {}
 111 #endif
 112 
 113 // #undef log_develop_info
 114 // #undef log_develop_debug
 115 // #undef log_develop_trace
 116 // #undef log_develop_is_enabled
 117 // #define log_develop_info(...)  (!log_is_enabled(Info, __VA_ARGS__))   ? (void)0 : LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::Info>
 118 // #define log_develop_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::Debug>
 119 // #define log_develop_trace(...) (!log_is_enabled(Trace, __VA_ARGS__))  ? (void)0 : LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::Trace>
 120 // #define log_develop_is_enabled(level, ...)  log_is_enabled(level, __VA_ARGS__)
 121 
 122 // #undef ASSERT
 123 // #undef assert
 124 // #define assert(p, ...)
 125 
 126 #ifdef ASSERT
 127 template<int x> NOINLINE static bool verify_continuation(oop cont) { return Continuation::debug_verify_continuation(cont); }
 128 template<int x> NOINLINE static bool verify_stack_chunk(oop chunk) { return InstanceStackChunkKlass::verify(chunk); }
 129 #endif
 130 
 131 #ifdef ASSERT
 132 extern "C" void pns2();
 133 extern "C" void pfl();
 134 extern "C" void find(intptr_t x);
 135 #endif
 136 
 137 // Returns true iff the address p is readable and *(intptr_t*)p != errvalue
 138 extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue);
 139 static bool is_good_oop(oop o) { return dbg_is_safe(o, -1) && dbg_is_safe(o->klass(), -1) && oopDesc::is_oop(o) && o->klass()->is_klass(); }
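// Illustrative debug use (not taken from the original source): is_good_oop is intended for
// asserts such as
//   assert(is_good_oop(obj), "bad oop");
// where dbg_is_safe keeps the check from faulting on an unreadable address.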
 140 
 141 // Freeze:
 142 // 5 - no call into C
 143 // 10 - immediate return from C
 144 // <100 - don't allocate
 145 // 100 - everything
 146 //
 147 // Thaw:
 148 // 105 - no call into C (prepare_thaw)
 149 // 110 - immediate return from C (prepare_thaw)
 150 // 130 - thaw oops
 151 
 152 // TODO: See AbstractAssembler::generate_stack_overflow_check (assembler.cpp), Compile::bang_size_in_bytes() (compile.cpp), m->as_SafePoint()->jvms()->interpreter_frame_size()
 153 // when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.
 154 
 155 
 156 // TODO
 157 //
 158 // Nested continuations: must restore fastpath, held_monitor_count, cont_frame->sp (entrySP of parent)
 159 // Add:
 160 //  - compress interpreted frames
 161 //  - compiled->interpreted for serialization (look at scopeDesc)
 162 //  - caching h-stacks in thread stacks
 163 //
 164 // Things to compress in interpreted frames: return address, monitors, last_sp
 165 //
 166 
 167 //
 168 // The data structure invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk
 169 //
 170 
 171 #define YIELD_SIG  "jdk.internal.vm.Continuation.yield(Ljdk/internal/vm/ContinuationScope;)V"
 172 #define YIELD0_SIG "jdk.internal.vm.Continuation.yield0(Ljdk/internal/vm/ContinuationScope;Ljdk/internal/vm/Continuation;)Z"
 173 #define ENTER_SIG  "jdk.internal.vm.Continuation.enter(Ljdk/internal/vm/Continuation;Z)V"
 174 #define ENTER_SPECIAL_SIG "jdk.internal.vm.Continuation.enterSpecial(Ljdk/internal/vm/Continuation;Z)V"
 175 #define RUN_SIG    "jdk.internal.vm.Continuation.run()V"
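// These strings are compared against Method::name_and_sig_as_C_string() by the debug helpers
// below; e.g. (illustrative) Frame::assert_bottom_java_frame_name(f, ENTER_SIG) checks that the
// bottom-most Java frame of the continuation is Continuation.enter.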
 176 
 177 // debugging functions
 178 bool do_verify_after_thaw(JavaThread* thread);
 179 template<int x> NOINLINE static bool do_verify_after_thaw1(JavaThread* thread) { return do_verify_after_thaw(thread); }
 180 static void print_vframe(frame f, const RegisterMap* map = nullptr, outputStream* st = tty);
 181 void do_deopt_after_thaw(JavaThread* thread);
 182 
 183 #ifndef PRODUCT
 184   template <bool relative>
 185   static void print_frame_layout(const frame& f, outputStream* st = tty);
 186   static void print_frames(JavaThread* thread, outputStream* st = tty);
 187   static jlong java_tid(JavaThread* thread);
 188 
 189   // static void print_blob(outputStream* st, address addr);
 190   // template<int x> static void walk_frames(JavaThread* thread);
 191   // void static stop();
 192   // void static stop(const frame& f);
 193   // static void print_JavaThread_offsets();
 194 #endif
 195 
 196 void continuations_init() {
 197   Continuations::init();
 198 }
 199 
 200 class SmallRegisterMap;
 201 
 202 
 203 class Frame {
 204 public:
 205   template<typename RegisterMapT> static inline intptr_t** map_link_address(const RegisterMapT* map, intptr_t* sp);
 206   static inline intptr_t** callee_link_address(const frame& f);
 207   static inline Method* frame_method(const frame& f);
 208   static inline address real_pc(const frame& f);
 209   static inline void patch_pc(const frame& f, address pc);
 210   static address* return_pc_address(const frame& f);
 211   static address return_pc(const frame& f);
 212   static bool is_stub(CodeBlob* cb);
 213 
 214 #ifdef ASSERT
 215   static inline intptr_t* frame_top(const frame &f);
 216   static inline bool is_deopt_return(address pc, const frame& sender);
 217   static bool assert_frame_laid_out(frame f);
 218 
 219   static char* method_name(Method* m);
 220   static Method* top_java_frame_method(const frame& f);
 221   static Method* bottom_java_frame_method(const frame& f)  { return Frame::frame_method(f); }
 222   static char* top_java_frame_name(const frame& f) { return method_name(top_java_frame_method(f)); }
 223   static char* bottom_java_frame_name(const frame& f) { return method_name(bottom_java_frame_method(f)); }
 224   static bool assert_bottom_java_frame_name(const frame& f, const char* name);
 225 #endif
 226 };
 227 
 228 template<typename Self>
 229 class FrameCommon : public Frame {
 230 public:
 231   static inline Method* frame_method(const frame& f);
 232 
 233   template <typename FrameT> static bool is_instance(const FrameT& f);
 234 };
 235 
 236 class Interpreted : public FrameCommon<Interpreted> {
 237 public:
 238   DEBUG_ONLY(static const char* name;)
 239   static const bool interpreted = true;
 240   static const bool stub = false;
 241   static const int extra_oops = 0;
 242   static const char type = 'i';
 243 
 244 public:
 245 
 246   static inline intptr_t* frame_top(const frame& f, InterpreterOopMap* mask);
 247   static inline intptr_t* frame_top(const frame& f);
 248   static inline intptr_t* frame_top(const frame& f, int callee_argsize, bool callee_interpreted);
 249   template <bool relative = false>
 250   static inline intptr_t* frame_bottom(const frame& f);
 251   template <bool relative = false>
 252   static inline intptr_t* sender_unextended_sp(const frame& f);
 253   template <bool relative = false>
 254   static inline int stack_argsize(const frame& f);
 255 
 256   static inline address* return_pc_address(const frame& f);
 257   static inline address return_pc(const frame& f);
 258   template <bool relative>
 259   static void patch_sender_sp(frame& f, intptr_t* sp);
 260 
 261   static int num_oops(const frame&f, InterpreterOopMap* mask);
 262   static int size(const frame& f, InterpreterOopMap* mask);
 263   static int size(const frame& f);
 264   static inline int expression_stack_size(const frame &f, InterpreterOopMap* mask);
 265   static bool is_owning_locks(const frame& f);
 266 
 267   typedef InterpreterOopMap* ExtraT;
 268 };
 269 
 270 DEBUG_ONLY(const char* Interpreted::name = "Interpreted";)
 271 
 272 template<typename Self>
 273 class NonInterpreted : public FrameCommon<Self>  {
 274 public:
 275   static inline intptr_t* frame_top(const frame& f, int callee_argsize, bool callee_interpreted);
 276   static inline intptr_t* frame_top(const frame& f);
 277   static inline intptr_t* frame_bottom(const frame& f);
 278 
 279   static inline int size(const frame& f);
 280   static inline int stack_argsize(const frame& f);
 281   static inline int num_oops(const frame& f);
 282 };
 283 
 284 class NonInterpretedUnknown : public NonInterpreted<NonInterpretedUnknown>  {
 285 public:
 286   DEBUG_ONLY(static const char* name;)
 287   static const bool interpreted = false;
 288 
 289   template <typename FrameT> static bool is_instance(const FrameT& f);
 290 };
 291 
 292 DEBUG_ONLY(const char* NonInterpretedUnknown::name = "NonInterpretedUnknown";)
 293 
 294 class Compiled : public NonInterpreted<Compiled>  {
 295 public:
 296   DEBUG_ONLY(static const char* name;)
 297   static const bool interpreted = false;
 298   static const bool stub = false;
 299   static const int extra_oops = 1;
 300   static const char type = 'c';
 301 
 302   template <typename RegisterMapT>
 303   static bool is_owning_locks(JavaThread* thread, RegisterMapT* map, const frame& f);
 304   static address deopt_original_pc(intptr_t* sp, address pc, CodeBlob* cb);
 305 };
 306 
 307 DEBUG_ONLY(const char* Compiled::name = "Compiled";)
 308 
 309 class StubF : public NonInterpreted<StubF> {
 310 public:
 311   DEBUG_ONLY(static const char* name;)
 312   static const bool interpreted = false;
 313   static const bool stub = true;
 314   static const int extra_oops = 0;
 315   static const char type = 's';
 316 };
 317 
 318 DEBUG_ONLY(const char* StubF::name = "Stub";)
 319 
 320 template<typename Self>
 321 template <typename FrameT>
 322 bool FrameCommon<Self>::is_instance(const FrameT& f) {
 323   return (Self::interpreted == f.is_interpreted_frame()) && (Self::stub == (!Self::interpreted && is_stub(f.cb())));
 324 }
 325 
 326 template <typename FrameT>
 327 bool NonInterpretedUnknown::is_instance(const FrameT& f) {
 328   return (interpreted == f.is_interpreted_frame());
 329 }
 330 
 331 bool Frame::is_stub(CodeBlob* cb) {
 332   return cb != nullptr && (cb->is_safepoint_stub() || cb->is_runtime_stub());
 333 }
 334 
 335 inline Method* Frame::frame_method(const frame& f) {
 336   return f.is_interpreted_frame() ? f.interpreter_frame_method() : f.cb()->as_compiled_method()->method();
 337 }
 338 
 339 address Frame::return_pc(const frame& f) {
 340   return *return_pc_address(f);
 341 }
 342 
 343 
 344 #ifdef ASSERT
 345   intptr_t* Frame::frame_top(const frame &f) {
 346     if (f.is_interpreted_frame()) {
 347       InterpreterOopMap mask;
 348       f.interpreted_frame_oop_map(&mask);
 349       return Interpreted::frame_top(f, &mask);
 350     } else {
 351       return Compiled::frame_top(f);
 352     }
 353   }
 354 
 355 
 356 char* Frame::method_name(Method* m) {
 357   return m != nullptr ? m->name_and_sig_as_C_string() : nullptr;
 358 }
 359 
 360 Method* Frame::top_java_frame_method(const frame& f) {
 361   Method* m = nullptr;
 362   if (f.is_interpreted_frame()) {
 363     m = f.interpreter_frame_method();
 364   } else if (f.is_compiled_frame()) {
 365     CompiledMethod* cm = f.cb()->as_compiled_method();
 366     ScopeDesc* scope = cm->scope_desc_at(f.pc());
 367     m = scope->method();
 368   } else if (f.is_native_frame()) {
 369     return f.cb()->as_nmethod()->method();
 370   }
 371   // m = ((CompiledMethod*)f.cb())->method();
 372   return m;
 373 }
 374 
 375 bool Frame::assert_bottom_java_frame_name(const frame& f, const char* name) {
 376   ResourceMark rm;
 377   bool res = (strcmp(bottom_java_frame_name(f), name) == 0);
 378   assert (res, "name: %s", bottom_java_frame_name(f));
 379   return res;
 380 }
 381 
 382 bool Frame::is_deopt_return(address pc, const frame& sender) {
 383   if (sender.is_interpreted_frame()) return false;
 384 
 385   CompiledMethod* cm = sender.cb()->as_compiled_method();
 386   return cm->is_deopt_pc(pc);
 387 }
 388 
 389 #endif
 390 
 391 address Interpreted::return_pc(const frame& f) {
 392   return *return_pc_address(f);
 393 }
 394 
 395 int Interpreted::num_oops(const frame&f, InterpreterOopMap* mask) {
 396   // all locks must be nullptr when freezing, but f.oops_do walks them, so we count them
 397   return f.interpreted_frame_num_oops(mask);
 398 }
 399 
 400 int Interpreted::size(const frame&f) {
 401   return Interpreted::frame_bottom<true>(f) - Interpreted::frame_top(f);
 402 }
 403 
 404 template <bool relative>
 405 inline int Interpreted::stack_argsize(const frame& f) {
 406   return f.interpreter_frame_method()->size_of_parameters();
 407 }
 408 
 409 inline int Interpreted::expression_stack_size(const frame &f, InterpreterOopMap* mask) {
 410   int size = mask->expression_stack_size();
 411   assert (size <= f.interpreter_frame_expression_stack_size(), "size1: %d size2: %d", size, f.interpreter_frame_expression_stack_size());
 412   return size;
 413 }
 414 
 415 bool Interpreted::is_owning_locks(const frame& f) {
 416   assert (f.interpreter_frame_monitor_end() <= f.interpreter_frame_monitor_begin(), "must be");
 417   if (f.interpreter_frame_monitor_end() == f.interpreter_frame_monitor_begin())
 418     return false;
 419 
 420   for (BasicObjectLock* current = f.previous_monitor_in_interpreter_frame(f.interpreter_frame_monitor_begin());
 421         current >= f.interpreter_frame_monitor_end();
 422         current = f.previous_monitor_in_interpreter_frame(current)) {
 423 
 424       oop obj = current->obj();
 425       if (obj != nullptr) {
 426         return true;
 427       }
 428   }
 429   return false;
 430 }
 431 
 432 inline intptr_t* Interpreted::frame_top(const frame& f) { // inclusive; this will be copied with the frame
 433   return f.unextended_sp();
 434 }
 435 
 436 int Interpreted::size(const frame&f, InterpreterOopMap* mask) {
 437   return Interpreted::frame_bottom(f) - Interpreted::frame_top(f, mask);
 438 }
 439 
 440 template<typename Self>
 441 inline intptr_t* NonInterpreted<Self>::frame_top(const frame& f, int callee_argsize, bool callee_interpreted) {
 442   return f.unextended_sp() + (callee_interpreted ? 0 : callee_argsize);
 443 }
 444 
 445 template<typename Self>
 446 inline intptr_t* NonInterpreted<Self>::frame_top(const frame& f) { // inclusive; this will be copied with the frame
 447   return f.unextended_sp();
 448 }
 449 
 450 template<typename Self>
 451 inline intptr_t* NonInterpreted<Self>::frame_bottom(const frame& f) { // exclusive; this will not be copied with the frame
 452   return f.unextended_sp() + f.cb()->frame_size();
 453 }
 454 
 455 template<typename Self>
 456 inline int NonInterpreted<Self>::size(const frame& f) {
 457   assert (!f.is_interpreted_frame() && Self::is_instance(f), "");
 458   return f.cb()->frame_size();
 459 }
 460 
 461 template<typename Self>
 462 inline int NonInterpreted<Self>::stack_argsize(const frame& f) {
 463   return f.compiled_frame_stack_argsize();
 464 }
 465 
 466 template<typename Self>
 467 inline int NonInterpreted<Self>::num_oops(const frame& f) {
 468   assert (!f.is_interpreted_frame() && Self::is_instance(f), "");
 469   return f.num_oops() + Self::extra_oops;
 470 }
 471 
 472 
 473 address Compiled::deopt_original_pc(intptr_t* sp, address pc, CodeBlob* cb) {
 474   // TODO DEOPT: unnecessary in the long-term solution of unrolling on freeze
 475 
 476   assert (cb != nullptr && cb->is_compiled(), "");
 477   CompiledMethod* cm = cb->as_compiled_method();
 478   if (cm->is_deopt_pc(pc)) {
 479     log_develop_trace(jvmcont)("Compiled::deopt_original_pc deoptimized frame");
 480     pc = *(address*)((address)sp + cm->orig_pc_offset());
 481     assert(pc != nullptr, "");
 482     assert(cm->insts_contains_inclusive(pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)");
 483     assert(!cm->is_deopt_pc(pc), "");
 484     // _deopt_state = is_deoptimized;
 485   }
 486 
 487   return pc;
 488 }
 489 
 490 template<typename RegisterMapT>
 491 bool Compiled::is_owning_locks(JavaThread* thread, RegisterMapT* map, const frame& f) {
 492   // if (!DetectLocksInCompiledFrames) return false;
 493   assert (!f.is_interpreted_frame() && Compiled::is_instance(f), "");
 494 
 495   CompiledMethod* cm = f.cb()->as_compiled_method();
 496   assert (!cm->is_compiled() || !cm->as_compiled_method()->is_native_method(), ""); // See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp
 497 
 498   if (!cm->has_monitors()) return false;
 499 
 500   frame::update_map_with_saved_link(map, Frame::callee_link_address(f)); // the monitor object could be stored in the link register
 501   ResourceMark rm;
 502   for (ScopeDesc* scope = cm->scope_desc_at(f.pc()); scope != nullptr; scope = scope->sender()) {
 503     GrowableArray<MonitorValue*>* mons = scope->monitors();
 504     if (mons == nullptr || mons->is_empty())
 505       continue;
 506 
 507     for (int index = (mons->length()-1); index >= 0; index--) { // see compiledVFrame::monitors()
 508       MonitorValue* mon = mons->at(index);
 509       if (mon->eliminated())
 510         continue; // we ignore scalar-replaced monitors
 511       ScopeValue* ov = mon->owner();
 512       StackValue* owner_sv = StackValue::create_stack_value(&f, map, ov); // it is an oop
 513       oop owner = owner_sv->get_obj()();
 514       if (owner != nullptr) {
 515         //assert(cm->has_monitors(), "");
 516         return true;
 517       }
 518     }
 519   }
 520   return false;
 521 }
 522 
 523 
 524 // Mirrors the Java continuation object.
 525 // This object is created when we begin a freeze/thaw operation for a continuation, and is destroyed when the operation completes.
 526 // Contents are read from the Java object at the entry points of this module, and written back at exits or at intermediate calls into Java.
 527 class ContMirror {
 528 private:
 529   JavaThread* const _thread;   // Thread being frozen/thawed
 530   ContinuationEntry* _entry;
 531   oop _cont;
 532 
 533   stackChunkOop _tail;
 534 
 535   // Profiling data for the JFR event
 536   short _e_size;
 537   short _e_num_interpreted_frames;
 538   short _e_num_frames;
 539 
 540 public:
 541   inline void post_safepoint(Handle conth);
 542   stackChunkOop allocate_stack_chunk(int stack_size, bool is_preempt);
 543 
 544 private:
 545   ContMirror(const ContMirror& cont); // no copy constructor
 546 
 547 public:
 548   // does not automatically read the continuation object
 549   ContMirror(JavaThread* thread, oop cont);
 550   ContMirror(oop cont);
 551   ContMirror(const RegisterMap* map);
 552 
 553   intptr_t hash() {
 554     #ifndef PRODUCT
 555       return Thread::current()->is_Java_thread() ? _cont->identity_hash() : -1;
 556     #else
 557       return 0;
 558     #endif
 559   }
 560 
 561   void read();
 562   inline void read_minimal();
 563   void read_rest();
 564 
 565   inline void write();
 566 
 567   oop mirror() { return _cont; }
 568   oop parent() { return jdk_internal_vm_Continuation::parent(_cont); }
 569 
 570   ContinuationEntry* entry() const { return _entry; }
 571   intptr_t* entrySP() const { return _entry->entry_sp(); }
 572   intptr_t* entryFP() const { return _entry->entry_fp(); }
 573   address   entryPC() const { return _entry->entry_pc(); }
 574 
 575   int argsize() const { return _entry->argsize(); }
 576   void set_argsize(int value) { _entry->set_argsize(value); }
 577 
 578   bool is_mounted() { return _entry != nullptr; }
 579 
 580   stackChunkOop tail() const         { return _tail; }
 581   void set_tail(stackChunkOop chunk) { _tail = chunk; }
 582 
 583   JavaThread* thread() const { return _thread; }
 584 
 585   bool is_empty() const { return !has_nonempty_chunk(); }
 586   bool has_nonempty_chunk() const { return last_nonempty_chunk() != nullptr; }
 587   stackChunkOop last_nonempty_chunk() const { return nonempty_chunk(_tail); }
 588   stackChunkOop prev_nonempty_chunk(stackChunkOop chunk) const { return nonempty_chunk(chunk->parent()); }
 589   inline stackChunkOop nonempty_chunk(stackChunkOop chunk) const;
 590 
 591   template <bool aligned = true>
 592   void copy_to_chunk(intptr_t* from, intptr_t* to, int size);
 593 
 594   address last_pc() { return last_nonempty_chunk()->pc(); }
 595 
 596   stackChunkOop find_chunk_by_address(void* p) const;
 597 
 598   const frame last_frame();
 599   inline void set_empty() { _tail = nullptr; }
 600 
 601   bool is_preempted() { return jdk_internal_vm_Continuation::is_preempted(_cont); }
 602   void set_preempted(bool value) { jdk_internal_vm_Continuation::set_preempted(_cont, value); }
 603 
 604   inline void inc_num_interpreted_frames() { _e_num_interpreted_frames++; }
 605   inline void dec_num_interpreted_frames() { _e_num_interpreted_frames--; }
 606 
 607   template<typename Event> void post_jfr_event(Event *e, JavaThread* jt);
 608 
 609 #ifdef ASSERT
 610   inline bool is_entry_frame(const frame& f);
 611   bool has_mixed_frames();
 612   bool chunk_invariant();
 613 #endif
 614 };
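// A minimal lifecycle sketch for ContMirror (illustrative only; the real call sites are the
// freeze/thaw code below): construct it at an entry point, read the Java-side state, update the
// tail chunk, then write the result back to the Java object.
//
//   ContMirror cont(thread, cont_oop);  // 'thread' and 'cont_oop' assumed to be in hand
//   cont.read();                        // pulls the tail chunk from the Java mirror
//   stackChunkOop chunk = cont.tail();
//   // ... freeze or thaw frames into/out of 'chunk' ...
//   cont.set_tail(chunk);
//   cont.write();                       // publishes the tail back to the Java mirror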
 615 
 616 
 617 class ContinuationHelper {
 618 public:
 619   static const int frame_metadata; // size, in words, of frame metadata (e.g. pc and link)
 620   static const int align_wiggle; // size, in words, of maximum shift in frame position due to alignment
 621 
 622   static oop get_continuation(JavaThread* thread);
 623 
 624   static void set_anchor_to_entry(JavaThread* thread, ContinuationEntry* cont);
 625   static void set_anchor_to_entry_pd(JavaFrameAnchor* anchor, ContinuationEntry* cont);
 626   static void set_anchor(JavaThread* thread, intptr_t* sp);
 627   static void set_anchor_pd(JavaFrameAnchor* anchor, intptr_t* sp);
 628   static inline void clear_anchor(JavaThread* thread);
 629 
 630   static void update_register_map_for_entry_frame(const ContMirror& cont, RegisterMap* map);
 631   static void update_map_for_chunk_frame(RegisterMap* map);
 632   template<typename FKind, typename RegisterMapT> static void update_register_map(RegisterMapT* map, const frame& f); // TODO invert parameter order
 633   template<typename RegisterMapT> static void update_register_map_with_callee(RegisterMapT* map, const frame& f); // TODO invert parameter order
 634 
 635   static inline frame last_frame(JavaThread* thread);
 636   static inline void push_pd(const frame& f);
 637 
 638   template <bool dword_aligned = true>
 639   static inline void copy_from_stack(void* from, void* to, size_t size);
 640   template <bool dword_aligned = true>
 641   static inline void copy_to_stack(void* from, void* to, size_t size);
 642 };
 643 
 644 void ContinuationHelper::set_anchor_to_entry(JavaThread* thread, ContinuationEntry* cont) {
 645   JavaFrameAnchor* anchor = thread->frame_anchor();
 646   anchor->set_last_Java_sp(cont->entry_sp());
 647   anchor->set_last_Java_pc(cont->entry_pc());
 648   set_anchor_to_entry_pd(anchor, cont);
 649 
 650   assert (thread->has_last_Java_frame(), "");
 651   assert(thread->last_frame().cb() != nullptr, "");
 652   log_develop_trace(jvmcont)("set_anchor: [" JLONG_FORMAT "] [%d]", java_tid(thread), thread->osthread()->thread_id());
 653   print_vframe(thread->last_frame());
 654 }
 655 
 656 void ContinuationHelper::set_anchor(JavaThread* thread, intptr_t* sp) {
 657   address pc = *(address*)(sp - frame::sender_sp_ret_address_offset());
 658   assert (pc != nullptr, "");
 659 
 660   JavaFrameAnchor* anchor = thread->frame_anchor();
 661   anchor->set_last_Java_sp(sp);
 662   anchor->set_last_Java_pc(pc);
 663   set_anchor_pd(anchor, sp);
 664 
 665   assert (thread->has_last_Java_frame(), "");
 666   log_develop_trace(jvmcont)("set_anchor: [" JLONG_FORMAT "] [%d]", java_tid(thread), thread->osthread()->thread_id());
 667   print_vframe(thread->last_frame());
 668   assert(thread->last_frame().cb() != nullptr, "");
 669 }
 670 
 671 inline void ContinuationHelper::clear_anchor(JavaThread* thread) {
 672   thread->frame_anchor()->clear();
 673 }
 674 
 675 void ContinuationHelper::update_register_map_for_entry_frame(const ContMirror& cont, RegisterMap* map) { // TODO NOT PD
 676   // we need to register the link address for the entry frame
 677   if (cont.entry() != nullptr) {
 678     cont.entry()->update_register_map(map);
 679     log_develop_trace(jvmcont)("ContinuationHelper::update_register_map_for_entry_frame");
 680   } else {
 681     log_develop_trace(jvmcont)("ContinuationHelper::update_register_map_for_entry_frame: clearing register map.");
 682     map->clear();
 683   }
 684 }
 685 
 686 oop ContinuationHelper::get_continuation(JavaThread* thread) {
 687   assert (thread != nullptr, "");
 688   assert (thread->threadObj() != nullptr, "");
 689   return java_lang_Thread::continuation(thread->threadObj());
 690 }
 691 
 692 ContMirror::ContMirror(JavaThread* thread, oop cont)
 693  : _thread(thread), _entry(thread->last_continuation()), _cont(cont),
 694 #ifndef PRODUCT
 695   _tail(nullptr),
 696 #endif
 697   _e_size(0) {
 698 
 699   assert(_cont != nullptr && oopDesc::is_oop_or_null(_cont), "Invalid cont: " INTPTR_FORMAT, p2i((void*)_cont));
 700   assert (_cont == _entry->cont_oop(), "mirror: " INTPTR_FORMAT " entry: " INTPTR_FORMAT " entry_sp: "
 701           INTPTR_FORMAT, p2i((oopDesc*)_cont), p2i((oopDesc*)_entry->cont_oop()), p2i(entrySP()));
 702 }
 703 
 704 ContMirror::ContMirror(oop cont)
 705  : _thread(nullptr), _entry(nullptr), _cont(cont),
 706 #ifndef PRODUCT
 707   _tail(nullptr),
 708 #endif
 709   _e_size(0) {
 710   assert(_cont != nullptr && oopDesc::is_oop_or_null(_cont), "Invalid cont: " INTPTR_FORMAT, p2i((void*)_cont));
 711 
 712   read();
 713 }
 714 
 715 ContMirror::ContMirror(const RegisterMap* map)
 716  : _thread(map->thread()),
 717    _entry(Continuation::get_continuation_entry_for_continuation(_thread, map->stack_chunk()->cont())),
 718    _cont(map->stack_chunk()->cont()),
 719 #ifndef PRODUCT
 720   _tail(nullptr),
 721 #endif
 722   _e_size(0) {
 723 
 724   assert(_cont != nullptr && oopDesc::is_oop_or_null(_cont), "Invalid cont: " INTPTR_FORMAT, p2i((void*)_cont));
 725 
 726   assert (_entry == nullptr || _cont == _entry->cont_oop(), "mirror: " INTPTR_FORMAT " entry: " INTPTR_FORMAT " entry_sp: " INTPTR_FORMAT, p2i((oopDesc*)_cont), p2i((oopDesc*)_entry->cont_oop()), p2i(entrySP()));
 727   read();
 728 }
 729 
 730 void ContMirror::read() {
 731   read_minimal();
 732   read_rest();
 733 }
 734 
 735 ALWAYSINLINE void ContMirror::read_minimal() {
 736   _tail  = (stackChunkOop)jdk_internal_vm_Continuation::tail(_cont);
 737 
 738   // if (log_develop_is_enabled(Trace, jvmcont)) {
 739   //   log_develop_trace(jvmcont)("Reading continuation object: " INTPTR_FORMAT, p2i((oopDesc*)_cont));
 740   //   log_develop_trace(jvmcont)("\ttail: " INTPTR_FORMAT, p2i((oopDesc*)_tail));
 741   //   if (_tail != nullptr) _tail->print_on(tty);
 742   // }
 743 }
 744 
 745 void ContMirror::read_rest() {
 746   _e_num_interpreted_frames = 0;
 747   _e_num_frames = 0;
 748 }
 749 
 750 inline void ContMirror::write() {
 751   if (log_develop_is_enabled(Trace, jvmcont)) {
 752     log_develop_trace(jvmcont)("Writing continuation object:");
 753     log_develop_trace(jvmcont)("\ttail: " INTPTR_FORMAT, p2i((oopDesc*)_tail));
 754     if (_tail != nullptr) _tail->print_on(tty);
 755   }
 756 
 757   jdk_internal_vm_Continuation::set_tail(_cont, _tail);
 758 }
 759 
 760 inline stackChunkOop ContMirror::nonempty_chunk(stackChunkOop chunk) const {
 761   while (chunk != nullptr && chunk->is_empty()) chunk = chunk->parent();
 762   return chunk;
 763 }
 764 
 765 const frame ContMirror::last_frame() {
 766   stackChunkOop chunk = last_nonempty_chunk();
 767   if (chunk == nullptr) return frame();
 768   return StackChunkFrameStream<true>(chunk).to_frame();
 769 }
 770 
 771 stackChunkOop ContMirror::find_chunk_by_address(void* p) const {
 772   for (stackChunkOop chunk = tail(); chunk != nullptr; chunk = chunk->parent()) {
 773     if (chunk->is_in_chunk(p)) {
 774       assert (chunk->is_usable_in_chunk(p), "");
 775       return chunk;
 776     }
 777   }
 778   return nullptr;
 779 }
 780 
 781 template<typename Event> void ContMirror::post_jfr_event(Event* e, JavaThread* jt) {
 782 #if INCLUDE_JFR
 783   if (e->should_commit()) {
 784     log_develop_trace(jvmcont)("JFR event: frames: %d iframes: %d size: %d", _e_num_frames, _e_num_interpreted_frames, _e_size);
 785     e->set_carrierThread(JFR_VM_THREAD_ID(jt));
 786     e->set_contClass(_cont->klass());
 787     e->set_numFrames(_e_num_frames);
 788     e->set_numIFrames(_e_num_interpreted_frames);
 789     e->set_size(_e_size);
 790     e->commit();
 791   }
 792 #endif
 793 }
 794 
 795 template <bool aligned>
 796 void ContMirror::copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
 797   tail()->copy_from_stack_to_chunk<aligned>(from, to, size);
 798   _e_size += size << LogBytesPerWord;
 799 }
 800 
 801 #ifdef ASSERT
 802 inline bool ContMirror::is_entry_frame(const frame& f) {
 803   return f.sp() == entrySP();
 804 }
 805 
 806 bool ContMirror::chunk_invariant() {
 807   // only the topmost chunk can be empty
 808   if (_tail == (oop)nullptr)
 809     return true;
 810   assert (_tail->is_stackChunk(), "");
 811   int i = 1;
 812   for (stackChunkOop chunk = _tail->parent(); chunk != (oop)nullptr; chunk = chunk->parent()) {
 813     if (chunk->is_empty()) {
 814       assert (chunk != _tail, "");
 815       tty->print_cr("i: %d", i);
 816       chunk->print_on(true, tty);
 817       return false;
 818     }
 819     i++;
 820   }
 821   return true;
 822 }
 823 
 824 bool ContMirror::has_mixed_frames() {
 825   for (stackChunkOop c = tail(); c != nullptr; c = c->parent()) if (c->has_mixed_frames()) return true;
 826   return false;
 827 }
 828 #endif
 829 
 830 #if INCLUDE_JVMTI
 831 static int num_java_frames(ContMirror& cont) {
 832   ResourceMark rm; // used for scope traversal in num_java_frames(CompiledMethod*, address)
 833   int count = 0;
 834   for (stackChunkOop chunk = cont.tail(); chunk != (oop)nullptr; chunk = chunk->parent()) {
 835     count += chunk->num_java_frames();
 836   }
 837   return count;
 838 }
 839 #endif // INCLUDE_JVMTI
 840 
 841 typedef int (*FreezeContFnT)(JavaThread*, intptr_t*, bool);
 842 
 843 static FreezeContFnT cont_freeze = nullptr;
 844 
 845 
 846 class CachedCompiledMetadata; // defined in PD
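// Note (interpretation, not taken from the original comments): safe_load first does a raw,
// barrier-free load of a possibly stale oop from 'addr', then re-loads it through NativeAccess
// so that the GC's load barriers are applied to the value before it is used.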
 847 template<class P>
 848 static inline oop safe_load(P *addr) {
 849   oop obj = (oop)RawAccess<>::oop_load(addr);
 850   obj = (oop)NativeAccess<>::oop_load(&obj);
 851   return obj;
 852 }
 853 
 854 #ifdef ASSERT
 855 template <class P>
 856 static void verify_oop_at(P* p) {
 857   oop obj = (oop)NativeAccess<>::oop_load(p);
 858   assert(oopDesc::is_oop_or_null(obj), "");
 859 }
 860 #endif
 861 
 862 class CountOops : public OopClosure {
 863 private:
 864   int _nr_oops;
 865 public:
 866   CountOops() : _nr_oops(0) {}
 867   int nr_oops() const { return _nr_oops; }
 868 
 869 
 870   virtual void do_oop(oop* o) { _nr_oops++; }
 871   virtual void do_oop(narrowOop* o) { _nr_oops++; }
 872 };
 873 
 874 // should match Continuation.preemptStatus() in Continuation.java
 875 enum freeze_result {
 876   freeze_ok = 0,
 877   freeze_ok_bottom = 1,
 878   freeze_pinned_cs = 2,
 879   freeze_pinned_native = 3,
 880   freeze_pinned_monitor = 4,
 881   freeze_exception = 5
 882 };
 883 
 884 const char* freeze_result_names[6] = {
 885   "freeze_ok",
 886   "freeze_ok_bottom",
 887   "freeze_pinned_cs",
 888   "freeze_pinned_native",
 889   "freeze_pinned_monitor",
 890   "freeze_exception"
 891 };
 892 
 893 template <typename ConfigT>
 894 class Freeze {
 895 
 896 private:
 897   JavaThread* const _thread;
 898   ContMirror& _cont;
 899   bool _barriers;
 900   const bool _preempt;
 901 
 902   intptr_t *_bottom_address;
 903   intptr_t *_top_address;
 904 
 905   int _size; // total size of all frames plus metadata, in words; tracks the offset at which a frame should be written and how many bytes we need to allocate
 906   int _frames;
 907   int _align_size;
 908 
 909   DEBUG_ONLY(intptr_t* _last_write;)
 910 
 911   inline void set_top_frame_metadata_pd(const frame& hf);
 912   template <typename FKind, bool bottom> inline void patch_pd(frame& callee, const frame& caller);
 913   inline void patch_chunk_pd(intptr_t* vsp, intptr_t* hsp);
 914   template<typename FKind> frame new_hframe(frame& f, frame& caller);
 915   inline intptr_t* align_bottom(intptr_t* vsp, int argsize);
 916   static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
 917 
 918   template<typename FKind> static inline frame sender(const frame& f);
 919 
 920 public:
 921 
 922   Freeze(JavaThread* thread, ContMirror& mirror, bool preempt) :
 923     _thread(thread), _cont(mirror), _barriers(false), _preempt(preempt) {
 924 
 925     // _cont.read_entry(); // even when retrying, because deopt can change entryPC; see Continuation::get_continuation_entry_pc_for_sender
 926     _cont.read(); // read_minimal
 927 
 928     assert (thread->last_continuation()->entry_sp() == _cont.entrySP(), "");
 929 
 930     int argsize = bottom_argsize();
 931     _bottom_address = _cont.entrySP() - argsize;
 932     DEBUG_ONLY(_cont.entry()->verify_cookie();)
 933 
 934     assert (!Interpreter::contains(_cont.entryPC()), "");
 935 
 936   #ifdef _LP64
 937     if (((intptr_t)_bottom_address & 0xf) != 0) {
 938       _bottom_address--;
 939     }
 940     assert((intptr_t)_bottom_address % 16 == 0, "");
 941   #endif
 942 
 943     log_develop_trace(jvmcont)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT, p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
 944     assert (_bottom_address != nullptr && _bottom_address <= _cont.entrySP(), "");
 945   }
 946 
 947   void init_rest() { // we want to postpone some initialization until after chunk handling
 948     _size = 0;
 949     _frames = 0;
 950     _align_size = 0;
 951   }
 952 
 953   int nr_bytes() const  { return _size << LogBytesPerWord; }
 954   int nr_frames() const { return _frames; }
 955 
 956   inline bool should_flush_stack_processing() {
 957     StackWatermark* sw;
 958     uintptr_t watermark;
 959     return ((sw = StackWatermarkSet::get(_thread, StackWatermarkKind::gc)) != nullptr
 960       && (watermark = sw->watermark()) != 0
 961       && watermark <= ((uintptr_t)_cont.entrySP() + ContinuationEntry::size()));
 962   }
 963 
 964   NOINLINE void flush_stack_processing() {
 965     log_develop_trace(jvmcont)("flush_stack_processing");
 966     for (StackFrameStream fst(_thread, true, true); !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next())
 967       ;
 968   }
 969 
 970   template <bool aligned = true>
 971   void copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
 972     _cont.copy_to_chunk<aligned>(from, to, size);
 973   #ifdef ASSERT
 974     stackChunkOop chunk = _cont.tail();
 975     assert (_last_write == to + size, "Missed a spot: _last_write: " INTPTR_FORMAT " to+size: " INTPTR_FORMAT " stack_size: %d _last_write offset: " PTR_FORMAT " to+size: " PTR_FORMAT,
 976       p2i(_last_write), p2i(to + size), chunk->stack_size(), _last_write - chunk->start_address(), to + size - chunk->start_address());
 977     _last_write = to;
 978     // tty->print_cr(">>> copy_to_chunk _last_write: %p", _last_write);
 979   #endif
 980   }
 981 
 982   freeze_result try_freeze_fast(intptr_t* sp, bool chunk_available) {
 983     if (freeze_fast(sp, chunk_available)) {
 984       return freeze_ok;
 985     }
 986     if (_thread != nullptr && _thread->has_pending_exception()) {
 987       return freeze_exception;
 988     }
 989 
 990     EventContinuationFreezeOld e;
 991     if (e.should_commit()) {
 992       e.set_id(cast_from_oop<u8>(_cont.mirror()));
 993       e.commit();
 994     }
 995     // TODO R REMOVE when deopt change is fixed
 996     assert (!_thread->cont_fastpath() || _barriers, "");
 997     log_develop_trace(jvmcont)("-- RETRYING SLOW --");
 998     return freeze_slow();
 999   }
1000 
1001 
1002   inline int bottom_argsize() {
1003     int argsize = _cont.argsize(); // in words
1004     log_develop_trace(jvmcont)("bottom_argsize: %d", argsize);
1005     assert (argsize >= 0, "argsize: %d", argsize);
1006     return argsize;
1007   }
1008 
1009   // returns true iff there's room in the chunk for a fast, compiled-frame-only freeze
1010   // TODO PERF: consider inlining in stub
1011   bool is_chunk_available(intptr_t* top_sp
1012 #ifdef ASSERT
1013     , int* out_size = nullptr
1014 #endif
1015   ) {
1016     stackChunkOop chunk = _cont.tail();
1017     if (chunk == nullptr || chunk->is_gc_mode() || ConfigT::requires_barriers(chunk) || chunk->has_mixed_frames()) {
1018       log_develop_trace(jvmcont)("is_chunk_available %s", chunk == nullptr ? "no chunk" : "chunk requires barriers");
1019       return false;
1020     }
1021 
1022     // assert (CodeCache::find_blob(*(address*)(top_sp - SENDER_SP_RET_ADDRESS_OFFSET)) == StubRoutines::cont_doYield_stub(), ""); -- fails on Windows
1023     assert (StubRoutines::cont_doYield_stub()->frame_size() == ContinuationHelper::frame_metadata, "");
1024     intptr_t* const top = top_sp + ContinuationHelper::frame_metadata;
1025     const int argsize = bottom_argsize();
1026     intptr_t* const bottom = align_bottom(_cont.entrySP(), argsize);
1027     int size = bottom - top; // in words
1028 
1029     const int sp = chunk->sp();
1030     if (sp < chunk->stack_size()) {
1031       size -= argsize;
1032     }
1033     assert (size > 0, "");
1034 
1035     bool available = sp - ContinuationHelper::frame_metadata >= size;
1036     log_develop_trace(jvmcont)("is_chunk_available available: %d size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT, available, argsize, size, p2i(top), p2i(bottom));
1037     DEBUG_ONLY(if (out_size != nullptr) *out_size = size;)
1038     return available;
1039   }
1040 
1041   bool freeze_fast(intptr_t* top_sp, bool chunk_available) {
1042   #ifdef CALLGRIND_START_INSTRUMENTATION
1043     if (_frames > 0 && callgrind_counter == 1) {
1044       callgrind_counter = 2;
1045       tty->print_cr("Starting callgrind instrumentation");
1046       CALLGRIND_START_INSTRUMENTATION;
1047     }
1048   #endif
1049 
1050     // tty->print_cr("FREEZE FAST");
1051     log_develop_trace(jvmcont)("freeze_fast");
1052     assert (_thread != nullptr, "");
1053     assert(_cont.chunk_invariant(), "");
1054     assert (!Interpreter::contains(_cont.entryPC()), "");
1055 
1056     stackChunkOop chunk = _cont.tail();
1057 
1058     // On Windows, this finds `BufferBlob (0x00000290bae6fc90) used for I2C/C2I adapters` and `BufferBlob (0x0000023375f38110) used for Interpreter`
1059     // if (!(CodeCache::find_blob(*(address*)(top_sp - SENDER_SP_RET_ADDRESS_OFFSET)) == StubRoutines::cont_doYield_stub())) {
1060     //   CodeBlob* cb11 = CodeCache::find_blob(*(address*)(top_sp - SENDER_SP_RET_ADDRESS_OFFSET));
1061     //   if (cb11 == nullptr) tty->print_cr(">>>> WHOA NULL"); else {tty->print_cr(">>>> WHOA"); cb11->print_value_on(tty);}
1062     // }
1063     // assert (CodeCache::find_blob(*(address*)(top_sp - SENDER_SP_RET_ADDRESS_OFFSET)) == StubRoutines::cont_doYield_stub(), ""); -- fails on Windows
1064     assert (StubRoutines::cont_doYield_stub()->frame_size() == ContinuationHelper::frame_metadata, "");
1065     intptr_t* const top = top_sp + ContinuationHelper::frame_metadata;
1066 
1067     const int argsize = bottom_argsize();
1068     intptr_t* const bottom = align_bottom(_cont.entrySP(), argsize);
1069     const int size = bottom - top; // in words
1070     log_develop_trace(jvmcont)("freeze_fast size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT, size, argsize, p2i(top), p2i(bottom));
1071     assert (size > 0, "");
1072 
1073     int sp;
1074   #ifdef ASSERT
1075     bool allocated, empty;
1076     int is_chunk_available_size;
1077     bool is_chunk_available0 = is_chunk_available(top_sp, &is_chunk_available_size);
1078     intptr_t* orig_chunk_sp = nullptr;
1079   #endif
1080     if (LIKELY(chunk_available)) {
1081       assert (chunk == _cont.tail() && is_chunk_available0, "");
1082       DEBUG_ONLY(allocated = false;)
1083       DEBUG_ONLY(orig_chunk_sp = chunk->sp_address();)
1084       sp = chunk->sp();
1085 
1086       if (sp < chunk->stack_size()) { // we are copying into a non-empty chunk
1087         assert (sp < (chunk->stack_size() - chunk->argsize()), "");
1088         assert (*(address*)(chunk->sp_address() - frame::sender_sp_ret_address_offset()) == chunk->pc(), "chunk->sp_address() - frame::sender_sp_ret_address_offset(): %p *(address*)(chunk->sp_address() - frame::sender_sp_ret_address_offset()): %p chunk->pc(): %p", chunk->sp_address() - frame::sender_sp_ret_address_offset(), *(address*)(chunk->sp_address() - frame::sender_sp_ret_address_offset()), chunk->pc());
1089 
1090         DEBUG_ONLY(empty = false;)
1091         sp += argsize; // we overlap
1092         assert (sp <= chunk->stack_size(), "");
1093 
1094         log_develop_trace(jvmcont)("add max_size: %d -- %d", size - argsize, chunk->max_size() + size - argsize);
1095         chunk->set_max_size(chunk->max_size() + size - argsize);
1096 
1097         intptr_t* const bottom_sp = bottom - argsize;
1098         log_develop_trace(jvmcont)("patching bottom sp: " INTPTR_FORMAT, p2i(bottom_sp));
1099         assert (bottom_sp == _bottom_address, "");
1100         assert (*(address*)(bottom_sp - frame::sender_sp_ret_address_offset()) == StubRoutines::cont_returnBarrier(), "");
1101         patch_chunk_pd(bottom_sp, chunk->sp_address());
1102         // we don't patch the pc at this time, so as not to make the stack unwalkable
1103       } else { // the chunk is empty
1104         assert(sp == chunk->stack_size(), "sp: %d chunk->stack_size(): %d", sp, chunk->stack_size());
1105         DEBUG_ONLY(empty = true;)
1106         log_develop_trace(jvmcont)("add max_size: %d -- %d", size + ContinuationHelper::frame_metadata, size + ContinuationHelper::frame_metadata);
1107         chunk->set_max_size(size);
1108         chunk->set_argsize(argsize);
1109       }
1110 
1111       // chunk->reset_counters(chunk);
1112     } else {
1113       assert (_thread->thread_state() == _thread_in_vm, "");
1114       assert (!is_chunk_available(top_sp), "");
1115       assert (_thread->cont_fastpath(), "");
1116 
1117       chunk = allocate_chunk(size + ContinuationHelper::frame_metadata);
1118       if (UNLIKELY(chunk == nullptr || !_thread->cont_fastpath())) {
1119         return false;
1120       }
1121 
1122       DEBUG_ONLY(empty = true;)
1123       DEBUG_ONLY(allocated = true;)
1124 
1125       sp = size + ContinuationHelper::frame_metadata;
1126       DEBUG_ONLY(orig_chunk_sp = chunk->start_address() + sp;)
1127 
1128       assert (chunk->parent() == (oop)nullptr || chunk->parent()->is_stackChunk(), "");
1129       // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
1130       // They'll then be stored twice: in the chunk and in the parent
1131 
1132       _cont.set_tail(chunk);
1133       // jdk_internal_vm_Continuation::set_tail(_cont.mirror(), chunk);
1134 
1135       if (UNLIKELY(ConfigT::requires_barriers(chunk))) { // probably humongous
1136         log_develop_trace(jvmcont)("allocation requires barriers; retrying slow");
1137         chunk->set_argsize(0);
1138         chunk->set_sp(sp);
1139         _barriers = true;
1140         return false;
1141       }
1142 
1143       log_develop_trace(jvmcont)("add max_size: %d -- %d", size + ContinuationHelper::frame_metadata, size + ContinuationHelper::frame_metadata);
1144       chunk->set_max_size(size);
1145       chunk->set_argsize(argsize);
1146     }
1147 
1148     assert (chunk != nullptr, "");
1149     assert (!chunk->has_mixed_frames(), "");
1150     assert (!chunk->is_gc_mode(), "");
1151     assert (!chunk->has_bitmap(), "");
1152 
1153     if (should_flush_stack_processing())
1154       flush_stack_processing();
1155 
1156     NoSafepointVerifier nsv;
1157     assert (chunk->is_stackChunk(), "");
1158     assert (!chunk->requires_barriers(), "");
1159     assert (chunk == _cont.tail(), "");
1160     // assert (chunk == jdk_internal_vm_Continuation::tail(_cont.mirror()), "");
1161     // assert (!chunk->is_gc_mode(), "allocated: %d empty: %d", allocated, empty);
1162     assert (sp <= chunk->stack_size(), "sp: %d chunk size: %d size: %d argsize: %d allocated: %d", sp, chunk->stack_size(), size, argsize, allocated);
1163 
1164     log_develop_trace(jvmcont)("freeze_fast start: chunk " INTPTR_FORMAT " size: %d orig sp: %d argsize: %d", p2i((oopDesc*)chunk), chunk->stack_size(), sp, argsize);
1165     assert (sp >= size, "");
1166     sp -= size;
1167     assert (!is_chunk_available0 || orig_chunk_sp - (chunk->start_address() + sp) == is_chunk_available_size, "mismatched size calculation: orig_sp - sp: " PTR_FORMAT " size: %d argsize: %d is_chunk_available_size: %d empty: %d allocated: %d", orig_chunk_sp - (chunk->start_address() + sp), size, argsize, is_chunk_available_size, empty, allocated);
1168 
1169     intptr_t* chunk_top = chunk->start_address() + sp;
1170     assert (empty || *(address*)(orig_chunk_sp - frame::sender_sp_ret_address_offset()) == chunk->pc(), "orig_chunk_sp - frame::sender_sp_ret_address_offset(): %p *(address*)(orig_chunk_sp - frame::sender_sp_ret_address_offset()): %p chunk->pc(): %p", orig_chunk_sp - frame::sender_sp_ret_address_offset(), *(address*)(orig_chunk_sp - frame::sender_sp_ret_address_offset()), chunk->pc());
1171 
1172     log_develop_trace(jvmcont)("freeze_fast start: " INTPTR_FORMAT " sp: %d chunk_top: " INTPTR_FORMAT, p2i(chunk->start_address()), sp, p2i(chunk_top));
1173     intptr_t* from = top       - ContinuationHelper::frame_metadata;
1174     intptr_t* to   = chunk_top - ContinuationHelper::frame_metadata;
1175     _cont.copy_to_chunk(from, to, size + ContinuationHelper::frame_metadata);
1176 
1177     // patch pc
1178     intptr_t* chunk_bottom_sp = chunk_top + size - argsize;
1179     log_develop_trace(jvmcont)("freeze_fast patching return address at: " INTPTR_FORMAT " to: " INTPTR_FORMAT, p2i(chunk_bottom_sp - frame::sender_sp_ret_address_offset()), p2i(chunk->pc()));
1180     assert (empty || *(address*)(chunk_bottom_sp - frame::sender_sp_ret_address_offset()) == StubRoutines::cont_returnBarrier(), "");
1181     *(address*)(chunk_bottom_sp - frame::sender_sp_ret_address_offset()) = chunk->pc();
1182 
1183     // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
1184     OrderAccess::storestore();
1185     chunk->set_sp(sp);
1186     chunk->set_pc(*(address*)(top - frame::sender_sp_ret_address_offset()));
1187     chunk->set_gc_sp(sp);
1188     assert (chunk->sp_address() == chunk_top, "");
1189 
1190     _cont.write();
1191 
1192     // if (UNLIKELY(argsize != 0)) {
1193     //   // we're patching the chunk itself rather than the stack before the copy because of concurrent stack scanning
1194     //   intptr_t* const chunk_bottom_sp = to + size - argsize;
1195     //   log_develop_trace(jvmcont)("patching chunk's bottom sp: " INTPTR_FORMAT, p2i(chunk_bottom_sp));
1196     //   assert (*(address*)(chunk_bottom_sp - SENDER_SP_RET_ADDRESS_OFFSET) == StubRoutines::cont_returnBarrier(), "");
1197     //   *(address*)(chunk_bottom_sp - SENDER_SP_RET_ADDRESS_OFFSET) = chunk->pc();
1198     // }
1199 
1200     // // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
1201     // chunk->set_sp(sp);
1202     // chunk->set_pc(*(address*)(top - SENDER_SP_RET_ADDRESS_OFFSET));
1203     // chunk->set_gc_sp(sp);
1204 
1205     log_develop_trace(jvmcont)("Young chunk success");
1206     if (log_develop_is_enabled(Debug, jvmcont)) chunk->print_on(true, tty);
1207 
1208     log_develop_trace(jvmcont)("FREEZE CHUNK #" INTPTR_FORMAT, _cont.hash());
1209     assert (_cont.chunk_invariant(), "");
1210     assert (verify_stack_chunk<1>(chunk), "");
1211 
1212   #if CONT_JFR
1213     EventContinuationFreezeYoung e;
1214     if (e.should_commit()) {
1215       e.set_id(cast_from_oop<u8>(chunk));
1216       e.set_allocate(allocated);
1217       e.set_size(size << LogBytesPerWord);
1218       e.commit();
1219     }
1220   #endif
1221 
1222     // assert(verify_continuation<222>(_cont.mirror()), "");
1223 
1224     return true;
1225   }
1226 
1227  freeze_result freeze_slow() {
1228   #ifdef ASSERT
1229     ResourceMark rm;
1230   #endif
1231     // tty->print_cr("FREEZE SLOW");
1232     log_develop_trace(jvmcont)("freeze_slow  #" INTPTR_FORMAT, _cont.hash());
1233 
1234     assert (_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");
1235 
1236     init_rest();
1237     // _cont.read_rest();
1238 
1239     HandleMark hm(Thread::current());
1240 
1241     frame f = freeze_start_frame();
1242 
1243     _top_address = f.sp();
1244     frame caller;
1245     freeze_result res = freeze(f, caller, 0, false, true);
1246 
1247     if (res == freeze_ok) {
1248       finish_freeze(f, caller);
1249       _cont.write();
1250     }
1251 
1252     return res;
1253   }
1254 
1255   inline bool stack_overflow() { // detect stack overflow in recursive native code
1256     JavaThread* t = !_preempt ? _thread : JavaThread::current();
1257     assert (t == JavaThread::current(), "");
1258     if ((address)&t < t->stack_overflow_state()->stack_overflow_limit()) {
1259       Exceptions::_throw_msg(t, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Stack overflow while freezing");
1260       return true;
1261     }
1262     return false;
1263   }
1264 
1265   frame freeze_start_frame() {
1266     frame f = _thread->last_frame();
1267     if (LIKELY(!_preempt)) {
1268       assert (StubRoutines::cont_doYield_stub()->contains(f.pc()), "");
1269       return freeze_start_frame_yield_stub(f);
1270     } else {
1271       return freeze_start_frame_safepoint_stub(f);
1272     }
1273   }
1274 
1275   frame freeze_start_frame_yield_stub(frame f) {
1276     // log_develop_trace(jvmcont)("%s nop at freeze yield", nativePostCallNop_at(_fi->pc) != nullptr ? "has" : "no");
1277     assert(StubRoutines::cont_doYield_stub()->contains(f.pc()), "must be");
1278     f = sender<StubF>(f);
1279 
1280     // Log(jvmcont) logv; LogStream st(logv.debug()); f.print_on(st);
1281     if (log_develop_is_enabled(Debug, jvmcont)) f.print_on(tty);
1282 
1283     return f;
1284   }
1285 
1286   frame freeze_start_frame_safepoint_stub(frame f) {
1287 #if (defined(X86) || defined(AARCH64)) && !defined(ZERO)
1288     f.set_fp(f.real_fp()); // f.set_fp(*Frame::callee_link_address(f)); // ????
1289 #else
1290     Unimplemented();
1291 #endif
1292     if (!Interpreter::contains(f.pc())) {
1293   #ifdef ASSERT
1294       if (!Frame::is_stub(f.cb())) { f.print_value_on(tty, JavaThread::current()); }
1295   #endif
1296       assert (Frame::is_stub(f.cb()), "must be");
1297       assert (f.oop_map() != nullptr, "must be");
1298 
1299       if (Interpreter::contains(StubF::return_pc(f))) {
1300         log_develop_trace(jvmcont)("Safepoint stub in interpreter");
1301         f = sender<StubF>(f);
1302       }
1303     }
1304 
1305     // Log(jvmcont) logv; LogStream st(logv.debug()); f.print_on(st);
1306     if (log_develop_is_enabled(Debug, jvmcont)) f.print_on(tty);
1307 
1308     return f;
1309   }
1310 
1311   NOINLINE freeze_result freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
1312     assert (f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
1313     assert (f.is_interpreted_frame() || ((top && _preempt) == Frame::is_stub(f.cb())), "");
1314 
1315     if (stack_overflow()) return freeze_exception;
1316 
1317     // Dynamically branch on frame type
1318     if (f.is_compiled_frame()) {
1319       if (UNLIKELY(f.oop_map() == nullptr)) return freeze_pinned_native; // special native frame
1320       if (UNLIKELY(Compiled::is_owning_locks(_cont.thread(), SmallRegisterMap::instance, f))) return freeze_pinned_monitor;
1321 
1322       return recurse_freeze_compiled_frame(f, caller, callee_argsize, callee_interpreted);
1323     } else if (f.is_interpreted_frame()) {
1324       assert ((_preempt && top) || !f.interpreter_frame_method()->is_native(), "");
1325       if (Interpreted::is_owning_locks(f)) return freeze_pinned_monitor;
1326       if (_preempt && top && f.interpreter_frame_method()->is_native()) return freeze_pinned_native; // interpreter native entry
1327 
1328       return recurse_freeze_interpreted_frame(f, caller, callee_argsize, callee_interpreted);
1329     } else if (_preempt && top && Frame::is_stub(f.cb())) {
1330       return recurse_freeze_stub_frame(f, caller);
1331     } else {
1332       return freeze_pinned_native;
1333     }
1334   }
1335 
1336   template<typename FKind>
1337   inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize) {
1338     assert (FKind::is_instance(f), "");
1339     // log_develop_trace(jvmcont)("recurse_freeze_java_frame fsize: %d frame_bottom: " INTPTR_FORMAT " _bottom_address: " INTPTR_FORMAT, fsize, p2i(FKind::frame_bottom(f)), p2i(_bottom_address));
1340 
1341     assert (fsize > 0 && argsize >= 0, "");
1342     _frames++;
1343     _size += fsize;
1344 
1345     if (FKind::frame_bottom(f) >= _bottom_address - 1) { // sometimes there's a space between enterSpecial and the next frame
1346       return finalize_freeze<FKind>(f, caller, argsize); // recursion end
1347     } else {
1348       frame senderf = sender<FKind>(f);
1349       assert (FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
1350       freeze_result result = freeze(senderf, caller, argsize, FKind::interpreted, false); // recursive call
1351 
1352       return result;
1353     }
1354   }
1355 
1356   inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool bottom) {
1357     log_develop_trace(jvmcont)("============================= FREEZING FRAME interpreted: %d bottom: %d", f.is_interpreted_frame(), bottom);
1358     log_develop_trace(jvmcont)("fsize: %d argsize: %d", fsize, argsize);
1359     if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);
1360     assert (caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
1361   }
1362 
1363   inline void after_freeze_java_frame(const frame& hf, bool bottom) {
1364     DEBUG_ONLY(if (log_develop_is_enabled(Trace, jvmcont)) hf.print_value_on(tty, nullptr);)
1365     DEBUG_ONLY(if (log_develop_is_enabled(Trace, jvmcont)) print_frame_layout<true>(hf);)
1366     if (bottom && log_develop_is_enabled(Trace, jvmcont)) {
1367       log_develop_trace(jvmcont)("bottom h-frame:");
1368       hf.print_on<true>(tty);
1369     }
1370   }
1371 
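  // Reached at the end of the recursion, with `callee` being the bottom-most frame to freeze.
  // Decides whether the existing tail chunk can be reused or a new one must be allocated, and
  // accounts for the overlap of outgoing args with a same-kind caller already in the chunk.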
1372   template<typename FKind> // the callee's type
1373   freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize) {
1374   #ifdef CALLGRIND_START_INSTRUMENTATION
1375     if (_frames > 0 && _cgrind_interpreted_frames == 0 && callgrind_counter == 1) {
1376       callgrind_counter = 2;
1377       tty->print_cr("Starting callgrind instrumentation");
1378       CALLGRIND_START_INSTRUMENTATION;
1379     }
1380   #endif
1381 
1382     // const int argsize = _cont.argsize();
1383     // assert (FKind::interpreted || argsize == Compiled::stack_argsize(callee), "argsize: %d argsize(callee): %d", argsize, Compiled::stack_argsize(callee));
1384     assert (FKind::interpreted || argsize == _cont.argsize(), "argsize: %d _cont.argsize(): %d", argsize, _cont.argsize());
1385     log_develop_trace(jvmcont)("bottom: " INTPTR_FORMAT " count %d size: %d argsize: %d", p2i(_bottom_address), nr_frames(), nr_bytes(), argsize);
1386 
1387   #ifdef ASSERT
1388     bool empty = _cont.is_empty();
1389     log_develop_trace(jvmcont)("empty: %d", empty);
1390   #endif
1391 
1392     stackChunkOop chunk = _cont.tail();
1393 
1394     assert (chunk == nullptr || (chunk->max_size() == 0) == chunk->is_empty(), "chunk->max_size(): %d chunk->is_empty(): %d", chunk->max_size(), chunk->is_empty());
1395 
1396     _size += ContinuationHelper::frame_metadata; // for top frame's metadata
1397 
1398     int overlap = 0; // the args overlap the caller -- if there is one in this chunk and it is of the same kind
1399     int unextended_sp = -1;
1400     if (chunk != nullptr) {
1401       unextended_sp = chunk->sp();
1402       if (!chunk->is_empty()) {
1403         bool top_interpreted = Interpreter::contains(chunk->pc());
1404         unextended_sp = chunk->sp();
1405         if (top_interpreted) {
1406           StackChunkFrameStream<true> last(chunk);
1407           unextended_sp += last.unextended_sp() - last.sp(); // can be negative (-1), often with lambda forms
1408         }
1409         if (FKind::interpreted == top_interpreted) {
1410           overlap = argsize;
1411         }
1412       }
1413     }
1414     // else if (FKind::interpreted) {
1415     //   argsize = 0;
1416     // }
1417 
1418     log_develop_trace(jvmcont)("finalize _size: %d overlap: %d unextended_sp: %d", _size, overlap, unextended_sp);
1419 
1420     _size -= overlap;
1421     assert (_size >= 0, "");
1422 
1423     assert (chunk == nullptr || chunk->is_empty() || unextended_sp == chunk->to_offset(StackChunkFrameStream<true>(chunk).unextended_sp()), "");
1424     assert (chunk != nullptr || unextended_sp < _size, "");
1425 
1426     // _barriers can be set to true by an allocation in freeze_fast, in which case the chunk is available
1427     assert (!_barriers || (unextended_sp >= _size && chunk->is_empty()), "unextended_sp: %d size: %d is_empty: %d", unextended_sp, _size, chunk->is_empty());
1428 
1429     DEBUG_ONLY(bool empty_chunk = true);
1430     if (unextended_sp < _size || chunk->is_gc_mode() || (!_barriers && ConfigT::requires_barriers(chunk))) {
1431       // ALLOCATION
1432 
1433       if (log_develop_is_enabled(Trace, jvmcont)) {
1434         if (chunk == nullptr) log_develop_trace(jvmcont)("is chunk available: no chunk");
1435         else {
1436           log_develop_trace(jvmcont)("is chunk available: barriers: %d _size: %d free size: %d", chunk->requires_barriers(), _size, chunk->sp() - ContinuationHelper::frame_metadata);
1437           chunk->print_on(tty);
1438         }
1439       }
1440 
1441       _size += overlap; // we're allocating a new chunk, so no overlap
1442       // overlap = 0;
1443 
1444       chunk = allocate_chunk(_size);
1445       if (chunk == (oop)nullptr) {
1446         return freeze_exception;
1447       }
1448 
1449       int sp = chunk->stack_size() - argsize;
1450       chunk->set_sp(sp);
1451       chunk->set_gc_sp(sp);
1452       chunk->set_argsize(argsize);
1453       assert (chunk->is_empty(), "");
1454       _barriers = ConfigT::requires_barriers(chunk);
1455 
1456       if (_barriers) { log_develop_trace(jvmcont)("allocation requires barriers"); }
1457 
1458       _cont.set_tail(chunk);
1459       // jdk_internal_vm_Continuation::set_tail(_cont.mirror(), _cont.tail()); -- doesn't seem to help
1460     } else {
1461       log_develop_trace(jvmcont)("Reusing chunk mixed: %d empty: %d interpreted callee: %d caller: %d", chunk->has_mixed_frames(), chunk->is_empty(), callee.is_interpreted_frame(), Interpreter::contains(chunk->pc()));
1462       if (chunk->is_empty()) {
1463         int sp = chunk->stack_size() - argsize;
1464         chunk->set_sp(sp);
1465         chunk->set_gc_sp(sp);
1466         chunk->set_argsize(argsize);
1467         _size += overlap;
1468         assert (chunk->max_size() == 0, "");
1469       } DEBUG_ONLY(else empty_chunk = false;)
1470     }
1471     chunk->set_has_mixed_frames(true);
1472 
1473     assert (chunk->requires_barriers() == _barriers, "");
1474     assert (!_barriers || chunk->is_empty(), "");
1475 
1476     assert (!chunk->has_bitmap(), "");
1477     assert (!chunk->is_empty() || StackChunkFrameStream<true>(chunk).is_done(), "");
1478     assert (!chunk->is_empty() || StackChunkFrameStream<true>(chunk).to_frame().is_empty(), "");
1479 
1480     if (should_flush_stack_processing())
1481       flush_stack_processing();
1482 
1483     log_develop_trace(jvmcont)("add max_size: %d -- %d", _size - ContinuationHelper::frame_metadata, chunk->max_size() + _size - ContinuationHelper::frame_metadata);
1484     chunk->set_max_size(chunk->max_size() + _size - ContinuationHelper::frame_metadata);
1485 
1486     log_develop_trace(jvmcont)("top chunk:");
1487     if (log_develop_is_enabled(Trace, jvmcont)) chunk->print_on(tty);
1488 
1489     caller = StackChunkFrameStream<true>(chunk).to_frame();
1490 
1491     DEBUG_ONLY(_last_write = caller.unextended_sp() + (empty_chunk ? argsize : overlap);)
1492     // tty->print_cr(">>> finalize_freeze chunk->sp_address(): %p empty_chunk: %d argsize: %d overlap: %d _last_write: %p _last_write - _size: %p", chunk->sp_address(), empty_chunk, argsize, overlap, _last_write, _last_write - _size); // caller.print_on<true>(tty);
1493     assert(chunk->is_in_chunk(_last_write - _size), "_last_write - _size: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(_last_write - _size), p2i(chunk->start_address()));
1494   #ifdef ASSERT
1495     log_develop_trace(jvmcont)("top_hframe before (freeze):");
1496     if (log_develop_is_enabled(Trace, jvmcont)) caller.print_on<true>(tty);
1497 
1498     assert (!empty || Frame::assert_bottom_java_frame_name(callee, ENTER_SIG), "");
1499 
1500     frame entry = sender<FKind>(callee);
1501 
1502     log_develop_trace(jvmcont)("Found entry:");
1503     if (log_develop_is_enabled(Trace, jvmcont)) entry.print_on(tty);
1504 
1505     assert (FKind::interpreted || entry.sp() == entry.unextended_sp(), "");
1506 #endif
1507 
1508     return freeze_ok_bottom;
1509   }
1510 
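  // Patches the copied (heap) frame so it links correctly to its caller in the chunk. For the
  // bottom frame the return address is set to the pc of the chunk's pre-existing top frame
  // (nullptr when the chunk is empty); interpreted frames also get their sender sp patched.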
1511   template <typename FKind>
1512   void patch(const frame& f, frame& hf, const frame& caller, bool bottom) {
1513     assert (FKind::is_instance(f), "");
1514 
1515     if (bottom) {
1516       address last_pc = caller.pc();
1517       assert ((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1518 
1519       log_develop_trace(jvmcont)("Fixing return address on bottom frame: " INTPTR_FORMAT, p2i(last_pc));
1520 
1521       FKind::patch_pc(caller, last_pc);
1522 
1523       patch_pd<FKind, true>(hf, caller);
1524     } else {
1525       assert (!caller.is_empty(), "");
1526       // in fast mode, partial copy does not copy _is_interpreted for the caller
1527       assert (Interpreter::contains(FKind::interpreted ? FKind::return_pc(hf) : Frame::real_pc(caller)) == caller.is_interpreted_frame(),
1528         "FKind: %s contains: %d is_interpreted: %d", FKind::name, Interpreter::contains(FKind::interpreted ? FKind::return_pc(hf) : Frame::real_pc(caller)), caller.is_interpreted_frame());
1529 
1530       patch_pd<FKind, false>(hf, caller);
1531     }
1532     if (FKind::interpreted) {
1533       Interpreted::patch_sender_sp<true>(hf, caller.unextended_sp());
1534     }
1535 
1536 #ifdef ASSERT
1537     // TODO DEOPT: long term solution: unroll on freeze and patch pc
1538     if (!FKind::interpreted && !FKind::stub) {
1539       assert (hf.get_cb()->is_compiled(), "");
1540       if (f.is_deoptimized_frame()) {
1541         log_develop_trace(jvmcont)("Freezing deoptimized frame");
1542         assert (f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), "");
1543         assert (f.cb()->as_compiled_method()->is_deopt_pc(Frame::real_pc(f)), "");
1544       }
1545     }
1546 #endif
1547   }
1548 
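  // Freezes one interpreted frame: computes its extent (locals through the expression stack),
  // recurses into the sender, then copies the locals and the rest of the frame into the chunk
  // and relativizes the interpreter frame metadata.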
1549   NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted) {
1550 #if (defined(X86) || defined(AARCH64)) && !defined(ZERO)
1551     { // TODO PD
1552       assert ((f.at<false>(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
1553       intptr_t* real_unextended_sp = (intptr_t*)f.at<false>(frame::interpreter_frame_last_sp_offset);
1554       if (real_unextended_sp != nullptr) f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
1555     }
1556 #else
1557     Unimplemented();
1558 #endif
1559 
1560     intptr_t* const vsp = Interpreted::frame_top(f, callee_argsize, callee_interpreted);
1561     const int argsize = Interpreted::stack_argsize(f);
1562     const int locals = f.interpreter_frame_method()->max_locals();
1563     assert (Interpreted::frame_bottom<false>(f) >= f.fp() + ContinuationHelper::frame_metadata + locals, ""); // equal on x86
1564     const int fsize = f.fp() + ContinuationHelper::frame_metadata + locals - vsp;
1565 
1566 #ifdef ASSERT
1567   {
1568     ResourceMark rm;
1569     InterpreterOopMap mask;
1570     f.interpreted_frame_oop_map(&mask);
1571     assert (vsp <= Interpreted::frame_top(f, &mask), "vsp: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT, p2i(vsp), p2i(Interpreted::frame_top(f, &mask)));
1572     // if (!(fsize + 1 >= Interpreted::size(f, &mask))) {
1573     //   tty->print_cr("bottom: %p top: %p", Interpreted::frame_bottom<false>(f), Interpreted::frame_top(f, &mask));
1574     //   tty->print_cr("vsp: %p fp: %p locals: %d", vsp, f.fp(), locals);
1575     //   f.print_on(tty);
1576     //   print_frame_layout<false>(f);
1577     // }
1578     // Seen to fail on serviceability/jvmti/vthread/SuspendResume[1/2] on AArch64
1579     // assert (fsize + 1 >= Interpreted::size(f, &mask), "fsize: %d Interpreted::size: %d", fsize, Interpreted::size(f, &mask)); // add 1 for possible alignment padding
1580     if (fsize > Interpreted::size(f, &mask) + 1) {
1581       log_develop_trace(jvmcont)("III fsize: %d Interpreted::size: %d", fsize, Interpreted::size(f, &mask));
1582       log_develop_trace(jvmcont)("    vsp: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT, p2i(vsp), p2i(Interpreted::frame_top(f, &mask)));
1583     }
1584   }
1585 #endif
1586 
1587     Method* frame_method = Frame::frame_method(f);
1588 
1589     log_develop_trace(jvmcont)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d callee_argsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1590       frame_method->name_and_sig_as_C_string(), _size, fsize, argsize, callee_interpreted, callee_argsize, p2i(vsp), p2i(vsp+fsize));
1591 
1592     freeze_result result = recurse_freeze_java_frame<Interpreted>(f, caller, fsize, argsize);
1593     if (UNLIKELY(result > freeze_ok_bottom)) return result;
1594     bool bottom = result == freeze_ok_bottom;
1595 
1596     DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, bottom);)
1597 
1598     frame hf = new_hframe<Interpreted>(f, caller);
1599 
1600     // tty->print_cr(">>> INTERPRETED bottom: %d argsize: %d callee_argsize: %d callee_interpreted: %d caller_interpreted: %d", bottom, argsize, callee_argsize, callee_interpreted, caller.is_interpreted_frame());
1601 
1602     intptr_t* hsp = Interpreted::frame_top(hf, callee_argsize, callee_interpreted);
1603     assert (Interpreted::frame_bottom<true>(hf) == hsp + fsize, "");
1604 
1605     // on AArch64 we add padding between the locals and the rest of the frame to keep the fp 16-byte-aligned
1606     copy_to_chunk<false>(Interpreted::frame_bottom<false>(f) - locals, Interpreted::frame_bottom<true>(hf) - locals, locals); // copy locals
1607     copy_to_chunk<false>(vsp, hsp, fsize - locals); // copy rest
1608     assert (!bottom || !caller.is_interpreted_frame() || (hsp + fsize) == (caller.unextended_sp() + argsize), "");
1609 
1610     relativize_interpreted_frame_metadata(f, hf);
1611 
1612     patch<Interpreted>(f, hf, caller, bottom);
1613 
1614     _cont.inc_num_interpreted_frames();
1615     DEBUG_ONLY(after_freeze_java_frame(hf, bottom);)
1616     caller = hf;
1617 
1618     // Mark frame_method's marking cycle for GC and redefinition on_stack calculation.
1619     frame_method->record_marking_cycle();
1620 
1621     return freeze_ok;
1622   }
1623 
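  // Freezes one compiled frame. The copied extent includes the stack arguments, which may
  // overlap the caller's frame already in the chunk when that caller is also compiled.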
1624   freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted) {
1625     intptr_t* const vsp = Compiled::frame_top(f, callee_argsize, callee_interpreted);
1626     const int argsize = Compiled::stack_argsize(f);
1627     const int fsize = Compiled::frame_bottom(f) + argsize - vsp;
1628 
1629     log_develop_trace(jvmcont)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d callee_argsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1630       Frame::frame_method(f) != nullptr ? Frame::frame_method(f)->name_and_sig_as_C_string() : "", _size, fsize, argsize, callee_interpreted, callee_argsize, p2i(vsp), p2i(vsp+fsize));
1631 
1632     freeze_result result = recurse_freeze_java_frame<Compiled>(f, caller, fsize, argsize);
1633     if (UNLIKELY(result > freeze_ok_bottom)) return result;
1634     bool bottom = result == freeze_ok_bottom;
1635 
1636     DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, bottom);)
1637 
1638     frame hf = new_hframe<Compiled>(f, caller);
1639 
1640     intptr_t* hsp = Compiled::frame_top(hf, callee_argsize, callee_interpreted);
1641 
1642     // tty->print_cr(">>> COMPILED bottom: %d argsize: %d callee_argsize: %d callee_interpreted: %d caller_interpreted: %d", bottom, argsize, callee_argsize, callee_interpreted, caller.is_interpreted_frame());
1643     copy_to_chunk<false>(vsp, hsp, fsize);
1644     assert (!bottom || !caller.is_compiled_frame() || (hsp + fsize) == (caller.unextended_sp() + argsize), "");
1645 
1646     if (caller.is_interpreted_frame()) {
1647       log_develop_trace(jvmcont)("add max_size align %d", ContinuationHelper::align_wiggle);
1648       _align_size += ContinuationHelper::align_wiggle; // See Thaw::align
1649     }
1650 
1651     patch<Compiled>(f, hf, caller, bottom);
1652 
1653     // log_develop_trace(jvmcont)("freeze_compiled_frame real_pc: " INTPTR_FORMAT " address: " INTPTR_FORMAT " sp: " INTPTR_FORMAT, p2i(Frame::real_pc(f)), p2i(&(((address*) f.sp())[-1])), p2i(f.sp()));
1654     assert(bottom || Interpreter::contains(Compiled::real_pc(caller)) == caller.is_interpreted_frame(), "");
1655 
1656     DEBUG_ONLY(after_freeze_java_frame(hf, bottom);)
1657     caller = hf;
1658     return freeze_ok;
1659   }
1660 
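  // Freezes a safepoint/runtime stub frame at the top of a preempted continuation. A full
  // RegisterMap is built here (rather than recursing through freeze) so the compiled sender
  // can be tested for lock ownership with callee-saved registers available.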
1661   NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller) {
1662     intptr_t* const vsp = StubF::frame_top(f, 0, 0);
1663     const int fsize = f.cb()->frame_size();
1664 
1665     log_develop_trace(jvmcont)("recurse_freeze_stub_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1666       f.cb()->name(), _size, fsize, p2i(vsp), p2i(vsp+fsize));
1667 
1668     // we're inlining recurse_freeze_java_frame and freeze here because we need to use a full RegisterMap to test lock ownership
1669     _frames++;
1670     _size += fsize;
1671 
1672     RegisterMap map(_cont.thread(), true, false, false);
1673     map.set_include_argument_oops(false);
1674     ContinuationHelper::update_register_map<StubF>(&map, f);
1675     f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
1676     frame senderf = sender<StubF>(f);
1677     assert (senderf.unextended_sp() < _bottom_address - 1, "");
1678     assert (senderf.is_compiled_frame(), "");
1679 
1680     if (UNLIKELY(senderf.oop_map() == nullptr)) return freeze_pinned_native; // native frame
1681     if (UNLIKELY(Compiled::is_owning_locks(_cont.thread(), &map, senderf))) return freeze_pinned_monitor;
1682 
1683     freeze_result result = recurse_freeze_compiled_frame(senderf, caller, 0, 0);
1684     if (UNLIKELY(result > freeze_ok_bottom)) return result;
1685     assert (result != freeze_ok_bottom, "");
1686     assert (!caller.is_interpreted_frame(), "");
1687 
1688     DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, false);)
1689     frame hf = new_hframe<StubF>(f, caller);
1690     intptr_t* hsp = StubF::frame_top(hf, 0, 0);
1691     copy_to_chunk<false>(vsp, hsp, fsize);
1692     DEBUG_ONLY(after_freeze_java_frame(hf, false);)
1693 
1694     caller = hf;
1695     return freeze_ok;
1696   }
1697 
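  // Publishes the completed freeze: writes the top frame's metadata, then (after a storestore
  // barrier) the chunk's sp and pc, and applies GC barriers to the chunk if required.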
1698   NOINLINE void finish_freeze(const frame& f, const frame& top) {
1699     stackChunkOop chunk = _cont.tail();
1700     assert (chunk->to_offset(top.sp()) <= chunk->sp(), "top.sp(): %d sp: %d", chunk->to_offset(top.sp()), chunk->sp());
1701 
1702     if (log_develop_is_enabled(Trace, jvmcont)) top.print_on<true>(tty);
1703 
1704     set_top_frame_metadata_pd(top);
1705     assert (top.pc() == Frame::real_pc(top), "");
1706 
1707     OrderAccess::storestore();
1708     chunk->set_sp(chunk->to_offset(top.sp()));
1709     chunk->set_pc(top.pc());
1710 
1711     log_develop_trace(jvmcont)("add max_size _align_size: %d -- %d", _align_size, chunk->max_size() + _align_size);
1712     chunk->set_max_size(chunk->max_size() + _align_size);
1713 
1714     if (UNLIKELY(_barriers)) {
1715       log_develop_trace(jvmcont)("do barriers on humongous chunk");
1716       InstanceStackChunkKlass::do_barriers<true>(_cont.tail());
1717     }
1718 
1719     log_develop_trace(jvmcont)("finish_freeze: has_mixed_frames: %d", chunk->has_mixed_frames());
1720 
1721     if (log_develop_is_enabled(Trace, jvmcont)) {
1722       log_develop_trace(jvmcont)("top_hframe after (freeze):");
1723       _cont.last_frame().template print_on<true>(tty);
1724     }
1725 
1726     assert(_cont.chunk_invariant(), "");
1727   }
1728 
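  // Replaces the absolute address stored at hfp + offset in the heap frame with its distance
  // (in words) from the original stack frame pointer vfp, so the slot stays meaningful when
  // the chunk is moved by the GC.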
1729   static inline void relativize(intptr_t* const vfp, intptr_t* const hfp, int offset) {
1730     assert (*(hfp + offset) == *(vfp + offset), "vaddr: " INTPTR_FORMAT " *vaddr: " INTPTR_FORMAT " haddr: " INTPTR_FORMAT " *haddr: " INTPTR_FORMAT, p2i(vfp + offset) , *(vfp + offset), p2i(hfp + offset) , *(hfp + offset));
1731     intptr_t* addr = hfp + offset;
1732     intptr_t value = *(intptr_t**)addr - vfp;
1733     // tty->print_cr(">>>> relativize offset: %d fp: %p delta: %ld derel: %p", offset, vfp, value, *(intptr_t**)addr);
1734     *addr = value;
1735   }
1736 
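  // Allocates and initializes a new stack chunk of the requested size; returns nullptr on OOM.
  // An empty existing tail is skipped so the new chunk's parent is the first non-empty chunk.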
1737   stackChunkOop allocate_chunk(int size) {
1738     log_develop_trace(jvmcont)("allocate_chunk allocating new chunk");
1739     stackChunkOop chunk = _cont.allocate_stack_chunk(size, _preempt);
1740     if (chunk == nullptr) { // OOM
1741       return nullptr;
1742     }
1743     assert (chunk->stack_size() == size, "");
1744     assert (chunk->size() >= size, "chunk->size(): %d size: %d", chunk->size(), size);
1745     assert ((intptr_t)chunk->start_address() % 8 == 0, "");
1746 
1747     stackChunkOop chunk0 = _cont.tail();
1748     if (chunk0 != (oop)nullptr && chunk0->is_empty()) {
1749       // chunk0 = chunk0->is_parent_null<typename ConfigT::OopT>() ? (oop)nullptr : chunk0->parent();
1750       chunk0 = chunk0->parent();
1751       assert (chunk0 == (oop)nullptr || !chunk0->is_empty(), "");
1752     }
1753 
1754     // TODO PERF: maybe just memset 0, and only set non-zero fields.
1755     // chunk->set_pc(nullptr);
1756     // chunk->set_argsize(0);
1757     chunk->clear_flags();
1758     chunk->set_gc_mode(false);
1759     chunk->set_max_size(0);
1760     chunk->set_mark_cycle(0);
1761     chunk->reset_counters();
1762     // chunk->set_pc(nullptr); // TODO PERF: necessary?
1763 
1764     assert (chunk->flags() == 0, "");
1765     assert (chunk->is_gc_mode() == false, "");
1766     assert (chunk->mark_cycle() == 0, "");
1767     assert (chunk->numFrames() <= 0, "");
1768     assert (chunk->numOops() <= 0, "");
1769     assert (chunk->max_size() == 0, "");
1770 
1771     // fields are uninitialized
1772     chunk->set_parent_raw<typename ConfigT::OopT>(chunk0);
1773     chunk->set_cont_raw<typename ConfigT::OopT>(_cont.mirror());
1774 
1775     // Promote young chunks quickly
1776     chunk->set_mark(chunk->mark().set_age(15));
1777 
1778     return chunk;
1779   }
1780 
1781   int remaining_in_chunk(stackChunkOop chunk) {
1782     return chunk->stack_size() - chunk->sp();
1783   }
1784 };
1785 
1786 int early_return(int res, JavaThread* thread) {
1787   thread->set_cont_yield(false);
1788   log_develop_trace(jvmcont)("=== end of freeze (fail %d)", res);
1789   return res;
1790 }
1791 
1792 #if INCLUDE_JVMTI
1793 static void invalidate_JVMTI_stack(JavaThread* thread) {
1794   if (thread->is_interp_only_mode()) {
1795     JvmtiThreadState *state = thread->jvmti_thread_state();
1796     if (state != nullptr)
1797       state->invalidate_cur_stack_depth();
1798   }
1799 }
1800 #endif // INCLUDE_JVMTI
1801 
1802 static void JVMTI_yield_cleanup(JavaThread* thread, ContMirror& cont) {
1803 #if INCLUDE_JVMTI
1804   if (JvmtiExport::can_post_frame_pop()) {
1805     ContinuationHelper::set_anchor_to_entry(thread, cont.entry()); // ensure frozen frames are invisible
1806 
1807     // cont.read_rest();
1808     int num_frames = num_java_frames(cont);
1809 
1810     // The call to JVMTI can safepoint, so we need to restore oops.
1811     Handle conth(Thread::current(), cont.mirror());
1812     JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1813     cont.post_safepoint(conth);
1814   }
1815   invalidate_JVMTI_stack(thread);
1816 #endif
1817 }
1818 
1819 static freeze_result is_pinned(const frame& f, RegisterMap* map) {
1820   if (f.is_interpreted_frame()) {
1821     if (Interpreted::is_owning_locks(f))           return freeze_pinned_monitor;
1822     if (f.interpreter_frame_method()->is_native()) return freeze_pinned_native; // interpreter native entry
1823   } else if (f.is_compiled_frame()) {
1824     if (Compiled::is_owning_locks(map->thread(), map, f)) return freeze_pinned_monitor;
1825   } else {
1826     return freeze_pinned_native;
1827   }
1828   return freeze_ok;
1829 }
1830 
1831 #ifdef ASSERT
1832 static bool monitors_on_stack(JavaThread* thread) {
1833   ContinuationEntry* cont = thread->last_continuation();
1834   RegisterMap map(thread, true, false, false);
1835   map.set_include_argument_oops(false);
1836   for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(cont, f); f = f.sender(&map)) {
1837     if (is_pinned(f, &map) == freeze_pinned_monitor) return true;
1838   }
1839   return false;
1840 }
1841 
1842 static bool interpreted_native_or_deoptimized_on_stack(JavaThread* thread) {
1843   ContinuationEntry* cont = thread->last_continuation();
1844   RegisterMap map(thread, false, false, false);
1845   map.set_include_argument_oops(false);
1846   for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(cont, f); f = f.sender(&map)) {
1847     if (f.is_interpreted_frame() || f.is_native_frame() || f.is_deoptimized_frame()) {
1848       // tty->print_cr("interpreted_native_or_deoptimized_on_stack"); f.print_on(tty);
1849       return true;
1850     }
1851   }
1852   return false;
1853 }
1854 #endif
1855 static inline bool can_freeze_fast(JavaThread* thread) {
1856   // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c adapter or called Deoptimization::unpack_frames
1857   // Calls from native frames also go through the interpreter (see JavaCalls::call_helper)
1858 
1859   #ifdef ASSERT
1860     if (!(!thread->cont_fastpath() || (thread->cont_fastpath_thread_state() && !interpreted_native_or_deoptimized_on_stack(thread)))) { pns2(); pfl(); }
1861   #endif
1862   assert (!thread->cont_fastpath() || (thread->cont_fastpath_thread_state() && !interpreted_native_or_deoptimized_on_stack(thread)), "thread->raw_cont_fastpath(): " INTPTR_FORMAT " thread->cont_fastpath_thread_state(): %d", p2i(thread->raw_cont_fastpath()), thread->cont_fastpath_thread_state());
1863 
1864   // We also clear thread->cont_fastpath on deoptimization (notify_deopt) and when we thaw interpreted frames
1865   bool fast = UseContinuationFastPath && thread->cont_fastpath();
1866   assert (!fast || monitors_on_stack(thread) == (thread->held_monitor_count() > 0), "monitors_on_stack: %d held_monitor_count: %d", monitors_on_stack(thread), thread->held_monitor_count());
1867   fast = fast && thread->held_monitor_count() == 0;
1868   // if (!fast) tty->print_cr(">>> freeze fast: %d thread.cont_fastpath: %d held_monitor_count: %d", fast, thread->cont_fastpath(), thread->held_monitor_count());
1869   return fast;
1870 }
1871 
1872 
1873 static inline int freeze_epilog(JavaThread* thread, ContMirror& cont, bool preempt) {
1874   assert (verify_continuation<2>(cont.mirror()), "");
1875 
1876   assert (!cont.is_empty(), "");
1877 
1878   ContinuationHelper::set_anchor_to_entry(thread, cont.entry()); // ensure frozen frames are invisible to stack walks
1879   if (!preempt) {
1880     StackWatermarkSet::after_unwind(thread);
1881   }
1882 
1883   thread->set_cont_yield(false);
1884 
1885   log_develop_debug(jvmcont)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1886 
1887   return 0;
1888 }
1889 
1890 static int freeze_epilog(JavaThread* thread, ContMirror& cont, freeze_result res, bool preempt) {
1891   if (UNLIKELY(res != freeze_ok)) {
1892     assert (verify_continuation<11>(cont.mirror()), "");
1893     return early_return(res, thread);
1894   }
1895 #if CONT_JFR
1896   cont.post_jfr_event(&event, thread);
1897 #endif
1898   JVMTI_yield_cleanup(thread, cont); // can safepoint
1899   return freeze_epilog(thread, cont, preempt);
1900 }
1901 
1902 // Returns 0 on success, or a freeze_result code on failure (e.g. due to pinning).
1903 // It may freeze multiple continuations, depending on context.
1904 // it must set Continuation.stackSize
1905 // sets Continuation.fp/sp to relative indices
1906 template<typename ConfigT>
1907 int freeze0(JavaThread* current, intptr_t* const sp, bool preempt) {
1908   //callgrind();
1909   assert (!current->cont_yield(), "");
1910   assert (!current->has_pending_exception(), ""); // if (current->has_pending_exception()) return early_return(freeze_exception, current, fi);
1911   assert (current->deferred_updates() == nullptr || current->deferred_updates()->count() == 0, "");
1912   assert (!preempt || current->thread_state() == _thread_in_vm || current->thread_state() == _thread_blocked
1913           /*|| current->thread_state() == _thread_in_native*/,
1914           "thread_state: %d %s", current->thread_state(), current->thread_state_name());
1915 
1916 #ifdef ASSERT
1917   log_develop_trace(jvmcont)("~~~~~~~~~ freeze sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT,
1918                              p2i(current->last_continuation()->entry_sp()),
1919                              p2i(current->last_continuation()->entry_fp()),
1920                              p2i(current->last_continuation()->entry_pc()));
1921 
1922   /* ContinuationHelper::set_anchor(current, fi); */ print_frames(current);
1923 #endif
1924 
1925 #if CONT_JFR
1926   EventContinuationFreeze event;
1927 #endif
1928 
1929   current->set_cont_yield(true);
1930 
1931   oop oopCont = ContinuationHelper::get_continuation(current);
1932   assert (oopCont == current->last_continuation()->cont_oop(), "");
1933   assert (ContinuationEntry::assert_entry_frame_laid_out(current), "");
1934 
1935   assert (verify_continuation<1>(oopCont), "");
1936   ContMirror cont(current, oopCont);
1937   log_develop_debug(jvmcont)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1938 
1939   if (jdk_internal_vm_Continuation::critical_section(oopCont) > 0) {
1940     log_develop_debug(jvmcont)("PINNED due to critical section");
1941     assert (verify_continuation<10>(cont.mirror()), "");
1942     return early_return(freeze_pinned_cs, current);
1943   }
1944 
1945   bool fast = can_freeze_fast(current);
1946   assert (!fast || current->held_monitor_count() == 0, "");
1947 
1948   Freeze<ConfigT> fr(current, cont, preempt);
1949 
1950   if (UNLIKELY(preempt)) {
1951     freeze_result res = fr.freeze_slow();
1952     cont.set_preempted(true);
1953     return freeze_epilog(current, cont, res, preempt);
1954   }
1955 
1956   if (fast && fr.is_chunk_available(sp)) {
1957     log_develop_trace(jvmcont)("chunk available; no transition");
1958     freeze_result res = fr.try_freeze_fast(sp, true);
1959     assert (res == freeze_ok, "");
1960   #if CONT_JFR
1961     cont.post_jfr_event(&event, current);
1962   #endif
1963 
1964     // if (UNLIKELY(preempt)) cont.set_preempted(true);
1965     return freeze_epilog(current, cont, preempt);
1966   }
1967 
1968   // if (current->held_monitor_count() > 0) {
1969   //    // tty->print_cr(">>> FAIL FAST");
1970   //    return freeze_pinned_monitor;
1971   // }
1972 
1973   log_develop_trace(jvmcont)("chunk unavailable; transitioning to VM");
1974   assert(current == JavaThread::current(), "must be current thread except for preempt");
1975   JRT_BLOCK
1976     freeze_result res = fast ? fr.try_freeze_fast(sp, false) : fr.freeze_slow();
1977     return freeze_epilog(current, cont, res, preempt);
1978   JRT_BLOCK_END
1979 }
1980 
1981 // Entry point to freeze. Transitions are handled manually
1982 JRT_BLOCK_ENTRY(int, Continuation::freeze(JavaThread* current, intptr_t* sp))
1983   // current->frame_anchor()->set_last_Java_sp(sp);
1984   // current->frame_anchor()->make_walkable(current);
1985 
1986   assert (sp == current->frame_anchor()->last_Java_sp(), "");
1987 
1988   if (current->raw_cont_fastpath() > current->last_continuation()->entry_sp() || current->raw_cont_fastpath() < sp) {
1989     current->set_cont_fastpath(nullptr);
1990   }
1991 
1992   return cont_freeze(current, sp, false);
1993 JRT_END
1994 
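// Walks the frames of the thread's continuation(s) up to the one with the given scope and
// returns the first pinning reason found (critical section, held monitor, or native frame),
// or freeze_ok if nothing pins the yield.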
1995 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) {
1996   ContinuationEntry* cont = thread->last_continuation();
1997   if (cont == nullptr) {
1998     return freeze_ok;
1999   }
2000   if (jdk_internal_vm_Continuation::critical_section(cont->continuation()) > 0)
2001     return freeze_pinned_cs;
2002 
2003   RegisterMap map(thread, true, false, false);
2004   map.set_include_argument_oops(false);
2005   frame f = thread->last_frame();
2006 
2007   if (!safepoint) {
2008     f = f.sender(&map); // this is the yield frame
2009   } else { // safepoint yield
2010 #if (defined(X86) || defined(AARCH64)) && !defined(ZERO)
2011     f.set_fp(f.real_fp()); // Instead of this, maybe in ContMirror::set_last_frame always use the real_fp?
2012 #else
2013     Unimplemented();
2014 #endif
2015     if (!Interpreter::contains(f.pc())) {
2016       assert (Frame::is_stub(f.cb()), "must be");
2017       assert (f.oop_map() != nullptr, "must be");
2018       f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
2019     }
2020   }
2021 
2022   while (true) {
2023     freeze_result res = is_pinned(f, &map);
2024     if (res != freeze_ok)
2025       return res;
2026 
2027     f = f.sender(&map);
2028     if (!Continuation::is_frame_in_continuation(cont, f)) {
2029       oop scope = jdk_internal_vm_Continuation::scope(cont->continuation());
2030       if (scope == cont_scope)
2031         break;
2032       cont = cont->parent();
2033       if (cont == nullptr)
2034         break;
2035       if (jdk_internal_vm_Continuation::critical_section(cont->continuation()) > 0)
2036         return freeze_pinned_cs;
2037     }
2038   }
2039   return freeze_ok;
2040 }
2041 
2042 static void print_stack_trace(JavaThread* thread) {
2043   if (log_is_enabled(Trace, jvmcont, preempt)) {
2044     LogTarget(Trace, jvmcont, preempt) lt;
2045     assert(lt.is_enabled(), "already tested");
2046     ResourceMark rm;
2047     LogStream ls(lt);
2048     char buf[256];
2049     ls.print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
2050     for (StackFrameStream sfs(thread, true /* update */, true /* process_frames */); !sfs.is_done(); sfs.next()) {
2051       sfs.current()->print_on_error(&ls, buf, 256, false);
2052       ls.cr();
2053     }
2054   }
2055 }
2056 
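// A thread may only be preempted where its top Java frame can be walked and frozen: in the
// interpreter this means a safepoint codelet or a return bytecode, in compiled code a
// safepoint stub.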
2057 static bool is_safe_to_preempt(JavaThread* thread) {
2058 
2059   if (!thread->has_last_Java_frame()) {
2060     log_trace(jvmcont, preempt)("is_safe_to_preempt: no last Java frame");
2061     return false;
2062   }
2063 
2064   if (log_is_enabled(Trace, jvmcont, preempt)) {
2065     LogTarget(Trace, jvmcont, preempt) lt;
2066     assert(lt.is_enabled(), "already tested");
2067     ResourceMark rm;
2068     LogStream ls(lt);
2069     frame f = thread->last_frame();
2070     ls.print("is_safe_to_preempt %sSAFEPOINT ", Interpreter::contains(f.pc()) ? "INTERPRETER " : "");
2071     f.print_on(&ls);
2072   }
2073 
2074   address pc = thread->last_Java_pc();
2075   if (Interpreter::contains(pc)) {
2076     // Generally, we don't want to preempt when returning from some useful VM function, and certainly not when inside one.
2077     InterpreterCodelet* codelet = Interpreter::codelet_containing(pc);
2078     if (codelet != nullptr) {
2079       // We allow preemption only when at a safepoint codelet or a return bytecode
2080       if (codelet->bytecode() >= 0 && Bytecodes::is_return(codelet->bytecode())) {
2081         log_trace(jvmcont, preempt)("is_safe_to_preempt: safe bytecode: %s",
2082                                     Bytecodes::name(codelet->bytecode()));
2083         assert (codelet->kind() == InterpreterCodelet::codelet_bytecode, "");
2084         return true;
2085       } else if (codelet->kind() == InterpreterCodelet::codelet_safepoint_entry) {
2086         log_trace(jvmcont, preempt)("is_safe_to_preempt: safepoint entry: %s", codelet->description());
2087         return true;
2088       } else {
2089         log_trace(jvmcont, preempt)("is_safe_to_preempt: %s (unsafe)", codelet->description());
2090         print_stack_trace(thread);
2091         return false;
2092       }
2093     } else {
2094       log_trace(jvmcont, preempt)("is_safe_to_preempt: no codelet (safe?)");
2095       return true;
2096     }
2097   } else {
2098     CodeBlob* cb = CodeCache::find_blob(pc);
2099     if (cb->is_safepoint_stub()) {
2100       log_trace(jvmcont, preempt)("is_safe_to_preempt: safepoint stub");
2101       return true;
2102     } else {
2103       log_trace(jvmcont, preempt)("is_safe_to_preempt: not safepoint stub");
2104       return false;
2105     }
2106   }
2107 }
2108 
2109 // Called while the thread is blocked by the JavaThread caller, might not be completely in blocked state.
2110 // May still be in thread_in_vm getting to the blocked state.  I don't think we care that much since
2111 // the only frames we're looking at are Java frames.
2112 int Continuation::try_force_yield(JavaThread* target, const oop cont) {
2113   log_trace(jvmcont, preempt)("try_force_yield: thread state: %s", target->thread_state_name());
2114 
2115   ContinuationEntry* ce = target->last_continuation();
2116   oop innermost = ce->continuation();
2117   while (ce != nullptr && ce->continuation() != cont) {
2118     ce = ce->parent();
2119   }
2120   if (ce == nullptr) {
2121     return -1; // no continuation
2122   }
2123   if (target->_cont_yield) {
2124     return -2; // during yield
2125   }
2126   if (!is_safe_to_preempt(target)) {
2127     return freeze_pinned_native;
2128   }
2129 
2130   assert (target->has_last_Java_frame(), "");
2131   // if (Interpreter::contains(thread->last_Java_pc())) { thread->push_cont_fastpath(thread->last_Java_sp()); }
2132   assert (!Interpreter::contains(target->last_Java_pc()) || !target->cont_fastpath(),
2133           "fast_path at codelet %s",
2134           Interpreter::codelet_containing(target->last_Java_pc())->description());
2135 
2136   const oop scope = jdk_internal_vm_Continuation::scope(cont);
2137   if (innermost != cont) { // we have nested continuations
2138     // make sure none of the continuations in the hierarchy are pinned
2139     freeze_result res_pinned = is_pinned0(target, scope, true);
2140     if (res_pinned != freeze_ok) {
2141       log_trace(jvmcont, preempt)("try_force_yield: res_pinned");
2142       return res_pinned;
2143     }
2144     jdk_internal_vm_Continuation::set_yieldInfo(cont, scope);
2145   }
2146 
2147   assert (target->has_last_Java_frame(), "need to test again?");
2148   int res = cont_freeze(target, target->last_Java_sp(), true);
2149   log_trace(jvmcont, preempt)("try_force_yield: %s", freeze_result_names[res]);
2150   if (res == 0) { // success
2151     target->set_cont_preempt(true);
2152 
2153     // The target thread calls
2154     // Continuation::jump_from_safepoint from JavaThread::handle_special_runtime_exit_condition
2155     // to yield on return from suspension/blocking handshake.
2156   }
2157   return res;
2158 }
2159 
2160 typedef void (*cont_jump_from_sp_t)();
2161 
2162 void Continuation::jump_from_safepoint(JavaThread* thread) {
2163   assert (thread == JavaThread::current(), "");
2164   assert (thread->is_cont_force_yield(), "");
2165   log_develop_trace(jvmcont)("force_yield_if_preempted: is_cont_force_yield");
2166   thread->set_cont_preempt(false);
2167   if (thread->thread_state() == _thread_in_vm) {
2168     thread->set_thread_state(_thread_in_Java);
2169   }
2170   StackWatermarkSet::after_unwind(thread);
2171   MACOS_AARCH64_ONLY(thread->enable_wx(WXExec));
2172   CAST_TO_FN_PTR(cont_jump_from_sp_t, StubRoutines::cont_jump_from_sp())(); // does not return
2173   ShouldNotReachHere();
2174 }
2175 
2176 /////////////// THAW ////
2177 
2178 enum thaw_kind {
2179   thaw_top = 0,
2180   thaw_return_barrier = 1,
2181   thaw_exception = 2,
2182 };
2183 
2184 typedef intptr_t* (*ThawContFnT)(JavaThread*, thaw_kind);
2185 
2186 static ThawContFnT cont_thaw = nullptr;
2187 
2188 static bool stack_overflow_check(JavaThread* thread, int size, address sp) {
2189   const int page_size = os::vm_page_size();
2190   if (size > page_size) {
2191     if (sp - size < thread->stack_overflow_state()->stack_overflow_limit()) {
2192       return false;
2193     }
2194   }
2195   return true;
2196 }
2197 
2198 // make room on the stack for thaw
2199 // returns the size in bytes, or 0 on failure
2200 JRT_LEAF(int, Continuation::prepare_thaw(JavaThread* thread, bool return_barrier))
2201   log_develop_trace(jvmcont)("~~~~~~~~~ prepare_thaw return_barrier: %d", return_barrier);
2202 
2203   assert (thread == JavaThread::current(), "");
2204 
2205   oop cont = thread->last_continuation()->cont_oop(); // ContinuationHelper::get_continuation(thread);
2206   assert (cont == ContinuationHelper::get_continuation(thread), "cont: %p entry cont: %p", (oopDesc*)cont, (oopDesc*)ContinuationHelper::get_continuation(thread));
2207   assert (verify_continuation<1>(cont), "");
2208 
2209   stackChunkOop chunk = jdk_internal_vm_Continuation::tail(cont);
2210   assert (chunk != nullptr, "");
2211   if (UNLIKELY(chunk->is_empty())) {
2212     chunk = chunk->parent();
2213     jdk_internal_vm_Continuation::set_tail(cont, chunk);
2214   }
2215   assert (chunk != nullptr, "");
2216   assert (!chunk->is_empty(), "");
2217   assert (verify_stack_chunk<1>(chunk), "");
2218 
2219   int size = chunk->max_size();
2220   guarantee (size > 0, "");
2221 
2222   size += 2*ContinuationHelper::frame_metadata; // twice, because we might want to add a frame for StubRoutines::cont_interpreter_forced_preempt_return()
2223   size += ContinuationHelper::align_wiggle; // just in case we have an interpreted entry after which we need to align
2224   size <<= LogBytesPerWord;
2225 
2226   const address bottom = (address)thread->last_continuation()->entry_sp(); // os::current_stack_pointer(); points to the entry frame
2227   if (!stack_overflow_check(thread, size + 300, bottom)) {
2228     return 0;
2229   }
2230 
2231   log_develop_trace(jvmcont)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d", p2i(bottom), p2i(bottom - size), size);
2232 
2233   return size;
2234 JRT_END
2235 
2236 template <typename ConfigT>
2237 class Thaw {
2238 private:
2239   JavaThread* _thread;
2240   ContMirror& _cont;
2241 
2242   intptr_t* _fastpath;
2243   bool _barriers;
2244   intptr_t* _top_unextended_sp;
2245 
2246   void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2247 
2248   StackChunkFrameStream<true> _stream;
2249 
2250   int _align_size;
2251 
2252   NOT_PRODUCT(int _frames;)
2253 
2254   inline frame new_entry_frame();
2255   template<typename FKind> frame new_frame(const frame& hf, frame& caller, bool bottom);
2256   template<typename FKind, bool bottom> inline void patch_pd(frame& f, const frame& sender);
2257   inline intptr_t* align(const frame& hf, intptr_t* vsp, frame& caller, bool bottom);
2258   void patch_chunk_pd(intptr_t* sp);
2259   inline intptr_t* align_chunk(intptr_t* vsp);
2260   inline void prefetch_chunk_pd(void* start, int size_words);
2261   intptr_t* push_interpreter_return_frame(intptr_t* sp);
2262   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2263   static inline void set_interpreter_frame_bottom(const frame& f, intptr_t* bottom);
2264 
2265   bool should_deoptimize() { return true; /* _thread->is_interp_only_mode(); */ } // TODO PERF
2266 
2267 public:
2268   DEBUG_ONLY(int _mode;)
2269   DEBUG_ONLY(bool barriers() { return _barriers; })
2270 
2271   Thaw(JavaThread* thread, ContMirror& cont) :
2272     _thread(thread), _cont(cont),
2273     _fastpath(nullptr) {
2274       DEBUG_ONLY(_top_unextended_sp = nullptr;)
2275       DEBUG_ONLY(_mode = 0;)
2276   }
2277 
2278   inline bool can_thaw_fast(stackChunkOop chunk) {
2279     return    !_barriers
2280            &&  _thread->cont_fastpath_thread_state()
2281            && !chunk->has_mixed_frames();
2282   }
2283 
2284   intptr_t* thaw(thaw_kind kind) {
2285     assert (!Interpreter::contains(_cont.entryPC()), "");
2286     // if (Interpreter::contains(_cont.entryPC())) _fastpath = false; // set _fastpath to false if entry is interpreted
2287 
2288     assert (verify_continuation<1>(_cont.mirror()), "");
2289     assert (!jdk_internal_vm_Continuation::done(_cont.mirror()), "");
2290     assert (!_cont.is_empty(), "");
2291 
2292     DEBUG_ONLY(_frames = 0;)
2293 
2294     stackChunkOop chunk = _cont.tail();
2295     assert (chunk != nullptr && !chunk->is_empty(), ""); // guaranteed by prepare_thaw
2296 
2297     _barriers = (chunk->should_fix<typename ConfigT::OopT, ConfigT::_concurrent_gc>() || ConfigT::requires_barriers(chunk));
2298     if (LIKELY(can_thaw_fast(chunk))) {
2299       // if (kind != thaw_return_barrier) tty->print_cr("THAW FAST");
2300       return thaw_fast(chunk);
2301     } else {
2302       // if (kind != thaw_return_barrier) tty->print_cr("THAW SLOW");
2303       return thaw_slow(chunk, kind != thaw_top);
2304     }
2305   }
2306 
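  // The fast path: frames are copied straight out of the chunk with no per-frame processing.
  // Either the whole chunk is thawed at once or, for large chunks, only the top frame plus its
  // stack arguments (leaving the rest to be thawed via the return barrier).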
2307   NOINLINE intptr_t* thaw_fast(stackChunkOop chunk) {
2308     assert (chunk != (oop) nullptr, "");
2309     assert (chunk == _cont.tail(), "");
2310     assert (!chunk->is_empty(), "");
2311     assert (!chunk->has_mixed_frames(), "");
2312     assert (!chunk->requires_barriers(), "");
2313     assert (!_thread->is_interp_only_mode(), "");
2314 
2315     log_develop_trace(jvmcont)("thaw_fast");
2316     if (log_develop_is_enabled(Debug, jvmcont)) chunk->print_on(true, tty);
2317 
2318     static const int threshold = 500; // words
2319 
2320     int sp = chunk->sp();
2321     int size = chunk->stack_size() - sp;
2322     int argsize;
2323 
2324     // this initial size could be reduced if it's a partial thaw
2325 
2326     // assert (verify_continuation<99>(_cont.mirror()), "");
2327 
2328     intptr_t* const hsp = chunk->start_address() + sp;
2329 
2330     // Instead of invoking barriers on oops in thawed frames, we use the gcSP field; see continuationChunk's get_chunk_sp
2331     chunk->set_mark_cycle(CodeCache::marking_cycle());
2332 
2333     bool partial, empty;
2334     if (LIKELY(!TEST_THAW_ONE_CHUNK_FRAME && (size < threshold))) {
2335       // prefetch with anticipation of memcpy starting at highest address
2336       prefetch_chunk_pd(chunk->start_address(), size);
2337 
2338       partial = false;
2339       DEBUG_ONLY(_mode = 1;)
2340 
2341       argsize = chunk->argsize();
2342       empty = true;
2343 
2344       chunk->set_sp(chunk->stack_size());
2345       chunk->set_argsize(0);
2346       // chunk->clear_flags();
2347       chunk->reset_counters();
2348       chunk->set_max_size(0);
2349       log_develop_trace(jvmcont)("set max_size: 0");
2350       // chunk->set_pc(nullptr);
2351 
2352     } else { // thaw a single frame
2353       partial = true;
2354       DEBUG_ONLY(_mode = 2;)
2355 
2356       StackChunkFrameStream<false> f(chunk);
2357       assert (hsp == f.sp() && hsp == f.unextended_sp(), "");
2358       size = f.cb()->frame_size();
2359       argsize = f.stack_argsize();
2360       f.next(SmallRegisterMap::instance);
2361       empty = f.is_done(); // (chunk->sp() + size) >= (chunk->stack_size() - chunk->argsize());
2362       assert (!empty || argsize == chunk->argsize(), "");
2363 
2364       if (empty) {
2365         chunk->set_sp(chunk->stack_size());
2366         chunk->set_argsize(0);
2367         chunk->reset_counters();
2368         chunk->set_max_size(0);
2369         log_develop_trace(jvmcont)("set max_size: 0");
2370         // chunk->set_pc(nullptr);
2371       } else {
2372         chunk->set_sp(chunk->sp() + size);
2373         address top_pc = *(address*)(hsp + size - frame::sender_sp_ret_address_offset());
2374         chunk->set_pc(top_pc);
2375         chunk->set_max_size(chunk->max_size() - size);
2376         log_develop_trace(jvmcont)("sub max_size: %d -- %d", size, chunk->max_size());
2377       }
2378       assert (empty == chunk->is_empty(), "");
2379       size += argsize;
2380     }
2381 
2382     const bool is_last = empty && chunk->is_parent_null<typename ConfigT::OopT>();
2383 
2384     log_develop_trace(jvmcont)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d", partial, is_last, empty, size, argsize);
2385 
2386     intptr_t* vsp = _cont.entrySP();
2387     intptr_t* bottom_sp = align_chunk(vsp - argsize);
2388 
2389     vsp -= size;
2390     assert (argsize != 0 || vsp == align_chunk(vsp), "");
2391     vsp = align_chunk(vsp);
2392 
2393     intptr_t* from = hsp - ContinuationHelper::frame_metadata;
2394     intptr_t* to   = vsp - ContinuationHelper::frame_metadata;
2395     copy_from_chunk(from, to, size + ContinuationHelper::frame_metadata); // TODO: maybe use a memcpy that cares about ordering because we're racing with the GC
2396     assert (_cont.entrySP() - 1 <= to + size + ContinuationHelper::frame_metadata && to + size + ContinuationHelper::frame_metadata <= _cont.entrySP(), "");
2397     assert (argsize != 0 || to + size + ContinuationHelper::frame_metadata == _cont.entrySP(), "");
2398 
2399     assert (!is_last || argsize == 0, "");
2400     _cont.set_argsize(argsize);
2401     log_develop_trace(jvmcont)("setting entry argsize: %d", _cont.argsize());
2402     patch_chunk(bottom_sp, is_last);
2403 
2404     DEBUG_ONLY(address pc = *(address*)(bottom_sp - frame::sender_sp_ret_address_offset());)
2405     assert (is_last ? CodeCache::find_blob(pc)->as_compiled_method()->method()->is_continuation_enter_intrinsic() : pc == StubRoutines::cont_returnBarrier(), "is_last: %d", is_last);
2406 
2407     assert (is_last == _cont.is_empty(), "is_last: %d _cont.is_empty(): %d", is_last, _cont.is_empty());
2408     assert(_cont.chunk_invariant(), "");
2409 
2410   #if CONT_JFR
2411     EventContinuationThawYoung e;
2412     if (e.should_commit()) {
2413       e.set_id(cast_from_oop<u8>(chunk));
2414       e.set_size(size << LogBytesPerWord);
2415       e.set_full(!partial);
2416       e.commit();
2417     }
2418   #endif
2419 
2420 #ifdef ASSERT
2421   intptr_t* sp0 = vsp;
2422   ContinuationHelper::set_anchor(_thread, sp0);
2423   print_frames(_thread, tty); // must be done after write(), as frame walking reads fields off the Java objects.
2424   if (LoomDeoptAfterThaw) {
2425     do_deopt_after_thaw(_thread);
2426   }
2427   // if (LoomVerifyAfterThaw) {
2428   //   assert(do_verify_after_thaw(_thread), "partial: %d empty: %d is_last: %d fix: %d", partial, empty, is_last, fix);
2429   // }
2430   ContinuationHelper::clear_anchor(_thread);
2431 #endif
2432 
2433     // assert (verify_continuation<100>(_cont.mirror()), "");
2434     return vsp;
2435   }
2436 
2437   template <bool aligned = true>
2438   void copy_from_chunk(intptr_t* from, intptr_t* to, int size) {
2439     assert (to + size <= _cont.entrySP(), "");
2440     _cont.tail()->template copy_from_chunk_to_stack<aligned>(from, to, size);
2441   }
2442 
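  // Patches the return-address slot of the bottom thawed frame to either the return barrier stub
  // (more frames remain frozen) or the continuation entry pc (this was the last frame).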
2443   void patch_chunk(intptr_t* sp, bool is_last) {
2444     log_develop_trace(jvmcont)("thaw_fast patching -- sp: " INTPTR_FORMAT, p2i(sp));
2445 
2446     address pc = !is_last ? StubRoutines::cont_returnBarrier() : _cont.entryPC();
2447     *(address*)(sp - frame::sender_sp_ret_address_offset()) = pc;
2448     log_develop_trace(jvmcont)("thaw_fast is_last: %d sp: " INTPTR_FORMAT " patching pc at " INTPTR_FORMAT " to " INTPTR_FORMAT, is_last, p2i(sp), p2i(sp - frame::sender_sp_ret_address_offset()), p2i(pc));
2449 
2450     // patch_chunk_pd(sp);
2451   }
2452 
2453   intptr_t* thaw_slow(stackChunkOop chunk, bool return_barrier) {
2454     assert (!_cont.is_empty(), "");
2455     assert (chunk != nullptr, "");
2456     assert (!chunk->is_empty(), "");
2457 
2458     log_develop_trace(jvmcont)("thaw slow return_barrier: %d chunk: " INTPTR_FORMAT, return_barrier, p2i((stackChunkOopDesc*)chunk));
2459     if (log_develop_is_enabled(Trace, jvmcont)) chunk->print_on(true, tty);
2460 
2461     EventContinuationThawOld e;
2462     if (e.should_commit()) {
2463       e.set_id(cast_from_oop<u8>(_cont.mirror()));
2464       e.commit();
2465     }
2466 
2467     DEBUG_ONLY(_mode = 3;)
2468     // _cont.read_rest();
2469     _align_size = 0;
2470     int num_frames = (return_barrier ? 1 : 2);
2471     // _frames = 0;
2472 
2473     log_develop_trace(jvmcont)("thaw slow");
2474 
2475     bool last_interpreted = false;
2476     if (chunk->has_mixed_frames()) {
2477       last_interpreted = Interpreter::contains(chunk->pc());
2478       log_develop_trace(jvmcont)("thaw: preempt; last_interpreted: %d", last_interpreted);
2479     }
2480 
2481     _stream = StackChunkFrameStream<true>(chunk);
2482     _top_unextended_sp = _stream.unextended_sp();
2483 
2484     frame hf = _stream.to_frame();
2485     log_develop_trace(jvmcont)("top_hframe before (thaw):"); if (log_develop_is_enabled(Trace, jvmcont)) hf.print_on<true>(tty);
2486 
2487     frame f;
2488     thaw(hf, f, num_frames, true);
2489 
2490     finish_thaw(f); // f is now the topmost thawed frame
2491 
2492     _cont.write();
2493     assert(_cont.chunk_invariant(), "");
2494 
2495     if (!return_barrier) JVMTI_continue_cleanup(_thread);
2496 
2497     assert(_cont.chunk_invariant(), "");
2498     _thread->set_cont_fastpath(_fastpath);
2499 
2500     intptr_t* sp = f.sp();
2501 
2502   #ifdef ASSERT
2503     {
2504       log_develop_debug(jvmcont)("Jumping to frame (thaw): [" JLONG_FORMAT "]", java_tid(_thread));
2505       frame f(sp);
2506       if (log_develop_is_enabled(Debug, jvmcont)) f.print_on(tty);
2507       assert (f.is_interpreted_frame() || f.is_compiled_frame() || f.is_safepoint_blob_frame(), "");
2508     }
2509   #endif
2510 
2511     if (last_interpreted && _cont.is_preempted()) {
2512       assert (f.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2513       assert (Interpreter::contains(f.pc()), "");
2514       // InterpreterCodelet* codelet = Interpreter::codelet_containing(f.pc());
2515       // if (codelet != nullptr) {
2516       //   sp = push_interpreter_return_frame(sp);
2517       // }
2518       sp = push_interpreter_return_frame(sp);
2519     }
2520 
2521     return sp;
2522   }
2523 
2524   void thaw(const frame& hf, frame& caller, int num_frames, bool top) {
2525     log_develop_debug(jvmcont)("thaw num_frames: %d", num_frames);
2526     assert(!_cont.is_empty(), "no more frames");
2527     assert (num_frames > 0 && !hf.is_empty(), "");
2528 
2529     // Dynamically branch on frame type
2530     if (top && hf.is_safepoint_blob_frame()) {
2531       assert (Frame::is_stub(hf.cb()), "cb: %s", hf.cb()->name());
2532       recurse_thaw_stub_frame(hf, caller, num_frames);
2533     } else if (!hf.is_interpreted_frame()) {
2534       recurse_thaw_compiled_frame(hf, caller, num_frames);
2535     } else {
2536       recurse_thaw_interpreted_frame(hf, caller, num_frames);
2537     }
2538   }
2539 
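  // Advances the chunk stream past the current frame, then either ends the recursion
  // (finalize_thaw) when this is the last frame to thaw in this pass, or recurses into the
  // caller. Returns true iff the current frame is the bottom of the thawed sequence.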
2540   template<typename FKind>
2541   bool recurse_thaw_java_frame(frame& caller, int num_frames) {
2542     assert (num_frames > 0, "");
2543 
2544     DEBUG_ONLY(_frames++;)
2545 
2546     if (UNLIKELY(_barriers)) {
2547       InstanceStackChunkKlass::do_barriers<true>(_cont.tail(), _stream, SmallRegisterMap::instance);
2548     }
2549 
2550     int argsize = _stream.stack_argsize();
2551 
2552     _stream.next(SmallRegisterMap::instance);
2553     assert (_stream.to_frame().is_empty() == _stream.is_done(), "");
2554 
2555     // We never leave a compiled frame that is the caller of an interpreted frame as the top frame in the chunk, because detecting that situation and adjusting its unextended_sp later would be tricky
2556     if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2557       log_develop_trace(jvmcont)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2558       num_frames++;
2559     }
2560 
2561     if (num_frames == 1 || _stream.is_done()) { // end recursion
2562       log_develop_trace(jvmcont)("is_empty: %d", _stream.is_done());
2563       finalize_thaw<FKind>(caller, argsize);
2564       return true; // bottom
2565     } else { // recurse
2566       thaw(_stream.to_frame(), caller, num_frames - 1, false);
2567       return false;
2568     }
2569   }
2570 
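  // Ends the thaw recursion: updates the chunk's sp/pc/argsize and max_size to account for the
  // frames removed from it, and materializes the continuation-entry frame as the caller of the
  // bottom thawed frame.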
2571   template<typename FKind>
2572   void finalize_thaw(frame& entry, int argsize) {
2573     stackChunkOop chunk = _cont.tail();
2574 
2575     OrderAccess::storestore();
2576     if (!_stream.is_done()) {
2577       assert (_stream.sp() >= chunk->sp_address(), "");
2578       chunk->set_sp(chunk->to_offset(_stream.sp()));
2579       chunk->set_pc(_stream.pc());
2580     } else {
2581       chunk->set_pc(nullptr);
2582       chunk->set_argsize(0);
2583       chunk->set_sp(chunk->stack_size());
2584     }
2585     assert(_stream.is_done() == chunk->is_empty(), "_stream.is_done(): %d chunk->is_empty(): %d", _stream.is_done(), chunk->is_empty());
2586 
2587     int delta = _stream.unextended_sp() - _top_unextended_sp;
2588     log_develop_trace(jvmcont)("sub max_size: %d -- %d (unextended_sp: " INTPTR_FORMAT " orig unextended_sp: " INTPTR_FORMAT ")", delta, chunk->max_size() - delta, p2i(_stream.unextended_sp()), p2i(_top_unextended_sp));
2589     chunk->set_max_size(chunk->max_size() - delta);
2590 
2591     // assert (!_stream.is_done() || chunk->parent() != nullptr || argsize == 0, "");
2592     _cont.set_argsize(FKind::interpreted ? 0 : argsize);
2593     log_develop_trace(jvmcont)("setting entry argsize: %d (bottom interpreted: %d)", _cont.argsize(), FKind::interpreted);
2594 
2595     entry = new_entry_frame();
2596 
2597     assert (entry.sp() == _cont.entrySP(), "entry.sp: %p entrySP: %p", entry.sp(), _cont.entrySP());
2598 
2599   #ifdef ASSERT
2600     log_develop_trace(jvmcont)("Found entry:");
2601     print_vframe(entry);
2602     Frame::assert_bottom_java_frame_name(entry, ENTER_SPECIAL_SIG);
2603   #endif
2604 
2605     assert (_cont.is_entry_frame(entry), "");
2606   }
2607 
2608   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) {
2609     log_develop_trace(jvmcont)("============================= THAWING FRAME: %d", num_frame);
2610     // frame hf = StackChunkFrameStream(_cont.tail(), hsp).to_frame();
2611     if (log_develop_is_enabled(Trace, jvmcont)) hf.print_on<true>(tty);
2612     assert (bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(caller));
2613   }
2614 
2615   inline void after_thaw_java_frame(const frame& f, bool bottom) {
2616     log_develop_trace(jvmcont)("thawed frame:");
2617     DEBUG_ONLY(print_vframe(f);)
2618   }
2619 
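       // Patches the thawed frame f against its caller: the return address is set to the return
       // barrier stub when this is the bottom-most thawed frame and more frames remain frozen,
       // or to the caller's raw pc (possibly a deopt handler) otherwise; interpreted frames also
       // get their sender-sp slot pointed at the caller's unextended sp.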
2620   template<typename FKind, bool bottom>
2621   inline void patch(frame& f, const frame& caller) {
2622     // assert (_cont.is_empty0() == _cont.is_empty(), "is_empty0: %d is_empty: %d", _cont.is_empty0(), _cont.is_empty());
2623     if (bottom && !_cont.is_empty()) {
2624       log_develop_trace(jvmcont)("Setting return address to return barrier: " INTPTR_FORMAT, p2i(StubRoutines::cont_returnBarrier()));
2625       FKind::patch_pc(caller, StubRoutines::cont_returnBarrier());
2626     } else if (bottom || should_deoptimize()) {
2627       FKind::patch_pc(caller, caller.raw_pc()); // this patches the return address to the deopt handler if necessary
2628     }
2629     patch_pd<FKind, bottom>(f, caller); // TODO R: reevaluate if and when this is necessary -- only bottom and interpreted caller?
2630 
2631     if (FKind::interpreted) {
2632       Interpreted::patch_sender_sp<false>(f, caller.unextended_sp());
2633     }
2634 
2635     assert (!bottom || !_cont.is_empty() || Frame::assert_bottom_java_frame_name(f, ENTER_SIG), "");
2636     assert (!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "cont.is_empty(): %d is_cont_barrier_frame(f): %d ", _cont.is_empty(), Continuation::is_cont_barrier_frame(f));
2637   }
2638 
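       // Thaws one interpreted frame: recurses to thaw (or finalize at) its caller first, then
       // copies the frame from the chunk in two parts -- the locals and the rest of the frame --
       // to allow for padding between them (see the AArch64 note below), restores the frame
       // bottom and other interpreter metadata, and patches the frame against its caller.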
2639   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
2640     assert (hf.is_interpreted_frame(), "");
2641 
2642     const bool bottom = recurse_thaw_java_frame<Interpreted>(caller, num_frames);
2643 
2644     DEBUG_ONLY(before_thaw_java_frame(hf, caller, bottom, num_frames);)
2645 
2646     frame f = new_frame<Interpreted>(hf, caller, bottom);
2647     intptr_t* const vsp = f.sp();
2648     intptr_t* const hsp = hf.unextended_sp();
2649     intptr_t* const frame_bottom = Interpreted::frame_bottom<false>(f);
2650 
2651     const int fsize = Interpreted::frame_bottom<true>(hf) - hsp;
2652     log_develop_trace(jvmcont)("fsize: %d", fsize);
2653 
2654     assert (!bottom || vsp + fsize >= _cont.entrySP() - 2, "");
2655     assert (!bottom || vsp + fsize <= _cont.entrySP(), "");
2656 
2657     assert (Interpreted::frame_bottom<false>(f) == vsp + fsize, "");
2658 
2659     // on AArch64 we add padding between the locals and the rest of the frame to keep the fp 16-byte-aligned
2660     const int locals = hf.interpreter_frame_method()->max_locals();
2661     copy_from_chunk<false>(Interpreted::frame_bottom<true>(hf) - locals, Interpreted::frame_bottom<false>(f) - locals, locals); // copy locals
2662     copy_from_chunk<false>(hsp, vsp, fsize - locals); // copy rest
2663 
2664     set_interpreter_frame_bottom(f, frame_bottom); // the copy overwrites the metadata
2665     derelativize_interpreted_frame_metadata(hf, f);
2666     bottom ? patch<Interpreted, true>(f, caller) : patch<Interpreted, false>(f, caller);
2667 
2668     DEBUG_ONLY(if (log_develop_is_enabled(Trace, jvmcont)) print_frame_layout<false>(f);)
2669 
2670     assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2671 
2672     assert(Interpreted::frame_bottom<false>(f) <= Frame::frame_top(caller), "Interpreted::frame_bottom<false>(f): %p Frame::frame_top(caller): %p", Interpreted::frame_bottom<false>(f), Frame::frame_top(caller));
2673 
2674     _cont.dec_num_interpreted_frames();
2675 
2676     maybe_set_fastpath(f.sp());
2677 
2678     if (!bottom) {
2679       log_develop_trace(jvmcont)("fix thawed caller");
2680       InstanceStackChunkKlass::fix_thawed_frame(_cont.tail(), caller, SmallRegisterMap::instance); // can only fix caller once this frame is thawed (due to callee saved regs)
2681     }
2682 
2683     DEBUG_ONLY(after_thaw_java_frame(f, bottom);)
2684     caller = f;
2685   }
2686 
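       // Thaws one compiled frame: recurses to thaw its caller first, accounts for an extra
       // alignment word when the caller is interpreted, copies the frame (including stack
       // arguments when the caller is interpreted or this is the bottom frame) together with
       // its metadata words, runs the nmethod entry barrier, and deoptimizes the thawed frame
       // if that is required.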
2687   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames) {
2688     assert (!hf.is_interpreted_frame(), "");
2689 
2690     const bool bottom = recurse_thaw_java_frame<Compiled>(caller, num_frames);
2691 
2692     DEBUG_ONLY(before_thaw_java_frame(hf, caller, bottom, num_frames);)
2693 
2694     assert (caller.sp() == caller.unextended_sp(), "");
2695 
2696     if ((!bottom && caller.is_interpreted_frame()) || (bottom && Interpreter::contains(_cont.tail()->pc()))) {
2697       log_develop_trace(jvmcont)("sub max_size align %d", ContinuationHelper::align_wiggle);
2698       _align_size += ContinuationHelper::align_wiggle; // add align_wiggle whether or not we've actually aligned, because freeze_interpreted_frame always adds it
2699     }
2700 
2701     frame f = new_frame<Compiled>(hf, caller, bottom);
2702     intptr_t* const vsp = f.sp();
2703     intptr_t* const hsp = hf.unextended_sp();
2704     log_develop_trace(jvmcont)("vsp: " INTPTR_FORMAT, p2i(vsp));
2705     log_develop_trace(jvmcont)("hsp: %d ", _cont.tail()->to_offset(hsp));
2706 
2707     int fsize = Compiled::size(hf);
2708     log_develop_trace(jvmcont)("fsize: %d", fsize);
2709     fsize += (bottom || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2710     assert (fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "%d " INTPTR_FORMAT, fsize, caller.unextended_sp() - f.unextended_sp());
2711 
2712     intptr_t* from = hsp - ContinuationHelper::frame_metadata;
2713     intptr_t* to   = vsp - ContinuationHelper::frame_metadata;
2714     int sz = fsize + ContinuationHelper::frame_metadata;
2715 
2716     assert (!bottom || _cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP(), "");
2717     assert (!bottom || hf.compiled_frame_stack_argsize() != 0 || to + sz == _cont.entrySP(), "");
2718 
2719     copy_from_chunk(from, to, sz);
2720 
2721     bottom ? patch<Compiled, true>(f, caller) : patch<Compiled, false>(f, caller);
2722 
2723     if (f.cb()->is_nmethod()) {
2724       f.cb()->as_nmethod()->run_nmethod_entry_barrier();
2725     }
2726 
2727     if (f.is_deoptimized_frame()) { // TODO PERF
2728       maybe_set_fastpath(f.sp());
2729     } else if (should_deoptimize() && (f.cb()->as_compiled_method()->is_marked_for_deoptimization() || _thread->is_interp_only_mode())) {
2730       log_develop_trace(jvmcont)("Deoptimizing thawed frame");
2731       DEBUG_ONLY(Frame::patch_pc(f, nullptr));
2732 
2733       f.deoptimize(_thread); // we're assuming there are no monitors; this doesn't revoke biased locks
2734       // ContinuationHelper::set_anchor(_thread, f); // deoptimization may need this
2735       // Deoptimization::deoptimize(_thread, f, &_map);
2736       // ContinuationHelper::clear_anchor(_thread);
2737 
2738       assert (f.is_deoptimized_frame() && Frame::is_deopt_return(f.raw_pc(), f), "");
2739       maybe_set_fastpath(f.sp());
2740     }
2741 
2742     if (!bottom) {
2743       log_develop_trace(jvmcont)("fix thawed caller");
2744       InstanceStackChunkKlass::fix_thawed_frame(_cont.tail(), caller, SmallRegisterMap::instance); // can only fix caller once this frame is thawed (due to callee saved regs)
2745     }
2746 
2747     DEBUG_ONLY(after_thaw_java_frame(f, bottom);)
2748     caller = f;
2749   }
2750 
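       // Thaws a safepoint stub frame found at the top of the chunk: first thaws its caller (a
       // compiled frame), then copies the stub frame itself and uses the stub's oop map to fix
       // the caller's spilled callee-saved registers.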
2751   void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2752     log_develop_trace(jvmcont)("Found safepoint stub");
2753 
2754     DEBUG_ONLY(_frames++;)
2755 
2756     {
2757       RegisterMap map(nullptr, true, false, false);
2758       map.set_include_argument_oops(false);
2759       _stream.next(&map);
2760       assert (!_stream.is_done(), "");
2761       if (UNLIKELY(_barriers)) { // we're now doing this on the stub's caller
2762         InstanceStackChunkKlass::do_barriers<true>(_cont.tail(), _stream, &map);
2763       }
2764       assert (!_stream.is_done(), "");
2765     }
2766 
2767     recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames);
2768 
2769     DEBUG_ONLY(before_thaw_java_frame(hf, caller, false, num_frames);)
2770 
2771     assert(Frame::is_stub(hf.cb()), "");
2772     assert (caller.sp() == caller.unextended_sp(), "");
2773     assert (!caller.is_interpreted_frame(), "");
2774 
2775     int fsize = StubF::size(hf);
2776     log_develop_trace(jvmcont)("fsize: %d", fsize);
2777 
2778     frame f = new_frame<StubF>(hf, caller, false);
2779     intptr_t* vsp = f.sp();
2780     intptr_t* hsp = hf.sp();
2781     log_develop_trace(jvmcont)("hsp: %d ", _cont.tail()->to_offset(hsp));
2782     log_develop_trace(jvmcont)("vsp: " INTPTR_FORMAT, p2i(vsp));
2783 
2784     copy_from_chunk(hsp - ContinuationHelper::frame_metadata, vsp - ContinuationHelper::frame_metadata, fsize + ContinuationHelper::frame_metadata);
2785 
2786     { // can only fix caller once this frame is thawed (due to callee saved regs)
2787       RegisterMap map(nullptr, true, false, false); // map.clear();
2788       map.set_include_argument_oops(false);
2789       f.oop_map()->update_register_map(&f, &map);
2790       ContinuationHelper::update_register_map_with_callee(&map, caller);
2791       InstanceStackChunkKlass::fix_thawed_frame(_cont.tail(), caller, &map);
2792     }
2793 
2794     DEBUG_ONLY(after_thaw_java_frame(f, false);)
2795     caller = f;
2796   }
2797 
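       // Runs once the recursion has completed: updates the chunk (dropping it or clearing its
       // mixed-frames flag when it became empty, adjusting max_size otherwise), re-aligns the
       // top frame's sp if necessary, pushes the return-frame metadata expected by the thaw
       // stub, and fixes the oops of the top thawed frame.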
2798   void finish_thaw(frame& f) {
2799     stackChunkOop chunk = _cont.tail();
2800 
2801     if (chunk->is_empty()) {
2802       if (_barriers) {
2803         _cont.set_tail(chunk->parent());
2804       } else {
2805         chunk->set_has_mixed_frames(false);
2806       }
2807       chunk->set_max_size(0);
2808       assert (chunk->argsize() == 0, "");
2809     } else {
2810       log_develop_trace(jvmcont)("sub max_size _align_size: %d -- %d", _align_size, chunk->max_size() - _align_size);
2811       chunk->set_max_size(chunk->max_size() - _align_size);
2812     }
2813     assert (chunk->is_empty() == (chunk->max_size() == 0), "chunk->is_empty: %d chunk->max_size: %d", chunk->is_empty(), chunk->max_size());
2814 
2815     if ((intptr_t)f.sp() % 16 != 0) {
2816       assert (f.is_interpreted_frame(), "");
2817       f.set_sp(f.sp() - 1);
2818     }
2819     push_return_frame(f);
2820     InstanceStackChunkKlass::fix_thawed_frame(chunk, f, SmallRegisterMap::instance); // can only fix caller after push_return_frame (due to callee saved regs)
2821 
2822     assert (_cont.is_empty() == _cont.last_frame().is_empty(), "cont.is_empty: %d cont.last_frame().is_empty(): %d", _cont.is_empty(), _cont.last_frame().is_empty());
2823 
2824     log_develop_trace(jvmcont)("thawed %d frames", _frames);
2825 
2826     log_develop_trace(jvmcont)("top_hframe after (thaw):");
2827     if (log_develop_is_enabled(Trace, jvmcont)) _cont.last_frame().template print_on<true>(tty);
2828   }
2829 
2830   void push_return_frame(frame& f) { // see generate_cont_thaw
2831     assert (!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), "");
2832     assert (!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
2833 
2834     log_develop_trace(jvmcont)("push_return_frame");
2835     if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);
2836 
2837     intptr_t* sp = f.sp();
2838     address pc = f.raw_pc();
2839     *(address*)(sp - frame::sender_sp_ret_address_offset()) = pc;
2840     Frame::patch_pc(f, pc); // the patched pc is checked in case we want to deopt the frame in a full transition
2841     ContinuationHelper::push_pd(f);
2842 
2843     assert(Frame::assert_frame_laid_out(f), "");
2844   }
2845 
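       // Converts the value stored at fp[offset] from an fp-relative offset back into an
       // absolute address, in place; the counterpart of the relativization applied to
       // interpreted frame metadata when the frame was frozen.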
2846   static inline void derelativize(intptr_t* const fp, int offset) {
2847     intptr_t* addr = fp + offset;
2848     // tty->print_cr(">>>> derelativize offset: %d fp: %p delta: %ld derel: %p", offset, fp, *addr, fp + *addr);
2849     *addr = (intptr_t)(fp + *addr);
2850   }
2851 
2852   static void JVMTI_continue_cleanup(JavaThread* thread) {
2853 #if INCLUDE_JVMTI
2854     invalidate_JVMTI_stack(thread);
2855 #endif // INCLUDE_JVMTI
2856   }
2857 };
2858 
2859 // returns new top sp; right below it are the pc and fp; see generate_cont_thaw
2860 // called after preparations (stack overflow check and making room)
2861 template<typename ConfigT>
2862 static inline intptr_t* thaw0(JavaThread* thread, const thaw_kind kind) {
2863   //callgrind();
2864   // NoSafepointVerifier nsv;
2865 #if CONT_JFR
2866   EventContinuationThaw event;
2867 #endif
2868 
2869   if (kind != thaw_top) {
2870     log_develop_trace(jvmcont)("== RETURN BARRIER");
2871   }
2872 
2873   log_develop_trace(jvmcont)("~~~~~~~~~ thaw kind: %d", kind);
2874   log_develop_trace(jvmcont)("sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT,
2875     p2i(thread->last_continuation()->entry_sp()), p2i(thread->last_continuation()->entry_fp()), p2i(thread->last_continuation()->entry_pc()));
2876 
2877   assert (thread == JavaThread::current(), "");
2878 
2879   oop oopCont = thread->last_continuation()->cont_oop();
2880 
2881   assert (!jdk_internal_vm_Continuation::done(oopCont), "");
2882 
2883   assert (oopCont == ContinuationHelper::get_continuation(thread), "");
2884 
2885   assert (verify_continuation<1>(oopCont), "");
2886   ContMirror cont(thread, oopCont);
2887   log_develop_debug(jvmcont)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
2888 
2889   cont.read(); // read_minimal
2890 
2891 #ifdef ASSERT
2892   ContinuationHelper::set_anchor_to_entry(thread, cont.entry());
2893   print_frames(thread);
2894 #endif
2895 
2896   Thaw<ConfigT> thw(thread, cont);
2897   intptr_t* const sp = thw.thaw(kind);
2898   assert ((intptr_t)sp % 16 == 0, "");
2899 
2900   thread->reset_held_monitor_count();
2901 
2902   assert (verify_continuation<2>(cont.mirror()), "");
2903 
2904 #ifndef PRODUCT
2905   intptr_t* sp0 = sp;
2906   address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
2907   if (pc0 == StubRoutines::cont_interpreter_forced_preempt_return()) {
2908     sp0 += ContinuationHelper::frame_metadata; // see push_interpreter_return_frame
2909   }
2910   ContinuationHelper::set_anchor(thread, sp0);
2911   print_frames(thread, tty); // must be done after write(), as frame walking reads fields off the Java objects.
2912   if (LoomVerifyAfterThaw) {
2913     assert(do_verify_after_thaw(thread, thw._mode, thw.barriers(), cont.tail()), "");
2914   }
2915   assert (ContinuationEntry::assert_entry_frame_laid_out(thread), "");
2916   ContinuationHelper::clear_anchor(thread);
2917 #endif
2918 
2919   if (log_develop_is_enabled(Trace, jvmcont)) {
2920     log_develop_trace(jvmcont)("Jumping to frame (thaw):");
2921     frame f(sp);
2922     print_vframe(f, nullptr);
2923   }
2924 
2925 #if CONT_JFR
2926   cont.post_jfr_event(&event, thread);
2927 #endif
2928 
2929   // assert (thread->last_continuation()->argsize() == 0 || Continuation::is_return_barrier_entry(*(address*)(thread->last_continuation()->bottom_sender_sp() - SENDER_SP_RET_ADDRESS_OFFSET)), "");
2930   assert (verify_continuation<3>(cont.mirror()), "");
2931   log_develop_debug(jvmcont)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
2932 
2933   return sp;
2934 }
2935 
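     // Verification closure used by do_verify_after_thaw below: visits the oop slots of thawed
     // frames and records the first slot whose contents do not look like a valid oop.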
2936 class ThawVerifyOopsClosure: public OopClosure {
2937   intptr_t* _p;
2938 public:
2939   ThawVerifyOopsClosure() : _p(nullptr) {}
2940   intptr_t* p() { return _p; }
2941   void reset() { _p = nullptr; }
2942 
2943   virtual void do_oop(oop* p) {
2944     oop o = *p;
2945     if (o == (oop)nullptr || is_good_oop(o)) return;
2946     _p = (intptr_t*)p;
2947     tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(*p), p2i(p));
2948   }
2949   virtual void do_oop(narrowOop* p) {
2950     oop o = RawAccess<>::oop_load(p);
2951     if (o == (oop)nullptr || is_good_oop(o)) return;
2952     _p = (intptr_t*)p;
2953     tty->print_cr("*** (narrow) non-oop %x found at " PTR_FORMAT, (int)(*p), p2i(p));
2954   }
2955 };
2956 
2957 void do_deopt_after_thaw(JavaThread* thread) {
2958   int i = 0;
2959   StackFrameStream fst(thread, true, false);
2960   fst.register_map()->set_include_argument_oops(false);
2961   ContinuationHelper::update_register_map_with_callee(fst.register_map(), *fst.current());
2962   for (; !fst.is_done(); fst.next()) {
2963     if (fst.current()->cb()->is_compiled()) {
2964       CompiledMethod* cm = fst.current()->cb()->as_compiled_method();
2965       if (!cm->method()->is_continuation_enter_intrinsic()) {
2966         cm->make_deoptimized();
2967       }
2968     }
2969   }
2970 }
2971 
2972 
2973 bool do_verify_after_thaw(JavaThread* thread, int mode, bool barriers, stackChunkOop chunk) {
2974   assert(thread->has_last_Java_frame(), "");
2975 
2976   ResourceMark rm;
2977   ThawVerifyOopsClosure cl;
2978   CodeBlobToOopClosure cf(&cl, false);
2979 
2980   int i = 0;
2981   StackFrameStream fst(thread, true, false);
2982   fst.register_map()->set_include_argument_oops(false);
2983   ContinuationHelper::update_register_map_with_callee(fst.register_map(), *fst.current());
2984   for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next(), i++) {
2985     if (fst.current()->cb()->is_compiled() && fst.current()->cb()->as_compiled_method()->is_marked_for_deoptimization()) {
2986       tty->print_cr(">>> do_verify_after_thaw deopt");
2987       fst.current()->deoptimize(nullptr);
2988       fst.current()->print_on(tty);
2989     }
2990 
2991     fst.current()->oops_do(&cl, &cf, fst.register_map());
2992     if (cl.p() != nullptr) {
2993 
2994       frame fr = *fst.current();
2995       tty->print_cr("Failed for frame %d, pc: %p, usp: %p, fp: %p; mode: %d barriers: %d requires_barriers: %d", i, fr.pc(), fr.unextended_sp(), fr.fp(), mode, barriers, chunk->requires_barriers());
2996       if (!fr.is_interpreted_frame()) {
2997         tty->print_cr("size: %d argsize: %d", NonInterpretedUnknown::size(fr), NonInterpretedUnknown::stack_argsize(fr));
2998       }
2999   #ifdef ASSERT
3000       VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
3001       if (reg != nullptr) tty->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
3002   #endif
3003       fr.print_on(tty);
3004       cl.reset();
3005   #ifdef ASSERT
3006       pfl();
3007   #endif
3008       chunk->print_on(true, tty);
3009       return false;
3010     }
3011   }
3012   return true;
3013 }
3014 
3015 static void print_vframe(frame f, const RegisterMap* map, outputStream* st) {
3016   if (st != nullptr && !log_is_enabled(Trace, jvmcont)) return;
3017   if (st == nullptr) st = tty;
3018 
3019   st->print_cr("\tfp: " INTPTR_FORMAT " real_fp: " INTPTR_FORMAT ", sp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT " usp: " INTPTR_FORMAT, p2i(f.fp()), p2i(f.real_fp()), p2i(f.sp()), p2i(f.pc()), p2i(f.unextended_sp()));
3020 
3021   f.print_on(st);
3022 }
3023 
3024 JRT_LEAF(intptr_t*, Continuation::thaw(JavaThread* thread, int kind))
3025   // TODO: JRT_LEAF and NoHandleMark is problematic for JFR events.
3026   // vFrameStreamCommon allocates Handles in RegisterMap for continuations.
3027   // JRT_ENTRY instead?
3028   ResetNoHandleMark rnhm;
3029 
3030   intptr_t* sp = cont_thaw(thread, (thaw_kind)kind);
3031   // ContinuationHelper::clear_anchor(thread);
3032   return sp;
3033 JRT_END
3034 
3035 bool Continuation::is_continuation_enterSpecial(const frame& f) {
3036   if (f.cb() == nullptr || !f.cb()->is_compiled()) return false;
3037   Method* m = f.cb()->as_compiled_method()->method();
3038   return (m != nullptr && m->is_continuation_enter_intrinsic());
3039 }
3040 
3041 bool Continuation::is_continuation_entry_frame(const frame& f, const RegisterMap *map) {
3042   // frame f = map->in_cont() ? map->stack_chunk()->derelativize(fr) : fr;
3043   // tty->print_cr(">>> is_continuation_entry_frame %d", map->in_cont()); map->in_cont() ? f.print_on<true>(tty) : f.print_on<false>(tty);
3044   // Method* m = Frame::frame_method(f);
3045 
3046   // we can do this because the entry frame is never inlined
3047   Method* m = (map->in_cont() && f.is_interpreted_frame()) ? map->stack_chunk()->interpreter_frame_method(f)
3048                                                            : Frame::frame_method(f);
3049   return m != nullptr && m->intrinsic_id() == vmIntrinsics::_Continuation_enter;
3050 }
3051 
3052 // bool Continuation::is_cont_post_barrier_entry_frame(const frame& f) {
3053 //   return is_return_barrier_entry(Frame::real_pc(f));
3054 // }
3055 
3056 // When walking the virtual stack, this method returns true
3057 // iff the frame is a thawed continuation frame whose
3058 // caller is still frozen on the h-stack.
3059 // The continuation object can be extracted from the thread.
3060 bool Continuation::is_cont_barrier_frame(const frame& f) {
3061   assert (f.is_interpreted_frame() || f.cb() != nullptr, "");
3062   return is_return_barrier_entry(f.is_interpreted_frame() ? Interpreted::return_pc(f) : Compiled::return_pc(f));
3063 }
3064 
3065 bool Continuation::is_return_barrier_entry(const address pc) {
3066   return pc == StubRoutines::cont_returnBarrier();
3067 }
3068 
3069 static inline bool is_sp_in_continuation(ContinuationEntry* cont, intptr_t* const sp) {
3070   // tty->print_cr(">>>> is_sp_in_continuation cont: %p sp: %p entry: %p in: %d", (oopDesc*)cont, sp, jdk_internal_vm_Continuation::entrySP(cont), jdk_internal_vm_Continuation::entrySP(cont) > sp);
3071   return cont->entry_sp() > sp;
3072 }
3073 
3074 bool Continuation::is_frame_in_continuation(ContinuationEntry* cont, const frame& f) {
3075   return is_sp_in_continuation(cont, f.unextended_sp());
3076 }
3077 
3078 static ContinuationEntry* get_continuation_entry_for_frame(JavaThread* thread, intptr_t* const sp) {
3079   assert (thread != nullptr, "");
3080   ContinuationEntry* cont = thread->last_continuation();
3081   while (cont != nullptr && !is_sp_in_continuation(cont, sp)) {
3082     cont = cont->parent();
3083   }
3084   // if (cont != nullptr) tty->print_cr(">>> get_continuation_entry_for_frame: %p entry.sp: %p oop: %p", sp, cont->entry_sp(), (oopDesc*)cont->continuation());
3085   return cont;
3086 }
3087 
3088 static oop get_continuation_for_sp(JavaThread* thread, intptr_t* const sp) {
3089   assert (thread != nullptr, "");
3090   ContinuationEntry* cont = get_continuation_entry_for_frame(thread, sp);
3091   return cont != nullptr ? cont->continuation() : (oop)nullptr;
3092 }
3093 
3094 oop Continuation::get_continuation_for_frame(JavaThread* thread, const frame& f) {
3095   return get_continuation_for_sp(thread, f.unextended_sp());
3096 }
3097 
3098 ContinuationEntry* Continuation::get_continuation_entry_for_continuation(JavaThread* thread, oop cont) {
3099   if (thread == nullptr || cont == (oop)nullptr) return nullptr;
3100 
3101   for (ContinuationEntry* entry = thread->last_continuation(); entry != nullptr; entry = entry->parent()) {
3102     if (cont == entry->continuation()) return entry;
3103   }
3104   return nullptr;
3105 }
3106 
3107 bool Continuation::is_frame_in_continuation(JavaThread* thread, const frame& f) {
3108   return get_continuation_entry_for_frame(thread, f.unextended_sp()) != nullptr;
3109 }
3110 
3111 bool Continuation::is_mounted(JavaThread* thread, oop cont_scope) {
3112   return last_continuation(thread, cont_scope) != nullptr;
3113 }
3114 
3115 ContinuationEntry* Continuation::last_continuation(const JavaThread* thread, oop cont_scope) {
3116   guarantee (thread->has_last_Java_frame(), "");
3117   for (ContinuationEntry* entry = thread->last_continuation(); entry != nullptr; entry = entry->parent()) {
3118     if (cont_scope == jdk_internal_vm_Continuation::scope(entry->continuation()))
3119       return entry;
3120   }
3121   return nullptr;
3122 }
3123 
3124 void Continuation::notify_deopt(JavaThread* thread, intptr_t* sp) {
3125   ContinuationEntry* cont = thread->last_continuation();
3126 
3127   if (cont == nullptr) return;
3128 
3129   if (is_sp_in_continuation(cont, sp)) {
3130     thread->push_cont_fastpath(sp);
3131     return;
3132   }
3133 
3134   ContinuationEntry* prev;
3135   do {
3136     prev = cont;
3137     cont = cont->parent();
3138   } while (cont != nullptr && !is_sp_in_continuation(cont, sp));
3139 
3140   if (cont == nullptr) return;
3141   assert (is_sp_in_continuation(cont, sp), "");
3142   if (sp > prev->parent_cont_fastpath())
3143     prev->set_parent_cont_fastpath(sp);
3144 }
3145 
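     // If the callee's sender pc is the return-barrier stub, the real sender is the continuation's
     // entry frame; substitutes the entry pc and sp recorded in the ContinuationEntry so that
     // stack walking can continue past the barrier. Returns true iff a fix-up was applied.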
3146 bool Continuation::fix_continuation_bottom_sender(JavaThread* thread, const frame& callee, address* sender_pc, intptr_t** sender_sp) {
3147   if (thread != nullptr && is_return_barrier_entry(*sender_pc)) {
3148     ContinuationEntry* cont = get_continuation_entry_for_frame(thread, callee.is_interpreted_frame() ? callee.interpreter_frame_last_sp() : callee.unextended_sp());
3149     assert (cont != nullptr, "callee.unextended_sp(): " INTPTR_FORMAT, p2i(callee.unextended_sp()));
3150 
3151     log_develop_debug(jvmcont)("fix_continuation_bottom_sender: [" JLONG_FORMAT "] [%d]", java_tid(thread), thread->osthread()->thread_id());
3152     log_develop_trace(jvmcont)("fix_continuation_bottom_sender: sender_pc: " INTPTR_FORMAT " -> " INTPTR_FORMAT, p2i(*sender_pc), p2i(cont->entry_pc()));
3153     log_develop_trace(jvmcont)("fix_continuation_bottom_sender: sender_sp: " INTPTR_FORMAT " -> " INTPTR_FORMAT, p2i(*sender_sp), p2i(cont->entry_sp()));
3154     // log_develop_trace(jvmcont)("fix_continuation_bottom_sender callee:"); if (log_develop_is_enabled(Debug, jvmcont)) callee.print_value_on(tty, thread);
3155 
3156     *sender_pc = cont->entry_pc();
3157     *sender_sp = cont->entry_sp();
3158     // We DO NOT want to fix FP. It could contain an oop that has changed on the stack, and its location should be OK anyway
3159 
3160     return true;
3161   }
3162   return false;
3163 }
3164 
3165 address Continuation::get_top_return_pc_post_barrier(JavaThread* thread, address pc) {
3166   ContinuationEntry* cont;
3167   if (thread != nullptr && is_return_barrier_entry(pc) && (cont = thread->last_continuation()) != nullptr) {
3168     pc = cont->entry_pc();
3169   }
3170   return pc;
3171 }
3172 
3173 bool Continuation::is_scope_bottom(oop cont_scope, const frame& f, const RegisterMap* map) {
3174   if (cont_scope == nullptr || !is_continuation_entry_frame(f, map))
3175     return false;
3176 
3177   oop cont = get_continuation_for_sp(map->thread(), f.sp());
3178   if (cont == nullptr)
3179     return false;
3180 
3181   oop sc = continuation_scope(cont);
3182   assert(sc != nullptr, "");
3183   return sc == cont_scope;
3184 }
3185 
3186 frame Continuation::continuation_parent_frame(RegisterMap* map) {
3187   assert (map->in_cont(), "");
3188   ContMirror cont(map);
3189   assert (map->thread() != nullptr || !cont.is_mounted(), "map->thread() == nullptr: %d cont.is_mounted(): %d", map->thread() == nullptr, cont.is_mounted());
3190 
3191   log_develop_trace(jvmcont)("continuation_parent_frame");
3192   if (map->update_map()) {
3193     ContinuationHelper::update_register_map_for_entry_frame(cont, map);
3194   }
3195 
3196   if (!cont.is_mounted()) { // When we're walking an unmounted continuation and reached the end
3197     oop parent = jdk_internal_vm_Continuation::parent(cont.mirror());
3198     stackChunkOop chunk = parent != nullptr ? ContMirror(parent).last_nonempty_chunk() : nullptr;
3199     if (chunk != nullptr) {
3200       return chunk->top_frame(map);
3201     }
3202 
3203     // tty->print_cr("continuation_parent_frame: no more");
3204     map->set_stack_chunk(nullptr);
3205     return frame();
3206   }
3207 
3208   map->set_stack_chunk(nullptr);
3209 
3210 #if (defined(X86) || defined(AARCH64)) && !defined(ZERO)
3211   frame sender(cont.entrySP(), cont.entryFP(), cont.entryPC());
3212 #else
3213   frame sender = frame();
3214   Unimplemented();
3215 #endif
3216 
3217   // tty->print_cr("continuation_parent_frame");
3218   // print_vframe(sender, map, nullptr);
3219 
3220   return sender;
3221 }
3222 
3223 static frame continuation_top_frame(oop contOop, RegisterMap* map) {
3224   stackChunkOop chunk = ContMirror(contOop).last_nonempty_chunk();
3225   map->set_stack_chunk(chunk);
3226   return chunk != nullptr ? chunk->top_frame(map) : frame();
3227 }
3228 
3229 frame Continuation::top_frame(const frame& callee, RegisterMap* map) {
3230   assert (map != nullptr, "");
3231   return continuation_top_frame(get_continuation_for_sp(map->thread(), callee.sp()), map);
3232 }
3233 
3234 frame Continuation::last_frame(oop continuation, RegisterMap *map) {
3235   assert(map != nullptr, "a map must be given");
3236   return continuation_top_frame(continuation, map);
3237 }
3238 
3239 bool Continuation::has_last_Java_frame(oop continuation) {
3240   return !ContMirror(continuation).is_empty();
3241 }
3242 
3243 stackChunkOop Continuation::last_nonempty_chunk(oop continuation) {
3244   return ContMirror(continuation).last_nonempty_chunk();
3245 }
3246 
3247 javaVFrame* Continuation::last_java_vframe(Handle continuation, RegisterMap *map) {
3248   assert(map != nullptr, "a map must be given");
3249   // tty->print_cr(">>> Continuation::last_java_vframe");
3250   if (!ContMirror(continuation()).is_empty()) {
3251     frame f = last_frame(continuation(), map);
3252     for (vframe* vf = vframe::new_vframe(&f, map, nullptr); vf; vf = vf->sender()) {
3253       if (vf->is_java_frame()) return javaVFrame::cast(vf);
3254     }
3255   }
3256   return nullptr;
3257 }
3258 
3259 bool Continuation::is_in_usable_stack(address addr, const RegisterMap* map) {
3260   ContMirror cont(map);
3261   stackChunkOop chunk = cont.find_chunk_by_address(addr);
3262   return chunk != nullptr ? chunk->is_usable_in_chunk(addr) : false;
3263 }
3264 
3265 stackChunkOop Continuation::continuation_parent_chunk(stackChunkOop chunk) {
3266   assert(chunk->cont() != nullptr, "");
3267   oop cont_parent = jdk_internal_vm_Continuation::parent(chunk->cont());
3268   return cont_parent != nullptr ? Continuation::last_nonempty_chunk(cont_parent) : nullptr;
3269 }
3270 
3271 oop Continuation::continuation_scope(oop cont) {
3272   return cont != nullptr ? jdk_internal_vm_Continuation::scope(cont) : nullptr;
3273 }
3274 
3275 bool Continuation::pin(JavaThread* current) {
3276   ContinuationEntry* ce = current->last_continuation();
3277   if (ce == nullptr)
3278     return true;
3279 
3280   oop cont = ce->cont_oop();
3281   assert (cont != nullptr, "");
3282   assert (cont == ContinuationHelper::get_continuation(current), "");
3283 
3284   jshort value = jdk_internal_vm_Continuation::critical_section(cont);
3285   if (value < max_jshort) {
3286     jdk_internal_vm_Continuation::set_critical_section(cont, value + 1);
3287     return true;
3288   }
3289   return false;
3290 }
3291 
3292 bool Continuation::unpin(JavaThread* current) {
3293   ContinuationEntry* ce = current->last_continuation();
3294   if (ce == nullptr)
3295     return true;
3296 
3297   oop cont = ce->cont_oop();
3298   assert (cont != nullptr, "");
3299   assert (cont == ContinuationHelper::get_continuation(current), "");
3300
3301   jshort value = jdk_internal_vm_Continuation::critical_section(cont);
3302   if (value > 0) {
3303     jdk_internal_vm_Continuation::set_critical_section(cont, value - 1);
3304     return true;
3305   }
3306   return false;
3307 }
3308 
3309 ///// Allocation
3310 
3311 inline void ContMirror::post_safepoint(Handle conth) {
3312   _cont = conth(); // reload oop
3313   if (_tail != (oop)nullptr) {
3314     _tail = (stackChunkOop)jdk_internal_vm_Continuation::tail(_cont);
3315   }
3316 }
3317 
3318 
3319 /* Try to allocate a chunk from the TLAB; if that fails, allocate one using the allocator's
3320  * allocate() method. In the latter case we might have safepointed and need to reload our oops. */
3321 stackChunkOop ContMirror::allocate_stack_chunk(int stack_size, bool is_preempt) {
3322   InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass());
3323   int size_in_words = klass->instance_size(stack_size);
3324 
3325   assert(is_preempt || _thread == JavaThread::current(), "should be current");
3326   JavaThread* current = is_preempt ? JavaThread::current() : _thread;
3327 
3328   StackChunkAllocator allocator(klass, size_in_words, stack_size, current);
3329   HeapWord* start = current->tlab().allocate(size_in_words);
3330   if (start != nullptr) {
3331     return (stackChunkOop)allocator.initialize(start);
3332   }
3333 
3334   //HandleMark hm(current);
3335   Handle conth(current, _cont);
3336   stackChunkOop result = (stackChunkOop)allocator.allocate();
3337   post_safepoint(conth);
3338   return result;
3339 }
3340 
3341 void Continuation::emit_chunk_iterate_event(oop chunk, int num_frames, int num_oops) {
3342   EventContinuationIterateOops e;
3343   if (e.should_commit()) {
3344     e.set_id(cast_from_oop<u8>(chunk));
3345     e.set_safepoint(SafepointSynchronize::is_at_safepoint());
3346     e.set_numFrames((u2)num_frames);
3347     e.set_numOops((u2)num_oops);
3348     e.commit();
3349   }
3350 }
3351 
3352 JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
3353   JavaThread* thread = JavaThread::thread_from_jni_environment(env);
3354   return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
3355 }
3356 JVM_END
3357 
3358 JVM_ENTRY(jint, CONT_TryForceYield0(JNIEnv* env, jobject jcont, jobject jthread)) {
3359   JavaThread* current = JavaThread::thread_from_jni_environment(env);
3360   assert(current == JavaThread::current(), "should be");
3361   jint result = -1; // no continuation (should have enum)
3362 
3363   oop thread_oop = JNIHandles::resolve(jthread);
3364   if (thread_oop != nullptr) {
3365     JavaThread* target = java_lang_Thread::thread(thread_oop);
3366     assert(target != current, "should be different threads");
3367     // Suspend the target thread and freeze it.
3368     if (target->block_suspend(current)) {
3369       oop oopCont = JNIHandles::resolve_non_null(jcont);
3370       result = Continuation::try_force_yield(target, oopCont);
3371       target->continue_resume(current);
3372     }
3373   }
3374   return result;
3375 }
3376 JVM_END
3377 
3378 
3379 void Continuation::init() {
3380 }
3381 
3382 void Continuation::set_cont_fastpath_thread_state(JavaThread* thread) {
3383   assert (thread != nullptr, "");
3384   bool fast = !thread->is_interp_only_mode();
3385   thread->set_cont_fastpath_thread_state(fast);
3386 }
3387 
3388 #define CC (char*)  /*cast a literal from (const char*)*/
3389 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
3390 
3391 static JNINativeMethod CONT_methods[] = {
3392     {CC"tryForceYield0",   CC"(Ljava/lang/Thread;)I",            FN_PTR(CONT_TryForceYield0)},
3393     {CC"isPinned0",        CC"(Ljdk/internal/vm/ContinuationScope;)I", FN_PTR(CONT_isPinned0)},
3394 };
3395 
3396 void CONT_RegisterNativeMethods(JNIEnv *env, jclass cls) {
3397     Thread* thread = Thread::current();
3398     assert(thread->is_Java_thread(), "");
3399     ThreadToNativeFromVM trans((JavaThread*)thread);
3400     int status = env->RegisterNatives(cls, CONT_methods, sizeof(CONT_methods)/sizeof(JNINativeMethod));
3401     guarantee(status == JNI_OK && !env->ExceptionOccurred(), "register jdk.internal.vm.Continuation natives");
3402 }
3403 
3404 #include CPU_HEADER_INLINE(continuation)
3405 
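     // Config binds the compile-time choices (compressed oops and the GC barrier set) into one
     // type; ConfigResolve below selects the matching instantiation at startup and installs its
     // freeze/thaw entry points into the cont_freeze and cont_thaw function pointers.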
3406 template <bool compressed_oops, typename BarrierSetT>
3407 class Config {
3408 public:
3409   typedef Config<compressed_oops, BarrierSetT> SelfT;
3410   typedef typename Conditional<compressed_oops, narrowOop, oop>::type OopT;
3411 
3412   static const bool _compressed_oops = compressed_oops;
3413   static const bool _concurrent_gc = BarrierSetT::is_concurrent_gc();
3414   // static const bool _post_barrier = post_barrier;
3415   // static const bool allow_stubs = gen_stubs && post_barrier && compressed_oops;
3416   // static const bool has_young = use_chunks;
3417   // static const bool full_stack = full;
3418 
3419   static int freeze(JavaThread* thread, intptr_t* sp, bool preempt) {
3420     return freeze0<SelfT>(thread, sp, preempt);
3421   }
3422 
3423   static intptr_t* thaw(JavaThread* thread, thaw_kind kind) {
3424     return thaw0<SelfT>(thread, kind);
3425   }
3426 
3427   static bool requires_barriers(oop obj) {
3428     return BarrierSetT::requires_barriers(obj);
3429   }
3430 
3431   static void print() {
3432     tty->print_cr(">>> Config compressed_oops: %d concurrent_gc: %d", _compressed_oops, _concurrent_gc);
3433     // tty->print_cr(">>> Config UseAVX: %ld UseUnalignedLoadStores: %d Enhanced REP MOVSB: %d Fast Short REP MOVSB: %d rdtscp: %d rdpid: %d", UseAVX, UseUnalignedLoadStores, VM_Version::supports_erms(), VM_Version::supports_fsrm(), VM_Version::supports_rdtscp(), VM_Version::supports_rdpid());
3434     // tty->print_cr(">>> Config avx512bw (not legacy bw): %d avx512dq (not legacy dq): %d avx512vl (not legacy vl): %d avx512vlbw (not legacy vlbw): %d", VM_Version::supports_avx512bw(), VM_Version::supports_avx512dq(), VM_Version::supports_avx512vl(), VM_Version::supports_avx512vlbw());
3435   }
3436 };
3437 
3438 class ConfigResolve {
3439 public:
3440   static void resolve() { resolve_compressed(); }
3441 
3442   static void resolve_compressed() {
3443     UseCompressedOops ? resolve_gc<true>()
3444                       : resolve_gc<false>();
3445   }
3446 
3447   template <bool use_compressed>
3448   static void resolve_gc() {
3449     BarrierSet* bs = BarrierSet::barrier_set();
3450     assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set");
3451     switch (bs->kind()) {
3452 #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
3453       case BarrierSet::bs_name: {                                       \
3454         resolve<use_compressed, typename BarrierSet::GetType<BarrierSet::bs_name>::type>(); \
3455       }                                                                 \
3456         break;
3457       FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
3458 #undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
3459 
3460       default:
3461         fatal("BarrierSet resolving not implemented");
3462     }
3463   }
3464 
3465   template <bool use_compressed, typename BarrierSetT>
3466   static void resolve() {
3467     typedef Config<use_compressed, BarrierSetT> SelectedConfigT;
3468     // SelectedConfigT::print();
3469 
3470     cont_freeze = SelectedConfigT::freeze;
3471     cont_thaw   = SelectedConfigT::thaw;
3472   }
3473 };
3474 
3475 void Continuations::init() {
3476   ConfigResolve::resolve();
3477   InstanceStackChunkKlass::resolve_memcpy_functions();
3478   Continuation::init();
3479 }
3480 
3481 void Continuations::print_statistics() {
3482   //tty->print_cr("Continuations hit/miss %ld / %ld", _exploded_hit, _exploded_miss);
3483   //tty->print_cr("Continuations nmethod hit/miss %ld / %ld", _nmethod_hit, _nmethod_miss);
3484 }
3485 
3486 ///// DEBUGGING
3487 
3488 #ifndef PRODUCT
3489 void Continuation::describe(FrameValues &values) {
3490   JavaThread* thread = JavaThread::active();
3491   if (thread != nullptr) {
3492     for (ContinuationEntry* cont = thread->last_continuation(); cont != nullptr; cont = cont->parent()) {
3493       intptr_t* bottom = cont->entry_sp();
3494       if (bottom != nullptr)
3495         values.describe(-1, bottom, "continuation entry");
3496     }
3497   }
3498 }
3499 
3500 #ifdef ASSERT
3501 bool Continuation::debug_is_stack_chunk(Klass* k) {
3502   return k->is_instance_klass() && InstanceKlass::cast(k)->is_stack_chunk_instance_klass();
3503 }
3504 
3505 bool Continuation::debug_is_stack_chunk(oop obj) {
3506   return obj != (oop)nullptr && obj->is_stackChunk();
3507 }
3508 
3509 bool Continuation::debug_is_continuation(Klass* klass) {
3510   return klass->is_subtype_of(vmClasses::Continuation_klass());
3511 }
3512 
3513 bool Continuation::debug_is_continuation(oop obj) {
3514   return obj->is_a(vmClasses::Continuation_klass());
3515 }
3516 
3517 bool Continuation::debug_is_continuation_run_frame(const frame& f) {
3518   bool is_continuation_run = false;
3519   if (f.is_compiled_frame()) {
3520     HandleMark hm(Thread::current());
3521     ResourceMark rm;
3522     Method* m = f.cb()->as_compiled_method()->scope_desc_at(f.pc())->method();
3523     if (m != nullptr) {
3524       char buf[50];
3525       if (0 == strcmp(ENTER_SPECIAL_SIG, m->name_and_sig_as_C_string(buf, 50))) {
3526         is_continuation_run = true;
3527       }
3528     }
3529   }
3530   return is_continuation_run;
3531 }
3532 
3533 
3534 NOINLINE bool Continuation::debug_verify_continuation(oop contOop) {
3535   DEBUG_ONLY(if (!VerifyContinuations) return true;)
3536   assert (contOop != (oop)nullptr, "");
3537   assert (oopDesc::is_oop(contOop), "");
3538   ContMirror cont(contOop);
3539   cont.read();
3540 
3541   assert (oopDesc::is_oop_or_null(cont.tail()), "");
3542   assert (cont.chunk_invariant(), "");
3543 
3544   bool nonempty_chunk = false;
3545   size_t max_size = 0;
3546   int num_chunks = 0;
3547   int num_frames = 0;
3548   int num_interpreted_frames = 0;
3549   int num_oops = 0;
3550   // tty->print_cr(">>> debug_verify_continuation traversing chunks");
3551   for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
3552     log_develop_trace(jvmcont)("debug_verify_continuation chunk %d", num_chunks);
3553     chunk->verify(&max_size, &num_oops, &num_frames, &num_interpreted_frames);
3554     if (!chunk->is_empty()) nonempty_chunk = true;
3555     num_chunks++;
3556   }
3557 
3558   // assert (cont.max_size() >= 0, ""); // size_t can't be negative...
3559   const bool is_empty = cont.is_empty();
3560   assert (!nonempty_chunk || !is_empty, "");
3561   assert (is_empty == (!nonempty_chunk && cont.last_frame().is_empty()), "");
3562   // assert (num_interpreted_frames == cont.num_interpreted_frames(), "interpreted_frames: %d cont.num_interpreted_frames(): %d", num_interpreted_frames, cont.num_interpreted_frames());
3563 
3564   return true;
3565 }
3566 
3567 void Continuation::debug_print_continuation(oop contOop, outputStream* st) {
3568   if (st == nullptr) st = tty;
3569 
3570   ContMirror cont(contOop);
3571 
3572   st->print_cr("CONTINUATION: " PTR_FORMAT " done: %d", contOop->identity_hash(), jdk_internal_vm_Continuation::done(contOop));
3573   st->print_cr("CHUNKS:");
3574   for (stackChunkOop chunk = cont.tail(); chunk != (oop)nullptr; chunk = chunk->parent()) {
3575     st->print("* ");
3576     chunk->print_on(true, st);
3577   }
3578 
3579   // st->print_cr("frames: %d interpreted frames: %d oops: %d", cont.num_frames(), cont.num_interpreted_frames(), cont.num_oops());
3580 }
3581 #endif // ASSERT
3582 
3583 static jlong java_tid(JavaThread* thread) {
3584   return java_lang_Thread::thread_id(thread->threadObj());
3585 }
3586 
3587 #ifndef PRODUCT
3588 template <bool relative>
3589 static void print_frame_layout(const frame& f, outputStream* st) {
3590   ResourceMark rm;
3591   FrameValues values;
3592   assert (f.get_cb() != nullptr, "");
3593   RegisterMap map(relative ? (JavaThread*)nullptr : JavaThread::current(), true, false, false);
3594   map.set_include_argument_oops(false);
3595   map.set_skip_missing(true);
3596   frame::update_map_with_saved_link(&map, Frame::callee_link_address(f));
3597   const_cast<frame&>(f).describe<relative>(values, 0, &map);
3598   values.print_on((JavaThread*)nullptr, st);
3599 }
3600 #endif
3601 
3602 static void print_frames(JavaThread* thread, outputStream* st) {
3603   if (st != nullptr && !log_develop_is_enabled(Trace, jvmcont)) return;
3604   if (st == nullptr) st = tty;
3605 
3606   if (!thread->has_last_Java_frame()) st->print_cr("NO ANCHOR!");
3607 
3608   st->print_cr("------- frames ---------");
3609   RegisterMap map(thread, true, true, false);
3610   map.set_include_argument_oops(false);
3611 #ifndef PRODUCT
3612   map.set_skip_missing(true);
3613   ResetNoHandleMark rnhm;
3614   ResourceMark rm;
3615   HandleMark hm(Thread::current());
3616   FrameValues values;
3617 #endif
3618 
3619   int i = 0;
3620   for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
3621 #ifndef PRODUCT
3622     // print_vframe(f, &map, st);
3623     f.describe(values, i, &map);
3624 #else
3625     // f.print_on(st);
3626     // tty->print_cr("===");
3627     print_vframe(f, &map, st);
3628 #endif
3629     i++;
3630   }
3631 #ifndef PRODUCT
3632   values.print(thread);
3633 #endif
3634   st->print_cr("======= end frames =========");
3635 }
3636 
3637 // template<int x>
3638 // NOINLINE static void walk_frames(JavaThread* thread) {
3639 //   RegisterMap map(thread, false, false, false);
3640 //   for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map));
3641 // }
3642 
3643 #endif
3644 
3645 int ContinuationEntry::return_pc_offset = 0;
3646 nmethod* ContinuationEntry::continuation_enter = nullptr;
3647 address ContinuationEntry::return_pc = nullptr;
3648 
3649 void ContinuationEntry::set_enter_nmethod(nmethod* nm) {
3650   assert (return_pc_offset != 0, "");
3651   continuation_enter = nm;
3652   return_pc = nm->code_begin() + return_pc_offset;
3653 }
3654 
3655 ContinuationEntry* ContinuationEntry::from_frame(const frame& f) {
3656   assert (Continuation::is_continuation_enterSpecial(f), "");
3657   return (ContinuationEntry*)f.unextended_sp();
3658 }
3659 
3660 #ifdef ASSERT
3661 bool ContinuationEntry::assert_entry_frame_laid_out(JavaThread* thread) {
3662   assert (thread->has_last_Java_frame(), "Wrong place to use this assertion");
3663 
3664   ContinuationEntry* cont = Continuation::get_continuation_entry_for_continuation(thread, ContinuationHelper::get_continuation(thread));
3665   assert (cont != nullptr, "");
3666 
3667   intptr_t* unextended_sp = cont->entry_sp();
3668   intptr_t* sp;
3669   if (cont->argsize() > 0) {
3670     sp = cont->bottom_sender_sp();
3671   } else {
3672     sp = unextended_sp;
3673     bool interpreted_bottom = false;
3674     RegisterMap map(thread, false, false, false);
3675     frame f;
3676     for (f = thread->last_frame(); !f.is_first_frame() && f.sp() <= unextended_sp && !Continuation::is_continuation_enterSpecial(f); f = f.sender(&map)) {
3677       if (Continuation::is_continuation_enterSpecial(f))
3678         break;
3679       interpreted_bottom = f.is_interpreted_frame();
3680       if (!(f.sp() != nullptr && f.sp() <= cont->bottom_sender_sp())) {
3681         tty->print_cr("oops");
3682         f.print_on(tty);
3683       }
3684     }
3685     assert (Continuation::is_continuation_enterSpecial(f), "");
3686     sp = interpreted_bottom ? f.sp() : cont->bottom_sender_sp();
3687   }
3688 
3689   assert (sp != nullptr && sp <= cont->entry_sp(), "sp: " INTPTR_FORMAT " entry_sp: " INTPTR_FORMAT " bottom_sender_sp: " INTPTR_FORMAT, p2i(sp), p2i(cont->entry_sp()), p2i(cont->bottom_sender_sp()));
3690   address pc = *(address*)(sp - frame::sender_sp_ret_address_offset());
3691 
3692   if (pc != StubRoutines::cont_returnBarrier()) {
3693     CodeBlob* cb = pc != nullptr ? CodeCache::find_blob(pc) : nullptr;
3694 
3695     if (cb == nullptr || !cb->is_compiled() || !cb->as_compiled_method()->method()->is_continuation_enter_intrinsic()) {
3696       tty->print_cr(">>>> entry unextended_sp: %p sp: %p", unextended_sp, sp);
3697       if (cb == nullptr) tty->print_cr("NULL"); else cb->print_on(tty);
3698       os::print_location(tty, (intptr_t)pc);
3699     }
3700 
3701     assert (cb != nullptr, "");
3702     assert (cb->is_compiled(), "");
3703     assert (cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
3704   }
3705 
3706   // intptr_t* fp = *(intptr_t**)(sp - frame::sender_sp_offset);
3707   // assert (cont->entry_fp() == fp, "entry_fp: " INTPTR_FORMAT " actual: " INTPTR_FORMAT, p2i(cont->entry_sp()), p2i(fp));
3708 
3709   return true;
3710 }
3711 #endif