/*
 * Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.inline.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "metaprogramming/conditional.hpp"
#include "oops/access.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/weakHandle.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"

// #define PERFTEST 1

#ifdef PERFTEST
#define PERFTEST_ONLY(code) code
#else
#define PERFTEST_ONLY(code)
#endif // PERFTEST

#ifdef __has_include
#  if __has_include(<valgrind/callgrind.h>)
#    include <valgrind/callgrind.h>
#  endif
#endif

#ifdef CALLGRIND_START_INSTRUMENTATION
  static int callgrind_counter = 1;
  // static void callgrind() {
  //   if (callgrind_counter != 0) {
  //     if (callgrind_counter > 20000) {
  //       tty->print_cr("Starting callgrind instrumentation");
  //       CALLGRIND_START_INSTRUMENTATION;
  //       callgrind_counter = 0;
  //     } else
  //       callgrind_counter++;
  //   }
  // }
#else
  // static void callgrind() {}
#endif

// #undef log_develop_info
// #undef log_develop_debug
// #undef log_develop_trace
// #undef log_develop_is_enabled
// #define log_develop_info(...)  (!log_is_enabled(Info, __VA_ARGS__))  ? (void)0 : LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::Info>
// #define log_develop_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::Debug>
// #define log_develop_trace(...) (!log_is_enabled(Trace, __VA_ARGS__)) ? (void)0 : LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::Trace>
// #define log_develop_is_enabled(level, ...)  log_is_enabled(level, __VA_ARGS__)

// #undef ASSERT
// #undef assert
// #define assert(p, ...)

int Continuations::_flags = 0;

OopStorage* Continuation::_weak_handles = NULL;

PERFTEST_ONLY(static int PERFTEST_LEVEL = ContPerfTest;)
// Freeze:
// 5 - no call into C
// 10 - immediate return from C
// 15 - return after count_frames
// 20 - all work, but no copying
// 25 - copy to stack
// 30 - freeze oops
// <100 - don't allocate
// 100 - everything
//
// Thaw:
// 105 - no call into C (prepare_thaw)
// 110 - immediate return from C (prepare_thaw)
// 112 - no call to thaw0
// 115 - return after traversing frames
// 120
// 125 - copy from stack
// 130 - thaw oops

// TODO
//
// Add:
//  - method/nmethod metadata
//  - compress interpreted frames
//  - special native methods: Method.invoke, doPrivileged (+ method handles)
//  - compiled->interpreted for serialization (look at scopeDesc)
//  - caching h-stacks in thread stacks
//
// Things to compress in interpreted frames: return address, monitors, last_sp
//
// See: deoptimization.cpp, vframeArray.cpp, abstractInterpreter_x86.cpp
//
// For non-temporal load/store in clang (__builtin_nontemporal_load/store) see: https://clang.llvm.org/docs/LanguageExtensions.html
#define YIELD_SIG  "java.lang.Continuation.yield(Ljava/lang/ContinuationScope;)V"
#define YIELD0_SIG "java.lang.Continuation.yield0(Ljava/lang/ContinuationScope;Ljava/lang/Continuation;)Z"
#define ENTER_SIG  "java.lang.Continuation.enter()V"
#define RUN_SIG    "java.lang.Continuation.run()V"

static bool is_stub(CodeBlob* cb);
template<bool indirect>
static void set_anchor(JavaThread* thread, const FrameInfo* fi);
// static void set_anchor(JavaThread* thread, const frame& f); -- unused

// debugging functions
static void print_oop(void *p, oop obj, outputStream* st = tty);
static void print_vframe(frame f, const RegisterMap* map = NULL, outputStream* st = tty);

#ifdef ASSERT
  static void print_frames(JavaThread* thread, outputStream* st = tty);
  static jlong java_tid(JavaThread* thread);
  static void print_blob(outputStream* st, address addr);
  // void static stop();
  // void static stop(const frame& f);
  // static void print_JavaThread_offsets();
  // static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map);

  static RegisterMap dmap(NULL, false); // global dummy RegisterMap
#endif

#define ELEMS_PER_WORD (wordSize/sizeof(jint))
// Primitive hstack is int[]
typedef jint ElemType;
const BasicType basicElementType   = T_INT;
const int       elementSizeInBytes = T_INT_aelem_bytes;
const int       LogBytesPerElement = LogBytesPerInt;
const int       elemsPerWord       = wordSize/elementSizeInBytes;
const int       LogElemsPerWord    = 1;

STATIC_ASSERT(elemsPerWord >= 1);
STATIC_ASSERT(elementSizeInBytes == sizeof(ElemType));
STATIC_ASSERT(elementSizeInBytes == (1 << LogBytesPerElement));
STATIC_ASSERT(elementSizeInBytes <<  LogElemsPerWord == wordSize);
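
// Note: LogElemsPerWord == 1 encodes two jint elements per word, i.e. this code
// assumes an 8-byte word; the STATIC_ASSERTs above verify these relationships.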

// #define CHOOSE1(interp, f, ...) ((interp) ? Interpreted::f(__VA_ARGS__) : NonInterpretedUnknown::f(__VA_ARGS__))
#define CHOOSE2(interp, f, ...) ((interp) ? f<Interpreted>(__VA_ARGS__) : f<NonInterpretedUnknown>(__VA_ARGS__))
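// Illustrative expansion: CHOOSE2(f.is_interpreted_frame(), slow_link_address, f)
// evaluates to slow_link_address<Interpreted>(f) for interpreted frames and to
// slow_link_address<NonInterpretedUnknown>(f) otherwise.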
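// Flag bits kept in ContMirror::_flags and mirrored in the flags field of the
// java.lang.Continuation object.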
static const unsigned char FLAG_LAST_FRAME_INTERPRETED = 1;
static const unsigned char FLAG_SAFEPOINT_YIELD = 1 << 1;

static const int SP_WIGGLE = 3; // depends on the extra space between interpreted and compiled we add in Thaw::align

void continuations_init() {
  Continuations::init();
}

class SmallRegisterMap;
class ContMirror;
class hframe;

template <typename ConfigT>
class CompiledMethodKeepalive;

#ifdef CONT_DOUBLE_NOP
class CachedCompiledMetadata;
#endif

// TODO R remove
template<typename FKind> static intptr_t** slow_link_address(const frame& f);

class Frame {
public:
  template<typename RegisterMapT> static inline intptr_t** map_link_address(const RegisterMapT* map);
  static inline intptr_t** callee_link_address(const frame& f);
  static inline Method* frame_method(const frame& f);
  static inline address real_pc(const frame& f);
  static inline void patch_pc(const frame& f, address pc);
  static address* return_pc_address(const frame& f);
  static address return_pc(const frame& f);

  DEBUG_ONLY(static inline intptr_t* frame_top(const frame& f);)
};

template<typename Self>
class FrameCommon : public Frame {
public:
  static inline Method* frame_method(const frame& f);

  template <typename FrameT> static bool is_instance(const FrameT& f);
};

class Interpreted : public FrameCommon<Interpreted> {
public:
  DEBUG_ONLY(static const char* name;)
  static const bool interpreted = true;
  static const bool stub = false;
  static const int extra_oops = 0;
  static const char type = 'i';

public:

  static inline intptr_t* frame_top(const frame& f, InterpreterOopMap* mask);
  static inline intptr_t* frame_bottom(const frame& f);

  static inline address* return_pc_address(const frame& f);
  static inline address return_pc(const frame& f);
  static void patch_return_pc(frame& f, address pc);
  static void patch_sender_sp(frame& f, intptr_t* sp);

  static void oop_map(const frame& f, InterpreterOopMap* mask);
  static int num_oops(const frame& f, InterpreterOopMap* mask);
  static int size(const frame& f, InterpreterOopMap* mask);
  static inline int expression_stack_size(const frame& f, InterpreterOopMap* mask);
  static bool is_owning_locks(const frame& f);

  typedef InterpreterOopMap* ExtraT;
};

DEBUG_ONLY(const char* Interpreted::name = "Interpreted";)

template<typename Self>
class NonInterpreted : public FrameCommon<Self> {
public:
  static inline intptr_t* frame_top(const frame& f);
  static inline intptr_t* frame_bottom(const frame& f);

  template <typename FrameT> static inline int size(const FrameT& f);
  template <typename FrameT> static inline int stack_argsize(const FrameT& f);
  static inline int num_oops(const frame& f);

  template <typename RegisterMapT>
  static bool is_owning_locks(JavaThread* thread, const RegisterMapT* map, const frame& f);
};

class NonInterpretedUnknown : public NonInterpreted<NonInterpretedUnknown> {
public:
  DEBUG_ONLY(static const char* name;)
  static const bool interpreted = false;

  template <typename FrameT> static bool is_instance(const FrameT& f);
};

DEBUG_ONLY(const char* NonInterpretedUnknown::name = "NonInterpretedUnknown";)

struct FpOopInfo;
typedef int (*FreezeFnT)(address, address, address, address, int, FpOopInfo*);

class Compiled : public NonInterpreted<Compiled> {
public:
  DEBUG_ONLY(static const char* name;)
  static const bool interpreted = false;
  static const bool stub = false;
  static const int extra_oops = 1;
  static const char type = 'c';

  typedef FreezeFnT ExtraT;
};

DEBUG_ONLY(const char* Compiled::name = "Compiled";)

class StubF : public NonInterpreted<StubF> {
public:
  DEBUG_ONLY(static const char* name;)
  static const bool interpreted = false;
  static const bool stub = true;
  static const int extra_oops = 0;
  static const char type = 's';
};

DEBUG_ONLY(const char* StubF::name = "Stub";)

static bool is_stub(CodeBlob* cb) {
  return cb != NULL && (cb->is_safepoint_stub() || cb->is_runtime_stub());
}

enum op_mode {
  mode_fast,   // only compiled frames
  mode_slow,   // possibly interpreted frames
  mode_preempt // top frame is safepoint stub (forced preemption)
};

// Represents a stack frame on the horizontal stack, analogous to the frame class for vertical-stack frames.
//
// We do not maintain an sp and an unextended sp; instead, sp represents the frame's unextended_sp,
// and the various patching of interpreted frames is handled specially.
template<typename SelfPD>
class HFrameBase {
protected:
  int     _sp;
  int     _ref_sp;
  address _pc;

  bool _is_interpreted;
  mutable void* _cb_imd; // stores the CodeBlob for compiled frames and interpreted-frame metadata for interpreted frames
  mutable const ImmutableOopMap* _oop_map; // oop map, for compiled/stub frames only

  friend class ContMirror;
private:
  const SelfPD& self() const { return static_cast<const SelfPD&>(*this); }
  SelfPD& self() { return static_cast<SelfPD&>(*this); }

  const ImmutableOopMap* get_oop_map() const { return self().get_oop_map(); };

  void set_codeblob(address pc) {
    if (_cb_imd == NULL && !_is_interpreted) { // compute lazily
      _cb_imd = ContinuationCodeBlobLookup::find_blob(_pc);
      assert(_cb_imd != NULL, "must be valid");
    }
  }

protected:
  HFrameBase() : _sp(-1), _ref_sp(-1), _pc(NULL), _is_interpreted(true), _cb_imd(NULL), _oop_map(NULL) {}

  HFrameBase(const HFrameBase& hf) : _sp(hf._sp), _ref_sp(hf._ref_sp), _pc(hf._pc),
                                     _is_interpreted(hf._is_interpreted), _cb_imd(hf._cb_imd), _oop_map(hf._oop_map) {}

  HFrameBase(int sp, int ref_sp, address pc, void* cb_md, bool is_interpreted)
    : _sp(sp), _ref_sp(ref_sp), _pc(pc),
      _is_interpreted(is_interpreted), _cb_imd((intptr_t*)cb_md), _oop_map(NULL) {}

  HFrameBase(int sp, int ref_sp, address pc, const ContMirror& cont)
    : _sp(sp), _ref_sp(ref_sp), _pc(pc),
      _is_interpreted(Interpreter::contains(pc)), _cb_imd(NULL), _oop_map(NULL) {
      set_codeblob(_pc);
    }

  static address deopt_original_pc(const ContMirror& cont, address pc, CodeBlob* cb, int sp);

public:
  inline bool operator==(const HFrameBase& other) const;
  bool is_empty() const { return _pc == NULL; }

  inline int       sp()     const { return _sp; }
  inline address   pc()     const { return _pc; }
  inline int       ref_sp() const { return _ref_sp; }

  inline void set_sp(int sp) { _sp = sp; }

  inline CodeBlob* cb()     const { assert (!Interpreter::contains(pc()), ""); return (CodeBlob*)_cb_imd; }
  void set_cb(CodeBlob* cb) {
    assert (!_is_interpreted, "");
    if (_cb_imd == NULL) _cb_imd = cb;
    assert (cb == slow_get_cb(*this), "");
    assert (_cb_imd == cb, "");
    assert (((CodeBlob*)_cb_imd)->contains(_pc), "");
  }
  inline bool is_interpreted_frame() const { return _is_interpreted; } // due to partial copy below, this may lie in mode_fast

  template<op_mode mode>
  void copy_partial(const SelfPD& other) {
    _sp = other._sp;
    _ref_sp = other._ref_sp;
    _pc = other._pc;
    if (mode != mode_fast) {
      _is_interpreted = other._is_interpreted;
    }
    self().copy_partial_pd(other);
  }

  inline void set_pc(address pc) { _pc = pc; }
  inline void set_ref_sp(int ref_sp) { _ref_sp = ref_sp; }

  template<typename FKind> address return_pc() const { return *self().template return_pc_address<FKind>(); }

  const CodeBlob* get_cb() const { return self().get_cb(); }

  const ImmutableOopMap* oop_map() const {
    if (_oop_map == NULL) {
      _oop_map = get_oop_map();
    }
    return _oop_map;
  }

  template<typename FKind> int frame_top_index() const;
  template<typename FKind> int frame_bottom_index() const { return self().template frame_bottom_index<FKind>(); };

  address real_pc(const ContMirror& cont) const;
  void patch_pc(address pc, const ContMirror& cont) const;
  template<typename FKind> inline void patch_return_pc(address value); // only for interpreted frames

  int compiled_frame_size() const;
  int compiled_frame_num_oops() const;
  int compiled_frame_stack_argsize() const;

  DEBUG_ONLY(int interpreted_frame_top_index() const { return self().interpreted_frame_top_index(); })
  int interpreted_frame_num_monitors() const         { return self().interpreted_frame_num_monitors(); }
  int interpreted_frame_num_oops(const InterpreterOopMap& mask) const;
  int interpreted_frame_size() const;
  void interpreted_frame_oop_map(InterpreterOopMap* mask) const { self().interpreted_frame_oop_map(mask); }

  template<typename FKind, op_mode mode> SelfPD sender(const ContMirror& cont, int num_oops) const {
    assert (mode != mode_fast || !FKind::interpreted, "");
    return self().template sender<FKind, mode>(cont, num_oops);
  }
  template<typename FKind, op_mode mode> SelfPD sender(const ContMirror& cont, const InterpreterOopMap* mask, int extra_oops = 0) const;
  template<op_mode mode /* = mode_slow*/> SelfPD sender(const ContMirror& cont) const;

  template<typename FKind> bool is_bottom(const ContMirror& cont) const;

  address interpreter_frame_bcp() const                             { return self().interpreter_frame_bcp(); }
  intptr_t* interpreter_frame_local_at(int index) const             { return self().interpreter_frame_local_at(index); }
  intptr_t* interpreter_frame_expression_stack_at(int offset) const { return self().interpreter_frame_expression_stack_at(offset); }

  template<typename FKind> Method* method() const;

  inline frame to_frame(ContMirror& cont) const;

  void print_on(const ContMirror& cont, outputStream* st) const { self().print_on(cont, st); }
  void print_on(outputStream* st) const { self().print_on(st); };
  void print(const ContMirror& cont) const { print_on(cont, tty); }
  void print() const { print_on(tty); }
};

// defines hframe
#include CPU_HEADER(hframe)

template<typename Self>
template <typename FrameT>
bool FrameCommon<Self>::is_instance(const FrameT& f) {
  return (Self::interpreted == f.is_interpreted_frame()) && (Self::stub == (!Self::interpreted && is_stub(slow_get_cb(f))));
}

template <typename FrameT>
bool NonInterpretedUnknown::is_instance(const FrameT& f) {
  return (interpreted == f.is_interpreted_frame());
}

// Mirrors the Java continuation objects.
// This object is created when we begin a freeze/thaw operation for a continuation, and is destroyed when the operation completes.
// Contents are read from the Java object at the entry points of this module, and written back at exits or at intermediate calls into Java.
class ContMirror {
private:
  JavaThread* const _thread;
  oop _cont;
  intptr_t* _entrySP;
  intptr_t* _entryFP;
  address _entryPC;

  int  _sp;
  intptr_t _fp;
  address _pc;

  typeArrayOop _stack;
  int _stack_length;
  ElemType* _hstack;

  size_t _max_size;

  int _ref_sp;
  objArrayOop _ref_stack;

  unsigned char _flags;

  short _num_interpreted_frames;
  short _num_frames;

  // Profiling data for the JFR event
  short _e_num_interpreted_frames;
  short _e_num_frames;
  short _e_num_refs;
  short _e_size;

private:
  ElemType* stack() const { return _hstack; }

  template <typename ConfigT> bool allocate_stacks_in_native(int size, int oops, bool needs_stack, bool needs_refstack);
  void allocate_stacks_in_java(int size, int oops, int frames);
  static int fix_decreasing_index(int index, int old_length, int new_length);
  inline void post_safepoint(Handle conth);
  int ensure_capacity(int old, int min);
  bool allocate_stack(int size);
  typeArrayOop allocate_stack_array(size_t elements);
  bool grow_stack(int new_size);
  static void copy_primitive_array(typeArrayOop old_array, int old_start, typeArrayOop new_array, int new_start, int count);
  template <typename ConfigT> bool allocate_ref_stack(int nr_oops);
  template <typename ConfigT> objArrayOop allocate_refstack_array(size_t nr_oops);
  template <typename ConfigT> objArrayOop allocate_keepalive_array(size_t nr_oops);
  template <typename ConfigT> bool grow_ref_stack(int nr_oops);
  template <typename ConfigT> void copy_ref_array(objArrayOop old_array, int old_start, objArrayOop new_array, int new_start, int count);
  template <typename ConfigT> void zero_ref_array(objArrayOop new_array, int new_length, int min_length);
  oop raw_allocate(Klass* klass, size_t words, size_t elements, bool zero);

public:
  // TODO R: get rid of these:
  static inline int to_index(int x) { return x >> LogBytesPerElement; }
  static inline int to_bytes(int x) { return x << LogBytesPerElement; }
  static inline int to_index(const void* base, const void* ptr) { return to_index((const char*)ptr - (const char*)base); }

private:
  ContMirror(const ContMirror& cont); // no copy constructor

  void read();

public:
  ContMirror(JavaThread* thread, oop cont);
  ContMirror(const RegisterMap* map);

  intptr_t hash() {
    #ifndef PRODUCT
      return Thread::current()->is_Java_thread() ? _cont->identity_hash() : -1;
    #else
      return 0;
    #endif
  }

  void write();

  oop mirror() { return _cont; }
  oop parent() { return java_lang_Continuation::parent(_cont); }
  void cleanup();

  intptr_t* entrySP() const { return _entrySP; }
  intptr_t* entryFP() const { return _entryFP; }
  address   entryPC() const { return _entryPC; }

  bool is_mounted() { return _entryPC != NULL; }

  void set_entrySP(intptr_t* sp) { _entrySP = sp; }
  void set_entryFP(intptr_t* fp) { _entryFP = fp; }
  void set_entryPC(address pc)   { _entryPC = pc; log_develop_trace(jvmcont)("set_entryPC " INTPTR_FORMAT, p2i(pc)); }

  int sp() const           { return _sp; }
  intptr_t fp() const      { return _fp; }
  address pc() const       { return _pc; }

  void set_sp(int sp)      { _sp = sp; }
  void set_fp(intptr_t fp) { _fp = fp; }
  void clear_pc() { _pc = NULL; set_flag(FLAG_LAST_FRAME_INTERPRETED, false); }
  void set_pc(address pc, bool interpreted) { _pc = pc; set_flag(FLAG_LAST_FRAME_INTERPRETED, interpreted);
                                              assert (interpreted == Interpreter::contains(pc), ""); }

  bool is_flag(unsigned char flag) { return (_flags & flag) != 0; }
  void set_flag(unsigned char flag, bool v) { _flags = (v ? _flags |= flag : _flags &= ~flag); }

  int stack_length() const { return _stack_length; }

  JavaThread* thread() const { return _thread; }

  template <typename ConfigT> inline void allocate_stacks(int size, int oops, int frames);

  template <typename ConfigT>
  void make_keepalive(CompiledMethodKeepalive<ConfigT>* keepalive);

  inline bool in_hstack(void* p) { return (_hstack != NULL && p >= _hstack && p < (_hstack + _stack_length)); }

  bool valid_stack_index(int idx) const { return idx >= 0 && idx < _stack_length; }

  void copy_to_stack(void* from, void* to, int size);
  void copy_from_stack(void* from, void* to, int size);

  objArrayOop refStack(int size);
  objArrayOop refStack() { return _ref_stack; }
  int refSP() { return _ref_sp; }
  void set_refSP(int refSP) { log_develop_trace(jvmcont)("set_refSP: %d", refSP); _ref_sp = refSP; }

  inline int stack_index(void* p) const;
  inline intptr_t* stack_address(int i) const;

  static inline void relativize(intptr_t* const fp, intptr_t* const hfp, int offset);
  static inline void derelativize(intptr_t* const fp, int offset);

  bool is_in_stack(void* p) const;
  bool is_in_ref_stack(void* p) const;

  bool is_empty();

  template<op_mode mode> const hframe last_frame();
  template<op_mode mode> void set_last_frame(const hframe& f);
  inline void set_last_frame_pd(const hframe& f);
  inline void set_empty();

  hframe from_frame(const frame& f);

  template <typename ConfigT>
  inline int add_oop(oop obj, int index);

  inline oop obj_at(int i);
  int num_oops();
  void null_ref_stack(int start, int num);

  inline size_t max_size() { return _max_size; }
  inline void add_size(size_t s) { log_develop_trace(jvmcont)("add max_size: " SIZE_FORMAT " s: " SIZE_FORMAT, _max_size + s, s);
                                   _max_size += s; }
  inline void sub_size(size_t s) { log_develop_trace(jvmcont)("sub max_size: " SIZE_FORMAT " s: " SIZE_FORMAT, _max_size - s, s);
                                   assert(s <= _max_size, "s: " SIZE_FORMAT " max_size: " SIZE_FORMAT, s, _max_size);
                                   _max_size -= s; }
  inline short num_interpreted_frames() { return _num_interpreted_frames; }
  inline void inc_num_interpreted_frames() { _num_interpreted_frames++; _e_num_interpreted_frames++; }
  inline void dec_num_interpreted_frames() { _num_interpreted_frames--; _e_num_interpreted_frames++; }

  inline short num_frames() { return _num_frames; }
  inline void add_num_frames(int n) { _num_frames += n; _e_num_frames += n; }
  inline void inc_num_frames() { _num_frames++; _e_num_frames++; }
  inline void dec_num_frames() { _num_frames--; _e_num_frames++; }

  void print_hframes(outputStream* st = tty);

  inline void e_add_refs(int num) { _e_num_refs += num; }
  template<typename Event> void post_jfr_event(Event* e);
};

template<typename SelfPD>
inline bool HFrameBase<SelfPD>::operator==(const HFrameBase& other) const {
  return _sp == other._sp && _pc == other._pc;
}

template<typename SelfPD>
address HFrameBase<SelfPD>::deopt_original_pc(const ContMirror& cont, address pc, CodeBlob* cb, int sp) {
  // TODO DEOPT: unnecessary in the long term solution of unroll on freeze

  assert (cb != NULL && cb->is_compiled(), "");
  CompiledMethod* cm = cb->as_compiled_method();
  if (cm->is_deopt_pc(pc)) {
    log_develop_trace(jvmcont)("hframe::deopt_original_pc deoptimized frame");
    pc = *(address*)((address)cont.stack_address(sp) + cm->orig_pc_offset());
    assert(pc != NULL, "");
    assert(cm->insts_contains_inclusive(pc), "original PC must be in the main code section of the compiled method (or must be immediately following it)");
    assert(!cm->is_deopt_pc(pc), "");
    // _deopt_state = is_deoptimized;
  }

  return pc;
}

template<typename SelfPD>
address HFrameBase<SelfPD>::real_pc(const ContMirror& cont) const {
  address* pc_addr = (address*)cont.stack_address(self().pc_index());
  return *pc_addr;
}

template<typename SelfPD>
template<typename FKind>
inline void HFrameBase<SelfPD>::patch_return_pc(address value) {
  *(self().template return_pc_address<FKind>()) = value;
}

template<typename SelfPD>
void HFrameBase<SelfPD>::patch_pc(address pc, const ContMirror& cont) const {
  address* pc_addr = (address*)cont.stack_address(self().pc_index());
  // tty->print_cr(">>>> patching %p with %p", pc_addr, pc);
  *pc_addr = pc;
}

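// A frame is the bottom-most frame on the h-stack if its bottom index (extended, for
// compiled frames, by the stack-passed arguments it shares with its caller) reaches
// the end of the stack array.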
template<typename SelfPD>
template<typename FKind>
bool HFrameBase<SelfPD>::is_bottom(const ContMirror& cont) const {
  return frame_bottom_index<FKind>()
    + ((FKind::interpreted || FKind::stub) ? 0 : cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size / elementSizeInBytes)
    >= cont.stack_length();
}

template<typename SelfPD>
int HFrameBase<SelfPD>::interpreted_frame_num_oops(const InterpreterOopMap& mask) const {
  assert (_is_interpreted, "");
  // we calculate on relativized metadata; all monitors must be NULL on hstack, but as f.oops_do walks them, we count them
  return   mask.num_oops()
         + 1 // for the mirror
         + interpreted_frame_num_monitors();
}

template<typename SelfPD>
int HFrameBase<SelfPD>::interpreted_frame_size() const {
  assert (_is_interpreted, "");
  return (frame_bottom_index<Interpreted>() - frame_top_index<Interpreted>()) * elementSizeInBytes;
}

template<typename SelfPD>
inline int HFrameBase<SelfPD>::compiled_frame_num_oops() const {
  assert (!_is_interpreted, "");
  return oop_map()->num_oops();
}

template<typename SelfPD>
int HFrameBase<SelfPD>::compiled_frame_size() const {
  return NonInterpretedUnknown::size(*this);
}

template<typename SelfPD>
int HFrameBase<SelfPD>::compiled_frame_stack_argsize() const {
  return NonInterpretedUnknown::stack_argsize(*this);
}

template<typename SelfPD>
template <typename FKind>
int HFrameBase<SelfPD>::frame_top_index() const {
  assert (!FKind::interpreted || interpreted_frame_top_index() >= _sp, "");
  assert (FKind::is_instance(*(hframe*)this), "");

  return _sp;
}

#ifdef CONT_DOUBLE_NOP
// TODO R remove after PD separation
template<op_mode mode>
static inline CachedCompiledMetadata cached_metadata(const hframe& hf);
#endif

template<typename SelfPD>
template<typename FKind, op_mode mode>
SelfPD HFrameBase<SelfPD>::sender(const ContMirror& cont, const InterpreterOopMap* mask, int extra_oops) const {
  assert (mode != mode_fast || !FKind::interpreted, "");
  int num_oops;
#ifdef CONT_DOUBLE_NOP
  CachedCompiledMetadata md;
#endif
  if (FKind::interpreted) {
    num_oops = interpreted_frame_num_oops(*mask);
  } else
#ifdef CONT_DOUBLE_NOP
  if (mode == mode_fast && LIKELY(!(md = cached_metadata<mode>(self())).empty()))
    num_oops = md.num_oops();
  else {
    get_cb();
#endif
    num_oops = compiled_frame_num_oops();
#ifdef CONT_DOUBLE_NOP
  }
#endif

  return sender<FKind, mode>(cont, extra_oops + num_oops);
}

template<typename SelfPD>
template<op_mode mode>
SelfPD HFrameBase<SelfPD>::sender(const ContMirror& cont) const {
  if (_is_interpreted) {
    InterpreterOopMap mask;
    interpreted_frame_oop_map(&mask);
    return sender<Interpreted, mode>(cont, &mask);
  } else {
    return sender<NonInterpretedUnknown, mode>(cont, (InterpreterOopMap*)NULL);
  }
}

template<>
template<> Method* HFrameBase<hframe>::method<Interpreted>() const; // pd

template<typename SelfPD>
template<typename FKind>
Method* HFrameBase<SelfPD>::method() const {
  assert (!is_interpreted_frame(), "");
  assert (!FKind::interpreted, "");

  return ((CompiledMethod*)cb())->method();
}

template<typename SelfPD>
inline frame HFrameBase<SelfPD>::to_frame(ContMirror& cont) const {
  bool deopt = false;
  address pc = _pc;
  if (!is_interpreted_frame()) {
    CompiledMethod* cm = cb()->as_compiled_method_or_null();
    if (cm != NULL && cm->is_deopt_pc(pc)) {
      intptr_t* hsp = cont.stack_address(sp());
      address orig_pc = *(address*) ((address)hsp + cm->orig_pc_offset());
      assert (orig_pc != pc, "");
      assert (orig_pc != NULL, "");

      pc = orig_pc;
      deopt = true;
    }
  }

  // tty->print_cr("-- to_frame:");
  // print_on(cont, tty);
  return self().to_frame(cont, pc, deopt);
}

ContMirror::ContMirror(JavaThread* thread, oop cont)
 : _thread(thread), _cont(cont),
   _e_num_interpreted_frames(0), _e_num_frames(0), _e_num_refs(0), _e_size(0) {
  assert(_cont != NULL && oopDesc::is_oop_or_null(_cont), "Invalid cont: " INTPTR_FORMAT, p2i((void*)_cont));

  read();
}

ContMirror::ContMirror(const RegisterMap* map)
 : _thread(map->thread()), _cont(map->cont()),
   _e_num_interpreted_frames(0), _e_num_frames(0), _e_num_refs(0), _e_size(0) {
  assert(_cont != NULL && oopDesc::is_oop_or_null(_cont), "Invalid cont: " INTPTR_FORMAT, p2i((void*)_cont));

  read();
}

void ContMirror::read() {
  _entrySP = java_lang_Continuation::entrySP(_cont);
  _entryFP = java_lang_Continuation::entryFP(_cont);
  _entryPC = java_lang_Continuation::entryPC(_cont);

  _sp = java_lang_Continuation::sp(_cont);
  _fp = (intptr_t)java_lang_Continuation::fp(_cont);
  _pc = (address)java_lang_Continuation::pc(_cont);

  _stack = java_lang_Continuation::stack(_cont);
  if (_stack != NULL) {
    _stack_length = _stack->length();
    _hstack = (ElemType*)_stack->base(basicElementType);
  } else {
    _stack_length = 0;
    _hstack = NULL;
  }
  _max_size = java_lang_Continuation::maxSize(_cont);

  _ref_stack = java_lang_Continuation::refStack(_cont);
  _ref_sp = java_lang_Continuation::refSP(_cont);

  _flags = java_lang_Continuation::flags(_cont);

  _num_frames = java_lang_Continuation::numFrames(_cont);
  _num_interpreted_frames = java_lang_Continuation::numInterpretedFrames(_cont);

  if (log_develop_is_enabled(Trace, jvmcont)) {
    log_develop_trace(jvmcont)("Reading continuation object:");
    log_develop_trace(jvmcont)("\tentrySP: " INTPTR_FORMAT " entryFP: " INTPTR_FORMAT " entryPC: " INTPTR_FORMAT, p2i(_entrySP), p2i(_entryFP), p2i(_entryPC));
    log_develop_trace(jvmcont)("\tsp: %d fp: %ld 0x%lx pc: " INTPTR_FORMAT, _sp, _fp, _fp, p2i(_pc));
    log_develop_trace(jvmcont)("\tstack: " INTPTR_FORMAT " hstack: " INTPTR_FORMAT ", stack_length: %d max_size: " SIZE_FORMAT, p2i((oopDesc*)_stack), p2i(_hstack), _stack_length, _max_size);
    log_develop_trace(jvmcont)("\tref_stack: " INTPTR_FORMAT " ref_sp: %d", p2i((oopDesc*)_ref_stack), _ref_sp);
    log_develop_trace(jvmcont)("\tflags: %d", _flags);
    log_develop_trace(jvmcont)("\tnum_frames: %d", _num_frames);
    log_develop_trace(jvmcont)("\tnum_interpreted_frames: %d", _num_interpreted_frames);
  }
}

void ContMirror::write() {
  if (log_develop_is_enabled(Trace, jvmcont)) {
    log_develop_trace(jvmcont)("Writing continuation object:");
    log_develop_trace(jvmcont)("\tsp: %d fp: %ld 0x%lx pc: " INTPTR_FORMAT, _sp, _fp, _fp, p2i(_pc));
    log_develop_trace(jvmcont)("\tentrySP: " INTPTR_FORMAT " entryFP: " INTPTR_FORMAT " entryPC: " INTPTR_FORMAT, p2i(_entrySP), p2i(_entryFP), p2i(_entryPC));
    log_develop_trace(jvmcont)("\tmax_size: " SIZE_FORMAT, _max_size);
    log_develop_trace(jvmcont)("\tref_sp: %d", _ref_sp);
    log_develop_trace(jvmcont)("\tflags: %d", _flags);
    log_develop_trace(jvmcont)("\tnum_frames: %d", _num_frames);
    log_develop_trace(jvmcont)("\tnum_interpreted_frames: %d", _num_interpreted_frames);
    log_develop_trace(jvmcont)("\tend write");
  }

  java_lang_Continuation::set_sp(_cont, _sp);
  java_lang_Continuation::set_fp(_cont, _fp);
  java_lang_Continuation::set_pc(_cont, _pc);
  java_lang_Continuation::set_refSP(_cont, _ref_sp);

  java_lang_Continuation::set_entrySP(_cont, _entrySP);
  java_lang_Continuation::set_entryFP(_cont, _entryFP);
  java_lang_Continuation::set_entryPC(_cont, _entryPC);

  java_lang_Continuation::set_maxSize(_cont, (jint)_max_size);
  java_lang_Continuation::set_flags(_cont, _flags);

  java_lang_Continuation::set_numFrames(_cont, _num_frames);
  java_lang_Continuation::set_numInterpretedFrames(_cont, _num_interpreted_frames);
}

void ContMirror::cleanup() {
  // cleanup nmethods
  /*
  for (hframe hf = last_frame<mode_slow>(); !hf.is_empty(); hf = hf.sender<mode_slow>(*this)) {
    if (!hf.is_interpreted_frame()) {
      hf.cb()->as_compiled_method()->dec_on_continuation_stack();
    }
  }
  */
}

void ContMirror::null_ref_stack(int start, int num) {
  if (java_lang_Continuation::is_reset(_cont)) return;

  // log_develop_info(jvmcont)("clearing %d at %d", num, start);
  for (int i = 0; i < num; i++)
    _ref_stack->obj_at_put(start + i, NULL);
}

bool ContMirror::is_empty() {
  assert ((_pc == NULL) == (_sp < 0 || _sp >= _stack->length()), "");
  return _pc == NULL;
}

template<op_mode mode>
inline void ContMirror::set_last_frame(const hframe& f) {
  assert (mode != mode_fast || !Interpreter::contains(f.pc()), "");
  assert (mode == mode_fast || f.is_interpreted_frame() == Interpreter::contains(f.pc()), "");
  set_pc(f.pc(), mode == mode_fast ? false : f.is_interpreted_frame());
  set_sp(f.sp());
  set_last_frame_pd(f);
  set_refSP(f.ref_sp());

  assert (!is_empty(), ""); // if (is_empty()) set_empty();

  if (log_develop_is_enabled(Trace, jvmcont)) {
    log_develop_trace(jvmcont)("set_last_frame cont sp: %d fp: 0x%lx pc: " INTPTR_FORMAT " interpreted: %d flag: %d", sp(), fp(), p2i(pc()), f.is_interpreted_frame(), is_flag(FLAG_LAST_FRAME_INTERPRETED));
    f.print_on(tty);
  }
}

inline void ContMirror::set_empty() {
  if (_stack_length > 0) {
    set_sp(_stack_length);
    set_refSP(_ref_stack->length());
  }
  set_fp(0);
  clear_pc();
}

bool ContMirror::is_in_stack(void* p) const {
  return p >= (stack() + _sp) && p < (stack() + stack_length());
}

bool ContMirror::is_in_ref_stack(void* p) const {
  void* base = _ref_stack->base();
  int length = _ref_stack->length();

  return p >= (UseCompressedOops ? (address)&((narrowOop*)base)[_ref_sp]
                                 : (address)&(      (oop*)base)[_ref_sp]) &&
         p <= (UseCompressedOops ? (address)&((narrowOop*)base)[length-1]
                                 : (address)&(      (oop*)base)[length-1]);

  // _ref_stack->obj_at_addr<narrowOop>(_ref_sp) : (address)_ref_stack->obj_at_addr<oop>(_ref_sp));
}

inline int ContMirror::stack_index(void* p) const {
  int i = to_index(stack(), p);
  assert (i >= 0 && i < stack_length(), "i: %d length: %d", i, stack_length());
  return i;
}

inline intptr_t* ContMirror::stack_address(int i) const {
  assert (i >= 0 && i < stack_length(), "i: %d length: %d", i, stack_length());
  return (intptr_t*)&stack()[i];
}

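// relativize converts the absolute address stored at hfp + offset (a pointer into the
// vertical frame at fp, e.g. an interpreted frame's metadata slot) into an element-index
// offset from fp, so the h-stack copy stays valid when relocated; derelativize is the
// inverse, applied in place to the frame being thawed.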
inline void ContMirror::relativize(intptr_t* const fp, intptr_t* const hfp, int offset) {
  intptr_t* addr = (hfp + offset);
  intptr_t value = to_index((address)*(hfp + offset) - (address)fp);
  *addr = value;
}

inline void ContMirror::derelativize(intptr_t* const fp, int offset) {
  *(fp + offset) = (intptr_t)((address)fp + to_bytes(*(intptr_t*)(fp + offset)));
}

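// Copies size bytes from the vertical (thread) stack into the h-stack; 'to' must lie
// within the h-stack, below _sp (see the asserts).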
void ContMirror::copy_to_stack(void* from, void* to, int size) {
  log_develop_trace(jvmcont)("Copying from v: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d bytes)", p2i(from), p2i((address)from + size), size);
  log_develop_trace(jvmcont)("Copying to h: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d - %d)", p2i(to), p2i((address)to + size), to_index(_hstack, to), to_index(_hstack, (address)to + size));

  assert (size > 0, "size: %d", size);
  assert (stack_index(to) >= 0, "");
  assert (to_index(_hstack, (address)to + size) <= _sp, "");

  // TODO PERF non-temporal store
  PERFTEST_ONLY(if (PERFTEST_LEVEL >= 25))
    memcpy(to, from, size); // Copy::conjoint_memory_atomic(from, to, size); // Copy::disjoint_words((HeapWord*)from, (HeapWord*)to, size/wordSize);

  _e_size += size;
}

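// Copies size bytes from the h-stack back to the vertical stack; 'from' must lie
// within the h-stack.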
void ContMirror::copy_from_stack(void* from, void* to, int size) {
  log_develop_trace(jvmcont)("Copying from h: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d - %d)", p2i(from), p2i((address)from + size), to_index(stack(), from), to_index(stack(), (address)from + size));
  log_develop_trace(jvmcont)("Copying to v: " INTPTR_FORMAT " - " INTPTR_FORMAT " (%d bytes)", p2i(to), p2i((address)to + size), size);

  assert (size > 0, "size: %d", size);
  assert (stack_index(from) >= 0, "");
  assert (to_index(stack(), (address)from + size) <= stack_length(), "index: %d length: %d", to_index(stack(), (address)from + size), stack_length());

  // TODO PERF non-temporal load
  PERFTEST_ONLY(if (PERFTEST_LEVEL >= 125))
    memcpy(to, from, size); // Copy::conjoint_memory_atomic(from, to, size);

  _e_size += size;
}

template <typename ConfigT>
inline int ContMirror::add_oop(oop obj, int index) {
  // assert (_ref_stack != NULL, "");
  // assert (index >= 0 && index < _ref_stack->length(), "index: %d length: %d", index, _ref_stack->length());
  assert (index < _ref_sp, "");

  log_develop_trace(jvmcont)("i: %d ", index);
  ConfigT::OopWriterT::obj_at_put(_ref_stack, index, obj);
  return index;
}

inline oop ContMirror::obj_at(int i) {
  assert (_ref_stack != NULL, "");
  assert (0 <= i && i < _ref_stack->length(), "i: %d length: %d", i, _ref_stack->length());
  // assert (_ref_sp <= i, "i: %d _ref_sp: %d length: %d", i, _ref_sp, _ref_stack->length()); -- in Thaw, we set_last_frame before reading the objects during the recursion return trip

  return _ref_stack->obj_at(i);
}

int ContMirror::num_oops() {
  return _ref_stack == NULL ? 0 : _ref_stack->length() - _ref_sp;
}

template<typename Event> void ContMirror::post_jfr_event(Event* e) {
  if (e->should_commit()) {
    log_develop_trace(jvmcont)("JFR event: frames: %d iframes: %d size: %d refs: %d", _e_num_frames, _e_num_interpreted_frames, _e_size, _e_num_refs);
    e->set_contClass(_cont->klass());
    e->set_numFrames(_e_num_frames);
    e->set_numIFrames(_e_num_interpreted_frames);
    e->set_size(_e_size);
    e->set_numRefs(_e_num_refs);
    e->commit();
  }
}

//////////////////////////// frame functions ///////////////

class CachedCompiledMetadata; // defined in PD
struct FpOopInfo;

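// Entry points of the specialized stubs returned by ContinuationHelper::freeze_stub and
// ContinuationHelper::thaw_stub below; they copy a compiled frame's oops during freeze
// and thaw without a full OopMap walk.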
typedef int (*FreezeFnT)(address, address, address, address, int, FpOopInfo*);
typedef int (*ThawFnT)(address /* dst */, address /* objArray */, address /* map */);

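// Mostly platform-dependent helpers for converting between frames, FrameInfo structs
// and register maps, shared by the freeze and thaw code below.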
class ContinuationHelper {
public:
#ifdef CONT_DOUBLE_NOP
  static inline CachedCompiledMetadata cached_metadata(address pc);
  template<op_mode mode, typename FrameT> static inline CachedCompiledMetadata cached_metadata(const FrameT& f);
  template<typename FrameT> static void patch_freeze_stub(const FrameT& f, address freeze_stub);
#endif

  template<op_mode mode, typename FrameT> static FreezeFnT freeze_stub(const FrameT& f);
  template<op_mode mode, typename FrameT> static ThawFnT thaw_stub(const FrameT& f);

  template<typename FKind, typename RegisterMapT> static inline void update_register_map(RegisterMapT* map, const frame& f);
  template<typename RegisterMapT> static inline void update_register_map_with_callee(RegisterMapT* map, const frame& f);
  template<typename RegisterMapT> static inline void update_register_map(RegisterMapT* map, hframe::callee_info callee_info);
  static void update_register_map(RegisterMap* map, const hframe& sender, const ContMirror& cont);
  static void update_register_map_from_last_vstack_frame(RegisterMap* map);

  static inline frame frame_with(frame& f, intptr_t* sp, address pc, intptr_t* fp);
  static inline frame last_frame(JavaThread* thread);
  static inline void to_frame_info(const frame& f, const frame& callee, FrameInfo* fi);
  template<typename FKind> static inline void to_frame_info_pd(const frame& f, const frame& callee, FrameInfo* fi);
  static inline void to_frame_info_pd(const frame& f, FrameInfo* fi);
  template<bool indirect>
  static inline frame to_frame(FrameInfo* fi);
  static inline void set_last_vstack_frame(RegisterMap* map, const frame& callee);
  static inline void clear_last_vstack_frame(RegisterMap* map);
};

#ifdef ASSERT
  static char* method_name(Method* m);
  static inline Method* top_java_frame_method(const frame& f);
  static inline Method* bottom_java_frame_method(const frame& f);
  static char* top_java_frame_name(const frame& f);
  static char* bottom_java_frame_name(const frame& f);
  static bool assert_top_java_frame_name(const frame& f, const char* name);
  static bool assert_bottom_java_frame_name(const frame& f, const char* name);
  static inline bool is_deopt_return(address pc, const frame& sender);

  template <typename FrameT> static CodeBlob* slow_get_cb(const FrameT& f);
  template <typename FrameT> static const ImmutableOopMap* slow_get_oopmap(const FrameT& f);
  template <typename FrameT> static int slow_size(const FrameT& f);
  template <typename FrameT> static address slow_return_pc(const FrameT& f);
  template <typename FrameT> static int slow_stack_argsize(const FrameT& f);
  template <typename FrameT> static int slow_num_oops(const FrameT& f);
#endif

inline Method* Frame::frame_method(const frame& f) {
  Method* m = NULL;
  if (f.is_interpreted_frame())
    m = f.interpreter_frame_method();
  else if (f.is_compiled_frame())
    m = ((CompiledMethod*)f.cb())->method();
  return m;
}

address Frame::return_pc(const frame& f) {
  return *return_pc_address(f);
}

// static void patch_interpreted_bci(frame& f, int bci) {
//   f.interpreter_frame_set_bcp(f.interpreter_frame_method()->bcp_from(bci));
// }

address Interpreted::return_pc(const frame& f) {
  return *return_pc_address(f);
}

void Interpreted::patch_return_pc(frame& f, address pc) {
  *return_pc_address(f) = pc;
}

void Interpreted::oop_map(const frame& f, InterpreterOopMap* mask) {
  assert (mask != NULL, "");
  Method* m = f.interpreter_frame_method();
  int   bci = f.interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

int Interpreted::num_oops(const frame& f, InterpreterOopMap* mask) {
  return   mask->num_oops()
         + 1 // for the mirror oop
         + ((intptr_t*)f.interpreter_frame_monitor_begin() - (intptr_t*)f.interpreter_frame_monitor_end())/BasicObjectLock::size(); // all locks must be NULL when freezing, but f.oops_do walks them, so we count them
}

int Interpreted::size(const frame& f, InterpreterOopMap* mask) {
  return (Interpreted::frame_bottom(f) - Interpreted::frame_top(f, mask)) * wordSize;
}

inline int Interpreted::expression_stack_size(const frame& f, InterpreterOopMap* mask) {
  int size = mask->expression_stack_size();
  assert (size <= f.interpreter_frame_expression_stack_size(), "size1: %d size2: %d", size, f.interpreter_frame_expression_stack_size());
  return size;
}

bool Interpreted::is_owning_locks(const frame& f) {
  assert (f.interpreter_frame_monitor_end() <= f.interpreter_frame_monitor_begin(), "must be");
  if (f.interpreter_frame_monitor_end() == f.interpreter_frame_monitor_begin())
    return false;

  for (BasicObjectLock* current = f.previous_monitor_in_interpreter_frame(f.interpreter_frame_monitor_begin());
        current >= f.interpreter_frame_monitor_end();
        current = f.previous_monitor_in_interpreter_frame(current)) {

      oop obj = current->obj();
      if (obj != NULL) {
        return true;
      }
  }
  return false;
}

template<typename Self>
inline intptr_t* NonInterpreted<Self>::frame_top(const frame& f) { // inclusive; this will be copied with the frame
  return f.unextended_sp();
}

template<typename Self>
inline intptr_t* NonInterpreted<Self>::frame_bottom(const frame& f) { // exclusive; this will not be copied with the frame
  return f.unextended_sp() + f.cb()->frame_size();
}

#ifdef ASSERT
  intptr_t* Frame::frame_top(const frame& f) {
    if (f.is_interpreted_frame()) {
      InterpreterOopMap mask;
      Interpreted::oop_map(f, &mask);
      return Interpreted::frame_top(f, &mask);
    } else {
      return Compiled::frame_top(f);
    }
  }
#endif

template<typename Self>
template<typename FrameT>
inline int NonInterpreted<Self>::size(const FrameT& f) {
  assert (!f.is_interpreted_frame() && Self::is_instance(f), "");
  return f.cb()->frame_size() * wordSize;
}

template<typename Self>
template<typename FrameT>
inline int NonInterpreted<Self>::stack_argsize(const FrameT& f) {
  assert (f.cb()->is_compiled(), "");
  return f.cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size;
}

template<typename Self>
inline int NonInterpreted<Self>::num_oops(const frame& f) {
  assert (!f.is_interpreted_frame() && Self::is_instance(f), "");
  assert (f.oop_map() != NULL, "");
  return f.oop_map()->num_oops() + Self::extra_oops;
}

template<typename Self>
template<typename RegisterMapT>
bool NonInterpreted<Self>::is_owning_locks(JavaThread* thread, const RegisterMapT* map, const frame& f) {
  // if (!DetectLocksInCompiledFrames) return false;
  assert (!f.is_interpreted_frame() && Self::is_instance(f), "");

  CompiledMethod* cm = f.cb()->as_compiled_method();
  assert (!cm->is_compiled() || !cm->as_compiled_method()->is_native_method(), ""); // See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp

  if (!cm->has_monitors()) {
    return false;
  }

  ResourceMark rm;
  for (ScopeDesc* scope = cm->scope_desc_at(f.pc()); scope != NULL; scope = scope->sender()) {
    GrowableArray<MonitorValue*>* mons = scope->monitors();
    if (mons == NULL || mons->is_empty())
      continue;

    for (int index = (mons->length()-1); index >= 0; index--) { // see compiledVFrame::monitors()
      MonitorValue* mon = mons->at(index);
      if (mon->eliminated())
        continue; // TODO: are we fine with this or should we return true?
      ScopeValue* ov = mon->owner();
      StackValue* owner_sv = StackValue::create_stack_value(&f, map, ov); // it is an oop
      oop owner = owner_sv->get_obj()();
      if (owner != NULL) {
        return true;
      }
    }
  }
  return false;
}

////////////////////////////////////

void ContinuationHelper::to_frame_info(const frame& f, const frame& callee, FrameInfo* fi) {
  fi->sp = f.unextended_sp(); // java_lang_Continuation::entrySP(cont);
  fi->pc = Frame::real_pc(f); // Continuation.run may have been deoptimized
  // callee.is_interpreted_frame() ? ContinuationHelper::to_frame_info_pd<Interpreted>(f, callee, fi)
  //                               : ContinuationHelper::to_frame_info_pd<Compiled   >(f, callee, fi);
  CHOOSE2(callee.is_interpreted_frame(), ContinuationHelper::to_frame_info_pd, f, callee, fi);
}

void clear_frame_info(FrameInfo* fi) {
  fi->fp = NULL;
  fi->sp = NULL;
  fi->pc = NULL;
}

// works only in thaw
static inline bool is_entry_frame(const ContMirror& cont, const frame& f) {
  return f.sp() == cont.entrySP();
}

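// Counts Java frames, where a single compiled frame may represent several Java frames
// (one per inlined scope).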
static int num_java_frames(CompiledMethod* cm, address pc) {
  int count = 0;
  for (ScopeDesc* scope = cm->scope_desc_at(pc); scope != NULL; scope = scope->sender())
    count++;
  return count;
}

static int num_java_frames(const hframe& f) {
  return f.is_interpreted_frame() ? 1 : num_java_frames(f.cb()->as_compiled_method(), f.pc());
}

static int num_java_frames(ContMirror& cont) {
  ResourceMark rm; // used for scope traversal in num_java_frames(CompiledMethod*, address)
  int count = 0;
  for (hframe hf = cont.last_frame<mode_slow>(); !hf.is_empty(); hf = hf.sender<mode_slow>(cont))
    count += num_java_frames(hf);
  return count;
}

// static int num_java_frames(const frame& f) {
//   if (f.is_interpreted_frame())
//     return 1;
//   else if (f.is_compiled_frame())
//     return num_java_frames(f.cb()->as_compiled_method(), f.pc());
//   else
//     return 0;
// }

// static int num_java_frames(ContMirror& cont, frame f) {
//   int count = 0;
//   RegisterMap map(cont.thread(), false, false, false); // should first argument be true?
//   for (; f.real_fp() > cont.entrySP(); f = f.frame_sender<ContinuationCodeBlobLookup>(&map))
//     count += num_java_frames(f);
//   return count;
// }

static inline void clear_anchor(JavaThread* thread) {
  thread->frame_anchor()->clear();
}

#ifdef ASSERT
static void set_anchor(ContMirror& cont) {
  FrameInfo fi = { cont.entryPC(), cont.entryFP(), cont.entrySP() };
  set_anchor<false>(cont.thread(), &fi);
}
#endif

static oop get_continuation(JavaThread* thread) {
  assert (thread != NULL, "");
  return thread->last_continuation();
}

// static void set_continuation(JavaThread* thread, oop cont) {
//   java_lang_Thread::set_continuation(thread->threadObj(), cont);
// }

template<typename RegisterMapT>
class ContOopBase : public OopClosure, public DerivedOopClosure {
protected:
  ContMirror* const _cont;
  const frame* _fr;
  void* const _vsp;
  int _count;
#ifdef ASSERT
  RegisterMapT* _map;
#endif

public:
  int count() { return _count; }

protected:
  ContOopBase(ContMirror* cont, const frame* fr, RegisterMapT* map, void* vsp)
   : _cont(cont), _fr(fr), _vsp(vsp) {
    _count = 0;
  #ifdef ASSERT
    _map = map;
  #endif
  }

  inline int verify(void* p) {
    int offset = (address)p - (address)_vsp; // in thaw_oops we set the saved link to a local, so if offset is negative, it can be big

#ifdef ASSERT // this section adds substantial overhead
    VMReg reg;
    // The following is not true for the sender of the safepoint stub
    // assert(offset >= 0 || p == Frame::map_link_address(_map),
    //   "offset: %d reg: %s", offset, (reg = _map->find_register_spilled_here(p), reg != NULL ? reg->name() : "NONE")); // callee-saved register can only be rbp
    reg = _map->find_register_spilled_here(p); // expensive operation
    if (reg != NULL) log_develop_trace(jvmcont)("reg: %s", reg->name());
    log_develop_trace(jvmcont)("p: " INTPTR_FORMAT " offset: %d %s", p2i(p), offset, p == Frame::map_link_address(_map) ? "(link)" : "");
#endif

    return offset;
  }

  inline void process(void* p) {
    DEBUG_ONLY(verify(p);)
    _count++;
  }
};

///////////// FREEZE ///////

1368 enum freeze_result {
1369   freeze_ok = 0,
1370   freeze_pinned_cs = 1,
1371   freeze_pinned_native = 2,
1372   freeze_pinned_monitor = 3,
1373   freeze_exception = 4
1374 };
1375 
1376 typedef freeze_result (*FreezeContFnT)(JavaThread*, ContMirror&, FrameInfo*);
1377 
1378 static void freeze_compiled_frame_bp() {}
1379 static void thaw_compiled_frame_bp() {}
1380 
1381 static FreezeContFnT cont_freeze_fast = NULL;
1382 static FreezeContFnT cont_freeze_slow = NULL;
1383 static FreezeContFnT cont_freeze_preempt = NULL;
1384 
1385 template<op_mode mode>
1386 static freeze_result cont_freeze(JavaThread* thread, ContMirror& cont, FrameInfo* fi) {
1387   switch (mode) {
1388     case mode_fast:    return cont_freeze_fast   (thread, cont, fi);
1389     case mode_slow:    return cont_freeze_slow   (thread, cont, fi);
1390     case mode_preempt: return cont_freeze_preempt(thread, cont, fi);
1391     default:
1392       guarantee(false, "unreachable");
1393       return freeze_exception;
1394   }
1395 }
1396 
1397 class CountOops : public OopClosure {
1398 private:
1399   int _nr_oops;
1400 public:
1401   CountOops() : _nr_oops(0) {}
1402   int nr_oops() const { return _nr_oops; }
1403 
1404 
1405   virtual void do_oop(oop* o) { _nr_oops++; }
1406   virtual void do_oop(narrowOop* o) { _nr_oops++; }
1407 };
1408 
1409 struct FpOopInfo {
1410   bool _has_fp_oop; // is fp used to store a derived pointer
1411   int _fp_index;    // see FreezeOopFn::do_derived_oop
1412 
1413   FpOopInfo() : _has_fp_oop(false), _fp_index(0) {}
1414 
1415   static int flag_offset() { return in_bytes(byte_offset_of(FpOopInfo, _has_fp_oop)); }
1416   static int index_offset() { return in_bytes(byte_offset_of(FpOopInfo, _fp_index)); }
1417 
1418   void set_oop_fp_index(int index) {
1419     assert(_has_fp_oop == false, "can only have one");
1420     _has_fp_oop = true;
1421     _fp_index = index;
1422   }
1423 };
1424 
1425 template <typename OopT>
1426 class PersistOops : public OopClosure {
1427 private:
1428   int _limit;
1429   int _current;
1430   objArrayOop _array;
1431 public:
1432   PersistOops(int limit, objArrayOop array) : _limit(limit), _current(0), _array(array) {}
1433 
1434   virtual void do_oop(oop* o) { write_oop(o); }
1435   virtual void do_oop(narrowOop* o) { write_oop(o); }
1436 
1437 private:
1438   template <typename T>
1439   void write_oop(T* p) {
1440     assert(_current < _limit, "");
1441     oop obj = NativeAccess<>::oop_load(p);
1442     OopT* addr = _array->obj_at_address<OopT>(_current++); // depends on UseCompressedOops
1443     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(addr, obj);
1444   }
1445 };
1446 
1447 /*
1448  * This class is mainly responsible for the work that is required to make sure that nmethods that
1449  * are referenced from a Continuation stack are kept alive.
1450  *
1451  * While freezing, for each nmethod a keepalive array is allocated. It contains elements for all the
1452  * oops that are either immediates or in the oop section in the nmethod (basically all that would be
1453  * published to the closure while running nm->oops_do().).
1454  *
1455  * The keepalive array is than strongly linked from the oop array in the Continuation, a weak reference
1456  * is kept in the nmethod -> the keepalive array.
1457  *
1458  * Some GCs (currently only G1) have code that considers the weak reference to the keepalive array a
1459  * strong reference while this nmethod is on the stack. This is true while we are freezing, it helps
1460  * performance because we don't need to allocate and keep oops to this objects in a Handle for such GCs.
1461  * As soon as they are linked into the nmethod we know the object will stay alive.
1462  */
1463 template <typename ConfigT>
1464 class CompiledMethodKeepalive {
1465 private:
1466   typedef typename ConfigT::OopT OopT;
1467   typedef CompiledMethodKeepalive<ConfigT> SelfT;
1468   typedef typename ConfigT::KeepaliveObjectT KeepaliveObjectT;
1469 
1470   typename KeepaliveObjectT::TypeT _keepalive;
1471   CompiledMethod* _method;
1472   SelfT* _parent;
1473   JavaThread* _thread;
1474   int _nr_oops;
1475   bool _required;
1476 
1477   void store_keepalive(JavaThread* thread, oop* keepalive) { _keepalive = KeepaliveObjectT::make_keepalive(thread, keepalive); }
1478   oop read_keepalive() { return KeepaliveObjectT::read_keepalive(_keepalive); }
1479 
1480 public:
1481   CompiledMethodKeepalive(CompiledMethod* cm, SelfT* parent, JavaThread* thread) : _method(cm), _parent(NULL), _thread(thread), _nr_oops(0), _required(false) {
1482     oop* keepalive = cm->get_keepalive();
1483     if (keepalive != NULL) {
1484    //   log_info(jvmcont)("keepalive is %p (%p) for nm %p", keepalive, (void *) *keepalive, cm);
1485       WeakHandle<vm_nmethod_keepalive_data> wh = WeakHandle<vm_nmethod_keepalive_data>::from_raw(keepalive);
1486       oop resolved = wh.resolve();
1487       if (resolved != NULL) {
1488         //log_info(jvmcont)("found keepalive %p (%p)", keepalive, (void *) resolved);
1489         store_keepalive(thread, keepalive);
1490         return;
1491       }
1492 
1493       //log_info(jvmcont)("trying to clear stale keepalive for %p", _method);
1494       if (cm->clear_keepalive(keepalive)) {
1495         //log_info(jvmcont)("keepalive cleared for %p", _method);
1496         thread->keepalive_cleanup()->append(wh);
1497         // put on a list for cleanup in a safepoint
1498       }
1499     }
1500   //  log_info(jvmcont)("keepalive is %p for nm %p", keepalive, cm);
1501 
1502     nmethod* nm = cm->as_nmethod_or_null();
1503     if (nm != NULL) {
1504       _nr_oops = nm->nr_oops();
1505       //log_info(jvmcont)("need keepalive for %d oops", _nr_oops);
1506       _required = true;
1507       _parent = parent;
1508     }
1509   }
1510 
1511   void write_at(ContMirror& mirror, int index) {
1512     //assert(_keepalive != NULL, "");
1513     //log_develop_info(jvmcont)("writing mirror at %d\n", index);
1514     mirror.add_oop<ConfigT>(read_keepalive(), index);
1515     //*(hsp + index)
1516   }
1517 
1518   void persist_oops() {
1519     if (!_required) {
      // Even though the constructor might have marked this keepalive as required, another thread may have installed its own entry before we wrote ours.
1521       return;
1522     }
1523 
1524     nmethod* nm = _method->as_nmethod_or_null();
1525     if (nm != NULL) {
1526       //assert(_keepalive != NULL && read_keepalive() != NULL, "");
1527       PersistOops<OopT> persist(_nr_oops, (objArrayOop) read_keepalive());
1528       nm->oops_do(&persist);
1529       //log_info(jvmcont)("oops persisted");
1530     }
1531   }
1532 
1533   void set_handle(Handle keepalive) {
1534     WeakHandle<vm_nmethod_keepalive_data> wh = WeakHandle<vm_nmethod_keepalive_data>::create(keepalive);
1535     oop* result = _method->set_keepalive(wh.raw());
1536 
1537     if (result != NULL) {
1538       store_keepalive(_thread, result);
1539       // someone else managed to do it before us, destroy the weak
1540       _required = false;
1541       wh.release();
1542     } else {
1543       store_keepalive(_thread, wh.raw());
1544       //log_info(jvmcont)("Winning cas for %p (%p -> %p (%p))", _method, result, wh.raw(), (void *) wh.resolve());
1545     }
1546   }
1547 
1548   SelfT* parent() { return _parent; }
1549   bool required() const { return _required; }
1550   int nr_oops() const { return _nr_oops; }
1551 
1552 };
1553 
1554 template <typename FKind>
1555 class FreezeFrame {
1556 };
1557 
1558 template <>
1559 class FreezeFrame<Interpreted> {
1560   public:
1561   template <bool top, bool bottom, bool IsKeepalive, typename FreezeT>
1562   static hframe dispatch(FreezeT& self, const frame& f, const hframe& caller, int fsize, int argsize, int oops, InterpreterOopMap* mask, typename FreezeT::CompiledMethodKeepaliveT* ignore) {
1563     return self.template freeze_interpreted_frame<top, bottom>(f, caller, fsize, oops, mask);
1564   }
1565 };
1566 
1567 template <>
1568 class FreezeFrame<Compiled> {
1569   public:
1570   template <bool top, bool bottom, bool IsKeepalive, typename FreezeT>
1571   static hframe dispatch(FreezeT& self, const frame& f, const hframe& caller, int fsize, int argsize, int oops, FreezeFnT f_fn, typename FreezeT::CompiledMethodKeepaliveT* kd) {
1572     return self.template freeze_compiled_frame<Compiled, top, bottom, IsKeepalive>(f, caller, fsize, argsize, oops, f_fn, kd);
1573   }
1574 };
1575 
1576 template <typename ConfigT, op_mode mode>
1577 class Freeze {
1578   typedef typename Conditional<mode == mode_preempt, RegisterMap, SmallRegisterMap>::type RegisterMapT;
1579   typedef Freeze<ConfigT, mode> SelfT;
1580   typedef CompiledMethodKeepalive<ConfigT> CompiledMethodKeepaliveT;
1581 
1582 private:
1583   JavaThread* _thread;
1584   ContMirror& _cont;
1585   intptr_t *_bottom_address;
1586 
1587   int _oops;
  int _size; // total size of all frames plus metadata; tracks the offset at which a frame should be written and how many bytes we need to allocate
1589   int _frames;
1590   int _cgrind_interpreted_frames;
1591 
1592   FpOopInfo _fp_oop_info;
1593   FrameInfo* _fi;
1594 
1595   RegisterMapT _map;
1596 
1597   frame _safepoint_stub;
1598   hframe _safepoint_stub_h;
1599   bool  _safepoint_stub_caller;
1600   CompiledMethodKeepaliveT* _keepalive;
1601 #ifndef PRODUCT
1602   intptr_t* _safepoint_stub_hsp;
1603 #endif
1604 
1605   template<typename FKind> static inline frame sender(const frame& f);
1606   template <typename FKind, bool top, bool bottom> inline void patch_pd(const frame& f, hframe& callee, const hframe& caller);
1607   template <bool bottom> inline void align(const hframe& caller, int argsize);
1608   inline void relativize_interpreted_frame_metadata(const frame& f, intptr_t* vsp, const hframe& hf);
1609   template<bool cont_empty> hframe new_bottom_hframe(int sp, int ref_sp, address pc, bool interpreted);
1610   template<typename FKind> hframe new_hframe(const frame& f, intptr_t* vsp, const hframe& caller, int fsize, int num_oops, int argsize);
1611 
1612 public:
1613 
1614   Freeze(JavaThread* thread, ContMirror& mirror) :
1615     _thread(thread), _cont(mirror), _bottom_address(mirror.entrySP()),
1616     _oops(0), _size(0), _frames(0), _cgrind_interpreted_frames(0),
1617     _fp_oop_info(), _map(thread, false, false, false),
1618     _safepoint_stub_caller(false), _keepalive(NULL) {
1619 
1620     _map.set_include_argument_oops(false);
1621   }
1622 
1623   int nr_oops() const   { return _oops; }
1624   int nr_bytes() const  { return _size; }
1625   int nr_frames() const { return _frames; }
1626 
1627   freeze_result freeze(FrameInfo* fi) {
1628     _fi = fi;
1629 
1630     HandleMark hm(_thread);
1631 
1632     // tty->print_cr(">>> freeze mode: %d", mode);
1633 
1634     // assert (map.update_map(), "RegisterMap not set to update");
1635     assert (!_map.include_argument_oops(), "should be");
1636     frame f = freeze_start_frame(_map);
1637     hframe caller;
1638     return freeze<true>(f, caller, 0);
1639   }
1640 
1641   frame freeze_start_frame(SmallRegisterMap& ignored) {
1642     // if (mode == mode_preempt) // TODO: we should really do partial specialization, but then we'll need to define this out-of-line
1643     //   return freeze_start_frame_safepoint_stub();
1644 
1645     assert (mode != mode_preempt, "");
1646 
1647     log_develop_trace(jvmcont)("%s nop at freeze yield", nativePostCallNop_at(_fi->pc) != NULL ? "has" : "no");
1648 
1649     // Note: if the doYield stub does not have its own frame, we may need to consider deopt here, especially if yield is inlinable
1650     frame f = ContinuationHelper::last_frame(_thread); // thread->last_frame();
1651     assert(StubRoutines::cont_doYield_stub()->contains(f.pc()), "must be");
1652   #ifdef ASSERT
1653     hframe::callee_info my_info = slow_link_address<StubF>(f);
1654   #endif
1655     f = sender<StubF>(f);
1656     assert (Frame::callee_link_address(f) == my_info, "");
1657     // ContinuationHelper::update_register_map_with_callee(&_map, f);
1658 
    // The following doesn't work because fi->fp can contain an oop that a GC doesn't know about when walking.
1660     // frame::update_map_with_saved_link(&map, (intptr_t **)&fi->fp);
1661     // frame f = ContinuationHelper::to_frame(fi); // the yield frame
1662 
1663     assert (f.pc() == _fi->pc, "");
1664 
1665     // Log(jvmcont) logv; LogStream st(logv.debug()); f.print_on(st);
1666     if (log_develop_is_enabled(Debug, jvmcont)) f.print_on(tty);
1667 
1668     return f;
1669   }
1670 
1671   frame freeze_start_frame(RegisterMap& ignored) {
1672     assert (mode == mode_preempt, "");
1673 
1674     // safepoint yield
1675     frame f = _thread->last_frame();
1676     f.set_fp(f.real_fp()); // Instead of this, maybe in ContMirror::set_last_frame always use the real_fp? // TODO PD
1677     if (Interpreter::contains(f.pc())) {
1678       log_develop_trace(jvmcont)("INTERPRETER SAFEPOINT");
1679       ContinuationHelper::update_register_map<Interpreted>(&_map, f);
1680       // f.set_sp(f.sp() - 1); // state pushed to the stack
1681     } else {
1682       log_develop_trace(jvmcont)("COMPILER SAFEPOINT");
1683   #ifdef ASSERT
1684       if (!is_stub(f.cb())) { f.print_value_on(tty, JavaThread::current()); }
1685   #endif
1686       assert (is_stub(f.cb()), "must be");
1687       assert (f.oop_map() != NULL, "must be");
1688       ContinuationHelper::update_register_map<StubF>(&_map, f);
1689       f.oop_map()->update_register_map(&f, &_map); // we have callee-save registers in this case
1690     }
1691 
1692     // Log(jvmcont) logv; LogStream st(logv.debug()); f.print_on(st);
1693     if (log_develop_is_enabled(Debug, jvmcont)) f.print_on(tty);
1694 
1695     return f;
1696   }
1697 
1698   template<bool top>
1699   NOINLINE freeze_result freeze(const frame& f, hframe& caller, int callee_argsize) {
1700     assert (f.unextended_sp() < _bottom_address - SP_WIGGLE, ""); // see recurse_freeze_java_frame
1701     assert (f.is_interpreted_frame() || ((top && mode == mode_preempt) == is_stub(f.cb())), "");
1702     assert (mode != mode_fast || (!f.is_interpreted_frame() && slow_get_cb(f)->is_compiled()), "");
1703     assert (mode != mode_fast || !f.is_deoptimized_frame(), "");
1704 
1705     // Dynamically branch on frame type
1706     if (mode == mode_fast || f.is_compiled_frame()) {
1707       if (UNLIKELY(mode != mode_fast && f.oop_map() == NULL))            return freeze_pinned_native; // special native frame
1708 
1709       #ifdef CONT_DOUBLE_NOP
1710         if (UNLIKELY(!(mode == mode_fast && !ContinuationHelper::cached_metadata<mode>(f).empty()) &&
1711              Compiled::is_owning_locks(_cont.thread(), &_map, f))) return freeze_pinned_monitor;
1712       #else
1713         if (UNLIKELY(Compiled::is_owning_locks(_cont.thread(), &_map, f))) return freeze_pinned_monitor;
1714       #endif
1715 
1716       // Keepalive info here...
1717       CompiledMethodKeepaliveT kd(f.cb()->as_compiled_method(), _keepalive, _thread);
1718       if (kd.required()) {
1719         _keepalive = &kd;
1720         return recurse_freeze_compiled_frame<top, true>(f, caller, &kd);
1721       }
1722 
1723       return recurse_freeze_compiled_frame<top, false>(f, caller, &kd);
1724     } else if (f.is_interpreted_frame()) {
1725       if (Interpreted::is_owning_locks(f)) return freeze_pinned_monitor;
1726 
1727       return recurse_freeze_interpreted_frame<top>(f, caller, callee_argsize);
1728     } else if (mode == mode_preempt && top && is_stub(f.cb())) {
1729       return recurse_freeze_stub_frame(f, caller);
1730     } else {
1731       return freeze_pinned_native;
1732     }
1733   }
1734 
1735   template<typename FKind, bool top, bool IsKeepalive>
1736   inline freeze_result recurse_freeze_java_frame(const frame& f, hframe& caller, int fsize, int argsize, int oops, typename FKind::ExtraT extra, CompiledMethodKeepaliveT* kd) {
1737     assert (FKind::is_instance(f), "");
1738     log_develop_trace(jvmcont)("recurse_freeze_java_frame fsize: %d oops: %d", fsize, oops);
1739 
1740   #ifdef ASSERT
1741     hframe::callee_info my_info = slow_link_address<FKind>(f);
1742   #endif
1743     frame senderf = sender<FKind>(f); // f.sender_for_compiled_frame<ContinuationCodeBlobLookup>(&map);
1744     assert (FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
1745     assert (Frame::callee_link_address(senderf) == my_info, "");
1746 
1747     // sometimes an interpreted caller's sp extends a bit below entrySP, plus another word for possible alignment of compiled callee
1748     if (senderf.unextended_sp() >= _bottom_address - SP_WIGGLE) { // dynamic branch
1749       if (UNLIKELY(!allocate()))
1750         return freeze_exception;
1751 
1752       // senderf is the entry frame
1753       argsize = finalize<FKind>(senderf, f, caller); // recursion end
1754 
1755       freeze_java_frame<FKind, top, true, IsKeepalive>(f, caller, fsize, argsize, oops, extra, kd);
1756 
1757       if (log_develop_is_enabled(Trace, jvmcont)) {
1758         log_develop_trace(jvmcont)("bottom h-frame:");
1759         caller.print_on(tty); // caller is now the current hframe
1760       }
1761     } else {
      bool safepoint_stub_caller; // the use of _safepoint_stub_caller is not nice, but since preemption is not performance-critical we don't want to add either a template or a regular parameter
1763       if (mode == mode_preempt) {
1764         safepoint_stub_caller = _safepoint_stub_caller;
1765         _safepoint_stub_caller = false;
1766       }
1767 
1768       freeze_result result = freeze<false>(senderf, caller, argsize); // recursive call
1769       if (UNLIKELY(result != freeze_ok))
1770         return result;
1771 
1772       if (mode == mode_preempt) _safepoint_stub_caller = safepoint_stub_caller; // restore _stub_caller
1773 
1774       freeze_java_frame<FKind, top, false, IsKeepalive>(f, caller, fsize, argsize, oops, extra, kd);
1775     }
1776 
1777     if (top) {
1778       finish(f, caller);
1779     }
1780     return freeze_ok;
1781   }
1782 
1783   void allocate_keepalive() {
1784     if (_keepalive == NULL) {
1785       return;
1786     }
1787 
1788     CompiledMethodKeepaliveT* current = _keepalive;
1789     while (current != NULL) {
1790       _cont.make_keepalive<ConfigT>(current);
1791       current = current->parent();
1792     }
1793   }
1794 
1795   inline bool allocate() {
1796     _cont.allocate_stacks<ConfigT>(_size, _oops, _frames);
1797     return !_thread->has_pending_exception();
1798   }
1799 
1800   template<typename FKind> // the callee's type
1801   int finalize(const frame& f, const frame& callee, hframe& caller) {
1802   #ifdef CALLGRIND_START_INSTRUMENTATION
1803     if (_frames > 0 && _cgrind_interpreted_frames == 0 && callgrind_counter == 1) {
1804       callgrind_counter = 2;
1805       tty->print_cr("Starting callgrind instrumentation");
1806       CALLGRIND_START_INSTRUMENTATION;
1807     }
1808   #endif
1809 
1810     // f is the entry frame
1811 
1812   #ifdef ASSERT
1813     log_develop_trace(jvmcont)("Found entry:");
1814     if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);
1815 
1816     hframe orig_top_frame = _cont.last_frame<mode_slow>();
1817     bool empty = _cont.is_empty();
1818     log_develop_trace(jvmcont)("bottom: " INTPTR_FORMAT " count %d size: %d, num_oops: %d", p2i(_bottom_address), nr_frames(), nr_bytes(), nr_oops());
1819     log_develop_trace(jvmcont)("top_hframe before (freeze):");
1820     if (log_develop_is_enabled(Trace, jvmcont)) orig_top_frame.print_on(_cont, tty);
1821 
1822     log_develop_trace(jvmcont)("empty: %d", empty);
1823     assert (!CONT_FULL_STACK || empty, "");
1824     assert (!empty || _cont.sp() >= _cont.stack_length() || _cont.sp() < 0, "sp: %d stack_length: %d", _cont.sp(), _cont.stack_length());
1825     assert (orig_top_frame.is_empty() == empty, "empty: %d f.sp: %d", empty, orig_top_frame.sp());
1826   #endif
1827 
1828     setup_jump<FKind>(f, callee);
1829 
1830     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 15) return freeze_ok;)
1831 
1832     _cont.allocate_stacks<ConfigT>(_size, _oops, _frames);
1833     if (_thread->has_pending_exception()) {
1834       return freeze_exception;
1835     }
1836 
1837     allocate_keepalive();
1838 
1839     int argsize = 0;
1840     if (_cont.is_empty()) {
1841       caller = new_bottom_hframe<true>(_cont.sp(), _cont.refSP(), NULL, false);
1842     } else {
1843       assert (_cont.is_flag(FLAG_LAST_FRAME_INTERPRETED) == Interpreter::contains(_cont.pc()), "");
1844       int sp = _cont.sp();
1845 
1846       if (!FKind::interpreted) {
1847     #ifdef CONT_DOUBLE_NOP
1848         CachedCompiledMetadata md = ContinuationHelper::cached_metadata<mode>(callee);
1849         if (LIKELY(!md.empty())) {
1850           argsize = md.stack_argsize();
1851           assert(argsize == slow_stack_argsize(callee), "argsize: %d slow_stack_argsize: %d", argsize, slow_stack_argsize(callee));
1852         } else
1853     #endif
1854           argsize = Compiled::stack_argsize(callee);
1855 
1856         if (_cont.is_flag(FLAG_LAST_FRAME_INTERPRETED)) {
1857           log_develop_trace(jvmcont)("finalize _size: %d add argsize: %d", _size, argsize);
1858           _size += argsize;
1859         } else {
1860           // the arguments of the bottom-most frame are part of the topmost compiled frame on the hstack; we overwrite that part
1861           sp += argsize >> LogBytesPerElement;
1862         }
1863       }
1864       caller = new_bottom_hframe<false>(sp, _cont.refSP(), _cont.pc(), _cont.is_flag(FLAG_LAST_FRAME_INTERPRETED));
1865     }
1866 
1867     DEBUG_ONLY(log_develop_trace(jvmcont)("finalize bottom frame:"); if (log_develop_is_enabled(Trace, jvmcont)) caller.print_on(_cont, tty);)
1868 
1869     _cont.add_num_frames(_frames);
1870     _cont.add_size(_size);
1871     _cont.e_add_refs(_oops);
1872 
1873     return argsize;
1874   }
1875 
1876   template<typename FKind> // the callee's type
1877   void setup_jump(const frame& f, const frame& callee) {
1878     assert (f.pc() == Frame::real_pc(f) || (f.is_compiled_frame() && f.cb()->as_compiled_method()->is_deopt_pc(Frame::real_pc(f))), "");
1879     ContinuationHelper::to_frame_info_pd<FKind>(f, callee, _fi);
1880     _fi->sp = f.unextended_sp(); // java_lang_Continuation::entrySP(cont);
1881     _fi->pc = Continuation::is_return_barrier_entry(f.pc()) ? _cont.entryPC()
1882                                                             : Frame::real_pc(f); // Continuation.run may have been deoptimized
1883 
1884   #ifdef ASSERT
1885     // if (f.pc() != real_pc(f)) tty->print_cr("Continuation.run deopted!");
1886     log_develop_debug(jvmcont)("Jumping to frame (freeze): [%ld] (%d)", java_tid(_thread), _thread->has_pending_exception());
1887     frame f1 = ContinuationHelper::to_frame<true>(_fi);
1888     if (log_develop_is_enabled(Debug, jvmcont)) f1.print_on(tty);
1889     assert_top_java_frame_name(f1, RUN_SIG);
1890   #endif
1891   }
1892 
1893   template <typename T>
1894   friend class FreezeFrame;
1895 
1896   template<typename FKind, bool top, bool bottom, bool IsKeepalive>
1897   void freeze_java_frame(const frame& f, hframe& caller, int fsize, int argsize, int oops, typename FKind::ExtraT extra, CompiledMethodKeepaliveT* kd) {
1898     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 15) return;)
1899 
1900     log_develop_trace(jvmcont)("============================= FREEZING FRAME interpreted: %d top: %d bottom: %d", FKind::interpreted, top, bottom);
1901     log_develop_trace(jvmcont)("fsize: %d argsize: %d oops: %d", fsize, argsize, oops);
1902     if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);
1903     assert ((mode == mode_fast && !bottom) || caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
1904 
1905     caller.copy_partial<mode>(FreezeFrame<FKind>::template dispatch<top, bottom, IsKeepalive, SelfT>(*this, f, caller, fsize, argsize, oops, extra, kd));
1906   }
1907 
1908   template <typename FKind>
1909   void freeze_oops(const frame& f, intptr_t* vsp, intptr_t *hsp, int index, int num_oops, void* extra) {
1910     PERFTEST_ONLY(if (PERFTEST_LEVEL < 30) return;)
1911     //log_develop_info(jvmcont)("writing %d oops from %d (%c)", num_oops, index, FKind::type);
1912 
1913     log_develop_trace(jvmcont)("Walking oops (freeze)");
1914 
1915     assert (!_map.include_argument_oops(), "");
1916 
1917     _fp_oop_info._has_fp_oop = false;
1918 
1919     int frozen;
1920     if (LIKELY(!FKind::interpreted && extra != NULL)) { // dynamic branch
1921       FreezeFnT f_fn = (FreezeFnT)extra;
1922       // tty->print_cr(">>>>0000<<<<<");
1923       frozen = freeze_compiled_oops_stub(f_fn, f, vsp, hsp, index);
1924     } else {
1925       if (num_oops == 0)
1926         return;
1927       ContinuationHelper::update_register_map_with_callee(&_map, f); // restore saved link
      frozen = FKind::interpreted ? freeze_interpreted_oops(f, vsp, hsp, index, *(InterpreterOopMap*)extra)
                                  : freeze_compiled_oops   (f, vsp, hsp, index);
1930     }
1931     assert(frozen == num_oops, "frozen: %d num_oops: %d", frozen, num_oops);
1932   }
1933 
1934   template <typename FKind, bool top, bool bottom>
1935   void patch(const frame& f, hframe& hf, const hframe& caller) {
1936     assert (FKind::is_instance(f), "");
1937     assert (bottom || !caller.is_empty(), "");
1938     // in fast mode, partial copy does not copy _is_interpreted for the caller
1939     assert (bottom || mode == mode_fast || Interpreter::contains(FKind::interpreted ? hf.return_pc<FKind>() : caller.real_pc(_cont)) == caller.is_interpreted_frame(), 
1940       "FKind: %s contains: %d is_interpreted: %d", FKind::name, Interpreter::contains(FKind::interpreted ? hf.return_pc<FKind>() : caller.real_pc(_cont)), caller.is_interpreted_frame()); // fails for perftest < 25, but that's ok
1941     assert (!bottom || !_cont.is_empty() || (_cont.fp() == 0 && _cont.pc() == NULL), "");
1942     assert (!bottom || _cont.is_empty() || caller == _cont.last_frame<mode_slow>(), "");
1943     assert (!bottom || _cont.is_empty() || Continuation::is_cont_barrier_frame(f), "");
1944     assert (!bottom || _cont.is_flag(FLAG_LAST_FRAME_INTERPRETED) == Interpreter::contains(_cont.pc()), "");
1945     assert (!FKind::interpreted || hf.interpreted_link_address() == _cont.stack_address(hf.fp()), "");
1946 
1947     if (bottom) {
1948       log_develop_trace(jvmcont)("Fixing return address on bottom frame: " INTPTR_FORMAT, p2i(_cont.pc()));
1949       FKind::interpreted ? hf.patch_return_pc<FKind>(_cont.pc())
1950                          : caller.patch_pc(_cont.pc(), _cont); // TODO PERF non-temporal store
1951     }
1952 
1953     patch_pd<FKind, top, bottom>(f, hf, caller);
1954 
1955 #ifdef ASSERT
1956     // TODO DEOPT: long term solution: unroll on freeze and patch pc
1957     if (mode != mode_fast && !FKind::interpreted && !FKind::stub) {
1958       assert (hf.cb()->is_compiled(), "");
1959       if (f.is_deoptimized_frame()) {
1960         log_develop_trace(jvmcont)("Freezing deoptimized frame");
1961         assert (f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), "");
1962         assert (f.cb()->as_compiled_method()->is_deopt_pc(Frame::real_pc(f)), "");
1963       }
1964     }
1965 #endif
1966   }
1967 
1968   template<bool top>
1969   NOINLINE freeze_result recurse_freeze_interpreted_frame(const frame& f, hframe& caller, int callee_argsize) {
1970     // ResourceMark rm(_thread);
1971     InterpreterOopMap mask;
1972     Interpreted::oop_map(f, &mask);
1973     int fsize = Interpreted::size(f, &mask);
1974     int oops  = Interpreted::num_oops(f, &mask);
1975 
1976     log_develop_trace(jvmcont)("recurse_interpreted_frame _size: %d add fsize: %d callee_argsize: %d -- %d", _size, fsize, callee_argsize, fsize + callee_argsize);
1977     _size += fsize + callee_argsize;
1978     _oops += oops;
1979     _frames++;
1980     _cgrind_interpreted_frames++;
1981 
1982     return recurse_freeze_java_frame<Interpreted, top, false>(f, caller, fsize, 0, oops, &mask, NULL);
1983   }
1984 
1985   template <bool top, bool bottom>
1986   hframe freeze_interpreted_frame(const frame& f, const hframe& caller, int fsize, int oops, InterpreterOopMap* mask) {
1987     intptr_t* vsp = Interpreted::frame_top(f, mask);
1988     assert ((Interpreted::frame_bottom(f) - vsp) * sizeof(intptr_t) == (size_t)fsize, "");
1989 
1990     hframe hf = new_hframe<Interpreted>(f, vsp, caller, fsize, oops, 0);
1991     intptr_t* hsp = _cont.stack_address(hf.sp());
1992 
1993     freeze_raw_frame(vsp, hsp, fsize);
1994 
1995     relativize_interpreted_frame_metadata(f, vsp, hf);
1996 
1997     freeze_oops<Interpreted>(f, vsp, hsp, hf.ref_sp(), oops, mask);
1998 
1999     patch<Interpreted, top, bottom>(f, hf, caller);
2000 
2001     _cont.inc_num_interpreted_frames();
2002 
2003     return hf;
2004   }
2005 
  int freeze_interpreted_oops(const frame& f, intptr_t* vsp, intptr_t* hsp, int starting_index, const InterpreterOopMap& mask) {
2007     FreezeOopFn oopFn(&_cont, &_fp_oop_info, &f, vsp, hsp, &_map, starting_index);
2008     const_cast<frame&>(f).oops_interpreted_do(&oopFn, NULL, mask);
2009     return oopFn.count();
2010   }
2011 
2012   template<bool top, bool IsKeepalive>
2013   freeze_result recurse_freeze_compiled_frame(const frame& f, hframe& caller, CompiledMethodKeepaliveT* kd) {
2014     int fsize, oops, argsize;
2015 #ifdef CONT_DOUBLE_NOP
2016     CachedCompiledMetadata md = ContinuationHelper::cached_metadata<mode>(f); // MUST BE SAFE FOR STUB CALLER; we're not at a call instruction
2017     fsize = md.size();
2018     if (LIKELY(fsize != 0)) {
2019       oops = md.num_oops();
2020       argsize = md.stack_argsize();
2021 
2022       assert(fsize == slow_size(f), "fsize: %d slow_size: %d", fsize, slow_size(f));
2023       assert(oops  == slow_num_oops(f), "oops: %d slow_num_oops: %d", oops, slow_num_oops(f));
2024       assert(argsize == slow_stack_argsize(f), "argsize: %d slow_stack_argsize: %d", argsize, slow_stack_argsize(f));
2025     } else
2026 #endif
2027     {
2028       fsize = Compiled::size(f);
2029       oops  = Compiled::num_oops(f);
2030       argsize = mode == mode_fast ? 0 : Compiled::stack_argsize(f);
2031     }
    FreezeFnT f_fn = get_oopmap_stub(f); // try to do this early, so that we don't need to look at the oopMap again.
2033 
2034     log_develop_trace(jvmcont)("recurse_freeze_compiled_frame _size: %d add fsize: %d", _size, fsize);
2035     _size += fsize;
2036     _oops += oops;
2037     _frames++;
2038 
2039     // TODO PERF: consider recalculating fsize, argsize and oops in freeze_compiled_frame instead of passing them, as we now do in thaw
2040     return recurse_freeze_java_frame<Compiled, top, IsKeepalive>(f, caller, fsize, argsize, oops, f_fn, kd);
2041   }
2042 
2043   template <typename FKind, bool top, bool bottom, bool IsKeepalive>
2044   hframe freeze_compiled_frame(const frame& f, const hframe& caller, int fsize, int argsize, int oops, FreezeFnT f_fn, CompiledMethodKeepaliveT* kd) {
2045     freeze_compiled_frame_bp();
2046 
2047     intptr_t* vsp = FKind::frame_top(f);
2048 
    // The following assertion also appears in patch_pd and align.
    // Even in fast mode, we allow the caller of the bottom frame (i.e. the last frame still on the hstack) to be interpreted.
    // A different tradeoff is possible: only set mode_fast when this is not the case, by uncommenting _fastpath = false in Thaw::finalize where we set the last frame.
    // Doing so would save the test for caller.is_interpreted_frame() when we're in mode_fast and bottom, but at the cost of not switching to fast mode when only a single frozen frame is interpreted.
2053     assert (mode != mode_fast || bottom || !Interpreter::contains(caller.pc()), "");
2054 
2055     // in mode_fast we must not look at caller.is_interpreted_frame() because it may be wrong (hframe::partial_copy)
2056 
2057     if (bottom || (mode != mode_fast && caller.is_interpreted_frame())) {
2058       if (!bottom) { // if we're bottom, argsize has been computed in finalize
2059         argsize = Compiled::stack_argsize(f);
2060       }
2061       log_develop_trace(jvmcont)("freeze_compiled_frame add argsize: fsize: %d argsize: %d fsize: %d", fsize, argsize, fsize + argsize);
2062       fsize += argsize;
2063       align<bottom>(caller, argsize); // TODO PERF
2064     }
2065 
2066     hframe hf = new_hframe<FKind>(f, vsp, caller, fsize, oops, argsize);
2067     intptr_t* hsp = _cont.stack_address(hf.sp());
2068 
2069     freeze_raw_frame(vsp, hsp, fsize);
2070 
2071     if (!FKind::stub) {
2072       if (mode == mode_preempt && _safepoint_stub_caller) {
2073         _safepoint_stub_h = freeze_safepoint_stub(hf);
2074       }
2075 
2076       // ref_sp: 3, oops 4  -> [ 3: oop, 4: oop, 5: oop, 6: nmethod ]
2077       kd->write_at(_cont, hf.ref_sp() + oops - 1);
2078       //freeze_oops<Compiled>(f, vsp, hsp, hf.ref_sp() + 1, oops - 1, (void*)f_fn);
2079       freeze_oops<Compiled>(f, vsp, hsp, hf.ref_sp(), oops - 1, (void*)f_fn);
2080 
2081       if (mode == mode_preempt && _safepoint_stub_caller) {
2082         assert (!_fp_oop_info._has_fp_oop, "must be");
2083         _safepoint_stub = frame();
2084       }
2085 
2086       if (IsKeepalive) {
2087         kd->persist_oops();
2088       }
2089     } else { // stub frame has no oops
2090       _fp_oop_info._has_fp_oop = false;
2091     }
2092 
2093     patch<FKind, top, bottom>(f, hf, caller);
2094 
2095     // log_develop_trace(jvmcont)("freeze_compiled_frame real_pc: " INTPTR_FORMAT " address: " INTPTR_FORMAT " sp: " INTPTR_FORMAT, p2i(Frame::real_pc(f)), p2i(&(((address*) f.sp())[-1])), p2i(f.sp()));
2096     assert(bottom || mode == mode_fast || Interpreter::contains(caller.real_pc(_cont)) == caller.is_interpreted_frame(), "");
2097 
2098     return hf;
2099   }
2100 
2101   int freeze_compiled_oops(const frame& f, intptr_t* vsp, intptr_t* hsp, int starting_index) {
2102     if (mode != mode_preempt && ConfigT::allow_stubs && get_oopmap_stub(f) == NULL) {
2103   #ifdef CONT_DOUBLE_NOP
2104       f.get_cb();
2105   #endif
2106       const ImmutableOopMap* oopmap = f.oop_map();
2107       assert(oopmap, "must have");
2108       oopmap->generate_stub(f.cb());
2109   #ifdef CONT_DOUBLE_NOP
2110       ContinuationHelper::patch_freeze_stub(f, (address)get_oopmap_stub(f));
2111   #endif
2112       log_develop_trace(jvmcont)("freeze_compiled_oops generating oopmap stub; success: %d", get_oopmap_stub(f) != NULL);
2113       // tty->print_cr(">>>> generating oopmap stub; success: %d <<<<<", get_oopmap_stub(f) != NULL);
2114       // f.print_on(tty);
2115     }
2116     FreezeFnT stub = get_oopmap_stub(f);
2117 
2118     if (mode != mode_preempt && ConfigT::allow_stubs && stub != NULL) {
2119       assert (_safepoint_stub.is_empty(), "");
2120       return freeze_compiled_oops_stub(stub, f, vsp, hsp, starting_index);
2121     } else {
2122       // tty->print_cr(">>>>33333<<<<<");
2123       intptr_t *stub_vsp = NULL;
2124       intptr_t *stub_hsp = NULL;
2125       if (mode == mode_preempt && _safepoint_stub_caller) {
2126         assert (!_safepoint_stub.is_empty(), "");
2127         stub_vsp = StubF::frame_top(_safepoint_stub);
2128   #ifndef PRODUCT
2129         assert (_safepoint_stub_hsp != NULL, "");
2130         stub_hsp = _safepoint_stub_hsp;
2131   #endif
2132       }
2133 
2134   #ifdef CONT_DOUBLE_NOP
2135       f.get_cb();
2136   #endif
2137       const ImmutableOopMap* oopmap = f.oop_map();
2138       assert(oopmap, "must have");
2139 
2140       FreezeOopFn oopFn(&_cont, &_fp_oop_info, &f, vsp, hsp, &_map, starting_index, stub_vsp, stub_hsp);
2141 
2142       OopMapDo<FreezeOopFn, FreezeOopFn, IncludeAllValues> visitor(&oopFn, &oopFn);
2143       visitor.oops_do(&f, &_map, oopmap);
2144       assert (!_map.include_argument_oops(), "");
2145 
2146       return oopFn.count();
2147     }
2148   }
2149 
2150   inline int freeze_compiled_oops_stub(FreezeFnT f_fn, const frame& f, intptr_t* vsp, intptr_t* hsp, int starting_index) {
2151     // tty->print_cr(">>>>2222<<<<<");
2152     // ContinuationHelper::update_register_map_with_callee(&_map, f);
2153     intptr_t** link_addr = Frame::callee_link_address(f); // Frame::map_link_address(map);
2154     typename ConfigT::OopT* addr = _cont.refStack()->template obj_at_address<typename ConfigT::OopT>(starting_index);
2155     int cnt = f_fn( (address) vsp,  (address) addr, (address) link_addr, (address) hsp, _cont.refStack()->length() - starting_index, &_fp_oop_info);
2156     return cnt;
2157   }
2158 
2159   NOINLINE void finish(const frame& f, const hframe& top) {
2160     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 15) return;)
2161 
2162     ConfigT::OopWriterT::finish(_cont, nr_oops(), top.ref_sp());
2163 
2164     assert (top.sp() <= _cont.sp(), "top.sp(): %d sp: %d", top.sp(), _cont.sp());
2165 
2166     _cont.set_last_frame<mode>(top);
2167 
2168     if (log_develop_is_enabled(Trace, jvmcont)) {
2169       log_develop_trace(jvmcont)("top_hframe after (freeze):");
2170       _cont.last_frame<mode_preempt>().print_on(_cont, tty);
2171     }
2172 
2173     assert (_cont.is_flag(FLAG_LAST_FRAME_INTERPRETED) == _cont.last_frame<mode>().is_interpreted_frame(), "");
2174   }
2175 
2176   NOINLINE freeze_result recurse_freeze_stub_frame(const frame& f, hframe& caller) {
2177     int fsize = StubF::size(f);
2178 
2179     log_develop_trace(jvmcont)("recurse_stub_frame _size: %d add fsize: %d", _size, fsize);
2180     _size += fsize;
2181     _frames++;
2182 
2183     assert (mode == mode_preempt, "");
2184     _safepoint_stub = f;
2185 
2186   #ifdef ASSERT
2187     hframe::callee_info my_info = slow_link_address<StubF>(f);
2188   #endif
2189     frame senderf = sender<StubF>(f); // f.sender_for_compiled_frame<ContinuationCodeBlobLookup>(&map);
2190 
2191     assert (Frame::callee_link_address(senderf) == my_info, "");
2192     assert (senderf.unextended_sp() < _bottom_address - SP_WIGGLE, "");
2193     assert (senderf.is_compiled_frame(), ""); // TODO has been seen to fail in Preempt.java with -XX:+DeoptimizeALot
2194     assert (senderf.oop_map() != NULL, "");
2195 
2196     // we can have stub_caller as a value template argument, but that's unnecessary
2197     _safepoint_stub_caller = true;
2198     freeze_result result = recurse_freeze_compiled_frame<false, false>(senderf, caller, NULL);
2199     if (result == freeze_ok) {
2200       finish(f, _safepoint_stub_h);
2201     }
2202     return result;
2203   }
2204 
2205   NOINLINE hframe freeze_safepoint_stub(hframe& caller) {
2206     log_develop_trace(jvmcont)("== FREEZING STUB FRAME:");
2207 
2208     assert(mode == mode_preempt, "");
2209     assert(!_safepoint_stub.is_empty(), "");
2210 
2211     int fsize = StubF::size(_safepoint_stub);
2212 
2213     hframe hf = freeze_compiled_frame<StubF, true, false, false>(_safepoint_stub, caller, fsize, 0, 0, NULL, NULL);
2214 
2215 #ifndef PRODUCT
2216     _safepoint_stub_hsp = _cont.stack_address(hf.sp());
2217 #endif
2218 
2219     log_develop_trace(jvmcont)("== DONE FREEZING STUB FRAME");
2220     return hf;
2221   }
2222 
2223   inline FreezeFnT get_oopmap_stub(const frame& f) {
2224     if (!ConfigT::allow_stubs)
2225       return NULL;
2226     return ContinuationHelper::freeze_stub<mode>(f);
2227   }
2228 
2229   inline void freeze_raw_frame(intptr_t* vsp, intptr_t* hsp, int fsize) {
2230     log_develop_trace(jvmcont)("freeze_raw_frame: sp: %d", _cont.stack_index(hsp));
2231     _cont.copy_to_stack(vsp, hsp, fsize);
2232   }
2233 
2234   class FreezeOopFn : public ContOopBase<RegisterMapT> {
2235   private:
2236     FpOopInfo* _fp_info;
2237     void* const _hsp;
2238     int _starting_index;
2239 
2240     const address _stub_vsp;
2241   #ifndef PRODUCT
2242     const address _stub_hsp;
2243   #endif
2244 
2245     int add_oop(oop obj, int index) {
2246       //log_develop_info(jvmcont)("writing oop at %d", index);
2247       return this->_cont->template add_oop<ConfigT>(obj, index);
2248     }
2249 
2250   protected:
2251     template <class T> inline void do_oop_work(T* p) {
2252       this->process(p);
2253       oop obj = RawAccess<>::oop_load(p); // we are reading off our own stack, Raw should be fine
2254       int index = add_oop(obj, _starting_index + this->_count - 1);
2255 
2256   #ifdef ASSERT
2257       print_oop(p, obj);
2258       assert (oopDesc::is_oop_or_null(obj), "invalid oop");
2259       log_develop_trace(jvmcont)("narrow: %d", sizeof(T) < wordSize);
2260 
2261       int offset = this->verify(p);
2262       assert(offset < 32768, "");
2263       if (_stub_vsp == NULL && offset < 0) { // rbp could be stored in the callee frame.
2264         assert (p == (T*)Frame::map_link_address(this->_map), "");
2265         _fp_info->set_oop_fp_index(0xbaba); // assumed to be unnecessary at this time; used only in ASSERT for now
2266       } else {
2267         address hloc = (address)_hsp + offset; // address of oop in the (raw) h-stack
2268         assert (this->_cont->in_hstack(hloc), "");
2269         assert (*(T*)hloc == *p, "*hloc: " INTPTR_FORMAT " *p: " INTPTR_FORMAT, *(intptr_t*)hloc, *(intptr_t*)p);
2270 
2271         log_develop_trace(jvmcont)("Marking oop at " INTPTR_FORMAT " (offset: %d)", p2i(hloc), offset);
2272         memset(hloc, 0xba, sizeof(T)); // we must take care not to write a full word to a narrow oop
2273         if (_stub_vsp != NULL && offset < 0) { // slow path
2274           int offset0 = (address)p - _stub_vsp;
2275           assert (offset0 >= 0, "stub vsp: " INTPTR_FORMAT " p: " INTPTR_FORMAT " offset: %d", p2i(_stub_vsp), p2i(p), offset0);
2276           assert (hloc == _stub_hsp + offset0, "");
2277         }
2278       }
2279   #endif
2280     }
2281 
2282   public:
2283     FreezeOopFn(ContMirror* cont, FpOopInfo* fp_info, const frame* fr, void* vsp, void* hsp, RegisterMapT* map, int starting_index, intptr_t* stub_vsp = NULL, intptr_t* stub_hsp = NULL)
2284     : ContOopBase<RegisterMapT>(cont, fr, map, vsp), _fp_info(fp_info), _hsp(hsp), _starting_index(starting_index),
2285       _stub_vsp((address)stub_vsp)
2286   #ifndef PRODUCT
2287       , _stub_hsp((address)stub_hsp)
2288   #endif
2289     {
2290       assert (cont->in_hstack(hsp), "");
2291     }
2292 
2293     void do_oop(oop* p)       { do_oop_work(p); }
2294     void do_oop(narrowOop* p) { do_oop_work(p); }
2295 
2296     void do_derived_oop(oop *base_loc, oop *derived_loc) {
2297       assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
2298       assert(derived_loc != base_loc, "Base and derived in same location");
2299       DEBUG_ONLY(this->verify(base_loc);)
2300       DEBUG_ONLY(this->verify(derived_loc);)
2301 
2302       intptr_t offset = cast_from_oop<intptr_t>(*derived_loc) - cast_from_oop<intptr_t>(*base_loc);
2303 
2304       log_develop_trace(jvmcont)(
2305         "Continuation freeze derived pointer@" INTPTR_FORMAT " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
2306         p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset);
2307 
2308       int hloc_offset = (address)derived_loc - (address)this->_vsp;
2309       if (hloc_offset < 0 && _stub_vsp == NULL) {
2310         assert ((intptr_t**)derived_loc == Frame::map_link_address(this->_map), "");
2311         _fp_info->set_oop_fp_index(offset);
2312 
2313         log_develop_trace(jvmcont)("Writing derived pointer offset in fp (offset: %ld, 0x%lx)", offset, offset);
2314       } else {
2315         intptr_t* hloc = (intptr_t*)((address)_hsp + hloc_offset);
2316         *hloc = offset;
2317 
2318         log_develop_trace(jvmcont)("Writing derived pointer offset at " INTPTR_FORMAT " (offset: " INTX_FORMAT ", " INTPTR_FORMAT ")", p2i(hloc), offset, offset);
2319 
2320   #ifdef ASSERT
2321         if (_stub_vsp != NULL && hloc_offset < 0) {
2322           int hloc_offset0 = (address)derived_loc - _stub_vsp;
2323           assert (hloc_offset0 >= 0, "hloc_offset: %d", hloc_offset0);
2324           assert(hloc == (intptr_t*)(_stub_hsp + hloc_offset0), "");
2325         }
2326   #endif
2327       }
2328     }
2329   };
2330 };
2331 
2332 template <typename ConfigT>
2333 class NormalOopWriter {
2334 public:
2335   typedef typename ConfigT::OopT OopT;
2336 
2337   static void obj_at_put(objArrayOop array, int index, oop obj) { array->obj_at_put_access<IS_DEST_UNINITIALIZED>(index, obj); }
2338   static void finish(ContMirror& mirror, int count, int low_array_index) { }
2339 };
2340 
2341 template <typename ConfigT>
2342 class RawOopWriter {
2343 public:
2344   typedef typename ConfigT::OopT OopT;
2345 
2346   static void obj_at_put(objArrayOop array, int index, oop obj) {
2347     OopT* addr = array->obj_at_addr<OopT>(index); // depends on UseCompressedOops
2348     //assert(*addr == (OopT) NULL, "");
2349     RawAccess<IS_DEST_UNINITIALIZED>::oop_store(addr, obj);
2350   }
2351 
2352   static void finish(ContMirror& mirror, int count, int low_array_index) {
2353     if (count > 0) {
2354       BarrierSet* bs = BarrierSet::barrier_set();
2355       ModRefBarrierSet* mbs = barrier_set_cast<ModRefBarrierSet>(bs);
2356       HeapWord* start = (HeapWord*) mirror.refStack()->obj_at_addr<OopT>(low_array_index);
2357       mbs->write_ref_array(start, count);
2358     }
2359   }
2360 };
2361 
2362 int early_return(int res, JavaThread* thread, FrameInfo* fi) {
2363   clear_frame_info(fi);
2364   thread->set_cont_yield(false);
2365   log_develop_trace(jvmcont)("=== end of freeze (fail %d)", res);
2366   return res;
2367 }
2368 
static void invalidate_JVMTI_stack(JavaThread* thread) {
2370   if (thread->is_interp_only_mode()) {
2371     JvmtiThreadState *jvmti_state = thread->jvmti_thread_state();
2372     if (jvmti_state != NULL)
2373       jvmti_state->invalidate_cur_stack_depth();
2374   }
2375 }
2376 
2377 static void post_JVMTI_yield(JavaThread* thread, ContMirror& cont, const FrameInfo* fi) {
2378   if (JvmtiExport::should_post_continuation_yield() || JvmtiExport::can_post_frame_pop()) {
2379     set_anchor<true>(thread, fi); // ensure frozen frames are invisible
2380     JvmtiExport::post_continuation_yield(JavaThread::current(), num_java_frames(cont));
2381   }
2382 
  invalidate_JVMTI_stack(thread);
2384 }
2385 
2386 // returns the continuation yielding (based on context), or NULL for failure (due to pinning)
// it freezes multiple continuations, depending on context
2388 // it must set Continuation.stackSize
2389 // sets Continuation.fp/sp to relative indices
2390 //
2391 // In: fi->pc, fi->sp, fi->fp all point to the current (topmost) frame to freeze (the yield frame); THESE VALUES ARE CURRENTLY UNUSED
2392 // Out: fi->pc, fi->sp, fi->fp all point to the run frame (entry's caller)
2393 //      unless freezing has failed, in which case fi->pc = 0
2394 //      However, fi->fp points to the _address_ on the stack of the entry frame's link to its caller (so *(fi->fp) is the fp)
2395 template<op_mode mode>
2396 int freeze0(JavaThread* thread, FrameInfo* fi) {
2397   //callgrind();
2398   PERFTEST_ONLY(PERFTEST_LEVEL = ContPerfTest;)
2399 
2400   PERFTEST_ONLY(if (PERFTEST_LEVEL <= 10) return early_return(freeze_ok, thread, fi);)
2401   PERFTEST_ONLY(if (PERFTEST_LEVEL < 1000) thread->set_cont_yield(false);)
2402 
2403 #ifdef ASSERT
2404   log_develop_trace(jvmcont)("~~~~~~~~~ freeze mode: %d fi->sp: " INTPTR_FORMAT " fi->fp: " INTPTR_FORMAT " fi->pc: " INTPTR_FORMAT, mode, p2i(fi->sp), p2i(fi->fp), p2i(fi->pc));
2405   /* set_anchor(thread, fi); */ print_frames(thread);
2406 #endif
2407   // if (mode != mode_fast) tty->print_cr(">>> freeze0 mode: %d", mode);
2408 
2409   assert (thread->thread_state() == _thread_in_vm || thread->thread_state() == _thread_blocked, "thread->thread_state(): %d", thread->thread_state());
2410   assert (!thread->cont_yield(), "");
2411   assert (!thread->has_pending_exception(), ""); // if (thread->has_pending_exception()) return early_return(freeze_exception, thread, fi);
2412 
2413   EventContinuationFreeze event;
2414 
2415   thread->set_cont_yield(true);
2416   thread->cont_frame()->sp = NULL;
2417   DEBUG_ONLY(thread->_continuation = NULL;)
2418 
2419   oop oopCont = get_continuation(thread);
2420   ContMirror cont(thread, oopCont);
2421   log_develop_debug(jvmcont)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
2422 
2423   if (java_lang_Continuation::critical_section(oopCont) > 0) {
2424     log_develop_debug(jvmcont)("PINNED due to critical section");
2425     return early_return(freeze_pinned_cs, thread, fi);
2426   }
2427 
2428   freeze_result res = cont_freeze<mode>(thread, cont, fi);
2429   if (res != freeze_ok)
2430     return early_return(res, thread, fi);
2431 
2432   PERFTEST_ONLY(if (PERFTEST_LEVEL <= 15) return freeze_ok;)
2433 
2434   cont.set_flag(FLAG_SAFEPOINT_YIELD, mode == mode_preempt);
2435 
2436   cont.write(); // commit the freeze
2437 
2438   cont.post_jfr_event(&event);
2439   post_JVMTI_yield(thread, cont, fi); // can safepoint
2440 
2441   // set_anchor(thread, fi);
2442   thread->set_cont_yield(false);
2443 
2444   log_develop_debug(jvmcont)("ENTRY: sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT, p2i(fi->sp), p2i(fi->fp), p2i(fi->pc));
2445   log_develop_debug(jvmcont)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
2446 
2447   return 0;
2448 }
2449 
2450 JRT_ENTRY(int, Continuation::freeze(JavaThread* thread, FrameInfo* fi, bool from_interpreter))
  // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c adapter or called Deoptimization::unpack_frames
2452   // Calls from native frames also go through the interpreter (see JavaCalls::call_helper)
2453   // We also clear thread->cont_fastpath in Deoptimize::deoptimize_single_frame and when we thaw interpreted frames
2454   bool fast = UseContinuationFastPath && thread->cont_fastpath() && !from_interpreter;
2455   // tty->print_cr(">>> freeze fast: %d thread->cont_fastpath(): %d from_interpreter: %d", fast, thread->cont_fastpath(), from_interpreter);
2456   return fast ? freeze0<mode_fast>(thread, fi)
2457               : freeze0<mode_slow>(thread, fi);
2458 JRT_END
2459 
2460 static freeze_result is_pinned(const frame& f, const RegisterMap* map) {
2461   if (f.is_interpreted_frame()) {
2462     if (Interpreted::is_owning_locks(f)) {
2463       return freeze_pinned_monitor;
2464     }
2465 
2466   } else if (f.is_compiled_frame()) {
2467     if (Compiled::is_owning_locks(map->thread(), map, f)) {
2468       return freeze_pinned_monitor;
2469     }
2470 
2471   } else {
2472     return freeze_pinned_native;
2473   }
2474   return freeze_ok;
2475 }
2476 
2477 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) {
2478   oop cont = get_continuation(thread);
2479   if (cont == (oop) NULL) {
2480     return freeze_ok;
2481   }
2482   if (java_lang_Continuation::critical_section(cont) > 0)
2483     return freeze_pinned_cs;
2484 
2485   RegisterMap map(thread, false, false, false); // should first argument be true?
2486   map.set_include_argument_oops(false);
2487   frame f = thread->last_frame();
2488 
2489   if (!safepoint) {
2490     f = f.frame_sender<ContinuationCodeBlobLookup>(&map); // LOOKUP // this is the yield frame
2491   } else { // safepoint yield
2492     f.set_fp(f.real_fp()); // Instead of this, maybe in ContMirror::set_last_frame always use the real_fp?
2493     if (!Interpreter::contains(f.pc())) {
2494       assert (is_stub(f.cb()), "must be");
2495       assert (f.oop_map() != NULL, "must be");
2496       f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
2497     }
2498   }
2499 
2500   while (true) {
2501     freeze_result res = is_pinned(f, &map);
2502     if (res != freeze_ok)
2503       return res;
2504 
2505     f = f.frame_sender<ContinuationCodeBlobLookup>(&map);
2506     if (!Continuation::is_frame_in_continuation(f, cont)) {
2507       oop scope = java_lang_Continuation::scope(cont);
2508       if (oopDesc::equals(scope, cont_scope))
2509         break;
2510       cont = java_lang_Continuation::parent(cont);
2511       if (cont == (oop) NULL)
2512         break;
2513       if (java_lang_Continuation::critical_section(cont) > 0)
2514         return freeze_pinned_cs;
2515     }
2516   }
2517   return freeze_ok;
2518 }
2519 
2520 typedef int (*DoYieldStub)(int scopes);
2521 
2522 // called in a safepoint
2523 int Continuation::try_force_yield(JavaThread* thread, const oop cont) {
  // this is the only place where we traverse the continuation hierarchy in native code, as it needs to be done in a safepoint
2525   oop scope = NULL;
2526   oop innermost = get_continuation(thread);
2527   for (oop c = innermost; c != NULL; c = java_lang_Continuation::parent(c)) {
2528     if (oopDesc::equals(c, cont)) {
2529       scope = java_lang_Continuation::scope(c);
2530       break;
2531     }
2532   }
2533   if (scope == NULL) {
2534     return -1; // no continuation
2535   }
2536   if (thread->_cont_yield) {
2537     return -2; // during yield
2538   }
2539   if (!oopDesc::equals(innermost, cont)) { // we have nested continuations
2540     // make sure none of the continuations in the hierarchy are pinned
2541     freeze_result res_pinned = is_pinned0(thread, java_lang_Continuation::scope(cont), true);
2542     if (res_pinned != freeze_ok)
2543       return res_pinned;
2544 
2545     java_lang_Continuation::set_yieldInfo(cont, scope);
2546   }
2547 
2548 // #ifdef ASSERT
2549 //   tty->print_cr("FREEZING:");
2550 //   frame lf = thread->last_frame();
2551 //   lf.print_on(tty);
2552 //   tty->print_cr("");
2553 //   const ImmutableOopMap* oopmap = lf.oop_map();
2554 //   if (oopmap != NULL) {
2555 //     oopmap->print();
2556 //     tty->print_cr("");
2557 //   } else {
2558 //     tty->print_cr("oopmap: NULL");
2559 //   }
2560 //   tty->print_cr("*&^*&#^$*&&@(#*&@(#&*(*@#&*(&@#$^*(&#$(*&#@$(*&#($*&@#($*&$(#*$");
2561 // #endif
2562   // TODO: save return value
2563 
2564   FrameInfo fi;
2565   int res = freeze0<mode_preempt>(thread, &fi); // CAST_TO_FN_PTR(DoYieldStub, StubRoutines::cont_doYield_C())(-1);
2566   if (res == 0) { // success
2567     thread->_cont_frame = fi;
2568     thread->set_cont_preempt(true);
2569 
2570     frame last = thread->last_frame();
2571     Frame::patch_pc(last, StubRoutines::cont_jump_from_sp()); // reinstates rbpc and rlocals for the sake of the interpreter
2572     log_develop_trace(jvmcont)("try_force_yield installed cont_jump_from_sp stub on"); if (log_develop_is_enabled(Trace, jvmcont)) last.print_on(tty);
2573 
2574     // this return barrier is used for compiled frames; for interpreted frames we use the call to StubRoutines::cont_jump_from_sp_C in JavaThread::handle_special_runtime_exit_condition
2575   }
2576   return res;
2577 }
2578 /////////////// THAW ////
2579 
2580 typedef bool (*ThawContFnT)(JavaThread*, ContMirror&, FrameInfo*, int);
2581 
2582 static ThawContFnT cont_thaw_fast = NULL;
2583 static ThawContFnT cont_thaw_slow = NULL;
2584 static ThawContFnT cont_thaw_preempt = NULL;
2585 
2586 template<op_mode mode>
2587 static bool cont_thaw(JavaThread* thread, ContMirror& cont, FrameInfo* fi, int num_frames) {
2588   switch (mode) {
2589     case mode_fast:    return cont_thaw_fast   (thread, cont, fi, num_frames);
2590     case mode_slow:    return cont_thaw_slow   (thread, cont, fi, num_frames);
2591     case mode_preempt: return cont_thaw_preempt(thread, cont, fi, num_frames);
2592     default:
2593       guarantee(false, "unreachable");
2594       return false;
2595   }
2596 }
2597 
2598 static inline int thaw_num_frames(bool return_barrier) {
2599   if (CONT_FULL_STACK) {
2600     assert (!return_barrier, "");
2601     return 10000;
2602   }
2603   return return_barrier ? 1 : 2;
2604 }
2605 
2606 static bool stack_overflow_check(JavaThread* thread, int size, address sp) {
2607   const int page_size = os::vm_page_size();
2608   if (size > page_size) {
2609     if (sp - size < thread->stack_overflow_limit()) {
2610       return false;
2611     }
2612   }
2613   return true;
2614 }
2615 
2616 // In: fi->sp = the sp of the entry frame
2617 // Out: returns the size of frames to thaw or 0 for no more frames or a stack overflow
2618 //      On failure: fi->sp - cont's entry SP
2619 //                  fi->fp - cont's entry FP
2620 //                  fi->pc - overflow? throw StackOverflowError : cont's entry PC
2621 JRT_LEAF(int, Continuation::prepare_thaw(FrameInfo* fi, bool return_barrier))
2622   PERFTEST_ONLY(PERFTEST_LEVEL = ContPerfTest;)
2623 
2624   PERFTEST_ONLY(if (PERFTEST_LEVEL <= 110) return 0;)
2625 
2626   int num_frames = thaw_num_frames(return_barrier);
2627 
2628   log_develop_trace(jvmcont)("~~~~~~~~~ prepare_thaw return_barrier: %d num_frames: %d", return_barrier, num_frames);
2629   log_develop_trace(jvmcont)("prepare_thaw pc: " INTPTR_FORMAT " fp: " INTPTR_FORMAT " sp: " INTPTR_FORMAT, p2i(fi->pc), p2i(fi->fp), p2i(fi->sp));
2630 
2631   JavaThread* thread = JavaThread::current();
2632   oop cont = get_continuation(thread);
2633 
2634   // if the entry frame is interpreted, it may leave a parameter on the stack, which would be left there if the return barrier is hit
2635   // assert ((address)java_lang_Continuation::entrySP(cont) - bottom <= 8, "bottom: " INTPTR_FORMAT ", entrySP: " INTPTR_FORMAT, bottom, java_lang_Continuation::entrySP(cont));
2636   int size = java_lang_Continuation::maxSize(cont); // frames_size(cont, num_frames);
2637   if (size == 0) { // no more frames
2638     return 0;
2639   }
2640   size += SP_WIGGLE * sizeof(intptr_t); // just in case we have an interpreted entry after which we need to align
2641 
2642   const address bottom = (address)fi->sp; // os::current_stack_pointer(); points to the entry frame
2643   if (!stack_overflow_check(thread, size + 300, bottom)) {
2644     fi->pc = StubRoutines::throw_StackOverflowError_entry();
2645     return 0;
2646   }
2647 
2648   log_develop_trace(jvmcont)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d", p2i(bottom), p2i(bottom - size), size);
2649 
2650   PERFTEST_ONLY(if (PERFTEST_LEVEL <= 120) return 0;)
2651 
2652   return size;
2653 JRT_END
2654 
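// Thaw is the inverse of Freeze: recurse_thaw_java_frame walks the h-stack from
// the top until it has either seen num_frames frames or reached the continuation's
// bottom (finalize), and then copies each frame back onto the thread stack on the
// way out of the recursion, so a caller is always materialized before its callee
// is patched to it. Like Freeze, it is specialized on ConfigT (oop compression,
// stub availability) and on op_mode.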
2655 template <typename ConfigT, op_mode mode>
2656 class Thaw {
2657   typedef typename Conditional<mode == mode_preempt, RegisterMap, SmallRegisterMap>::type RegisterMapT;
2658 
2659 private:
2660   JavaThread* _thread;
2661   ContMirror& _cont;
2662   FrameInfo* _fi;
2663 
2664   bool _fastpath; // if true, a subsequent freeze can be in mode_fast
2665 
2666   RegisterMapT _map; // map is only passed to thaw_compiled_frame for use in deoptimize, which uses it only for biased locks; we may not need deoptimize there at all -- investigate
2667 
2668   const hframe* _safepoint_stub;
2669   bool _safepoint_stub_caller;
2670   frame _safepoint_stub_f;
2671 
2672   DEBUG_ONLY(int _frames;)
2673 
2674   inline frame new_entry_frame();
2675   template<typename FKind> frame new_frame(const hframe& hf, intptr_t* vsp);
2676   template<typename FKind, bool top, bool bottom> inline void patch_pd(frame& f, const frame& sender);
2677   void derelativize_interpreted_frame_metadata(const hframe& hf, const frame& f);
2678   inline hframe::callee_info frame_callee_info_address(frame& f);
2679   template<typename FKind, bool top, bool bottom> inline intptr_t* align(const hframe& hf, intptr_t* vsp, frame& caller);
2680 
2681   bool should_deoptimize() { return true; /* mode != mode_fast && _thread->is_interp_only_mode(); */ } // TODO PERF
2682 
2683 public:
2684 
2685   Thaw(JavaThread* thread, ContMirror& mirror) :
2686     _thread(thread), _cont(mirror),
2687     _fastpath(true),
2688     _map(thread, false, false, false),
2689     _safepoint_stub(NULL), _safepoint_stub_caller(false) {
2690 
2691     _map.set_include_argument_oops(false);
2692   }
2693 
2694   bool thaw(FrameInfo* fi, int num_frames) {
2695     _fi = fi;
2696 
2697     assert (!_map.include_argument_oops(), "should be");
2698 
2699     DEBUG_ONLY(int orig_num_frames = _cont.num_frames();)
2700     DEBUG_ONLY(_frames = 0;)
2701 
2702     hframe hf = _cont.last_frame<mode>();
2703 
2704     log_develop_trace(jvmcont)("top_hframe before (thaw):"); if (log_develop_is_enabled(Trace, jvmcont)) hf.print_on(_cont, tty);
2705 
2706     frame caller;
2707     thaw<true>(hf, caller, num_frames);
2708 
2709     assert (_cont.num_frames() == orig_num_frames - _frames, "cont.is_empty: %d num_frames: %d orig_num_frames: %d frame_count: %d", _cont.is_empty(), _cont.num_frames(), orig_num_frames, _frames);
2710     assert (mode != mode_fast || _fastpath, "");
2711     return _fastpath;
2712   }
2713 
2714   template<bool top>
2715   void thaw(const hframe& hf, frame& caller, int num_frames) {
2716     assert (num_frames > 0 && !hf.is_empty(), "");
2717 
2718     // Dynamically branch on frame type
2719     if (mode == mode_preempt && top && !hf.is_interpreted_frame()) {
2720       assert (is_stub(hf.cb()), "");
2721       recurse_stub_frame(hf, caller, num_frames);
2722     } else if (mode == mode_fast || !hf.is_interpreted_frame()) {
2723       recurse_compiled_frame<top>(hf, caller, num_frames);
2724     } else {
2725       assert (mode != mode_fast, "");
2726       recurse_interpreted_frame<top>(hf, caller, num_frames);
2727     }
2728   }
2729 
2730   template<typename FKind, bool top>
2731   void recurse_thaw_java_frame(const hframe& hf, frame& caller, int num_frames, void* extra) {
2732     assert (num_frames > 0, "");
2733 
2736     hframe hsender = hf.sender<FKind, mode>(_cont, FKind::interpreted ? (InterpreterOopMap*)extra : NULL, FKind::extra_oops); // TODO PERF maybe we can reuse fsize?
2737 
2738     bool is_empty = hsender.is_empty();
2739     if (num_frames == 1 || is_empty) {
2740       log_develop_trace(jvmcont)("is_empty: %d", is_empty);
2741       finalize<FKind>(hsender, hf, is_empty, caller);
2742       thaw_java_frame<FKind, top, true>(hf, caller, extra);
2743     } else {
      bool safepoint_stub_caller; // the use of _safepoint_stub_caller is not nice, but since preemption is not performance-critical, we don't want to add either a template or a regular parameter
2745       if (mode == mode_preempt) {
2746         safepoint_stub_caller = _safepoint_stub_caller;
2747         _safepoint_stub_caller = false;
2748       }
2749 
2750       thaw<false>(hsender, caller, num_frames - 1); // recurse
2751 
2752       if (mode == mode_preempt) _safepoint_stub_caller = safepoint_stub_caller; // restore _stub_caller
2753 
2754       thaw_java_frame<FKind, top, false>(hf, caller, extra);
2755     }
2756 
2757     if (top) {
2758       finish(caller); // caller is now the current frame
2759     }
2760 
2761     DEBUG_ONLY(_frames++;)
2762   }
2763 
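  // Called when the recursion bottoms out, either because the continuation has no
  // more frames or because the num_frames budget is spent. Materializes the entry
  // frame into 'entry' and either marks the continuation empty or records the new
  // last (still frozen) frame; for a compiled callee it also re-adds the stack
  // argument size that thaw_compiled_frame will subtract.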
2764   template<typename FKind>
2765   void finalize(const hframe& hf, const hframe& callee, bool is_empty, frame& entry) {
2766     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 115) return;)
2767 
2768     entry = new_entry_frame();
2769     // if (entry.is_interpreted_frame()) _fastpath = false; // set _fastpath if entry is interpreted ? 
2770 
2771   #ifdef ASSERT
2772     log_develop_trace(jvmcont)("Found entry:");
2773     print_vframe(entry);
2774     assert_bottom_java_frame_name(entry, RUN_SIG);
2775   #endif
2776 
2777     if (is_empty) {
2778       _cont.set_empty();
2779 
      // This is part of the mechanism to pop stack-passed compiler arguments; see generate_cont_thaw's no_saved_sp label.
      // We use thread->_cont_frame->sp rather than the continuations themselves (which allow nesting) because it's faster and simpler.
      // For that to work, we rely on the fact that parent continuations have at least Continuation.run on the stack, which does not require stack arguments.
2783       _cont.thread()->cont_frame()->sp = NULL;
2784       // _cont.set_entryPC(NULL);
2785     } else {
2786       _cont.set_last_frame<mode>(hf); // _last_frame = hf;
2787       if (!FKind::interpreted && !hf.is_interpreted_frame()) {
2788         int argsize;
2789     #ifdef CONT_DOUBLE_NOP
2790         CachedCompiledMetadata md = ContinuationHelper::cached_metadata<mode>(callee);
2791         if (LIKELY(!md.empty())) {
2792           argsize = md.stack_argsize();
2793           assert(argsize == slow_stack_argsize(callee), "argsize: %d slow_stack_argsize: %d", argsize, slow_stack_argsize(callee));
2794         } else
2795     #endif
2796           argsize = callee.compiled_frame_stack_argsize();
2797         // we'll be subtracting the argsize in thaw_compiled_frame, but if the caller is compiled, we shouldn't
2798         _cont.add_size(argsize);
2799       } 
2800       // else {
2801       //   _fastpath = false; // see discussion in Freeze::freeze_compiled_frame
2802       // }
2803     }
2804 
2805     assert (is_entry_frame(_cont, entry), "");
2806     assert (_frames == 0, "");
2807     assert (is_empty == _cont.is_empty() /* _last_frame.is_empty()*/, "hf.is_empty(cont): %d last_frame.is_empty(): %d ", is_empty, _cont.is_empty()/*_last_frame.is_empty()*/);
2808   }
2809 
2810   template<typename FKind, bool top, bool bottom>
2811   void thaw_java_frame(const hframe& hf, frame& caller, void* extra) {
2812     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 115) return;)
2813 
2814     log_develop_trace(jvmcont)("============================= THAWING FRAME:");
2815 
2816     assert (FKind::is_instance(hf), "");
2817     assert (bottom == is_entry_frame(_cont, caller), "");
2818 
2819     if (log_develop_is_enabled(Trace, jvmcont)) hf.print(_cont);
2820 
2821     log_develop_trace(jvmcont)("stack_length: %d", _cont.stack_length());
2822 
2823     // TODO PERF see partial_copy in Freeze
2824     caller = FKind::interpreted ? thaw_interpreted_frame    <top, bottom>(hf, caller, (InterpreterOopMap*)extra)
2825                                 : thaw_compiled_frame<FKind, top, bottom>(hf, caller, (ThawFnT)extra);
2826 
2827     log_develop_trace(jvmcont)("thawed frame:");
2828     DEBUG_ONLY(print_vframe(caller, &dmap);)
2829   }
2830 
2831   template <typename FKind>
2832   void thaw_oops(frame& f, intptr_t* vsp, int oop_index, void* extra) {
2833     PERFTEST_ONLY(if (PERFTEST_LEVEL < 130) return;)
2834 
2835     log_develop_trace(jvmcont)("Walking oops (thaw)");
2836 
2837     assert (!_map.include_argument_oops(), "");
2838 
2839     int thawed;
2840     if (!FKind::interpreted && extra != NULL) {
2841       thawed = thaw_compiled_oops_stub(f, (ThawFnT)extra, vsp, oop_index);
2842       //log_develop_info(jvmcont)("thawing %d oops from %d (stub)", thawed, oop_index);
2843     } else {
2844       int num_oops = FKind::interpreted ? Interpreted::num_oops(f, (InterpreterOopMap*)extra) : NonInterpreted<FKind>::num_oops(f);
2845       num_oops -= FKind::extra_oops;
2846       //log_develop_info(jvmcont)("thawing %d oops from %d", num_oops, oop_index);
2847       if (num_oops == 0) {
2848         if (FKind::extra_oops > 0) {
2849           _cont.null_ref_stack(oop_index, FKind::extra_oops);
2850         }
2851         return;
2852       }
2853 
2854       thawed = FKind::interpreted ? thaw_interpreted_oops(f, vsp, oop_index, (InterpreterOopMap*)extra)
2855                                   : thaw_compiled_oops   (f, vsp, oop_index);
2856     }
2857 
2858     log_develop_trace(jvmcont)("count: %d", thawed);
2859 #ifdef ASSERT
2860     int num_oops = FKind::interpreted ? Interpreted::num_oops(f, (InterpreterOopMap*)extra) : NonInterpreted<FKind>::num_oops(f);
    assert(thawed == num_oops - FKind::extra_oops, "closure oop count differs");
2862 #endif
2863 
2864     _cont.null_ref_stack(oop_index, thawed + FKind::extra_oops);
2865     _cont.e_add_refs(thawed);
2866 
2867     log_develop_trace(jvmcont)("Done walking oops");
2868   }
2869 
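  // Patch the newly thawed frame into its caller: if this is the bottom thawed
  // frame and frozen frames remain, redirect the return address to the return
  // barrier stub so that returning triggers the next thaw; otherwise re-install
  // the caller's raw pc, which also routes through the deopt handler when the
  // caller has been deoptimized. patch_pd performs the platform-specific fp/link
  // fixups.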
2870   template<typename FKind, bool top, bool bottom>
2871   inline void patch(frame& f, const frame& caller) {
2872     if (bottom && !_cont.is_empty()) {
2873       log_develop_trace(jvmcont)("Setting return address to return barrier: " INTPTR_FORMAT, p2i(StubRoutines::cont_returnBarrier()));
2874       FKind::interpreted ? Interpreted::patch_return_pc(f, StubRoutines::cont_returnBarrier())
2875                          : FKind::patch_pc(caller, StubRoutines::cont_returnBarrier());
2876     } else if (bottom || should_deoptimize()) {
2877       FKind::interpreted ? Interpreted::patch_return_pc(f, caller.raw_pc())
2878                          : FKind::patch_pc(caller, caller.raw_pc()); // this patches the return address to the deopt handler if necessary
2879     }
2880     patch_pd<FKind, top, bottom>(f, caller);
2881 
2882     if (FKind::interpreted) {
2883       Interpreted::patch_sender_sp(f, caller.unextended_sp()); // ContMirror::derelativize(vfp, frame::interpreter_frame_sender_sp_offset);
2884     }
2885 
2886     assert (!bottom || !_cont.is_empty() || assert_bottom_java_frame_name(f, ENTER_SIG), "");
2887     assert (!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "cont.is_empty(): %d is_cont_barrier_frame(f): %d ", _cont.is_empty(), Continuation::is_cont_barrier_frame(f));
2888   }
2889 
2890   template<bool top>
2891   NOINLINE void recurse_interpreted_frame(const hframe& hf, frame& caller, int num_frames) {
2892     // ResourceMark rm(_thread);
2893     InterpreterOopMap mask;
2894     hf.interpreted_frame_oop_map(&mask);
2895     int fsize = hf.interpreted_frame_size();
2896     int oops  = hf.interpreted_frame_num_oops(mask);
2897 
2898     recurse_thaw_java_frame<Interpreted, top>(hf, caller, num_frames, (void*)&mask);
2899   }
2900 
2901   template<bool top, bool bottom>
2902   frame thaw_interpreted_frame(const hframe& hf, const frame& caller, InterpreterOopMap* mask) {
2903     int fsize = hf.interpreted_frame_size();
2904     log_develop_trace(jvmcont)("fsize: %d", fsize);
2905     intptr_t* vsp = (intptr_t*)((address)caller.unextended_sp() - fsize);
2906     intptr_t* hsp = _cont.stack_address(hf.sp());
2907 
2908     frame f = new_frame<Interpreted>(hf, vsp);
2909 
2910     // if the caller is compiled we should really extend its sp to be our fp + 2 (1 for the return address, plus 1), but we don't bother as we don't use it
2911 
2912     thaw_raw_frame(hsp, vsp, fsize);
2913 
2914     derelativize_interpreted_frame_metadata(hf, f);
2915 
2916     thaw_oops<Interpreted>(f, f.sp(), hf.ref_sp(), mask);
2917 
2918     patch<Interpreted, top, bottom>(f, caller);
2919 
2920     assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2921     assert(Interpreted::frame_bottom(f) <= Frame::frame_top(caller), "");
2922 
2923     _cont.sub_size(fsize);
2924     _cont.dec_num_frames();
2925     _cont.dec_num_interpreted_frames();
2926 
2927     _fastpath = false;
2928 
2929     return f;
2930   }
2931 
2932   int thaw_interpreted_oops(frame& f, intptr_t* vsp, int starting_index, InterpreterOopMap* mask) {
2933     assert (mask != NULL, "");
2934 
2935     ThawOopFn oopFn(&_cont, &f, starting_index, vsp, &_map);
2936     f.oops_interpreted_do(&oopFn, NULL, mask); // f.oops_do(&oopFn, NULL, &oopFn, &_map);
2937     return oopFn.count();
2938   }
2939 
2940   template<bool top>
2941   void recurse_compiled_frame(const hframe& hf, frame& caller, int num_frames) {
    ThawFnT t_fn = get_oopmap_stub(hf); // try to do this early so that we won't need to look at the oopMap again
2943 
2944     return recurse_thaw_java_frame<Compiled, top>(hf, caller, num_frames, (void*)t_fn);
2945   }
2946 
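  // Thaw a single compiled (or stub) frame: compute its size (from the cached
  // metadata under CONT_DOUBLE_NOP, otherwise from the code blob), extend the
  // copy downward by the stack-argument size when we're the bottom frame or the
  // caller is interpreted (the arguments live in the caller's frozen frame),
  // copy the raw words, thaw the frame's oops, patch it to its caller, and
  // deoptimize it if required.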
2947   template<typename FKind, bool top, bool bottom>
2948   frame thaw_compiled_frame(const hframe& hf, const frame& caller, ThawFnT t_fn) {
2949     thaw_compiled_frame_bp();
2950     assert(FKind::stub == is_stub(hf.cb()), "");
2951     assert (caller.sp() == caller.unextended_sp(), "");
2952 
2953     int fsize;
2954 #ifdef CONT_DOUBLE_NOP
2955     CachedCompiledMetadata md;
2956     if (mode != mode_preempt) {
2957       md = ContinuationHelper::cached_metadata(hf.pc());
2958       fsize = md.size();
2959     }
2960     if (mode == mode_preempt || UNLIKELY(fsize == 0))
2961 #endif
2962       fsize = hf.compiled_frame_size();
    assert(fsize == slow_size(hf), "fsize: %d slow_size: %d", fsize, slow_size(hf));
2964     log_develop_trace(jvmcont)("fsize: %d", fsize);
2965 
2966     intptr_t* vsp = (intptr_t*)((address)caller.unextended_sp() - fsize);
2967     log_develop_trace(jvmcont)("vsp: " INTPTR_FORMAT, p2i(vsp));
2968 
2969     if (bottom || (mode != mode_fast && caller.is_interpreted_frame())) {
      log_develop_trace(jvmcont)("thaw_compiled_frame add argsize: fsize: %d argsize: %d fsize+argsize: %d", fsize, hf.compiled_frame_stack_argsize(), fsize + hf.compiled_frame_stack_argsize());
2971       int argsize;
2972   #ifdef CONT_DOUBLE_NOP
2973       if (mode != mode_preempt && LIKELY(!md.empty())) {
2974         argsize = md.stack_argsize();
2975         assert(argsize == slow_stack_argsize(hf), "argsize: %d slow_stack_argsize: %d", argsize, slow_stack_argsize(hf));
2976       } else
2977   #endif
2978         argsize = hf.compiled_frame_stack_argsize();
2979 
2980       fsize += argsize;
2981       vsp   -= argsize >> LogBytesPerWord;
2982 
2983       const_cast<frame&>(caller).set_sp((intptr_t*)((address)caller.sp() - argsize));
2984       assert (caller.sp() == (intptr_t*)((address)vsp + (fsize-argsize)), "");
2985 
2986       vsp = align<FKind, top, bottom>(hf, vsp, const_cast<frame&>(caller));
2987     }
2988 
2989     _cont.sub_size(fsize);
2990 
2991     intptr_t* hsp = _cont.stack_address(hf.sp());
2992 
2993     log_develop_trace(jvmcont)("hsp: %d ", _cont.stack_index(hsp));
2994 
2995     frame f = new_frame<FKind>(hf, vsp);
2996 
2997     thaw_raw_frame(hsp, vsp, fsize);
2998 
2999     if (!FKind::stub) {
3000       if (mode == mode_preempt && _safepoint_stub_caller) {
3001         _safepoint_stub_f = thaw_safepoint_stub(f);
3002       }
3003 
3004       thaw_oops<FKind>(f, f.sp(), hf.ref_sp(), (void*)t_fn);
3005     }
3006 
3007     patch<FKind, top, bottom>(f, caller);
3008 
3009     _cont.dec_num_frames();
3010 
3011     if (!FKind::stub) {
3012       if (f.is_deoptimized_frame()) { // TODO PERF
3013         _fastpath = false;
3014       } else if (should_deoptimize()
3015           && (hf.cb()->as_compiled_method()->is_marked_for_deoptimization() || (mode != mode_fast && _thread->is_interp_only_mode()))) {
3016         log_develop_trace(jvmcont)("Deoptimizing thawed frame");
3017         DEBUG_ONLY(Frame::patch_pc(f, NULL));
3018 
3019         f.deoptimize(_thread); // we're assuming there are no monitors; this doesn't revoke biased locks
3020         // set_anchor(_thread, f); // deoptimization may need this
3021         // Deoptimization::deoptimize(_thread, f, &_map); // gets passed frame by value 
3022         // clear_anchor(_thread);
3023 
3024         assert (f.is_deoptimized_frame() && is_deopt_return(f.raw_pc(), f), 
3025           "f.is_deoptimized_frame(): %d is_deopt_return(f.raw_pc()): %d is_deopt_return(f.pc()): %d", 
3026           f.is_deoptimized_frame(), is_deopt_return(f.raw_pc(), f), is_deopt_return(f.pc(), f));
3027         _fastpath = false;
3028       } 
3029     }
3030 
3031     return f;
3032   }
3033 
3034   int thaw_compiled_oops(frame& f, intptr_t* vsp, int starting_index) {
3035     DEBUG_ONLY(intptr_t* tmp_fp = f.fp();) // TODO PD
3036 
    // Thawing the oops overwrites the link in the callee if rbp contained an oop (only possible if we're compiled).
3038     // This only matters when we're the top frame, as that's the value that will be restored into rbp when we jump to continue.
3039     ContinuationHelper::update_register_map(&_map, frame_callee_info_address(f));
3040 
3041     ThawOopFn oopFn(&_cont, &f, starting_index, vsp, &_map);
3042     OopMapDo<ThawOopFn, ThawOopFn, IncludeAllValues> visitor(&oopFn, &oopFn);
3043     visitor.oops_do(&f, &_map, f.oop_map());
3044 
3045     DEBUG_ONLY(if (tmp_fp != f.fp()) log_develop_trace(jvmcont)("WHOA link has changed (thaw) f.fp: " INTPTR_FORMAT " link: " INTPTR_FORMAT, p2i(f.fp()), p2i(tmp_fp));) // TODO PD
3046 
3047     int cnt = oopFn.count();
3048     return cnt;
3049   }
3050 
3051   int thaw_compiled_oops_stub(frame& f, ThawFnT t_fn, intptr_t* vsp, int starting_index) {
3052     typename ConfigT::OopT* addr = _cont.refStack()->template obj_at_address<typename ConfigT::OopT>(starting_index);
3053     int cnt = t_fn((address) vsp, (address)addr, (address)frame_callee_info_address(f)); // write the link straight into the frame struct
3054     return cnt;
3055   }
3056 
3057   void finish(frame& f) {
3058     PERFTEST_ONLY(if (PERFTEST_LEVEL <= 115) return;)
3059 
3060     setup_jump(f);
3061 
3062     // _cont.set_last_frame(_last_frame);
3063 
3064     assert (!CONT_FULL_STACK || _cont.is_empty(), "");
3065     assert (_cont.is_empty() == _cont.last_frame<mode_slow>().is_empty(), "cont.is_empty: %d cont.last_frame().is_empty(): %d", _cont.is_empty(), _cont.last_frame<mode_slow>().is_empty());
3066     assert (_cont.is_empty() == (_cont.max_size() == 0), "cont.is_empty: %d cont.max_size: " SIZE_FORMAT, _cont.is_empty(), _cont.max_size());
3067     assert (_cont.is_empty() == (_cont.num_frames() == 0), "cont.is_empty: %d num_frames: %d", _cont.is_empty(), _cont.num_frames());
3068     assert (_cont.is_empty() <= (_cont.num_interpreted_frames() == 0), "cont.is_empty: %d num_interpreted_frames: %d", _cont.is_empty(), _cont.num_interpreted_frames());
3069 
3070     log_develop_trace(jvmcont)("thawed %d frames", _frames);
3071 
3072     log_develop_trace(jvmcont)("top_hframe after (thaw):");
3073     if (log_develop_is_enabled(Trace, jvmcont)) _cont.last_frame<mode_slow>().print_on(_cont, tty);
3074   }
3075 
3076   void setup_jump(frame& f) {
3077     assert (!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_compiled_method()->is_deopt_pc(f.raw_pc()), "");
3078     assert (!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
3079 
3080     assert ((address)(_fi + 1) < (address)f.sp(), "");
3081     _fi->sp = f.sp();
3082     address pc = f.raw_pc();
3083     _fi->pc = pc;
3084     ContinuationHelper::to_frame_info_pd(f, _fi);
3085 
    Frame::patch_pc(f, pc); // this is checked in case we want to deopt the frame in a full transition
3087 
3088     assert (mode == mode_preempt || !CONT_FULL_STACK || assert_top_java_frame_name(f, YIELD0_SIG), "");
3089   }
3090 
3091   void recurse_stub_frame(const hframe& hf, frame& caller, int num_frames) {
3092     log_develop_trace(jvmcont)("Found safepoint stub");
3093 
3094     assert (num_frames > 1, "");
3095     assert (mode == mode_preempt, "");
3096     assert(!hf.is_bottom<StubF>(_cont), "");
3097 
3098     assert (hf.compiled_frame_num_oops() == 0, "");
3099 
3100     _safepoint_stub = &hf;
3101     _safepoint_stub_caller = true;
3102 
3103     hframe hsender = hf.sender<StubF, mode>(_cont, 0);
3104     assert (!hsender.is_interpreted_frame(), "");
3105     recurse_compiled_frame<false>(hsender, caller, num_frames - 1);
3106 
3107     _safepoint_stub_caller = false;
3108 
    // The call above, made on the stub's sender, also thaws the safepoint stub itself (see thaw_safepoint_stub), leaving it in _safepoint_stub_f.
3110     finish(_safepoint_stub_f);
3111 
3112     DEBUG_ONLY(_frames++;)
3113   }
3114 
3115   NOINLINE frame thaw_safepoint_stub(frame& caller) {
    // A safepoint stub is the only case in which we encounter callee-saved registers (aside from rbp). We therefore thaw that frame
3117     // before thawing the oops in its sender, as the oops will need to be written to that stub frame.
3118     log_develop_trace(jvmcont)("THAWING SAFEPOINT STUB");
3119 
3120     assert(mode == mode_preempt, "");
3121     assert (_safepoint_stub != NULL, "");
3122 
3123     hframe stubf = *_safepoint_stub;
3124     _safepoint_stub_caller = false;
3125     _safepoint_stub = NULL;
3126 
3127     frame f = thaw_compiled_frame<StubF, true, false>(stubf, caller, NULL);
3128 
3129     f.oop_map()->update_register_map(&f, _map.as_RegisterMap());
3130     log_develop_trace(jvmcont)("THAWING OOPS FOR SENDER OF SAFEPOINT STUB");
3131     return f;
3132   }
3133 
3134   inline ThawFnT get_oopmap_stub(const hframe& f) {
3135     if (!ConfigT::allow_stubs)
3136       return NULL;
3137     return ContinuationHelper::thaw_stub<mode>(f);
3138   }
3139 
3140   inline void thaw_raw_frame(intptr_t* hsp, intptr_t* vsp, int fsize) {
3141     log_develop_trace(jvmcont)("thaw_raw_frame: sp: %d", _cont.stack_index(hsp));
3142     _cont.copy_from_stack(hsp, vsp, fsize);
3143   }
3144 
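  // ThawOopFn writes oops back into a thawed frame: for each oop location found
  // by the oop-map walk it loads the object from the continuation's refStack at
  // the running index _i (obj_at applies the required load barrier) and stores
  // it into the frame slot, so the frame once again holds real heap references
  // rather than indices.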
3145   class ThawOopFn : public ContOopBase<RegisterMapT> {
3146   private:
3147     int _i;
3148 
3149   protected:
3150     template <class T> inline void do_oop_work(T* p) {
3151       this->process(p);
3152       oop obj = this->_cont->obj_at(_i); // does a HeapAccess<IN_HEAP_ARRAY> load barrier
3153 
3154       assert (oopDesc::is_oop_or_null(obj), "invalid oop");
3155       log_develop_trace(jvmcont)("i: %d", _i); print_oop(p, obj);
3156       
3157       NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(p, obj);
3158       _i++;
3159     }
3160   public:
3161     ThawOopFn(ContMirror* cont, frame* fr, int index, void* vsp, RegisterMapT* map)
3162       : ContOopBase<RegisterMapT>(cont, fr, map, vsp) { _i = index; }
3163     void do_oop(oop* p)       { do_oop_work(p); }
3164     void do_oop(narrowOop* p) { do_oop_work(p); }
3165 
3166     void do_derived_oop(oop *base_loc, oop *derived_loc) {
3167       assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop: " INTPTR_FORMAT " (at " INTPTR_FORMAT ")", p2i((oopDesc*)*base_loc), p2i(base_loc));
3168       assert(derived_loc != base_loc, "Base and derived in same location");
3169       DEBUG_ONLY(this->verify(base_loc);)
3170       DEBUG_ONLY(this->verify(derived_loc);)
3171       assert (oopDesc::is_oop_or_null(*base_loc), "invalid oop");
3172 
3173       intptr_t offset = *(intptr_t*)derived_loc;
3174 
3175       log_develop_trace(jvmcont)(
3176         "Continuation thaw derived pointer@" INTPTR_FORMAT " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
3177         p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset);
3178 
3179       oop obj = cast_to_oop(cast_from_oop<intptr_t>(*base_loc) + offset);
3180       *derived_loc = obj;
3181 
3182       assert(Universe::heap()->is_in_or_null(obj), "");
3183     }
3184   };
3185 };
3186 
3187 static void post_JVMTI_continue(JavaThread* thread, FrameInfo* fi, int java_frame_count) {
3188   if (JvmtiExport::should_post_continuation_run()) {
3189     set_anchor<false>(thread, fi); // ensure thawed frames are visible
3190     JvmtiExport::post_continuation_run(JavaThread::current(), java_frame_count);
3191     clear_anchor(thread);
3192   }
3193 
3194   invlidate_JVMTI_stack(thread);
3195 }
3196 
3197 // fi->pc is the return address -- the entry
3198 // fi->sp is the top of the stack after thaw
3199 // fi->fp current rbp
3200 // called after preparations (stack overflow check and making room)
3201 static inline void thaw0(JavaThread* thread, FrameInfo* fi, const bool return_barrier) {
3202   // NoSafepointVerifier nsv;
3203   EventContinuationThaw event;
3204 
3205   if (return_barrier) {
3206     log_develop_trace(jvmcont)("== RETURN BARRIER");
3207   }
3208   const int num_frames = thaw_num_frames(return_barrier);
3209 
3210   log_develop_trace(jvmcont)("~~~~~~~~~ thaw num_frames: %d", num_frames);
3211   log_develop_trace(jvmcont)("sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT, p2i(fi->sp), p2i(fi->fp), p2i(fi->pc));
3212 
3213   oop oopCont = get_continuation(thread);
3214   ContMirror cont(thread, oopCont);
3215   log_develop_debug(jvmcont)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
3216 
3217   cont.set_entrySP(fi->sp);
3218   cont.set_entryFP(fi->fp);
3219   if (!return_barrier) { // not return barrier
3220     cont.set_entryPC(fi->pc);
3221   }
3222 
3223 #ifdef ASSERT
3224   set_anchor(cont); // required for assert(thread->frame_anchor()->has_last_Java_frame()) in frame::deoptimize
3225 //   print_frames(thread);
3226 #endif
3227 
3228   assert(num_frames > 0, "num_frames <= 0: %d", num_frames);
3229   assert(!cont.is_empty(), "no more frames");
3230 
3231   int java_frame_count = -1;
3232   if (!return_barrier && JvmtiExport::should_post_continuation_run()) {
3233     java_frame_count = num_java_frames(cont);
3234   }
3235 
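  // Choose the thaw mode: a continuation frozen at a safepoint (forced preemption)
  // needs the preempt path to deal with the safepoint stub on top; if all frames
  // are compiled and the thread is not in interp-only mode we can take the fast
  // path, and otherwise we fall back to the slow one.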
3236   bool res; // whether only compiled frames are thawed
3237   if (cont.is_flag(FLAG_SAFEPOINT_YIELD)) {
3238     res = cont_thaw<mode_preempt>(thread, cont, fi, num_frames);
3239   } else if (cont.num_interpreted_frames() == 0 && !thread->is_interp_only_mode()) {
3240     res = cont_thaw<mode_fast>(thread, cont, fi, num_frames);
3241   } else {
3242     res = cont_thaw<mode_slow>(thread, cont, fi, num_frames);
3243   }
3244 
3245   cont.write();
3246 
3247   thread->set_cont_fastpath(res);
3248 
3249   log_develop_trace(jvmcont)("fi->sp: " INTPTR_FORMAT " fi->fp: " INTPTR_FORMAT " fi->pc: " INTPTR_FORMAT, p2i(fi->sp), p2i(fi->fp), p2i(fi->pc));
3250 
3251 #ifndef PRODUCT
3252   set_anchor<false>(thread, fi);
3253   print_frames(thread, tty); // must be done after write(), as frame walking reads fields off the Java objects.
3254   clear_anchor(thread);
3255 #endif
3256 
3257   if (log_develop_is_enabled(Trace, jvmcont)) {
3258     log_develop_trace(jvmcont)("Jumping to frame (thaw):");
3259     frame f = frame(fi->sp, fi->fp, fi->pc);
3260     print_vframe(f, NULL);
3261   }
3262 
3263   DEBUG_ONLY(thread->_continuation = oopCont;)
3264 
3265   cont.post_jfr_event(&event);
3266   if (!return_barrier) {
3267     post_JVMTI_continue(thread, fi, java_frame_count);
3268   }
3269 
3270   log_develop_debug(jvmcont)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
3271 }
3272 
3273 // IN:  fi->sp = the future SP of the topmost thawed frame (where we'll copy the thawed frames)
3274 // Out: fi->sp = the SP of the topmost thawed frame -- the one we will resume at
3275 //      fi->fp = the FP " ...
3276 //      fi->pc = the PC " ...
3277 // JRT_ENTRY(void, Continuation::thaw(JavaThread* thread, FrameInfo* fi, int num_frames))
3278 JRT_LEAF(address, Continuation::thaw_leaf(FrameInfo* fi, bool return_barrier, bool exception))
3279   //callgrind();
3280   PERFTEST_ONLY(PERFTEST_LEVEL = ContPerfTest;)
3281 
3282   thaw0(JavaThread::current(), fi, return_barrier);
3283   // clear_anchor(JavaThread::current());
3284 
3285   if (exception) {
3286     // TODO: handle deopt. see TemplateInterpreterGenerator::generate_throw_exception, OptoRuntime::handle_exception_C, OptoRuntime::handle_exception_helper
3287     // assert (!top.is_deoptimized_frame(), ""); -- seems to be handled
3288     address ret = fi->pc;
3289     fi->pc = SharedRuntime::raw_exception_handler_for_return_address(JavaThread::current(), fi->pc);
3290     return ret;
3291   } else {
3292     return reinterpret_cast<address>(Interpreter::contains(fi->pc)); // TODO PERF: really only necessary in the case of continuing from a forced yield
3293   }
3294 JRT_END
3295 
3296 JRT_ENTRY(address, Continuation::thaw(JavaThread* thread, FrameInfo* fi, bool return_barrier, bool exception))
3297   //callgrind();
3298   PERFTEST_ONLY(PERFTEST_LEVEL = ContPerfTest;)
3299 
3300   assert(thread == JavaThread::current(), "");
3301 
3302   thaw0(thread, fi, return_barrier);
3303   set_anchor<false>(thread, fi); // we're in a full transition that expects last_java_frame
3304 
3305   if (exception) {
3306     // TODO: handle deopt. see TemplateInterpreterGenerator::generate_throw_exception, OptoRuntime::handle_exception_C, OptoRuntime::handle_exception_helper
3307     // assert (!top.is_deoptimized_frame(), ""); -- seems to be handled
3308     address ret = fi->pc;
3309     fi->pc = SharedRuntime::raw_exception_handler_for_return_address(JavaThread::current(), fi->pc);
3310     return ret;
3311   } else {
3312     return reinterpret_cast<address>(Interpreter::contains(fi->pc)); // TODO PERF: really only necessary in the case of continuing from a forced yield
3313   }
3314 JRT_END
3315 
3316 bool Continuation::is_continuation_entry_frame(const frame& f, const RegisterMap* map) {
3317   Method* m = (map->in_cont() && f.is_interpreted_frame()) ? Continuation::interpreter_frame_method(f, map)
3318                                                            : Frame::frame_method(f);
3319   if (m == NULL)
3320     return false;
3321 
3322   // we can do this because the entry frame is never inlined
3323   return m->intrinsic_id() == vmIntrinsics::_Continuation_enter;
3324 }
3325 
3326 bool Continuation::is_cont_post_barrier_entry_frame(const frame& f) {
3327   return is_return_barrier_entry(Frame::real_pc(f));
3328 }
3329 
3330 // When walking the virtual stack, this method returns true
3331 // iff the frame is a thawed continuation frame whose
3332 // caller is still frozen on the h-stack.
3333 // The continuation object can be extracted from the thread.
3334 bool Continuation::is_cont_barrier_frame(const frame& f) {
3335 #ifdef CONT_DOUBLE_NOP
3336   #ifdef ASSERT
3337     if (!f.is_interpreted_frame()) return is_return_barrier_entry(slow_return_pc(f));
3338   #endif
3339 #endif
3340   assert (f.is_interpreted_frame() || f.cb() != NULL, "");
3341   return is_return_barrier_entry(f.is_interpreted_frame() ? Interpreted::return_pc(f) : Compiled::return_pc(f));
3342 }
3343 
3344 bool Continuation::is_return_barrier_entry(const address pc) {
3345   return pc == StubRoutines::cont_returnBarrier();
3346 }
3347 
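// The stack grows down, so a frame belongs to the continuation iff its sp is
// strictly below the continuation's entrySP. For example, with entrySP == 0x7000
// a frame at sp == 0x6f00 is inside the continuation, while the entry frame
// itself (sp >= 0x7000) is not.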
3348 static inline bool is_sp_in_continuation(intptr_t* const sp, oop cont) {
3349   // tty->print_cr(">>>> is_sp_in_continuation cont: %p sp: %p entry: %p in: %d", (oopDesc*)cont, sp, java_lang_Continuation::entrySP(cont), java_lang_Continuation::entrySP(cont) > sp);
3350   return java_lang_Continuation::entrySP(cont) > sp;
3351 }
3352 
3353 bool Continuation::is_frame_in_continuation(const frame& f, oop cont) {
3354   return is_sp_in_continuation(f.unextended_sp(), cont);
3355 }
3356 
3357 static oop get_continuation_for_frame(JavaThread* thread, intptr_t* const sp) {
3358   oop cont = get_continuation(thread);
3359   while (cont != NULL && !is_sp_in_continuation(sp, cont)) {
3360     cont = java_lang_Continuation::parent(cont);
3361   }
3362   // tty->print_cr("get_continuation_for_frame cont: %p entrySP: %p", (oopDesc*)cont, cont != NULL ? java_lang_Continuation::entrySP(cont): NULL);
3363   return cont;
3364 }
3365 
3366 oop Continuation::get_continutation_for_frame(JavaThread* thread, const frame& f) {
3367   return get_continuation_for_frame(thread, f.unextended_sp());
3368 }
3369 
3370 bool Continuation::is_frame_in_continuation(JavaThread* thread, const frame& f) {
3371   return get_continuation_for_frame(thread, f.unextended_sp()) != NULL;
3372 }
3373 
3374 address* Continuation::get_continuation_entry_pc_for_sender(Thread* thread, const frame& f, address* pc_addr0) {
3375   if (!thread->is_Java_thread()) 
3376     return pc_addr0;
3377   oop cont = get_continuation_for_frame((JavaThread*)thread, f.unextended_sp() - 1);
3378   if (cont == NULL)
3379     return pc_addr0;
3380   if (is_sp_in_continuation(f.unextended_sp(), cont))
3381     return pc_addr0; // not the run frame
3382   if (*pc_addr0 == f.raw_pc())
3383     return pc_addr0;
3384   
3385   address *pc_addr = java_lang_Continuation::entryPC_addr(cont);
  // If our callee is the entry frame, we can continue as usual because we use the ordinary return address; see Freeze::setup_jump
3387   // If the entry frame is the callee, we set entryPC_addr to NULL in Thaw::finalize
3388   // if (*pc_addr == NULL) {
3389   //   assert (!is_return_barrier_entry(*pc_addr0), "");
3390   //   return pc_addr0;
3391   // }
3392  
3393   // tty->print_cr(">>>> get_continuation_entry_pc_for_sender"); f.print_on(tty);
3394   log_develop_trace(jvmcont)("get_continuation_entry_pc_for_sender pc_addr: " INTPTR_FORMAT " *pc_addr: " INTPTR_FORMAT, p2i(pc_addr), p2i(*pc_addr));
3395   DEBUG_ONLY(if (log_develop_is_enabled(Trace, jvmcont)) { print_blob(tty, *pc_addr); print_blob(tty, *(address*)(f.sp()-1)); })
3396   // if (log_develop_is_enabled(Trace, jvmcont)) { os::print_location(tty, (intptr_t)pc_addr); os::print_location(tty, (intptr_t)*pc_addr); }
3397 
3398   return pc_addr;
3399 }
3400 
3401 bool Continuation::fix_continuation_bottom_sender(JavaThread* thread, const frame& callee, address* sender_pc, intptr_t** sender_sp, intptr_t** sender_fp) {
3402   // TODO : this code and its use sites, as well as get_continuation_entry_pc_for_sender, probably need more work
3403   if (thread != NULL && is_return_barrier_entry(*sender_pc)) {
    log_develop_trace(jvmcont)("fix_continuation_bottom_sender callee:"); if (log_develop_is_enabled(Trace, jvmcont)) callee.print_value_on(tty, thread);
3405     log_develop_trace(jvmcont)("fix_continuation_bottom_sender: sender_pc: " INTPTR_FORMAT " sender_sp: " INTPTR_FORMAT " sender_fp: " INTPTR_FORMAT, p2i(*sender_pc), p2i(*sender_sp), p2i(*sender_fp));
3406       
3407     oop cont = get_continuation_for_frame(thread, callee.is_interpreted_frame() ? callee.interpreter_frame_last_sp() : callee.unextended_sp());
3408     assert (cont != NULL, "callee.unextended_sp(): " INTPTR_FORMAT, p2i(callee.unextended_sp()));
3409     log_develop_trace(jvmcont)("fix_continuation_bottom_sender: continuation entrySP: " INTPTR_FORMAT " entryPC: " INTPTR_FORMAT " entryFP: " INTPTR_FORMAT, 
3410       p2i(java_lang_Continuation::entrySP(cont)), p2i(java_lang_Continuation::entryPC(cont)), p2i(java_lang_Continuation::entryFP(cont)));
3411 
3412     address new_pc = java_lang_Continuation::entryPC(cont);
3413     log_develop_trace(jvmcont)("fix_continuation_bottom_sender: sender_pc: " INTPTR_FORMAT " -> " INTPTR_FORMAT, p2i(*sender_pc), p2i(new_pc));
3414     assert (new_pc != NULL, "");
3415     *sender_pc = new_pc;
3416 
3417     intptr_t* new_fp = java_lang_Continuation::entryFP(cont);
3418     log_develop_trace(jvmcont)("fix_continuation_bottom_sender: sender_fp: " INTPTR_FORMAT " -> " INTPTR_FORMAT, p2i(*sender_fp), p2i(new_fp));
3419     *sender_fp = new_fp;
3420 
3421     if (callee.is_compiled_frame() && !Interpreter::contains(*sender_pc)) {
3422       // The callee's stack arguments (part of the caller frame) are also thawed to the stack when using lazy-copy
3423       int argsize = callee.cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size;
3424       assert ((argsize & WordAlignmentMask) == 0, "must be");
3425       argsize >>= LogBytesPerWord;
3426     #ifdef _LP64 // TODO PD
3427       if (argsize % 2 != 0)
3428         argsize++; // 16-byte alignment for compiled frame sp
3429     #endif
3430       log_develop_trace(jvmcont)("fix_continuation_bottom_sender: sender_sp: " INTPTR_FORMAT " -> " INTPTR_FORMAT, p2i(*sender_sp), p2i(*sender_sp + argsize));
3431       *sender_sp += argsize;
3432     } else {
3433       // intptr_t* new_sp = java_lang_Continuation::entrySP(cont);
3434       // if (Interpreter::contains(*sender_pc)) {
3435       //   new_sp -= 2;
3436       // }
3437       // log_develop_trace(jvmcont)("fix_continuation_bottom_sender: sender_sp: " INTPTR_FORMAT " -> " INTPTR_FORMAT, p2i(*sender_sp), p2i(new_sp));
3438       // *sender_sp = new_sp;
3439     }
3440     return true;
3441   }
3442   return false;
3443 }
3444 
3445 bool Continuation::fix_continuation_bottom_sender(RegisterMap* map, const frame& callee, address* sender_pc, intptr_t** sender_sp, intptr_t** sender_fp) {
3446   bool res = fix_continuation_bottom_sender(map->thread(), callee, sender_pc, sender_sp, sender_fp);
3447   if (res && !callee.is_interpreted_frame()) {
3448     ContinuationHelper::set_last_vstack_frame(map, callee);
3449   } else {
3450     ContinuationHelper::clear_last_vstack_frame(map);
3451   }
3452   return res;
3453 }
3454 
3455 frame Continuation::fix_continuation_bottom_sender(const frame& callee, RegisterMap* map, frame f) {
3456   if (!is_return_barrier_entry(f.pc())) {
3457     return f;
3458   }
3459 
3460   if (map->walk_cont()) {
3461     return top_frame(callee, map);
3462   }
3463 
3464   if (map->thread() != NULL) {
3465     address   sender_pc = f.pc();
3466     intptr_t* sender_sp = f.sp();
3467     intptr_t* sender_fp = f.fp();
3468     fix_continuation_bottom_sender(map, callee, &sender_pc, &sender_sp, &sender_fp);
3469     return ContinuationHelper::frame_with(f, sender_sp, sender_pc, sender_fp);
3470   }
3471 
3472   return f;
3473 }
3474 
3475 address Continuation::get_top_return_pc_post_barrier(JavaThread* thread, address pc) {
3476   oop cont;
3477   if (thread != NULL && is_return_barrier_entry(pc) && (cont = get_continuation(thread)) != NULL) {
3478     pc = java_lang_Continuation::entryPC(cont);
3479   }
3480   return pc;
3481 }
3482 
3483 bool Continuation::is_scope_bottom(oop cont_scope, const frame& f, const RegisterMap* map) {
3484   if (cont_scope == NULL || !is_continuation_entry_frame(f, map))
3485     return false;
3486 
3487   assert (!map->in_cont(), "");
3488   // if (map->in_cont()) return false;
3489 
3490   oop cont = get_continuation_for_frame(map->thread(), f.sp());
3491   if (cont == NULL)
3492     return false;
3493 
3494   oop sc = continuation_scope(cont);
3495   assert(sc != NULL, "");
3496   return oopDesc::equals(sc, cont_scope);
3497 }
3498 
3499 // TODO: delete? consider other is_scope_bottom or something
3500 // bool Continuation::is_scope_bottom(oop cont_scope, const frame& f, const RegisterMap* map) {
3501 //   if (cont_scope == NULL || !map->in_cont())
3502 //     return false;
3503 
3504 //   oop sc = continuation_scope(map->cont());
3505 //   assert(sc != NULL, "");
3506 //   if (!oopDesc::equals(sc, cont_scope))
3507 //     return false;
3508 
3509 //   ContMirror cont(map);
3510 
3511 //   hframe hf = cont.from_frame(f);
3512 //   hframe sender = hf.sender(cont);
3513 
3514 //   return sender.is_empty();
3515 // }
3516 
3517 static frame continuation_top_frame(oop contOop, RegisterMap* map) {
3518   ContMirror cont(NULL, contOop);
3519 
3520   hframe hf = cont.last_frame<mode_preempt>(); // here mode_preempt merely makes the fewest assumptions
3521   assert (!hf.is_empty(), "");
3522 
3523   // tty->print_cr(">>>> continuation_top_frame");
3524   // hf.print_on(cont, tty);
3525 
3526   // if (!oopDesc::equals(map->cont(), contOop))
3527   map->set_cont(contOop);
3528   map->set_in_cont(true);
3529 
3530   if (map->update_map() && !hf.is_interpreted_frame()) { // TODO : what about forced preemption? see `if (callee_safepoint_stub != NULL)` in thaw_java_frame
3531     frame::update_map_with_saved_link(map, reinterpret_cast<intptr_t**>(-1));
3532   }
3533 
3534   return hf.to_frame(cont);
3535 }
3536 
3537 static frame continuation_parent_frame(ContMirror& cont, RegisterMap* map) {
3538   assert (map->thread() != NULL || !cont.is_mounted(), "map->thread() == NULL: %d cont.is_mounted(): %d", map->thread() == NULL, cont.is_mounted());
3539 
3540   // if (map->thread() == NULL) { // When a continuation is mounted, its entry frame is always on the v-stack
3541   //   oop parentOop = java_lang_Continuation::parent(cont.mirror());
3542   //   if (parentOop != NULL) {
3543   //     // tty->print_cr("continuation_parent_frame: parent");
3544   //     return continuation_top_frame(parentOop, map);
3545   //   }
3546   // }
3547 
3548   oop parent = java_lang_Continuation::parent(cont.mirror());
3549   map->set_cont(parent);
3550   map->set_in_cont(false); // TODO parent != (oop)NULL; consider getting rid of set_in_cont altogether
3551 
3552   if (!cont.is_mounted()) { // When we're walking an unmounted continuation and reached the end
3553     // tty->print_cr("continuation_parent_frame: no more");
3554     return frame();
3555   }
3556 
3557   frame sender(cont.entrySP(), cont.entryFP(), cont.entryPC());
3558 
3559   // tty->print_cr("continuation_parent_frame");
3560   // print_vframe(sender, map, NULL);
3561 
3562   return sender;
3563 }
3564 
3565 frame Continuation::last_frame(Handle continuation, RegisterMap *map) {
3566   assert(map != NULL, "a map must be given");
3567   map->set_cont(continuation); // set handle
3568   return continuation_top_frame(continuation(), map);
3569 }
3570 
3571 bool Continuation::has_last_Java_frame(Handle continuation) {
3572   return java_lang_Continuation::pc(continuation()) != NULL;
3573 }
3574 
3575 javaVFrame* Continuation::last_java_vframe(Handle continuation, RegisterMap *map) {
3576   assert(map != NULL, "a map must be given");
3577   frame f = last_frame(continuation, map);
3578   for (vframe* vf = vframe::new_vframe(&f, map, NULL); vf; vf = vf->sender()) {
3579     if (vf->is_java_frame()) return javaVFrame::cast(vf);
3580   }
3581   return NULL;
3582 }
3583 
3584 frame Continuation::top_frame(const frame& callee, RegisterMap* map) {
3585   oop cont = get_continuation_for_frame(map->thread(), callee.sp());
3586 
3587   ContinuationHelper::set_last_vstack_frame(map, callee);
3588   return continuation_top_frame(cont, map);
3589 }
3590 
3591 static frame sender_for_frame(const frame& f, RegisterMap* map) {
3592   ContMirror cont(map);
3593   hframe hf = cont.from_frame(f);
3594   hframe sender = hf.sender<mode_slow>(cont);
3595 
3596   // tty->print_cr(">>>> sender_for_frame");
3597   // sender.print_on(cont, tty);
3598 
3599   if (map->update_map()) {
3600     if (sender.is_empty()) {
3601       ContinuationHelper::update_register_map_from_last_vstack_frame(map);
3602     } else { // if (!sender.is_interpreted_frame())
3603       if (is_stub(f.cb())) {
3604         f.oop_map()->update_register_map(&f, map); // we have callee-save registers in this case
3605       }
3606       ContinuationHelper::update_register_map(map, sender, cont);
3607     }
3608   }
3609 
3610   if (!sender.is_empty()) {
3611     return sender.to_frame(cont);
3612   } else {
3613     log_develop_trace(jvmcont)("sender_for_frame: continuation_parent_frame");
3614     return continuation_parent_frame(cont, map);
3615   }
3616 }
3617 
3618 frame Continuation::sender_for_interpreter_frame(const frame& callee, RegisterMap* map) {
3619   return sender_for_frame(callee, map);
3620 }
3621 
3622 frame Continuation::sender_for_compiled_frame(const frame& callee, RegisterMap* map) {
3623   return sender_for_frame(callee, map);
3624 }
3625 
3626 int Continuation::frame_size(const frame& f, const RegisterMap* map) {
3627   if (map->in_cont()) {
3628     ContMirror cont(map);
3629     hframe hf = cont.from_frame(f);
3630     return (hf.is_interpreted_frame() ? hf.interpreted_frame_size() : hf.compiled_frame_size()) >> LogBytesPerWord;
3631   } else {
3632     assert (Continuation::is_cont_barrier_frame(f), "");
3633     return (f.is_interpreted_frame() ? ((address)Interpreted::frame_bottom(f) - (address)f.sp()) : NonInterpretedUnknown::size(f)) >> LogBytesPerWord;
3634   }
3635 }
3636 
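// The two closures below map a frame location (an sp offset or a register) to the
// ordinal of the matching oop in the frame's oop-map iteration order; that ordinal,
// offset by the frame's ref_sp, locates the oop in the continuation's refStack.
// See find_oop_in_compiled_frame and find_oop_in_interpreted_frame below.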
3637 class OopIndexClosure : public OopMapClosure {
3638 private:
3639   int _i;
3640   int _index;
3641 
3642   int _offset;
3643   VMReg _reg;
3644 
3645 public:
3646   OopIndexClosure(int offset) : _i(0), _index(-1), _offset(offset), _reg(VMRegImpl::Bad()) {}
3647   OopIndexClosure(VMReg reg)  : _i(0), _index(-1), _offset(-1), _reg(reg) {}
3648 
3649   int index() { return _index; }
  bool is_oop() { return _index >= 0; }
3651 
3652   void do_value(VMReg reg, OopMapValue::oop_types type) {
3653     assert (type == OopMapValue::oop_value || type == OopMapValue::narrowoop_value, "");
3654     if (reg->is_reg()) {
3655         if (_reg == reg) _index = _i;
3656     } else {
3657       int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
3658       if (sp_offset_in_bytes == _offset) _index = _i;
3659     }
3660     _i++;
3661   }
3662 };
3663 
3664 class InterpreterOopIndexClosure : public OffsetClosure {
3665 private:
3666   int _i;
3667   int _index;
3668 
3669   int _offset;
3670 
3671 public:
3672   InterpreterOopIndexClosure(int offset) : _i(0), _index(-1), _offset(offset) {}
3673 
3674   int index() { return _index; }
  bool is_oop() { return _index >= 0; }
3676 
3677   void offset_do(int offset) {
3678     if (offset == _offset) _index = _i;
3679     _i++;
3680   }
3681 };
3682 
3683 // *grossly* inefficient
3684 static int find_oop_in_compiled_frame(const frame& fr, const RegisterMap* map, const int usp_offset_in_bytes) {
3685   assert (fr.is_compiled_frame(), "");
3686   const ImmutableOopMap* oop_map = fr.oop_map();
3687   assert (oop_map != NULL, "");
3688   OopIndexClosure ioc(usp_offset_in_bytes);
3689   oop_map->all_do(&fr, OopMapValue::oop_value | OopMapValue::narrowoop_value, &ioc);
3690   return ioc.index();
3691 }
3692 
3693 static int find_oop_in_compiled_frame(const frame& fr, const RegisterMap* map, VMReg reg) {
3694   assert (fr.is_compiled_frame(), "");
3695   const ImmutableOopMap* oop_map = fr.oop_map();
3696   assert (oop_map != NULL, "");
3697   OopIndexClosure ioc(reg);
3698   oop_map->all_do(&fr, OopMapValue::oop_value | OopMapValue::narrowoop_value, &ioc);
3699   return ioc.index();
3700 }
3701 
3702 static int find_oop_in_interpreted_frame(const hframe& hf, int offset, const InterpreterOopMap& mask, const ContMirror& cont) {
3703   // see void frame::oops_interpreted_do
3704   InterpreterOopIndexClosure ioc(offset);
3705   mask.iterate_oop(&ioc);
  int res = ioc.index() + 1 + hf.interpreted_frame_num_monitors(); // index 0 is the mirror, followed by the monitors, then the oops found by InterpreterOopMap::iterate_oop
  return res;
3708 }
3709 
3710 address Continuation::oop_address(objArrayOop ref_stack, int ref_sp, int index) {
3711   assert (index >= ref_sp && index < ref_stack->length(), "i: %d ref_sp: %d length: %d", index, ref_sp, ref_stack->length());
3712   oop obj = ref_stack->obj_at(index); // invoke barriers
3713   address p = UseCompressedOops ? (address)ref_stack->obj_at_addr<narrowOop>(index)
3714                                 : (address)ref_stack->obj_at_addr<oop>(index);
3715 
3716   log_develop_trace(jvmcont)("oop_address: index: %d", index);
3717   // print_oop(p, obj);
3718   assert (oopDesc::is_oop_or_null(obj), "invalid oop");
3719   return p;
3720 }
3721 
3722 bool Continuation::is_in_usable_stack(address addr, const RegisterMap* map) {
3723   ContMirror cont(map);
3724   return cont.is_in_stack(addr) || cont.is_in_ref_stack(addr);
3725 }
3726 
3727 address Continuation::usp_offset_to_location(const frame& fr, const RegisterMap* map, const int usp_offset_in_bytes) {
3728   return usp_offset_to_location(fr, map, usp_offset_in_bytes, find_oop_in_compiled_frame(fr, map, usp_offset_in_bytes) >= 0);
3729 }
3730 
3731 // if oop, it is narrow iff UseCompressedOops
3732 address Continuation::usp_offset_to_location(const frame& fr, const RegisterMap* map, const int usp_offset_in_bytes, bool is_oop) {
3733   assert (fr.is_compiled_frame(), "");
3734   ContMirror cont(map);
3735   hframe hf = cont.from_frame(fr);
3736 
3737   intptr_t* hsp = cont.stack_address(hf.sp());
3738   address loc = (address)hsp + usp_offset_in_bytes;
3739 
3740   log_develop_trace(jvmcont)("usp_offset_to_location oop_address: stack index: %d length: %d", cont.stack_index(loc), cont.stack_length());
3741 
3742   int oop_offset = find_oop_in_compiled_frame(fr, map, usp_offset_in_bytes);
3743   assert (is_oop == (oop_offset >= 0), "must be");
3744   address res = is_oop ? oop_address(cont.refStack(), cont.refSP(), hf.ref_sp() + oop_offset) : loc;
3745   return res;
3746 }
3747 
3748 int Continuation::usp_offset_to_index(const frame& fr, const RegisterMap* map, const int usp_offset_in_bytes) {
3749   assert (fr.is_compiled_frame() || is_stub(fr.cb()), "");
3750   ContMirror cont(map);
3751   hframe hf = cont.from_frame(fr);
3752 
3753   intptr_t* hsp;
3754   if (usp_offset_in_bytes >= 0) {
3755      hsp = cont.stack_address(hf.sp());
3756   } else {
3757     hframe stub = cont.last_frame<mode_slow>();
3758 
3759     assert (cont.is_flag(FLAG_SAFEPOINT_YIELD), "must be");
3760     assert (is_stub(stub.cb()), "must be");
3761     assert (stub.sender<mode_slow>(cont) == hf, "must be");
3762 
3763     hsp = cont.stack_address(stub.sp()) + stub.cb()->frame_size();
3764   }
3765   address loc = (address)hsp + usp_offset_in_bytes;
3766   int index = cont.stack_index(loc);
3767 
  log_develop_trace(jvmcont)("usp_offset_to_index oop_address: stack index: %d length: %d", index, cont.stack_length());
3769   return index;
3770 }
3771 
3772 address Continuation::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) {
3773   return reg_to_location(fr, map, reg, find_oop_in_compiled_frame(fr, map, reg) >= 0);
3774 }
3775 
3776 address Continuation::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg, bool is_oop) {
3777   assert (map != NULL, "");
3778   oop cont;
3779   if (map->in_cont()) {
3780     cont = map->cont();
3781   } else {
3782     cont = get_continutation_for_frame(map->thread(), fr);
3783   }
3784   return reg_to_location(cont, fr, map, reg, is_oop);
3785 }
3786 
3787 address Continuation::reg_to_location(oop contOop, const frame& fr, const RegisterMap* map, VMReg reg, bool is_oop) {
3788   assert (map != NULL, "");
3789   assert (fr.is_compiled_frame(), "");
3790 
3791   // assert (!is_continuation_entry_frame(fr, map), "");
3792   // if (is_continuation_entry_frame(fr, map)) {
3793   //   log_develop_trace(jvmcont)("reg_to_location continuation entry link address: " INTPTR_FORMAT, p2i(map->location(reg)));
3794   //   return map->location(reg); // see sender_for_frame, `if (sender.is_empty())`
3795   // }
3796 
3797   assert (contOop != NULL, "");
3798 
3799   ContMirror cont(map->thread(), contOop);
3800   hframe hf = cont.from_frame(fr);
3801 
3802   int oop_index = find_oop_in_compiled_frame(fr, map, reg);
  assert (is_oop == (oop_index >= 0), "must be");
3804 
  address res = NULL;
  if (oop_index >= 0) {
    res = oop_address(cont.refStack(), cont.refSP(), hf.ref_sp() + oop_index);
  } else {
  // assert ((void*)Frame::map_link_address(map) == (void*)map->location(reg), "must be the link register (rbp): %s", reg->name());
    int index = (int)reinterpret_cast<uintptr_t>(map->location(reg)); // the RegisterMap should contain the link index. See sender_for_frame
    assert (index >= 0, "non-oop in fp of the topmost frame is not supported");
    if (index >= 0) { // see frame::update_map_with_saved_link in continuation_top_frame
      address loc = (address)cont.stack_address(index);
      log_develop_trace(jvmcont)("reg_to_location oop_address: stack index: %d length: %d", index, cont.stack_length());
      res = loc;
    }
  }
3819   return res;
3820 }
3821 
3822 address Continuation::interpreter_frame_expression_stack_at(const frame& fr, const RegisterMap* map, const InterpreterOopMap& oop_mask, int index) {
3823   assert (fr.is_interpreted_frame(), "");
3824   ContMirror cont(map);
3825   hframe hf = cont.from_frame(fr);
3826 
3827   int max_locals = hf.method<Interpreted>()->max_locals();
3828   address loc = (address)hf.interpreter_frame_expression_stack_at(index);
3829   if (loc == NULL)
3830     return NULL;
3831 
3832   int index1 = max_locals + index; // see stack_expressions in vframe.cpp
3833   log_develop_trace(jvmcont)("interpreter_frame_expression_stack_at oop_address: stack index: %d, length: %d exp: %d index1: %d", cont.stack_index(loc), cont.stack_length(), index, index1);
3834 
3835   address res = oop_mask.is_oop(index1)
3836     ? oop_address(cont.refStack(), cont.refSP(), hf.ref_sp() + find_oop_in_interpreted_frame(hf, index1, oop_mask, cont))
3837     : loc;
3838   return res;
3839 }
3840 
3841 address Continuation::interpreter_frame_local_at(const frame& fr, const RegisterMap* map, const InterpreterOopMap& oop_mask, int index) {
3842   assert (fr.is_interpreted_frame(), "");
3843   ContMirror cont(map);
3844   hframe hf = cont.from_frame(fr);
3845 
3846   address loc = (address)hf.interpreter_frame_local_at(index);
3847 
3848   log_develop_trace(jvmcont)("interpreter_frame_local_at oop_address: stack index: %d length: %d local: %d", cont.stack_index(loc), cont.stack_length(), index);
3849   address res = oop_mask.is_oop(index)
3850     ? oop_address(cont.refStack(), cont.refSP(), hf.ref_sp() + find_oop_in_interpreted_frame(hf, index, oop_mask, cont))
3851     : loc;
3852   return res;
3853 }
3854 
3855 Method* Continuation::interpreter_frame_method(const frame& fr, const RegisterMap* map) {
3856   assert (fr.is_interpreted_frame(), "");
3857   hframe hf = ContMirror(map).from_frame(fr);
3858   return hf.method<Interpreted>();
3859 }
3860 
3861 address Continuation::interpreter_frame_bcp(const frame& fr, const RegisterMap* map) {
3862   assert (fr.is_interpreted_frame(), "");
3863   hframe hf = ContMirror(map).from_frame(fr);
3864   return hf.interpreter_frame_bcp();
3865 }
3866 
3867 oop Continuation::continuation_scope(oop cont) {
3868   return cont != NULL ? java_lang_Continuation::scope(cont) : (oop)NULL;
3869 }
3870 
3871 ///// Allocation
3872 
3873 template <typename ConfigT>
3874 void ContMirror::make_keepalive(CompiledMethodKeepalive<ConfigT>* keepalive) {
3875   Handle conth(_thread, _cont);
3876   int oops = keepalive->nr_oops();
3877   if (oops == 0) {
3878     oops = 1;
3879   }
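  // Even when nr_oops() is zero we allocate a one-element array, presumably so the
  // keepalive handle below always refers to a valid, non-empty array.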
3880   oop keepalive_obj = allocate_keepalive_array<ConfigT>(oops);
3881 
3882   uint64_t counter = SafepointSynchronize::safepoint_counter();
3883   // check gc cycle
3884   Handle keepaliveHandle = Handle(_thread, keepalive_obj);
3885   keepalive->set_handle(keepaliveHandle);
3886   // check gc cycle and maybe reload
3887   //if (!SafepointSynchronize::is_same_safepoint(counter)) {
3888     post_safepoint(conth);
3889   //}
3890 }
3891 
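// Ensures the hstack and the ref stack are large enough for a freeze of 'size' bytes and
// 'oops' references, allocating or growing them as needed ('frames' is only used by the
// currently disabled Java allocation path).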
3892 template <typename ConfigT>
3893 inline void ContMirror::allocate_stacks(int size, int oops, int frames) {
3894   bool needs_stack_allocation    = (_stack == NULL || to_index(size) > (_sp >= 0 ? _sp : _stack_length));
3895   bool needs_refStack_allocation = (_ref_stack == NULL || oops > _ref_sp);
3896 
  log_develop_trace(jvmcont)("stack size: %d (int): %d sp: %d stack_length: %d needs alloc: %d", size, to_index(size), _sp, _stack_length, needs_stack_allocation);
  log_develop_trace(jvmcont)("num_oops: %d ref_sp: %d needs alloc: %d", oops, _ref_sp, needs_refStack_allocation);
3899 
3900   assert(_sp == java_lang_Continuation::sp(_cont) && _fp == java_lang_Continuation::fp(_cont) && _pc == java_lang_Continuation::pc(_cont), "");
3901 
3902   if (!(needs_stack_allocation | needs_refStack_allocation))
3903     return;
3904 
3905 #ifdef PERFTEST
3906   if (PERFTEST_LEVEL < 100) {
    tty->print_cr("stack size: %d (int): %d sp: %d stack_length: %d needs alloc: %d", size, to_index(size), _sp, _stack_length, needs_stack_allocation);
    tty->print_cr("num_oops: %d ref_sp: %d needs alloc: %d", oops, _ref_sp, needs_refStack_allocation);
3909   }
3910   guarantee(PERFTEST_LEVEL >= 100, "");
3911 #endif
3912 
3913   if (!allocate_stacks_in_native<ConfigT>(size, oops, needs_stack_allocation, needs_refStack_allocation)) {
3914     allocate_stacks_in_java(size, oops, frames);
    if (thread()->has_pending_exception()) return;
3916   }
3917 
  // This first assertion isn't essential, as we'll overwrite the Java-computed values; it just checks that the Java computation is OK.
3919   assert(_sp == java_lang_Continuation::sp(_cont) && _fp == java_lang_Continuation::fp(_cont) && _pc == java_lang_Continuation::pc(_cont), "");
3920   assert (oopDesc::equals(_stack, java_lang_Continuation::stack(_cont)), "");
3921   assert (_stack->base(basicElementType) == _hstack, "");
  assert (to_bytes(_stack_length) >= size && to_bytes(_sp) >= size, "stack_length: %d sp: %d size: %d", to_bytes(_stack_length), to_bytes(_sp), size);
3923   assert (to_bytes(_ref_sp) >= oops, "oops: %d ref_sp: %d refStack length: %d", oops, _ref_sp, _ref_stack->length());
3924 }
3925 
3926 template <typename ConfigT>
3927 NOINLINE bool ContMirror::allocate_stacks_in_native(int size, int oops, bool needs_stack, bool needs_refstack) {
3928   if (needs_stack) {
3929     if (_stack == NULL) {
3930       if (!allocate_stack(size)) {
3931         return false;
3932       }
3933     } else {
3934       if (!grow_stack(size)) {
3935         return false;
3936       }
3937     }
3938 
3939     java_lang_Continuation::set_stack(_cont, _stack);
3940 
3941     // TODO: may not be necessary because at this point we know that the freeze will succeed and these values will get written in ::write
3942     java_lang_Continuation::set_sp(_cont, _sp);
3943     java_lang_Continuation::set_fp(_cont, _fp);
3944   }
3945 
3946   if (needs_refstack) {
3947     if (_ref_stack == NULL) {
3948       if (!allocate_ref_stack<ConfigT>(oops)) {
3949         return false;
3950       }
3951     } else {
3952       if (!grow_ref_stack<ConfigT>(oops)) {
3953         return false;
3954       }
3955     }
3956     java_lang_Continuation::set_refStack(_cont, _ref_stack);
3957 
3958     // TODO: may not be necessary because at this point we know that the freeze will succeed and this value will get written in ::write
3959     java_lang_Continuation::set_refSP(_cont, _ref_sp);
3960   }
3961 
3962   return true;
3963 }
3964 
3965 bool ContMirror::allocate_stack(int size) {
3966   int elements = size >> LogBytesPerElement;
3967   oop result = allocate_stack_array(elements);
3968   if (result == NULL) {
3969     return false;
3970   }
3971 
3972   _stack = typeArrayOop(result);
3973   _sp = elements;
3974   _stack_length = elements;
3975   _hstack = (ElemType*)_stack->base(basicElementType);
3976 
3977   return true;
3978 }
3979 
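// Grows the hstack. The live portion occupies [offset, old_length); it is copied to the
// end of the new array, and _sp (and _fp, when it is an index) are fixed up to keep
// their distance from the end.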
3980 bool ContMirror::grow_stack(int new_size) {
3981   new_size = new_size >> LogBytesPerElement;
3982 
3983   int old_length = _stack_length;
3984   int offset = _sp > 0 ? _sp : old_length;
3985   int min_length = (old_length - offset) + new_size;
3986 
3987   if (min_length <= old_length) {
3988     return false;
3989   }
3990 
3991   int new_length = ensure_capacity(old_length, min_length);
3992   if (new_length == -1) {
3993     return false;
3994   }
3995 
3996   typeArrayOop new_stack = allocate_stack_array(new_length);
3997   if (new_stack == NULL) {
3998     return false;
3999   }
4000 
4001   log_develop_trace(jvmcont)("grow_stack old_length: %d new_length: %d", old_length, new_length);
4002   ElemType* new_hstack = (ElemType*)new_stack->base(basicElementType);
4003   int n = old_length - offset;
4004   assert(new_length > n, "");
4005   if (n > 0) {
4006     copy_primitive_array(_stack, offset, new_stack, new_length - n, n);
4007   }
4008   _stack = new_stack;
4009   _stack_length = new_length;
4010   _hstack = new_hstack;
4011 
  log_develop_trace(jvmcont)("grow_stack old sp: %d fp: " INTX_FORMAT, _sp, _fp);
  _sp = fix_decreasing_index(_sp, old_length, new_length);
  if (is_flag(FLAG_LAST_FRAME_INTERPRETED)) { // only interpreter frames use a relative (index) fp
    _fp = fix_decreasing_index(_fp, old_length, new_length);
  }
  log_develop_trace(jvmcont)("grow_stack new sp: %d fp: " INTX_FORMAT, _sp, _fp);
4018 
4019   return true;
4020 }
4021 
4022 template <typename ConfigT>
4023 bool ContMirror::allocate_ref_stack(int nr_oops) {
4024   // we don't zero the array because we allocate an array that exactly holds all the oops we'll fill in as we freeze
4025   oop result = allocate_refstack_array<ConfigT>(nr_oops);
4026   if (result == NULL) {
4027     return false;
4028   }
4029   _ref_stack = objArrayOop(result);
4030   _ref_sp = nr_oops;
4031 
4032   assert (_ref_stack->length() == nr_oops, "");
4033 
4034   return true;
4035 }
4036 
4037 template <typename ConfigT>
4038 bool ContMirror::grow_ref_stack(int nr_oops) {
4039   int old_length = _ref_stack->length();
4040   int offset = _ref_sp > 0 ? _ref_sp : old_length;
4041   int old_oops = old_length - offset;
4042   int min_length = old_oops + nr_oops;
4043 
4044   int new_length = ensure_capacity(old_length, min_length);
4045   if (new_length == -1) {
4046     return false;
4047   }
4048 
4049   objArrayOop new_ref_stack = allocate_refstack_array<ConfigT>(new_length);
4050   if (new_ref_stack == NULL) {
4051     return false;
4052   }
4053   assert (new_ref_stack->length() == new_length, "");
4054   log_develop_trace(jvmcont)("grow_ref_stack old_length: %d new_length: %d", old_length, new_length);
4055 
4056   zero_ref_array<ConfigT>(new_ref_stack, new_length, min_length);
4057   if (old_oops > 0) {
4058     assert(!CONT_FULL_STACK, "");
4059     copy_ref_array<ConfigT>(_ref_stack, offset, new_ref_stack, fix_decreasing_index(offset, old_length, new_length), old_oops);
4060   }
4061 
4062   _ref_stack = new_ref_stack;
4063 
4064   log_develop_trace(jvmcont)("grow_ref_stack old ref_sp: %d", _ref_sp);
4065   _ref_sp = fix_decreasing_index(_ref_sp, old_length, new_length);
4066   log_develop_trace(jvmcont)("grow_ref_stack new ref_sp: %d", _ref_sp);
4067   return true;
4068 }
4069 
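// Computes a new capacity of at least 'min', growing the old capacity by half (the same
// policy as java.util.ArrayList); returns -1 if 'min' has overflowed to a negative value.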
4070 int ContMirror::ensure_capacity(int old, int min) {
4071   int newsize = old + (old >> 1);
4072   if (newsize - min <= 0) {
4073     if (min < 0) { // overflow
4074       return -1;
4075     }
4076     return min;
4077   }
4078   return newsize;
4079 }
4080 
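// The hstack grows downward from the end of the array, so when the array grows an index
// keeps its distance from the end: new_index = new_length - (old_length - index).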
4081 int ContMirror::fix_decreasing_index(int index, int old_length, int new_length) {
4082   return new_length - (old_length - index);
4083 }
4084 
4085 inline void ContMirror::post_safepoint(Handle conth) {
4086   _cont = conth(); // reload oop
4087   _ref_stack = java_lang_Continuation::refStack(_cont);
4088   _stack = java_lang_Continuation::stack(_cont);
4089   _hstack = (ElemType*)_stack->base(basicElementType);
4090 }
4091 
4092 typeArrayOop ContMirror::allocate_stack_array(size_t elements) {
4093   assert(elements > 0, "");
  log_develop_trace(jvmcont)("allocate_stack_array elements: " SIZE_FORMAT, elements);
4095 
4096   TypeArrayKlass* klass = TypeArrayKlass::cast(Universe::intArrayKlassObj());
4097   size_t size_in_words = typeArrayOopDesc::object_size(klass, (int)elements);
4098   return typeArrayOop(raw_allocate(klass, size_in_words, elements, false));
4099 }
4100 
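// Copies 'count' primitive stack elements between two distinct arrays; a plain memcpy is
// sufficient because the hstack arrays contain only primitives, so no GC barriers are needed.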
4101 void ContMirror::copy_primitive_array(typeArrayOop old_array, int old_start, typeArrayOop new_array, int new_start, int count) {
4102   ElemType* from = (ElemType*)old_array->base(basicElementType) + old_start;
4103   ElemType* to   = (ElemType*)new_array->base(basicElementType) + new_start;
4104   size_t size = to_bytes(count);
4105   memcpy(to, from, size);
4106 
4107   //Copy::conjoint_memory_atomic(from, to, size); // Copy::disjoint_words((HeapWord*)from, (HeapWord*)to, size/wordSize); //
4108   // ArrayAccess<ARRAYCOPY_DISJOINT>::oop_arraycopy(_stack, offset * elementSizeInBytes, new_stack, (new_length - n) * elementSizeInBytes, n);
4109 }
4110 
4111 template <typename ConfigT>
4112 objArrayOop ContMirror::allocate_refstack_array(size_t nr_oops) {
4113   assert(nr_oops > 0, "");
4114   bool zero = !ConfigT::_post_barrier; // !BarrierSet::barrier_set()->is_a(BarrierSet::ModRef);
  log_develop_trace(jvmcont)("allocate_refstack_array nr_oops: " SIZE_FORMAT " zero: %d", nr_oops, zero);
4116 
4117   ArrayKlass* klass = ArrayKlass::cast(Universe::objectArrayKlassObj());
4118   size_t size_in_words = objArrayOopDesc::object_size((int)nr_oops);
4119   return objArrayOop(raw_allocate(klass, size_in_words, nr_oops, zero));
4120 }
4121 
4122 template <typename ConfigT>
4123 objArrayOop ContMirror::allocate_keepalive_array(size_t nr_oops) {
4124   //assert(nr_oops > 0, "");
4125   bool zero = true; // !BarrierSet::barrier_set()->is_a(BarrierSet::ModRef);
  log_develop_trace(jvmcont)("allocate_keepalive_array nr_oops: " SIZE_FORMAT " zero: %d", nr_oops, zero);
4127 
4128   ArrayKlass* klass = ArrayKlass::cast(Universe::objectArrayKlassObj());
4129   size_t size_in_words = objArrayOopDesc::object_size((int)nr_oops);
4130   return objArrayOop(raw_allocate(klass, size_in_words, nr_oops, zero));
4131 }
4132 
4133 
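// Zeroes the bottom extra_oops slots of a freshly grown ref-stack array, i.e. the part
// that freezing will not fill in. This is only needed with the post-barrier (raw) writer;
// otherwise allocate_refstack_array already returned a pre-zeroed array.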
4134 template <typename ConfigT>
4135 void ContMirror::zero_ref_array(objArrayOop new_array, int new_length, int min_length) {
4136   assert (new_length == new_array->length(), "");
4137   int extra_oops = new_length - min_length;
4138 
4139   if (ConfigT::_post_barrier) {
4140     // zero the bottom part of the array that won't be filled in the freeze
4141     HeapWord* new_base = new_array->base();
4142     const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
4143     assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0), "");
    uint word_size = ((uint)extra_oops + OopsPerHeapWord - 1)/OopsPerHeapWord;
    Copy::fill_to_aligned_words(new_base, word_size, 0); // when oops are narrow this may zero a few bytes beyond extra_oops elements, which is safe because we zero before copying
4146   }
4147 
4148   DEBUG_ONLY(for (int i=0; i<extra_oops; i++) assert(new_array->obj_at(i) == (oop)NULL, "");)
4149 }
4150 
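// Copies oops between ref-stack arrays. With the post-barrier (ModRef) configuration we
// memcpy the raw slots and issue a single write_ref_array post barrier for the whole
// destination range; otherwise we go through the barrier-aware oop_arraycopy, which
// requires the destination range to be pre-zeroed (see G1BarrierSet::write_ref_array_pre_work).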
4151 template <typename ConfigT>
4152 void ContMirror::copy_ref_array(objArrayOop old_array, int old_start, objArrayOop new_array, int new_start, int count) {
4153   assert (new_start + count == new_array->length(), "");
4154 
4155   typedef typename ConfigT::OopT OopT;
4156   if (ConfigT::_post_barrier) {
4157     OopT* from = (OopT*)old_array->base() + old_start;
4158     OopT* to   = (OopT*)new_array->base() + new_start;
4159     memcpy((void*)to, (void*)from, count * sizeof(OopT));
4160     barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set())->write_ref_array((HeapWord*)to, count);
4161   } else {
4162     // Requires the array is zeroed (see G1BarrierSet::write_ref_array_pre_work)
4163     DEBUG_ONLY(for (int i=0; i<count; i++) assert(new_array->obj_at(new_start + i) == (oop)NULL, "");)
4164     size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<OopT>(old_start);
4165     size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<OopT>(new_start);
4166     ArrayAccess<ARRAYCOPY_DISJOINT>::oop_arraycopy(old_array, src_offset, new_array, dst_offset, count);
4167 
4168     // for (int i=0, old_i = old_start, new_i = new_start; i < count; i++, old_i++, new_i++) new_array->obj_at_put(new_i, old_array->obj_at(old_i));
4169   }
4170 }
4171 
/* Try to allocate an array from the TLAB; if that fails, allocate with the allocate()
 * method. In the latter case we might have safepointed and need to reload our oops. */
4174 oop ContMirror::raw_allocate(Klass* klass, size_t size_in_words, size_t elements, bool zero) {
4175   ObjArrayAllocator allocator(klass, size_in_words, (int)elements, zero, _thread);
4176   HeapWord* start = _thread->tlab().allocate(size_in_words);
4177   if (start != NULL) {
4178     return allocator.initialize(start);
4179   } else {
4180     //HandleMark hm(_thread);
4181     Handle conth(_thread, _cont);
4182     uint64_t counter = SafepointSynchronize::safepoint_counter();
4183     oop result = allocator.allocate(/* use_tlab */ false);
4184     //if (!SafepointSynchronize::is_same_safepoint(counter)) {
4185       post_safepoint(conth);
4186     //}
4187     return result;
4188   }
4189 }
4190 
4191 NOINLINE void ContMirror::allocate_stacks_in_java(int size, int oops, int frames) {
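  // Note: this path is currently disabled; the guarantee below fires if the native
  // allocation path ever fails, which is not expected to happen.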
4192   guarantee (false, "unreachable");
4193   int old_stack_length = _stack_length;
4194 
4195   //HandleMark hm(_thread);
4196   Handle conth(_thread, _cont);
4197   JavaCallArguments args;
4198   args.push_oop(conth);
4199   args.push_int(size);
4200   args.push_int(oops);
4201   args.push_int(frames);
4202   JavaValue result(T_VOID);
4203   JavaCalls::call_virtual(&result, SystemDictionary::Continuation_klass(), vmSymbols::getStacks_name(), vmSymbols::continuationGetStacks_signature(), &args, _thread);
4204   post_safepoint(conth); // reload oop after java call
4205 
4206   _sp     = java_lang_Continuation::sp(_cont);
4207   _fp     = java_lang_Continuation::fp(_cont);
4208   _ref_sp = java_lang_Continuation::refSP(_cont);
4209   _stack_length = _stack->length();
4210   /* We probably should handle OOM? */
4211 }
4212 
4213 JVM_ENTRY(void, CONT_Clean(JNIEnv* env, jobject jcont)) {
4214     JavaThread* thread = JavaThread::thread_from_jni_environment(env);
4215     oop oopCont = JNIHandles::resolve_non_null(jcont);
4216     ContMirror cont(thread, oopCont);
4217     cont.cleanup();
4218 }
4219 JVM_END
4220 
4221 JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
4222   JavaThread* thread = JavaThread::thread_from_jni_environment(env);
4223   return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
4224 }
4225 JVM_END
4226 
4227 JVM_ENTRY(jint, CONT_TryForceYield0(JNIEnv* env, jobject jcont, jobject jthread)) {
4228   JavaThread* thread = JavaThread::thread_from_jni_environment(env);
4229 
4230   if (!ThreadLocalHandshakes || !SafepointMechanism::uses_thread_local_poll()) {
4231     return -5;
4232   }
4233 
4234   class ForceYieldClosure : public ThreadClosure {
4235     jobject _jcont;
4236     jint _result;
4237 
4238     void do_thread(Thread* th) {
4239       // assert (th == Thread::current(), ""); -- the handshake can be carried out by a VM thread (see HandshakeState::process_by_vmthread)
4240       assert (th->is_Java_thread(), "");
4241       JavaThread* thread = (JavaThread*)th;
4242 
4243       // tty->print_cr(">>> ForceYieldClosure thread");
4244       // thread->print_on(tty);
4245       // if (thread != Thread::current()) {
4246       //   tty->print_cr(">>> current thread");
4247       //   Thread::current()->print_on(tty);
4248       // }
4249 
4250       oop oopCont = JNIHandles::resolve_non_null(_jcont);
4251       _result = Continuation::try_force_yield(thread, oopCont);
4252     }
4253 
4254   public:
4255     ForceYieldClosure(jobject jcont) : _jcont(jcont), _result(-1) {}
4256     jint result() const { return _result; }
4257   };
4258   ForceYieldClosure fyc(jcont);
4259 
4260   // tty->print_cr("TRY_FORCE_YIELD0");
4261   // thread->print();
4262   // tty->print_cr("");
4263 
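  // The 'if (true)' permanently selects the targeted handshake; the self-handshake
  // branch below appears to be dead code kept only for experimentation.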
4264   if (true) {
4265     oop thread_oop = JNIHandles::resolve(jthread);
4266     if (thread_oop != NULL) {
4267       JavaThread* target = java_lang_Thread::thread(thread_oop);
4268       Handshake::execute(&fyc, target);
4269     }
4270   } else {
4271     Handshake::execute(&fyc);
4272   }
4273   return fyc.result();
4274 }
4275 JVM_END
4276 
4277 #define CC (char*)  /*cast a literal from (const char*)*/
4278 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
4279 
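// Native method table for java.lang.Continuation, registered via CONT_RegisterNativeMethods below.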
4280 static JNINativeMethod CONT_methods[] = {
4281     {CC"clean0",           CC"()V",                              FN_PTR(CONT_Clean)},
4282     {CC"tryForceYield0",   CC"(Ljava/lang/Thread;)I",            FN_PTR(CONT_TryForceYield0)},
4283     {CC"isPinned0",        CC"(Ljava/lang/ContinuationScope;)I", FN_PTR(CONT_isPinned0)},
4284 };
4285 
4286 void CONT_RegisterNativeMethods(JNIEnv *env, jclass cls) {
4287     Thread* thread = Thread::current();
4288     assert(thread->is_Java_thread(), "");
4289     ThreadToNativeFromVM trans((JavaThread*)thread);
4290     int status = env->RegisterNatives(cls, CONT_methods, sizeof(CONT_methods)/sizeof(JNINativeMethod));
4291     guarantee(status == JNI_OK && !env->ExceptionOccurred(), "register java.lang.Continuation natives");
4292 }
4293 
4294 #include CPU_HEADER_INLINE(continuation)
4295 
4296 #ifdef CONT_DOUBLE_NOP
4297 template<op_mode mode>
4298 static inline CachedCompiledMetadata cached_metadata(const hframe& hf) {
4299   return ContinuationHelper::cached_metadata<mode>(hf);
4300 }
4301 #endif
4302 
/* This is hopefully only temporary; currently only G1 supports making the weak
 * keepalive oops strong while their nmethods are on the stack. */
4305 class HandleKeepalive {
4306 public:
4307   typedef Handle TypeT;
4308 
4309   static Handle make_keepalive(JavaThread* thread, oop* keepalive) {
4310     return Handle(thread, WeakHandle<vm_nmethod_keepalive_data>::from_raw(keepalive).resolve());
4311   }
4312 
4313   static oop read_keepalive(Handle obj) {
4314     return obj();
4315   }
4316 };
4317 
4318 class NoKeepalive {
4319 public:
4320   typedef oop* TypeT;
4321 
4322   static oop* make_keepalive(JavaThread* thread, oop* keepalive) {
4323     return keepalive;
4324   }
4325 
4326   static oop read_keepalive(oop* keepalive) {
4327     return WeakHandle<vm_nmethod_keepalive_data>::from_raw(keepalive).resolve();
4328   }
4329 };
4330 
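// Compile-time configuration of the freeze/thaw machinery: oop width (narrow vs. wide),
// barrier strategy, stub generation, and keepalive flavor. ConfigResolve below picks
// exactly one instantiation at VM startup and installs its freeze/thaw specializations
// into the cont_freeze_* / cont_thaw_* function pointers. As a sketch (assuming
// compressed oops on G1 with UseContinuationStrong), resolve() would install
// Config<true, true, LoomGenCode, true>::freeze<mode_fast> as cont_freeze_fast.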
4331 template <bool compressed_oops, bool post_barrier, bool gen_stubs, bool g1gc>
4332 class Config {
4333 public:
4334   typedef Config<compressed_oops, post_barrier, gen_stubs, g1gc> SelfT;
4335   typedef typename Conditional<compressed_oops, narrowOop, oop>::type OopT;
4336   typedef typename Conditional<post_barrier, RawOopWriter<SelfT>, NormalOopWriter<SelfT> >::type OopWriterT;
4337   typedef typename Conditional<g1gc, NoKeepalive, HandleKeepalive>::type KeepaliveObjectT;
4338 
4339   static const bool _compressed_oops = compressed_oops;
4340   static const bool _post_barrier = post_barrier;
4341   static const bool allow_stubs = gen_stubs && post_barrier && compressed_oops;
4342 
4343   template<op_mode mode>
4344   static freeze_result freeze(JavaThread* thread, ContMirror& cont, FrameInfo* fi) {
4345     return Freeze<SelfT, mode>(thread, cont).freeze(fi);
4346   }
4347 
4348   template<op_mode mode>
4349   static bool thaw(JavaThread* thread, ContMirror& cont, FrameInfo* fi, int num_frames) {
4350     return Thaw<SelfT, mode>(thread, cont).thaw(fi, num_frames);
4351   }
4352 };
4353 
4354 class ConfigResolve {
4355 public:
4356   static void resolve() { resolve_compressed(); }
4357 
4358   static void resolve_compressed() {
4359     UseCompressedOops ? resolve_modref<true>()
4360                       : resolve_modref<false>();
4361   }
4362 
4363   template <bool use_compressed>
4364   static void resolve_modref() {
4365     BarrierSet::barrier_set()->is_a(BarrierSet::ModRef)
4366       ? resolve_gencode<use_compressed, true>()
4367       : resolve_gencode<use_compressed, false>();
4368   }
4369 
4370   template <bool use_compressed, bool is_modref>
4371   static void resolve_gencode() {
4372     LoomGenCode 
4373       ? resolve_g1<use_compressed, is_modref, true>()
4374       : resolve_g1<use_compressed, is_modref, false>();
4375   } 
4376 
4377   template <bool use_compressed, bool is_modref, bool gencode>
4378   static void resolve_g1() {
4379     UseG1GC && UseContinuationStrong
4380       ? resolve<use_compressed, is_modref, gencode, true>()
4381       : resolve<use_compressed, is_modref, gencode, false>();
4382   }
4383 
4384   template <bool use_compressed, bool is_modref, bool gencode, bool g1gc>
4385   static void resolve() {
    // tty->print_cr(">>> ConfigResolve::resolve use_compressed: %d is_modref: %d gencode: %d", use_compressed, is_modref, gencode);
4387 
4388     cont_freeze_fast    = Config<use_compressed, is_modref, gencode, g1gc>::template freeze<mode_fast>;
4389     cont_freeze_slow    = Config<use_compressed, is_modref, gencode, g1gc>::template freeze<mode_slow>;
4390     cont_freeze_preempt = Config<use_compressed, is_modref, gencode, g1gc>::template freeze<mode_preempt>;
4391 
4392     cont_thaw_fast    = Config<use_compressed, is_modref, gencode, g1gc>::template thaw<mode_fast>;
4393     cont_thaw_slow    = Config<use_compressed, is_modref, gencode, g1gc>::template thaw<mode_slow>;
4394     cont_thaw_preempt = Config<use_compressed, is_modref, gencode, g1gc>::template thaw<mode_preempt>;
4395   }
4396 };
4397 
4398 void Continuations::init() {
4399   ConfigResolve::resolve();
4400   OopMapStubGenerator::init();
4401   Continuation::init();
4402 }
4403 
4404 void Continuation::init() {
4405   _weak_handles = new OopStorage("Continuation NMethodKeepalive weak",
4406       NMethodKeepaliveAlloc_lock,
4407       NMethodKeepaliveActive_lock);
4408 }
4409 
4410 class KeepaliveCleanupClosure : public ThreadClosure {
4411 private:
4412   int _count;
4413 public:
4414   KeepaliveCleanupClosure() : _count(0) {}
4415 
4416   int count() const { return _count; }
4417 
4418   virtual void do_thread(Thread* thread) {
4419     JavaThread* jthread = (JavaThread*) thread;
4420     GrowableArray<WeakHandle<vm_nmethod_keepalive_data> >* cleanup_list = jthread->keepalive_cleanup();
4421     int len = cleanup_list->length();
4422     _count += len;
4423     for (int i = 0; i < len; ++i) {
4424       WeakHandle<vm_nmethod_keepalive_data> ref = cleanup_list->at(i);
4425       ref.release();
4426     }
4427 
4428     cleanup_list->clear();
4429     assert(cleanup_list->length() == 0, "should be clean");
4430   }
4431 };
4432 
4433 void Continuations::cleanup_keepalives() {
4434   KeepaliveCleanupClosure closure;
4435   Threads::java_threads_do(&closure);
4436   //log_info(jvmcont)("cleanup %d refs", closure.count());
4437 }
4438 
4439 volatile intptr_t Continuations::_exploded_miss = 0;
4440 volatile intptr_t Continuations::_exploded_hit = 0;
4441 volatile intptr_t Continuations::_nmethod_miss = 0;
4442 volatile intptr_t Continuations::_nmethod_hit = 0;
4443 
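// Statistics hooks: the Atomic::inc and print_cr bodies below are left commented out so
// that the hit/miss hooks are zero-cost by default; uncomment them to collect numbers.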
4444 void Continuations::exploded_miss() {
4445   //Atomic::inc(&_exploded_miss);
4446 }
4447 
4448 void Continuations::exploded_hit() {
4449   //Atomic::inc(&_exploded_hit);
4450 }
4451 
4452 void Continuations::nmethod_miss() {
4453   //Atomic::inc(&_nmethod_miss);
4454 }
4455 
4456 void Continuations::nmethod_hit() {
4457   //Atomic::inc(&_nmethod_hit);
4458 }
4459 
4460 void Continuations::print_statistics() {
4461   //tty->print_cr("Continuations hit/miss %ld / %ld", _exploded_hit, _exploded_miss);
4462   //tty->print_cr("Continuations nmethod hit/miss %ld / %ld", _nmethod_hit, _nmethod_miss);
4463 }
4464 
4465 ///// DEBUGGING
4466 
4467 #ifndef PRODUCT
4468 void Continuation::describe(FrameValues &values) {
4469   JavaThread* thread = JavaThread::current();
4470   if (thread != NULL) {
4471     for (oop cont = thread->last_continuation(); cont != (oop)NULL; cont = java_lang_Continuation::parent(cont)) {
4472       intptr_t* bottom = java_lang_Continuation::entrySP(cont);
4473       if (bottom != NULL)
4474         values.describe(-1, bottom, "continuation entry");
4475     }
4476   }
4477 }
4478 #endif
4479 
4480 void Continuation::nmethod_patched(nmethod* nm) {
4481   //log_info(jvmcont)("nmethod patched %p", nm);
4482   oop* keepalive = nm->get_keepalive();
4483   if (keepalive == NULL) {
4484     return;
4485   }
4486   WeakHandle<vm_nmethod_keepalive_data> wh = WeakHandle<vm_nmethod_keepalive_data>::from_raw(keepalive);
4487   oop resolved = wh.resolve();
#ifdef ASSERT
  assert(Universe::heap()->is_in_or_null(resolved), "keepalive array must be in the heap or NULL");
#endif
4491 
4492 #ifndef PRODUCT
4493   CountOops count;
4494   nm->oops_do(&count, false, true);
4495   assert(nm->nr_oops() >= count.nr_oops(), "should be");
4496 #endif
4497 
4498   if (resolved == NULL) {
4499     return;
4500   }
4501 
4502   if (UseCompressedOops) {
4503     PersistOops<narrowOop> persist(nm->nr_oops(), (objArrayOop) resolved);
4504     nm->oops_do(&persist);
4505   } else {
    PersistOops<oop> persist(nm->nr_oops(), (objArrayOop) resolved);
4507     nm->oops_do(&persist);
4508   }
4509 }
4510 
4511 static void print_oop(void *p, oop obj, outputStream* st) {
4512   if (!log_develop_is_enabled(Trace, jvmcont) && st != NULL) return;
4513 
4514   if (st == NULL) st = tty;
4515 
4516   st->print_cr(INTPTR_FORMAT ": ", p2i(p));
4517   if (obj == NULL) {
4518     st->print_cr("*NULL*");
4519   } else {
4520     if (oopDesc::is_oop_or_null(obj)) {
4521       if (obj->is_objArray()) {
4522         st->print_cr("valid objArray: " INTPTR_FORMAT, p2i(obj));
4523       } else {
4524         obj->print_value_on(st);
4525         // obj->print();
4526       }
4527     } else {
4528       st->print_cr("invalid oop: " INTPTR_FORMAT, p2i(obj));
4529     }
4530     st->cr();
4531   }
4532 }
4533 
4534 void ContMirror::print_hframes(outputStream* st) {
4535   if (st != NULL && !log_develop_is_enabled(Trace, jvmcont)) return;
4536   if (st == NULL) st = tty;
4537 
4538   st->print_cr("------- hframes ---------");
4539   st->print_cr("sp: %d length: %d", _sp, _stack_length);
4540   int i = 0;
4541   for (hframe f = last_frame<mode_slow>(); !f.is_empty(); f = f.sender<mode_slow>(*this)) {
4542     st->print_cr("frame: %d", i);
4543     f.print_on(*this, st);
4544     i++;
4545   }
4546   st->print_cr("======= end hframes =========");
4547 }
4548 
4549 #ifdef ASSERT
4550 
4551 static jlong java_tid(JavaThread* thread) {
4552   return java_lang_Thread::thread_id(thread->threadObj());
4553 }
4554 
4555 static void print_frames(JavaThread* thread, outputStream* st) {
4556   if (st != NULL && !log_develop_is_enabled(Trace, jvmcont)) return;
4557   if (st == NULL) st = tty;
4558 
4559   st->print_cr("------- frames ---------");
4560   RegisterMap map(thread, true, false);
4561 #ifndef PRODUCT
4562   map.set_skip_missing(true);
4563   ResetNoHandleMark rnhm;
4564   ResourceMark rm;
4565   HandleMark hm;
4566   FrameValues values;
4567 #endif
4568 
4569   int i = 0;
4570   for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
4571 #ifndef PRODUCT
4572     // print_vframe(f, &map, st);
4573     f.describe(values, i, &map);
4574 #else
4575     print_vframe(f, &map, st);
4576 #endif
4577     i++;
4578   }
4579 #ifndef PRODUCT
4580   values.print(thread);
4581 #endif
4582   st->print_cr("======= end frames =========");
4583 }
4584 
4585 // static inline bool is_not_entrant(const frame& f) {
4586 //   return  f.is_compiled_frame() ? f.cb()->as_nmethod()->is_not_entrant() : false;
4587 // }
4588 
4589 static char* method_name(Method* m) {
4590   return m != NULL ? m->name_and_sig_as_C_string() : NULL;
4591 }
4592 
4593 static inline Method* top_java_frame_method(const frame& f) {
4594   Method* m = NULL;
4595   if (f.is_interpreted_frame()) {
4596     m = f.interpreter_frame_method();
4597   } else if (f.is_compiled_frame()) {
4598     CompiledMethod* cm = f.cb()->as_compiled_method();
4599     ScopeDesc* scope = cm->scope_desc_at(f.pc());
4600     m = scope->method();
4601   }
4602   // m = ((CompiledMethod*)f.cb())->method();
4603   return m;
4604 }
4605 
4606 static inline Method* bottom_java_frame_method(const frame& f) {
4607   return Frame::frame_method(f);
4608 }
4609 
4610 static char* top_java_frame_name(const frame& f) {
4611   return method_name(top_java_frame_method(f));
4612 }
4613 
4614 static char* bottom_java_frame_name(const frame& f) {
4615   return method_name(bottom_java_frame_method(f));
4616 }
4617 
4618 static bool assert_top_java_frame_name(const frame& f, const char* name) {
4619   ResourceMark rm;
4620   bool res = (strcmp(top_java_frame_name(f), name) == 0);
4621   assert (res, "name: %s", top_java_frame_name(f));
4622   return res;
4623 }
4624 
4625 static bool assert_bottom_java_frame_name(const frame& f, const char* name) {
4626   ResourceMark rm;
4627   bool res = (strcmp(bottom_java_frame_name(f), name) == 0);
4628   assert (res, "name: %s", bottom_java_frame_name(f));
4629   return res;
4630 }
4631 
4632 static inline bool is_deopt_return(address pc, const frame& sender) {
4633   if (sender.is_interpreted_frame()) return false;
4634 
4635   CompiledMethod* cm = sender.cb()->as_compiled_method();
4636   return cm->is_deopt_pc(pc);
4637 }
4638 
4639 template <typename FrameT>
4640 static CodeBlob* slow_get_cb(const FrameT& f) {
4641   assert (!f.is_interpreted_frame(), "");
4642   CodeBlob* cb = f.cb();
4643   if (cb == NULL) {
4644     cb = CodeCache::find_blob(f.pc());
4645   }
4646   assert (cb != NULL, "");
4647   return cb;
4648 }
4649 
4650 template <typename FrameT>
4651 static const ImmutableOopMap* slow_get_oopmap(const FrameT& f) {
4652   const ImmutableOopMap* oopmap = f.oop_map();
4653   if (oopmap == NULL) {
4654     oopmap = OopMapSet::find_map(slow_get_cb(f), f.pc());
4655   }
4656   assert (oopmap != NULL, "");
4657   return oopmap;
4658 }
4659 
4660 template <typename FrameT>
4661 static int slow_size(const FrameT& f) { 
4662   return slow_get_cb(f)->frame_size() * wordSize; 
4663 }
4664 
4665 template <typename FrameT>
4666 static address slow_return_pc(const FrameT& f) { 
4667   return *slow_return_pc_address<NonInterpretedUnknown>(f); 
4668 }
4669 
4670 template <typename FrameT>
4671 static int slow_stack_argsize(const FrameT& f) { 
4672   CodeBlob* cb = slow_get_cb(f);
4673   assert (cb->is_compiled(), "");
4674   return cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size; 
4675 }
4676 
4677 template <typename FrameT>
4678 static int slow_num_oops(const FrameT& f) { 
4679   return slow_get_oopmap(f)->num_oops(); 
4680 }
4681 
4682 static void print_blob(outputStream* st, address addr) {
4683   CodeBlob* b = CodeCache::find_blob_unsafe(addr);
4684   st->print("address: " INTPTR_FORMAT " blob: ", p2i(addr));
4685   if (b != NULL) {
4686     b->dump_for_addr(addr, st, false);
4687   } else {
4688     st->print_cr("NULL");
4689   }
4690 }
4691 
4692 // void static stop() {
4693 //     print_frames(JavaThread::current(), NULL);
4694 //     assert (false, "");
4695 // }
4696 
4697 // void static stop(const frame& f) {
4698 //     f.print_on(tty);
4699 //     stop();
4700 // }
4701 #endif
4702 
4703 // #ifdef ASSERT
4704 // #define JAVA_THREAD_OFFSET(field) tty->print_cr("JavaThread." #field " 0x%x", in_bytes(JavaThread:: cat2(field,_offset()) ))
4705 // #define cat2(a,b)         cat2_hidden(a,b)
4706 // #define cat2_hidden(a,b)  a ## b
4707 // #define cat3(a,b,c)       cat3_hidden(a,b,c)
4708 // #define cat3_hidden(a,b,c)  a ## b ## c
4709 
4710 // static void print_JavaThread_offsets() {
4711 //   JAVA_THREAD_OFFSET(threadObj);
4712 //   JAVA_THREAD_OFFSET(jni_environment);
4713 //   JAVA_THREAD_OFFSET(pending_jni_exception_check_fn);
4714 //   JAVA_THREAD_OFFSET(last_Java_sp);
4715 //   JAVA_THREAD_OFFSET(last_Java_pc);
4716 //   JAVA_THREAD_OFFSET(frame_anchor);
4717 //   JAVA_THREAD_OFFSET(callee_target);
4718 //   JAVA_THREAD_OFFSET(vm_result);
4719 //   JAVA_THREAD_OFFSET(vm_result_2);
4720 //   JAVA_THREAD_OFFSET(thread_state);
4721 //   JAVA_THREAD_OFFSET(saved_exception_pc);
4722 //   JAVA_THREAD_OFFSET(osthread);
4723 //   JAVA_THREAD_OFFSET(continuation);
4724 //   JAVA_THREAD_OFFSET(exception_oop);
4725 //   JAVA_THREAD_OFFSET(exception_pc);
4726 //   JAVA_THREAD_OFFSET(exception_handler_pc);
4727 //   JAVA_THREAD_OFFSET(stack_overflow_limit);
4728 //   JAVA_THREAD_OFFSET(is_method_handle_return);
4729 //   JAVA_THREAD_OFFSET(stack_guard_state);
4730 //   JAVA_THREAD_OFFSET(reserved_stack_activation);
4731 //   JAVA_THREAD_OFFSET(suspend_flags);
4732 //   JAVA_THREAD_OFFSET(do_not_unlock_if_synchronized);
4733 //   JAVA_THREAD_OFFSET(should_post_on_exceptions_flag);
4734 // // #ifndef PRODUCT
4735 // //   static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
4736 // //   static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
4737 // // #endif // PRODUCT
4738 // // #if INCLUDE_JVMCI
4739 // //   static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
4740 // //   static ByteSize pending_monitorenter_offset()  { return byte_offset_of(JavaThread, _pending_monitorenter); }
4741 // //   static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); }
4742 // //   static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
4743 // //   static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
4744 // //   static ByteSize jvmci_counters_offset()        { return byte_offset_of(JavaThread, _jvmci_counters); }
4745 // // #endif // INCLUDE_JVMCI
4746 // }
4747 //
4748 // #endif