/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.inline.hpp"
#include "code/nmethod.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "oops/access.inline.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/continuationHelper.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/continuationWrapper.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ZGC
#include "gc/z/zStackChunkGCData.inline.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#include <type_traits>

/*
 * This file contains the implementation of continuation freezing (yield) and thawing (run).
 *
 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
 * would likely call these operations many thousands of times per second, on every core.
 *
 * Freeze might be called every time the application performs any I/O operation, every time it
 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
 * multiple times in each of those cases, as it is called by the return barrier, which may be
 * invoked on method return.
 *
 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
 * example, every effort is made to avoid Java-VM transitions as much as possible.
 *
 * On the fast path, all frames are known to be compiled, and the chunk requires no barriers,
 * so frames are simply copied and the bottom-most one is patched.
 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
 * and absolute pointers, and barriers are invoked.
 */

/************************************************

Thread-stack layout on freeze/thaw.
See corresponding stack-chunk layout in instanceStackChunkKlass.hpp

            +----------------------------+
            |      .                     |
            |      .                     |
            |      .                     |
            |   carrier frames           |
            |                            |
            |----------------------------|
            |                            |
            |    Continuation.run        |
            |                            |
            |============================|
            |    enterSpecial frame      |
            |  pc                        |
            |  rbp                       |
            |  -----                     |
        ^   |  int argsize               | = ContinuationEntry
        |   |  oopDesc* cont             |
        |   |  oopDesc* chunk            |
        |   |  ContinuationEntry* parent |
        |   |  ...                       |
        |   |============================| <------ JavaThread::_cont_entry = entry->sp()
        |   |  ? alignment word ?        |
        |   |----------------------------| <--\
        |   |                            |    |
        |   |  ? caller stack args ?     |    |   argsize (might not be 2-word aligned) words
Address |   |                            |    |   Caller is still in the chunk.
        |   |----------------------------|    |
        |   |  pc (? return barrier ?)   |    |  This pc contains the return barrier when the bottom-most frame
        |   |  rbp                       |    |  isn't the last one in the continuation.
        |   |                            |    |
        |   |    frame                   |    |
        |   |                            |    |
            +----------------------------|     \__ Continuation frames to be frozen/thawed
            |                            |     /
            |    frame                   |    |
            |                            |    |
            |----------------------------|    |
            |                            |    |
            |    frame                   |    |
            |                            |    |
            |----------------------------| <--/
            |                            |
            |    doYield/safepoint stub  | When preempting forcefully, we could have a safepoint stub
            |                            | instead of a doYield stub
            |============================| <- the sp passed to freeze
            |                            |
            |  Native freeze/thaw frames |
            |      .                     |
            |      .                     |
            |      .                     |
            +----------------------------+

************************************************/

static const bool TEST_THAW_ONE_CHUNK_FRAME = false; // force thawing frames one-at-a-time for testing

#define CONT_JFR false // emit low-level JFR events that count slow/fast path for continuation performance debugging only
#if CONT_JFR
  #define CONT_JFR_ONLY(code) code
#else
  #define CONT_JFR_ONLY(code)
#endif

// TODO: See AbstractAssembler::generate_stack_overflow_check,
// Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
// when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.
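
// An illustrative sketch, not part of the VM sources: the freeze/thaw operations
// implemented here are ultimately driven from Java through the internal
// jdk.internal.vm.Continuation API (used by virtual threads). Assuming a test
// program with access to that internal package, run() mounts the continuation
// (thawing frames on every mount after the first), and yield() freezes the
// continuation's frames into a stack chunk:
//
//   ContinuationScope scope = new ContinuationScope("demo");
//   Continuation cont = new Continuation(scope, () -> {
//     System.out.println("before yield");
//     Continuation.yield(scope);   // freeze: frames are copied into a stack chunk
//     System.out.println("after yield");
//   });
//   cont.run();   // prints "before yield", returns when the continuation yields
//   cont.run();   // thaw: prints "after yield" and completes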

// Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk

// Used to just annotate cold/hot branches
#define LIKELY(condition)   (condition)
#define UNLIKELY(condition) (condition)

// debugging functions
#ifdef ASSERT
extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue

static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }

static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
static void log_frames(JavaThread* thread, bool dolog = false);
static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr = nullptr, const char** code_name_ptr = nullptr, int* bci_ptr = nullptr);

#define assert_pfl(p, ...) \
do {                                           \
  if (!(p)) {                                  \
    JavaThread* t = JavaThread::active();      \
    if (t->has_last_Java_frame()) {            \
      tty->print_cr("assert(" #p ") failed:"); \
      t->print_frame_layout();                 \
    }                                          \
  }                                            \
  vmassert(p, __VA_ARGS__);                    \
} while(0)

#else
static void verify_continuation(oop continuation) { }
#define assert_pfl(p, ...)
#endif

static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);

static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier);
template<typename ConfigT> static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind);


// Entry point to freeze. Transitions are handled manually
// Called from gen_continuation_yield() in sharedRuntime_<cpu>.cpp through Continuation::freeze_entry();
template<typename ConfigT>
static JRT_BLOCK_ENTRY(int, freeze(JavaThread* current, intptr_t* sp))
  assert(sp == current->frame_anchor()->last_Java_sp(), "");

  if (current->raw_cont_fastpath() > current->last_continuation()->entry_sp() || current->raw_cont_fastpath() < sp) {
    current->set_cont_fastpath(nullptr);
  }

  return checked_cast<int>(ConfigT::freeze(current, sp));
JRT_END

JRT_LEAF(int, Continuation::prepare_thaw(JavaThread* thread, bool return_barrier))
  return prepare_thaw_internal(thread, return_barrier);
JRT_END

template<typename ConfigT>
static JRT_LEAF(intptr_t*, thaw(JavaThread* thread, int kind))
  // TODO: JRT_LEAF and NoHandleMark is problematic for JFR events.
  // vFrameStreamCommon allocates Handles in RegisterMap for continuations.
  // Also the preemption case with JVMTI events enabled might safepoint so
  // undo the NoSafepointVerifier here and rely on handling by ContinuationWrapper.
  // JRT_ENTRY instead?
  ResetNoHandleMark rnhm;
  DEBUG_ONLY(PauseNoSafepointVerifier pnsv(&__nsv);)

  // we might modify the code cache via BarrierSetNMethod::nmethod_entry_barrier
  MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));
  return ConfigT::thaw(thread, (Continuation::thaw_kind)kind);
JRT_END

JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
  JavaThread* thread = JavaThread::thread_from_jni_environment(env);
  return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
}
JVM_END

///////////

enum class oop_kind { NARROW, WIDE };
template <oop_kind oops, typename BarrierSetT>
class Config {
public:
  typedef Config<oops, BarrierSetT> SelfT;
  using OopT = std::conditional_t<oops == oop_kind::NARROW, narrowOop, oop>;

  static freeze_result freeze(JavaThread* thread, intptr_t* const sp) {
    freeze_result res = freeze_internal<SelfT, false>(thread, sp);
    JFR_ONLY(assert((res == freeze_ok) || (res == thread->last_freeze_fail_result()), "freeze failure not set"));
    return res;
  }

  static freeze_result freeze_preempt(JavaThread* thread, intptr_t* const sp) {
    return freeze_internal<SelfT, true>(thread, sp);
  }

  static intptr_t* thaw(JavaThread* thread, Continuation::thaw_kind kind) {
    return thaw_internal<SelfT>(thread, kind);
  }
};

#ifdef _WINDOWS
static void map_stack_pages(JavaThread* thread, size_t size, address sp) {
  address new_sp = sp - size;
  address watermark = thread->stack_overflow_state()->shadow_zone_growth_watermark();

  if (new_sp < watermark) {
    size_t page_size = os::vm_page_size();
    address last_touched_page = watermark - StackOverflow::stack_shadow_zone_size();
    size_t pages_to_touch = align_up(watermark - new_sp, page_size) / page_size;
    while (pages_to_touch-- > 0) {
      last_touched_page -= page_size;
      *last_touched_page = 0;
    }
    thread->stack_overflow_state()->set_shadow_zone_growth_watermark(new_sp);
  }
}
#endif

static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
  const size_t page_size = os::vm_page_size();
  if (size > page_size) {
    if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
      return false;
    }
    WINDOWS_ONLY(map_stack_pages(thread, size, sp));
  }
  return true;
}

#ifdef ASSERT
static oop get_continuation(JavaThread* thread) {
  assert(thread != nullptr, "");
  assert(thread->threadObj() != nullptr, "");
  return java_lang_Thread::continuation(thread->threadObj());
}
#endif // ASSERT

inline void clear_anchor(JavaThread* thread) {
  thread->frame_anchor()->clear();
}

static void set_anchor(JavaThread* thread, intptr_t* sp, address pc) {
  assert(pc != nullptr, "");

  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(sp);
  anchor->set_last_Java_pc(pc);
  ContinuationHelper::set_anchor_pd(anchor, sp);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

static void set_anchor(JavaThread* thread, intptr_t* sp) {
  address pc = ContinuationHelper::return_address_at(
                 sp - frame::sender_sp_ret_address_offset());
  set_anchor(thread, sp, pc);
}

static void set_anchor_to_entry(JavaThread* thread, ContinuationEntry* entry) {
  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp(entry->entry_sp());
  anchor->set_last_Java_pc(entry->entry_pc());
  ContinuationHelper::set_anchor_to_entry_pd(anchor, entry);

  assert(thread->has_last_Java_frame(), "");
  assert(thread->last_frame().cb() != nullptr, "");
}

#if CONT_JFR
class FreezeThawJfrInfo : public StackObj {
  short _e_size;
  short _e_num_interpreted_frames;
 public:

  FreezeThawJfrInfo() : _e_size(0), _e_num_interpreted_frames(0) {}
  inline void record_interpreted_frame() { _e_num_interpreted_frames++; }
  inline void record_size_copied(int size) { _e_size += size << LogBytesPerWord; }
  template<typename Event> void post_jfr_event(Event *e, oop continuation, JavaThread* jt);
};

template<typename Event> void FreezeThawJfrInfo::post_jfr_event(Event* e, oop continuation, JavaThread* jt) {
  if (e->should_commit()) {
    log_develop_trace(continuations)("JFR event: iframes: %d size: %d", _e_num_interpreted_frames, _e_size);
    e->set_carrierThread(JFR_JVM_THREAD_ID(jt));
    e->set_continuationClass(continuation->klass());
    e->set_interpretedFrames(_e_num_interpreted_frames);
    e->set_size(_e_size);
    e->commit();
  }
}
#endif // CONT_JFR

/////////////// FREEZE ////

class FreezeBase : public StackObj {
protected:
  JavaThread* const _thread;
  ContinuationWrapper& _cont;
  bool _barriers; // only set when we allocate a chunk

  intptr_t* _bottom_address;

  // Used for preemption only
  const bool _preempt;
  frame _last_frame;

  // Used to support freezing with held monitors
  int _monitors_in_lockstack;

  int _freeze_size; // total size of all frames plus metadata in words.
  int _total_align_size;

  intptr_t* _cont_stack_top;
  intptr_t* _cont_stack_bottom;

  CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)

#ifdef ASSERT
  intptr_t* _orig_chunk_sp;
  int _fast_freeze_size;
  bool _empty;
#endif

  JvmtiSampledObjectAllocEventCollector* _jvmti_event_collector;

  NOT_PRODUCT(int _frames;)
  DEBUG_ONLY(intptr_t* _last_write;)

  inline FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempt);

public:
  NOINLINE freeze_result freeze_slow();
  void freeze_fast_existing_chunk();

  CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
  void set_jvmti_event_collector(JvmtiSampledObjectAllocEventCollector* jsoaec) { _jvmti_event_collector = jsoaec; }

  inline int size_if_fast_freeze_available();

  inline frame& last_frame() { return _last_frame; }

#ifdef ASSERT
  bool check_valid_fast_path();
#endif

protected:
  inline void init_rest();
  void throw_stack_overflow_on_humongous_chunk();

  // fast path
  inline void copy_to_chunk(intptr_t* from, intptr_t* to, int size);
  inline void unwind_frames();
  inline void patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp);

  // slow path
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) = 0;

  int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }

private:
  // slow path
  frame freeze_start_frame();
  frame freeze_start_frame_on_preempt();
  NOINLINE freeze_result recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top);
  inline frame freeze_start_frame_yield_stub();
  template<typename FKind>
  inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
  inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
  inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
  freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
  void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
  NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
  NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
  NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
  NOINLINE void finish_freeze(const frame& f, const frame& top);

  void freeze_lockstack(stackChunkOop chunk);

  inline bool stack_overflow();

  static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
                                                                        : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
  template<typename FKind> static inline frame sender(const frame& f);
  template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
  inline void set_top_frame_metadata_pd(const frame& hf);
  inline void patch_pd(frame& callee, const frame& caller);
  inline void patch_pd_unused(intptr_t* sp);
  void adjust_interpreted_frame_unextended_sp(frame& f);
  static inline void prepare_freeze_interpreted_top_frame(frame& f);
  static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);

protected:
  void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
  bool freeze_fast_new_chunk(stackChunkOop chunk);
};

template <typename ConfigT>
class Freeze : public FreezeBase {
private:
  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

public:
  inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
    : FreezeBase(thread, cont, frame_sp, preempt) {}

  freeze_result try_freeze_fast();

protected:
  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) override { return allocate_chunk(stack_size, argsize_md); }
};

FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt) :
    _thread(thread), _cont(cont), _barriers(false), _preempt(preempt), _last_frame(false /* no initialization */) {
  DEBUG_ONLY(_jvmti_event_collector = nullptr;)

  assert(_thread != nullptr, "");
  assert(_thread->last_continuation()->entry_sp() == _cont.entrySP(), "");

  DEBUG_ONLY(_cont.entry()->verify_cookie();)

  assert(!Interpreter::contains(_cont.entryPC()), "");

  _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
#ifdef _LP64
  if (((intptr_t)_bottom_address & 0xf) != 0) {
    _bottom_address--;
  }
  assert(is_aligned(_bottom_address, frame::frame_alignment), "");
#endif

  log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
                p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
  assert(_bottom_address != nullptr, "");
  assert(_bottom_address <= _cont.entrySP(), "");
  DEBUG_ONLY(_last_write = nullptr;)

  assert(_cont.chunk_invariant(), "");
  assert(!Interpreter::contains(_cont.entryPC()), "");
#if !defined(PPC64) || defined(ZERO)
  static const int do_yield_frame_size = frame::metadata_words;
#else
  static const int do_yield_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
#endif
  // With preemption doYield() might not have been resolved yet
  assert(_preempt || ContinuationEntry::do_yield_nmethod()->frame_size() == do_yield_frame_size, "");

  if (preempt) {
    _last_frame = _thread->last_frame();
  }

  // properties of the continuation on the stack; all sizes are in words
  _cont_stack_top = frame_sp + (!preempt ? do_yield_frame_size : 0); // we don't freeze the doYield stub frame
  _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
      - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw

  log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
                cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  assert(cont_size() > 0, "");

  _monitors_in_lockstack = _thread->lock_stack().monitor_count();
}

void FreezeBase::init_rest() { // we want to postpone some initialization after chunk handling
  _freeze_size = 0;
  _total_align_size = 0;
  NOT_PRODUCT(_frames = 0;)
}

void FreezeBase::freeze_lockstack(stackChunkOop chunk) {
  assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "no room for lockstack");

  _thread->lock_stack().move_to_address((oop*)chunk->start_address());
  chunk->set_lockstack_size(checked_cast<uint8_t>(_monitors_in_lockstack));
  chunk->set_has_lockstack(true);
}

void FreezeBase::copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
  stackChunkOop chunk = _cont.tail();
  chunk->copy_from_stack_to_chunk(from, to, size);
  CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)

#ifdef ASSERT
  if (_last_write != nullptr) {
    assert(_last_write == to + size, "Missed a spot: _last_write: " INTPTR_FORMAT " to+size: " INTPTR_FORMAT
           " stack_size: %d _last_write offset: " PTR_FORMAT " to+size: " PTR_FORMAT, p2i(_last_write), p2i(to+size),
           chunk->stack_size(), _last_write-chunk->start_address(), to+size-chunk->start_address());
    _last_write = to;
  }
#endif
}

static void assert_frames_in_continuation_are_safe(JavaThread* thread) {
#ifdef ASSERT
  StackWatermark* watermark = StackWatermarkSet::get(thread, StackWatermarkKind::gc);
  if (watermark == nullptr) {
    return;
  }
  ContinuationEntry* ce = thread->last_continuation();
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip);
  map.set_include_argument_oops(false);
  for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
    watermark->assert_is_frame_safe(f);
  }
#endif // ASSERT
}

// Called _after_ the last possible safepoint during the freeze operation (chunk allocation)
void FreezeBase::unwind_frames() {
  ContinuationEntry* entry = _cont.entry();
  entry->flush_stack_processing(_thread);
  assert_frames_in_continuation_are_safe(_thread);
  JFR_ONLY(Jfr::check_and_process_sample_request(_thread);)
  set_anchor_to_entry(_thread, entry);
}

template <typename ConfigT>
freeze_result Freeze<ConfigT>::try_freeze_fast() {
  assert(_thread->thread_state() == _thread_in_vm, "");
  assert(_thread->cont_fastpath(), "");

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size == 0, "");

  stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words + _monitors_in_lockstack, _cont.argsize() + frame::metadata_words_at_top);
  if (freeze_fast_new_chunk(chunk)) {
    return freeze_ok;
  }
  if (_thread->has_pending_exception()) {
    return freeze_exception;
  }

  // TODO R REMOVE when deopt change is fixed
  assert(!_thread->cont_fastpath() || _barriers, "");
  log_develop_trace(continuations)("-- RETRYING SLOW --");
  return freeze_slow();
}

// Returns size needed if the continuation fits, otherwise 0.
int FreezeBase::size_if_fast_freeze_available() {
  stackChunkOop chunk = _cont.tail();
  if (chunk == nullptr || chunk->is_gc_mode() || chunk->requires_barriers() || chunk->has_mixed_frames()) {
    log_develop_trace(continuations)("chunk available %s", chunk == nullptr ? "no chunk" : "chunk requires barriers");
    return 0;
  }

  int total_size_needed = cont_size();
  const int chunk_sp = chunk->sp();

  // argsize can be nonzero if we have a caller, but the caller could be in a non-empty parent chunk,
  // so we subtract it only if we overlap with the caller, i.e. the current chunk isn't empty.
  // Consider leaving the chunk's argsize set when emptying it and removing the following branch,
  // although that would require changing stackChunkOopDesc::is_empty
  if (!chunk->is_empty()) {
    total_size_needed -= _cont.argsize() + frame::metadata_words_at_top;
  }

  total_size_needed += _monitors_in_lockstack;

  int chunk_free_room = chunk_sp - frame::metadata_words_at_bottom;
  bool available = chunk_free_room >= total_size_needed;
  log_develop_trace(continuations)("chunk available: %s size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
                available ? "yes" : "no", total_size_needed, _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
  return available ? total_size_needed : 0;
}

void FreezeBase::freeze_fast_existing_chunk() {
  stackChunkOop chunk = _cont.tail();

  DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
  assert(_fast_freeze_size > 0, "");

  if (!chunk->is_empty()) { // we are copying into a non-empty chunk
    DEBUG_ONLY(_empty = false;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (chunk->sp_address()
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
             "unexpected saved return address");
    }
#endif

    // the chunk's sp before the freeze, adjusted to point beyond the stack-passed arguments in the topmost frame
    // we overlap; we'll overwrite the chunk's top frame's callee arguments
    const int chunk_start_sp = chunk->sp() + _cont.argsize() + frame::metadata_words_at_top;
    assert(chunk_start_sp <= chunk->stack_size(), "sp not pointing into stack");

    // increase max_size by what we're freezing minus the overlap
    chunk->set_max_thawing_size(chunk->max_thawing_size() + cont_size() - _cont.argsize() - frame::metadata_words_at_top);

    intptr_t* const bottom_sp = _cont_stack_bottom - _cont.argsize() - frame::metadata_words_at_top;
    assert(bottom_sp == _bottom_address, "");
    // Because the chunk isn't empty, we know there's a caller in the chunk, therefore the bottom-most frame
    // should have a return barrier (installed back when we thawed it).
#ifdef ASSERT
    {
      intptr_t* retaddr_slot = (bottom_sp
                                - frame::sender_sp_ret_address_offset());
      assert(ContinuationHelper::return_address_at(retaddr_slot)
             == StubRoutines::cont_returnBarrier(),
             "should be the continuation return barrier");
    }
#endif
    // We copy the fp from the chunk back to the stack because it contains some caller data,
    // including, possibly, an oop that might have gone stale since we thawed.
    patch_stack_pd(bottom_sp, chunk->sp_address());
    // we don't patch the return pc at this time, so as not to make the stack unwalkable for async walks

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  } else { // the chunk is empty
    const int chunk_start_sp = chunk->stack_size();

    DEBUG_ONLY(_empty = true;)
    DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

    chunk->set_max_thawing_size(cont_size());
    chunk->set_bottom(chunk_start_sp - _cont.argsize() - frame::metadata_words_at_top);
    chunk->set_sp(chunk->bottom());

    freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
  }
}

bool FreezeBase::freeze_fast_new_chunk(stackChunkOop chunk) {
  DEBUG_ONLY(_empty = true;)

  // Install new chunk
  _cont.set_tail(chunk);

  if (UNLIKELY(chunk == nullptr || !_thread->cont_fastpath() || _barriers)) { // OOME/probably humongous
    log_develop_trace(continuations)("Retrying slow. Barriers: %d", _barriers);
    return false;
  }

  chunk->set_max_thawing_size(cont_size());

  // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
  // They'll then be stored twice: in the chunk and in the parent chunk's top frame
  const int chunk_start_sp = cont_size() + frame::metadata_words + _monitors_in_lockstack;
  assert(chunk_start_sp == chunk->stack_size(), "");

  DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

  freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA true));

  return true;
}

void FreezeBase::freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated)) {
  assert(chunk != nullptr, "");
  assert(!chunk->has_mixed_frames(), "");
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  assert(!chunk->requires_barriers(), "");
  assert(chunk == _cont.tail(), "");

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation on the stack, or a consistent chunk.
  unwind_frames();

  log_develop_trace(continuations)("freeze_fast start: chunk " INTPTR_FORMAT " size: %d orig sp: %d argsize: %d",
    p2i((oopDesc*)chunk), chunk->stack_size(), chunk_start_sp, _cont.argsize());
  assert(chunk_start_sp <= chunk->stack_size(), "");
  assert(chunk_start_sp >= cont_size(), "no room in the chunk");

  const int chunk_new_sp = chunk_start_sp - cont_size(); // the chunk's new sp, after freeze
  assert(!(_fast_freeze_size > 0) || (_orig_chunk_sp - (chunk->start_address() + chunk_new_sp)) == (_fast_freeze_size - _monitors_in_lockstack), "");

  intptr_t* chunk_top = chunk->start_address() + chunk_new_sp;
#ifdef ASSERT
  if (!_empty) {
    intptr_t* retaddr_slot = (_orig_chunk_sp
                              - frame::sender_sp_ret_address_offset());
    assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
           "unexpected saved return address");
  }
#endif

  log_develop_trace(continuations)("freeze_fast start: " INTPTR_FORMAT " sp: %d chunk_top: " INTPTR_FORMAT,
                              p2i(chunk->start_address()), chunk_new_sp, p2i(chunk_top));

  int adjust = frame::metadata_words_at_bottom;
#if INCLUDE_ASAN && defined(AARCH64)
  // Reading at offset frame::metadata_words_at_bottom from _cont_stack_top
  // will access memory at the callee frame, which on preemption cases will
  // be the VM native method being called. The Arm 64-bit ABI doesn't specify
  // a location where the frame record (returnpc+fp) has to be stored within
  // a stack frame, and GCC currently chooses to save it at the top of the
  // frame (lowest address). ASan treats this memory access in the callee as
  // an overflow access to one of the locals stored in that frame. For these
  // preemption cases we don't need to read these words anyway, so we avoid it.
  if (_preempt) {
    adjust = 0;
  }
#endif
  intptr_t* from = _cont_stack_top - adjust;
  intptr_t* to   = chunk_top - adjust;
  copy_to_chunk(from, to, cont_size() + adjust);
  // Because we're not patched yet, the chunk is now in a bad state

  // patch return pc of the bottom-most frozen frame (now in the chunk)
  // with the actual caller's return address
  intptr_t* chunk_bottom_retaddr_slot = (chunk_top + cont_size()
                                         - _cont.argsize()
                                         - frame::metadata_words_at_top
                                         - frame::sender_sp_ret_address_offset());
#ifdef ASSERT
  if (!_empty) {
    assert(ContinuationHelper::return_address_at(chunk_bottom_retaddr_slot)
           == StubRoutines::cont_returnBarrier(),
           "should be the continuation return barrier");
  }
#endif
  ContinuationHelper::patch_return_address_at(chunk_bottom_retaddr_slot,
                                              chunk->pc());

  // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
  chunk->set_sp(chunk_new_sp);

  // set chunk->pc to the return address of the topmost frame in the chunk
  if (_preempt) {
    // On aarch64/riscv64, the return pc of the top frame won't necessarily be at sp[-1].
    // Also, on x64, if the top frame is the native wrapper frame, sp[-1] will not
    // be the pc we used when creating the oopmap. Get the top's frame last pc from
    // the anchor instead.
    address last_pc = _last_frame.pc();
    ContinuationHelper::patch_return_address_at(chunk_top - frame::sender_sp_ret_address_offset(), last_pc);
    chunk->set_pc(last_pc);
    // For stub/native frames the fp is not used while frozen, and will be constructed
    // again when thawing the frame (see ThawBase::handle_preempted_continuation). We
    // patch it with a special bad address to help with debugging, particularly when
    // inspecting frames and identifying invalid accesses.
    patch_pd_unused(chunk_top);
  } else {
    chunk->set_pc(ContinuationHelper::return_address_at(
                  _cont_stack_top - frame::sender_sp_ret_address_offset()));
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  _cont.write();

  log_develop_trace(continuations)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    chunk->print_on(true, &ls);
  }

  // Verification
  assert(_cont.chunk_invariant(), "");
  chunk->verify();

#if CONT_JFR
  EventContinuationFreezeFast e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(chunk));
    DEBUG_ONLY(e.set_allocate(chunk_is_allocated);)
    e.set_size(cont_size() << LogBytesPerWord);
    e.commit();
  }
#endif
}

NOINLINE freeze_result FreezeBase::freeze_slow() {
#ifdef ASSERT
  ResourceMark rm;
#endif

  log_develop_trace(continuations)("freeze_slow #" INTPTR_FORMAT, _cont.hash());
  assert(_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");

#if CONT_JFR
  EventContinuationFreezeSlow e;
  if (e.should_commit()) {
    e.set_id(cast_from_oop<u8>(_cont.continuation()));
    e.commit();
  }
#endif

  init_rest();

  HandleMark hm(Thread::current());

  frame f = freeze_start_frame();

  LogTarget(Debug, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    f.print_on(&ls);
  }

  frame caller; // the frozen caller in the chunk
  freeze_result res = recurse_freeze(f, caller, 0, false, true);

  if (res == freeze_ok) {
    finish_freeze(f, caller);
    _cont.write();
  }

  return res;
}

frame FreezeBase::freeze_start_frame() {
  if (LIKELY(!_preempt)) {
    return freeze_start_frame_yield_stub();
  } else {
    return freeze_start_frame_on_preempt();
  }
}

frame FreezeBase::freeze_start_frame_yield_stub() {
  frame f = _thread->last_frame();
  assert(ContinuationEntry::do_yield_nmethod()->contains(f.pc()), "must be");
  f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
  return f;
}

frame FreezeBase::freeze_start_frame_on_preempt() {
  assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
  assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
  return _last_frame;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
  assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
  assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
         || ((top && _preempt) == f.is_native_frame()), "");

  if (stack_overflow()) {
    return freeze_exception;
  }

  if (f.is_compiled_frame()) {
    if (UNLIKELY(f.oop_map() == nullptr)) {
      // special native frame
      return freeze_pinned_native;
    }
    return recurse_freeze_compiled_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (f.is_interpreted_frame()) {
    assert(!f.interpreter_frame_method()->is_native() || (top && _preempt), "");
    return recurse_freeze_interpreted_frame(f, caller, callee_argsize, callee_interpreted);
  } else if (top && _preempt) {
    assert(f.is_native_frame() || f.is_runtime_frame(), "");
    return f.is_native_frame() ? recurse_freeze_native_frame(f, caller) : recurse_freeze_stub_frame(f, caller);
  } else {
    // Frame can't be frozen. Most likely the call_stub or upcall_stub,
    // which indicates there are further native frames up the stack.
    return freeze_pinned_native;
  }
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
template<typename FKind>
inline freeze_result FreezeBase::recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize) {
  assert(FKind::is_instance(f), "");

  assert(fsize > 0, "");
  assert(argsize >= 0, "");
  _freeze_size += fsize;
  NOT_PRODUCT(_frames++;)

  assert(FKind::frame_bottom(f) <= _bottom_address, "");

  // We don't use FKind::frame_bottom(f) == _bottom_address because on x64 there's sometimes an extra word between
  // enterSpecial and an interpreted frame
  if (FKind::frame_bottom(f) >= _bottom_address - 1) {
    return finalize_freeze(f, caller, argsize); // recursion end
  } else {
    frame senderf = sender<FKind>(f);
    assert(FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
    freeze_result result = recurse_freeze(senderf, caller, argsize, FKind::interpreted, false); // recursive call
    return result;
  }
}

inline void FreezeBase::before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("======== FREEZING FRAME interpreted: %d bottom: %d", f.is_interpreted_frame(), is_bottom_frame);
    ls.print_cr("fsize: %d argsize: %d", fsize, argsize);
    f.print_value_on(&ls);
  }
  assert(caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
}

inline void FreezeBase::after_freeze_java_frame(const frame& hf, bool is_bottom_frame) {
  LogTarget(Trace, continuations) lt;
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    DEBUG_ONLY(hf.print_value_on(&ls);)
    assert(hf.is_heap_frame(), "should be");
    DEBUG_ONLY(print_frame_layout(hf, false, &ls);)
    if (is_bottom_frame) {
      ls.print_cr("bottom h-frame:");
      hf.print_on(&ls);
    }
  }
}

// The parameter argsize_md includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, int argsize_md) {
  int argsize = argsize_md - frame::metadata_words_at_top;
  assert(callee.is_interpreted_frame()
    || ContinuationHelper::Frame::is_stub(callee.cb())
    || callee.cb()->as_nmethod()->is_osr_method()
    || argsize == _cont.argsize(), "argsize: %d cont.argsize: %d", argsize, _cont.argsize());
  log_develop_trace(continuations)("bottom: " INTPTR_FORMAT " count %d size: %d argsize: %d",
    p2i(_bottom_address), _frames, _freeze_size << LogBytesPerWord, argsize);

  LogTarget(Trace, continuations) lt;

#ifdef ASSERT
  bool empty = _cont.is_empty();
  log_develop_trace(continuations)("empty: %d", empty);
#endif

  stackChunkOop chunk = _cont.tail();

  assert(chunk == nullptr || (chunk->max_thawing_size() == 0) == chunk->is_empty(), "");

  _freeze_size += frame::metadata_words; // for top frame's metadata

  int overlap = 0; // the args overlap the caller -- if there is one in this chunk and is of the same kind
  int unextended_sp = -1;
  if (chunk != nullptr) {
    if (!chunk->is_empty()) {
      StackChunkFrameStream<ChunkFrames::Mixed> last(chunk);
      unextended_sp = chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp());
      bool top_interpreted = Interpreter::contains(chunk->pc());
      if (callee.is_interpreted_frame() == top_interpreted) {
        overlap = argsize_md;
      }
    } else {
      unextended_sp = chunk->stack_size() - frame::metadata_words_at_top;
    }
  }

  log_develop_trace(continuations)("finalize _size: %d overlap: %d unextended_sp: %d", _freeze_size, overlap, unextended_sp);

  _freeze_size -= overlap;
  assert(_freeze_size >= 0, "");

  assert(chunk == nullptr || chunk->is_empty()
          || unextended_sp == chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp()), "");
  assert(chunk != nullptr || unextended_sp < _freeze_size, "");

  _freeze_size += _monitors_in_lockstack;

  // _barriers can be set to true by an allocation in freeze_fast, in which case the chunk is available
  bool allocated_old_in_freeze_fast = _barriers;
  assert(!allocated_old_in_freeze_fast || (unextended_sp >= _freeze_size && chunk->is_empty()),
    "Chunk allocated in freeze_fast is of insufficient size "
    "unextended_sp: %d size: %d is_empty: %d", unextended_sp, _freeze_size, chunk->is_empty());
  assert(!allocated_old_in_freeze_fast || (!UseZGC && !UseG1GC), "Unexpected allocation");

  DEBUG_ONLY(bool empty_chunk = true);
  if (unextended_sp < _freeze_size || chunk->is_gc_mode() || (!allocated_old_in_freeze_fast && chunk->requires_barriers())) {
    // ALLOCATE NEW CHUNK

    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      if (chunk == nullptr) {
        ls.print_cr("no chunk");
      } else {
        ls.print_cr("chunk barriers: %d _size: %d free size: %d",
          chunk->requires_barriers(), _freeze_size, chunk->sp() - frame::metadata_words);
        chunk->print_on(&ls);
      }
    }

    _freeze_size += overlap; // we're allocating a new chunk, so no overlap
    // overlap = 0;

    chunk = allocate_chunk_slow(_freeze_size, argsize_md);
    if (chunk == nullptr) {
      return freeze_exception;
    }

    // Install new chunk
    _cont.set_tail(chunk);
    assert(chunk->is_empty(), "");
  } else {
    // REUSE EXISTING CHUNK
    log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
    if (chunk->is_empty()) {
      int sp = chunk->stack_size() - argsize_md;
      chunk->set_sp(sp);
      chunk->set_bottom(sp);
      _freeze_size += overlap;
      assert(chunk->max_thawing_size() == 0, "");
    } DEBUG_ONLY(else empty_chunk = false;)
  }
  assert(!chunk->is_gc_mode(), "");
  assert(!chunk->has_bitmap(), "");
  chunk->set_has_mixed_frames(true);

  assert(chunk->requires_barriers() == _barriers, "");
  assert(!_barriers || chunk->is_empty(), "");

  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");

  if (_preempt) {
    frame top_frame = _thread->last_frame();
    if (top_frame.is_interpreted_frame()) {
      // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
      // We need it so that on resume we can restore the sp to the right place, since
      // thawing might add an alignment word to the expression stack (see finish_thaw()).
      // We do it now that we know freezing will be successful.
      prepare_freeze_interpreted_top_frame(top_frame);
    }

    // Do this now so should_process_args_at_top() is set before calling finish_freeze
    // in case we might need to apply GC barriers to frames in this stackChunk.
    if (_thread->at_preemptable_init()) {
      assert(top_frame.is_interpreted_frame(), "only InterpreterRuntime::_new/resolve_from_cache allowed");
      chunk->set_at_klass_init(true);
      Method* m = top_frame.interpreter_frame_method();
      Bytecode current_bytecode = Bytecode(m, top_frame.interpreter_frame_bcp());
      Bytecodes::Code code = current_bytecode.code();
      int exp_size = top_frame.interpreter_frame_expression_stack_size();
      if (code == Bytecodes::Code::_invokestatic && exp_size > 0) {
        chunk->set_has_args_at_top(true);
      }
    }
  }

  // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
  // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
  // will either see no continuation or a consistent chunk.
  unwind_frames();

  chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);

  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top chunk:");
    chunk->print_on(&ls);
  }

  if (_monitors_in_lockstack > 0) {
    freeze_lockstack(chunk);
  }

  // The topmost existing frame in the chunk; or an empty frame if the chunk is empty
  caller = StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame();

  DEBUG_ONLY(_last_write = caller.unextended_sp() + (empty_chunk ? argsize_md : overlap);)

  assert(chunk->is_in_chunk(_last_write - _freeze_size),
    "last_write-size: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(_last_write-_freeze_size), p2i(chunk->start_address()));
#ifdef ASSERT
  if (lt.develop_is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("top hframe before (freeze):");
    assert(caller.is_heap_frame(), "should be");
    caller.print_on(&ls);
  }

  assert(!empty || Continuation::is_continuation_entry_frame(callee, nullptr), "");

  frame entry = sender(callee);

  assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
  assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
#endif

  return freeze_ok_bottom;
}

// After freezing a frame we need to possibly adjust some values related to the caller frame.
void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
  if (is_bottom_frame) {
    // If we're the bottom frame, we need to replace the return barrier with the real
    // caller's pc.
    address last_pc = caller.pc();
    assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
    ContinuationHelper::Frame::patch_pc(caller, last_pc);
  } else {
    assert(!caller.is_empty(), "");
  }

  patch_pd(hf, caller);

  if (f.is_interpreted_frame()) {
    assert(hf.is_heap_frame(), "should be");
    ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
  }

#ifdef ASSERT
  if (hf.is_compiled_frame()) {
    if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
      log_develop_trace(continuations)("Freezing deoptimized frame");
      assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
      assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
    }
  }
#endif
}

#ifdef ASSERT
static void verify_frame_top(const frame& f, intptr_t* top) {
  ResourceMark rm;
  InterpreterOopMap mask;
  f.interpreted_frame_oop_map(&mask);
  assert(top <= ContinuationHelper::InterpretedFrame::frame_top(f, &mask),
         "frame_top: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT,
           p2i(top), p2i(ContinuationHelper::InterpretedFrame::frame_top(f, &mask)));
}
#endif // ASSERT

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, frame& caller,
                                                                    int callee_argsize /* incl. metadata */,
                                                                    bool callee_interpreted) {
  adjust_interpreted_frame_unextended_sp(f);

  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
  const int fsize = pointer_delta_as_int(stack_frame_bottom, stack_frame_top);

  DEBUG_ONLY(verify_frame_top(f, stack_frame_top));

  Method* frame_method = ContinuationHelper::Frame::frame_method(f);
  // including metadata between f and its args
  const int argsize = ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top;

  log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d",
    frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize, callee_interpreted);
  // we'd rather not yield inside methods annotated with @JvmtiMountTransition
  assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::InterpretedFrame>(f, caller, fsize, argsize);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  bool is_bottom_frame = result == freeze_ok_bottom;
  assert(!caller.is_empty() || is_bottom_frame, "");

  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, is_bottom_frame);)

  frame hf = new_heap_frame<ContinuationHelper::InterpretedFrame>(f, caller);
  _total_align_size += frame::align_wiggle; // add alignment room for internal interpreted frame alignment on AArch64/PPC64

  intptr_t* heap_frame_top = ContinuationHelper::InterpretedFrame::frame_top(hf, callee_argsize, callee_interpreted);
  intptr_t* heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
  assert(heap_frame_bottom == heap_frame_top + fsize, "");

  // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
  // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
  assert(!is_bottom_frame || !caller.is_interpreted_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");

  relativize_interpreted_frame_metadata(f, hf);

  patch(f, hf, caller, is_bottom_frame);

  CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;

  // Mark frame_method's GC epoch for class redefinition on_stack calculation.
  frame_method->record_gc_epoch();

  return freeze_ok;
}

// The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
// See also StackChunkFrameStream<frame_kind>::frame_size()
freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
                                                        int callee_argsize /* incl. metadata */,
                                                        bool callee_interpreted) {
  // The frame's top never includes the stack arguments to the callee
  intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
  intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
  // including metadata between f and its stackargs
  const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
  const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);

  log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
                             ContinuationHelper::Frame::frame_method(f) != nullptr ?
                             ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
                             _freeze_size, fsize, argsize);
  // we'd rather not yield inside methods annotated with @JvmtiMountTransition
  assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  bool is_bottom_frame = result == freeze_ok_bottom;
  assert(!caller.is_empty() || is_bottom_frame, "");

  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)

  frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);

  intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);

  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
  assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");

  if (caller.is_interpreted_frame()) {
    // When thawing the frame we might need to add alignment (see Thaw::align)
    _total_align_size += frame::align_wiggle;
  }

  patch(f, hf, caller, is_bottom_frame);

  assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");

  DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
  caller = hf;
  return freeze_ok;
}

NOINLINE freeze_result FreezeBase::recurse_freeze_stub_frame(frame& f, frame& caller) {
  DEBUG_ONLY(frame fsender = sender(f);)
  assert(fsender.is_compiled_frame(), "sender should be compiled frame");

  intptr_t* const stack_frame_top = ContinuationHelper::StubFrame::frame_top(f);
  const int fsize = f.cb()->frame_size();

  log_develop_trace(continuations)("recurse_freeze_stub_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
    f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));

  freeze_result result = recurse_freeze_java_frame<ContinuationHelper::StubFrame>(f, caller, fsize, 0);
  if (UNLIKELY(result > freeze_ok_bottom)) {
    return result;
  }

  assert(result == freeze_ok, "should have caller");
  DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, false /*is_bottom_frame*/);)

  frame hf = new_heap_frame<ContinuationHelper::StubFrame>(f, caller);
  intptr_t* heap_frame_top = ContinuationHelper::StubFrame::frame_top(hf);

  copy_to_chunk(stack_frame_top, heap_frame_top, fsize);

  patch(f, hf, caller, false /*is_bottom_frame*/);
1346 1347 DEBUG_ONLY(after_freeze_java_frame(hf, false /*is_bottom_frame*/);) 1348 1349 caller = hf; 1350 return freeze_ok; 1351 } 1352 1353 NOINLINE freeze_result FreezeBase::recurse_freeze_native_frame(frame& f, frame& caller) { 1354 if (!f.cb()->as_nmethod()->method()->is_object_wait0()) { 1355 assert(f.cb()->as_nmethod()->method()->is_synchronized(), ""); 1356 // Synchronized native method case. Unlike the interpreter native wrapper, the compiled 1357 // native wrapper tries to acquire the monitor after marshalling the arguments from the 1358 // caller into the native convention. This is so that we have a valid oopMap in case of 1359 // having to block in the slow path. But that would require freezing those registers too 1360 // and then fixing them back on thaw in case of oops. To avoid complicating things, and 1361 // given that this would be a rare case anyway, just pin the vthread to the carrier. 1362 return freeze_pinned_native; 1363 } 1364 1365 intptr_t* const stack_frame_top = ContinuationHelper::NativeFrame::frame_top(f); 1366 // There are no stackargs but argsize must include the metadata 1367 const int argsize = frame::metadata_words_at_top; 1368 const int fsize = f.cb()->frame_size() + argsize; 1369 1370 log_develop_trace(continuations)("recurse_freeze_native_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT, 1371 f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize)); 1372 1373 freeze_result result = recurse_freeze_java_frame<ContinuationHelper::NativeFrame>(f, caller, fsize, argsize); 1374 if (UNLIKELY(result > freeze_ok_bottom)) { 1375 return result; 1376 } 1377 1378 assert(result == freeze_ok, "should have caller frame"); 1379 DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, false /* is_bottom_frame */);) 1380 1381 frame hf = new_heap_frame<ContinuationHelper::NativeFrame>(f, caller); 1382 intptr_t* heap_frame_top = ContinuationHelper::NativeFrame::frame_top(hf); 1383 1384 copy_to_chunk(stack_frame_top, heap_frame_top, fsize); 1385 1386 if (caller.is_interpreted_frame()) { 1387 // When thawing the frame we might need to add alignment (see Thaw::align) 1388 _total_align_size += frame::align_wiggle; 1389 } 1390 1391 patch(f, hf, caller, false /* is_bottom_frame */); 1392 1393 DEBUG_ONLY(after_freeze_java_frame(hf, false /* is_bottom_frame */);) 1394 1395 caller = hf; 1396 return freeze_ok; 1397 } 1398 1399 NOINLINE void FreezeBase::finish_freeze(const frame& f, const frame& top) { 1400 stackChunkOop chunk = _cont.tail(); 1401 1402 LogTarget(Trace, continuations) lt; 1403 if (lt.develop_is_enabled()) { 1404 LogStream ls(lt); 1405 assert(top.is_heap_frame(), "should be"); 1406 top.print_on(&ls); 1407 } 1408 1409 set_top_frame_metadata_pd(top); 1410 1411 chunk->set_sp(chunk->to_offset(top.sp())); 1412 chunk->set_pc(top.pc()); 1413 1414 chunk->set_max_thawing_size(chunk->max_thawing_size() + _total_align_size); 1415 1416 assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "clash with lockstack"); 1417 1418 // At this point the chunk is consistent 1419 1420 if (UNLIKELY(_barriers)) { 1421 log_develop_trace(continuations)("do barriers on old chunk"); 1422 // Serial and Parallel GC can allocate objects directly into the old generation. 1423 // In that case we want to relativize the derived pointers eagerly so that 1424 // old chunks are all in GC mode.
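// Rough per-GC summary of the handling below (illustrative, not normative):
//   Serial/Parallel: ContinuationGCSupport::transform_stack_chunk() eagerly
//                    relativizes derived pointers and puts the chunk in GC mode.
//   Shenandoah:      derived pointers are relativized concurrently.
//   G1/ZGC:          never reach this path (asserted below), as they don't
//                    allocate chunks outside eden / visible to marking here.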
1425 assert(!UseG1GC, "G1 can not deal with allocating outside of eden"); 1426 assert(!UseZGC, "ZGC can not deal with allocating chunks visible to marking"); 1427 if (UseShenandoahGC) { 1428 _cont.tail()->relativize_derived_pointers_concurrently(); 1429 } else { 1430 ContinuationGCSupport::transform_stack_chunk(_cont.tail()); 1431 } 1432 // For objects in the old generation we must maintain the remembered set 1433 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(); 1434 } 1435 1436 log_develop_trace(continuations)("finish_freeze: has_mixed_frames: %d", chunk->has_mixed_frames()); 1437 if (lt.develop_is_enabled()) { 1438 LogStream ls(lt); 1439 chunk->print_on(true, &ls); 1440 } 1441 1442 if (lt.develop_is_enabled()) { 1443 LogStream ls(lt); 1444 ls.print_cr("top hframe after (freeze):"); 1445 assert(_cont.last_frame().is_heap_frame(), "should be"); 1446 _cont.last_frame().print_on(&ls); 1447 DEBUG_ONLY(print_frame_layout(top, false, &ls);) 1448 } 1449 1450 assert(_cont.chunk_invariant(), ""); 1451 } 1452 1453 inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive native code 1454 JavaThread* t = !_preempt ? _thread : JavaThread::current(); 1455 assert(t == JavaThread::current(), ""); 1456 if (os::current_stack_pointer() < t->stack_overflow_state()->shadow_zone_safe_limit()) { 1457 if (!_preempt) { 1458 ContinuationWrapper::SafepointOp so(t, _cont); // could also call _cont.done() instead 1459 Exceptions::_throw_msg(t, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Stack overflow while freezing"); 1460 } 1461 return true; 1462 } 1463 return false; 1464 } 1465 1466 class StackChunkAllocator : public MemAllocator { 1467 const size_t _stack_size; 1468 int _argsize_md; 1469 ContinuationWrapper& _continuation_wrapper; 1470 JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector; 1471 mutable bool _took_slow_path; 1472 1473 // Does the minimal amount of initialization needed for a TLAB allocation. 1474 // We don't need to do a full initialization, as such an allocation need not be immediately walkable. 
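// Worked example (hypothetical numbers): with _stack_size == 256 words and
// _argsize_md == 2, initialize() below sets size = 256 and bottom = sp = 254,
// so the chunk starts out empty, with the last _argsize_md words of the stack
// area reserved for the bottom frame's caller args plus metadata.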
1475 virtual oop initialize(HeapWord* mem) const override { 1476 assert(_stack_size > 0, ""); 1477 assert(_stack_size <= max_jint, ""); 1478 assert(_word_size > _stack_size, ""); 1479 1480 // zero out fields (but not the stack) 1481 const size_t hs = oopDesc::header_size(); 1482 if (oopDesc::has_klass_gap()) { 1483 oopDesc::set_klass_gap(mem, 0); 1484 } 1485 Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs); 1486 1487 int bottom = (int)_stack_size - _argsize_md; 1488 1489 jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size); 1490 jdk_internal_vm_StackChunk::set_bottom(mem, bottom); 1491 jdk_internal_vm_StackChunk::set_sp(mem, bottom); 1492 1493 return finish(mem); 1494 } 1495 1496 stackChunkOop allocate_fast() const { 1497 if (!UseTLAB) { 1498 return nullptr; 1499 } 1500 1501 HeapWord* const mem = MemAllocator::mem_allocate_inside_tlab_fast(); 1502 if (mem == nullptr) { 1503 return nullptr; 1504 } 1505 1506 oop obj = initialize(mem); 1507 return stackChunkOopDesc::cast(obj); 1508 } 1509 1510 public: 1511 StackChunkAllocator(Klass* klass, 1512 size_t word_size, 1513 Thread* thread, 1514 size_t stack_size, 1515 int argsize_md, 1516 ContinuationWrapper& continuation_wrapper, 1517 JvmtiSampledObjectAllocEventCollector* jvmti_event_collector) 1518 : MemAllocator(klass, word_size, thread), 1519 _stack_size(stack_size), 1520 _argsize_md(argsize_md), 1521 _continuation_wrapper(continuation_wrapper), 1522 _jvmti_event_collector(jvmti_event_collector), 1523 _took_slow_path(false) {} 1524 1525 // Provides its own, specialized allocation which skips instrumentation 1526 // if the memory can be allocated without going to a slow-path. 1527 stackChunkOop allocate() const { 1528 // First try to allocate without any slow-paths or instrumentation. 1529 stackChunkOop obj = allocate_fast(); 1530 if (obj != nullptr) { 1531 return obj; 1532 } 1533 1534 // Now try full-blown allocation with all expensive operations, 1535 // including potentially safepoint operations. 1536 _took_slow_path = true; 1537 1538 // Protect unhandled Loom oops 1539 ContinuationWrapper::SafepointOp so(_thread, _continuation_wrapper); 1540 1541 // Can safepoint 1542 _jvmti_event_collector->start(); 1543 1544 // Can safepoint 1545 return stackChunkOopDesc::cast(MemAllocator::allocate()); 1546 } 1547 1548 bool took_slow_path() const { 1549 return _took_slow_path; 1550 } 1551 }; 1552 1553 template <typename ConfigT> 1554 stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size, int argsize_md) { 1555 log_develop_trace(continuations)("allocate_chunk allocating new chunk"); 1556 1557 InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass()); 1558 size_t size_in_words = klass->instance_size(stack_size); 1559 1560 if (CollectedHeap::stack_chunk_max_size() > 0 && size_in_words >= CollectedHeap::stack_chunk_max_size()) { 1561 if (!_preempt) { 1562 throw_stack_overflow_on_humongous_chunk(); 1563 } 1564 return nullptr; 1565 } 1566 1567 JavaThread* current = _preempt ? JavaThread::current() : _thread; 1568 assert(current == JavaThread::current(), "should be current"); 1569 1570 // Allocate the chunk. 1571 // 1572 // This might safepoint while allocating, but all safepointing due to 1573 // instrumentation has been deferred. This property is important for 1574 // some GCs, as this ensures that the allocated object is in the young 1575 // generation / newly allocated memory.
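// The allocation below is two-tiered (illustrative sketch of allocate() above):
//   1. allocate_fast(): TLAB bump-pointer allocation plus the minimal
//      initialize(); no safepoint, no instrumentation.
//   2. MemAllocator::allocate(): full slow path; may safepoint, so unhandled
//      Loom oops are first protected with a SafepointOp and the deferred
//      JVMTI sampled-alloc collector is started.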
1576 StackChunkAllocator allocator(klass, size_in_words, current, stack_size, argsize_md, _cont, _jvmti_event_collector); 1577 stackChunkOop chunk = allocator.allocate(); 1578 1579 if (chunk == nullptr) { 1580 return nullptr; // OOME 1581 } 1582 1583 // assert that chunk is properly initialized 1584 assert(chunk->stack_size() == (int)stack_size, ""); 1585 assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size); 1586 assert(chunk->sp() == chunk->bottom(), ""); 1587 assert((intptr_t)chunk->start_address() % 8 == 0, ""); 1588 assert(chunk->max_thawing_size() == 0, ""); 1589 assert(chunk->pc() == nullptr, ""); 1590 assert(chunk->is_empty(), ""); 1591 assert(chunk->flags() == 0, ""); 1592 assert(chunk->is_gc_mode() == false, ""); 1593 assert(chunk->lockstack_size() == 0, ""); 1594 1595 // fields are uninitialized 1596 chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk()); 1597 chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation()); 1598 1599 #if INCLUDE_ZGC 1600 if (UseZGC) { 1601 ZStackChunkGCData::initialize(chunk); 1602 assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation"); 1603 _barriers = false; 1604 } else 1605 #endif 1606 #if INCLUDE_SHENANDOAHGC 1607 if (UseShenandoahGC) { 1608 _barriers = chunk->requires_barriers(); 1609 } else 1610 #endif 1611 { 1612 if (!allocator.took_slow_path()) { 1613 // Guaranteed to be in young gen / newly allocated memory 1614 assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation"); 1615 _barriers = false; 1616 } else { 1617 // Some GCs could put direct allocations in old gen for slow-path 1618 // allocations; need to explicitly check if that was the case. 1619 _barriers = chunk->requires_barriers(); 1620 } 1621 } 1622 1623 if (_barriers) { 1624 log_develop_trace(continuations)("allocation requires barriers"); 1625 } 1626 1627 assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), ""); 1628 1629 return chunk; 1630 } 1631 1632 void FreezeBase::throw_stack_overflow_on_humongous_chunk() { 1633 ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead 1634 Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk"); 1635 } 1636 1637 class AnchorMark : public StackObj { 1638 JavaThread* _current; 1639 frame& _top_frame; 1640 intptr_t* _last_sp_from_frame; 1641 bool _is_interpreted; 1642 1643 public: 1644 AnchorMark(JavaThread* current, frame& f) : _current(current), _top_frame(f), _is_interpreted(false) { 1645 intptr_t* sp = anchor_mark_set_pd(); 1646 set_anchor(_current, sp); 1647 } 1648 ~AnchorMark() { 1649 clear_anchor(_current); 1650 anchor_mark_clear_pd(); 1651 } 1652 inline intptr_t* anchor_mark_set_pd(); 1653 inline void anchor_mark_clear_pd(); 1654 }; 1655 1656 #if INCLUDE_JVMTI 1657 static int num_java_frames(ContinuationWrapper& cont) { 1658 ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address) 1659 int count = 0; 1660 for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) { 1661 count += chunk->num_java_frames(); 1662 } 1663 return count; 1664 } 1665 1666 static void invalidate_jvmti_stack(JavaThread* thread) { 1667 JvmtiThreadState *state = thread->jvmti_thread_state(); 1668 if (state != nullptr) { 1669 state->invalidate_cur_stack_depth(); 1670 } 1671 } 1672 1673 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) { 1674 if 
(JvmtiExport::has_frame_pops(thread)) { 1675 int num_frames = num_java_frames(cont); 1676 1677 ContinuationWrapper::SafepointOp so(Thread::current(), cont); 1678 JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames); 1679 } 1680 invalidate_jvmti_stack(thread); 1681 } 1682 1683 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top, Continuation::preempt_kind pk) { 1684 assert(current->vthread() != nullptr, "must be"); 1685 1686 HandleMarkCleaner hm(current); // Cleanup vth and so._conth Handles 1687 Handle vth(current, current->vthread()); 1688 ContinuationWrapper::SafepointOp so(current, cont); 1689 1690 AnchorMark am(current, top); // Set anchor so that the stack is walkable. 1691 1692 JRT_BLOCK 1693 JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false); 1694 1695 if (current->pending_contended_entered_event()) { 1696 // No monitor JVMTI events for ObjectLocker case. 1697 if (pk != Continuation::object_locker) { 1698 JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor()); 1699 } 1700 current->set_contended_entered_monitor(nullptr); 1701 } 1702 JRT_BLOCK_END 1703 } 1704 #endif // INCLUDE_JVMTI 1705 1706 #ifdef ASSERT 1707 // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c 1708 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the 1709 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath. 1710 bool FreezeBase::check_valid_fast_path() { 1711 ContinuationEntry* ce = _thread->last_continuation(); 1712 RegisterMap map(_thread, 1713 RegisterMap::UpdateMap::skip, 1714 RegisterMap::ProcessFrames::skip, 1715 RegisterMap::WalkContinuation::skip); 1716 map.set_include_argument_oops(false); 1717 bool is_top_frame = true; 1718 for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) { 1719 if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) { 1720 return false; 1721 } 1722 } 1723 return true; 1724 } 1725 1726 static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr, const char** code_name_ptr, int* bci_ptr) { 1727 JavaThread* current = JavaThread::current(); 1728 ResourceMark rm(current); 1729 1730 Method* m; 1731 const char* code_name; 1732 int bci; 1733 if (preempt_kind == Continuation::monitorenter) { 1734 assert(top.is_interpreted_frame() || top.is_runtime_frame(), ""); 1735 bool at_sync_method; 1736 if (top.is_interpreted_frame()) { 1737 m = top.interpreter_frame_method(); 1738 assert(!m->is_native() || m->is_synchronized(), "invalid method %s", m->external_name()); 1739 address bcp = top.interpreter_frame_bcp(); 1740 assert(bcp != 0 || m->is_native(), ""); 1741 at_sync_method = m->is_synchronized() && (bcp == 0 || bcp == m->code_base()); 1742 // bcp is advanced on monitorenter before making the VM call, adjust for that. 1743 bool at_sync_bytecode = bcp > m->code_base() && Bytecode(m, bcp - 1).code() == Bytecodes::Code::_monitorenter; 1744 assert(at_sync_method || at_sync_bytecode, ""); 1745 bci = at_sync_method ?
-1 : top.interpreter_frame_bci(); 1746 } else { 1747 CodeBlob* cb = top.cb(); 1748 RegisterMap reg_map(current, 1749 RegisterMap::UpdateMap::skip, 1750 RegisterMap::ProcessFrames::skip, 1751 RegisterMap::WalkContinuation::skip); 1752 frame fr = top.sender(®_map); 1753 vframe* vf = vframe::new_vframe(&fr, ®_map, current); 1754 compiledVFrame* cvf = compiledVFrame::cast(vf); 1755 m = cvf->method(); 1756 bci = cvf->scope()->bci(); 1757 at_sync_method = bci == SynchronizationEntryBCI; 1758 assert(!at_sync_method || m->is_synchronized(), "bci is %d but method %s is not synchronized", bci, m->external_name()); 1759 bool is_c1_monitorenter = false, is_c2_monitorenter = false; 1760 COMPILER1_PRESENT(is_c1_monitorenter = cb == Runtime1::blob_for(StubId::c1_monitorenter_id) || 1761 cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id);) 1762 COMPILER2_PRESENT(is_c2_monitorenter = cb == CodeCache::find_blob(OptoRuntime::complete_monitor_locking_Java());) 1763 assert(is_c1_monitorenter || is_c2_monitorenter, "wrong runtime stub frame"); 1764 } 1765 code_name = at_sync_method ? "synchronized method" : "monitorenter"; 1766 } else if (preempt_kind == Continuation::object_wait) { 1767 assert(top.is_interpreted_frame() || top.is_native_frame(), ""); 1768 m = top.is_interpreted_frame() ? top.interpreter_frame_method() : top.cb()->as_nmethod()->method(); 1769 assert(m->is_object_wait0(), ""); 1770 bci = 0; 1771 code_name = ""; 1772 } else { 1773 assert(preempt_kind == Continuation::object_locker, "invalid preempt kind"); 1774 assert(top.is_interpreted_frame(), ""); 1775 m = top.interpreter_frame_method(); 1776 Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp()); 1777 Bytecodes::Code code = current_bytecode.code(); 1778 assert(code == Bytecodes::Code::_new || code == Bytecodes::Code::_invokestatic || 1779 (code == Bytecodes::Code::_getstatic || code == Bytecodes::Code::_putstatic), "invalid bytecode"); 1780 bci = top.interpreter_frame_bci(); 1781 code_name = Bytecodes::name(current_bytecode.code()); 1782 } 1783 assert(bci >= 0 || m->is_synchronized(), "invalid bci:%d at method %s", bci, m->external_name()); 1784 1785 if (m_ptr != nullptr) { 1786 *m_ptr = m; 1787 *code_name_ptr = code_name; 1788 *bci_ptr = bci; 1789 } 1790 } 1791 1792 static void log_preempt_after_freeze(ContinuationWrapper& cont) { 1793 JavaThread* current = cont.thread(); 1794 StackChunkFrameStream<ChunkFrames::Mixed> sfs(cont.tail()); 1795 frame top_frame = sfs.to_frame(); 1796 bool at_init = current->at_preemptable_init(); 1797 bool at_enter = current->current_pending_monitor() != nullptr; 1798 bool at_wait = current->current_waiting_monitor() != nullptr; 1799 assert((at_enter && !at_wait) || (!at_enter && at_wait), ""); 1800 Continuation::preempt_kind pk = at_init ? Continuation::object_locker : at_enter ? Continuation::monitorenter : Continuation::object_wait; 1801 1802 Method* m = nullptr; 1803 const char* code_name = nullptr; 1804 int bci = InvalidFrameStateBci; 1805 verify_frame_kind(top_frame, pk, &m, &code_name, &bci); 1806 assert(m != nullptr && code_name != nullptr && bci != InvalidFrameStateBci, "should be set"); 1807 1808 ResourceMark rm(current); 1809 if (bci < 0) { 1810 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " while synchronizing on %smethod %s", current->monitor_owner_id(), m->is_native() ? 
"native " : "", m->external_name()); 1811 } else if (m->is_object_wait0()) { 1812 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at native method %s", current->monitor_owner_id(), m->external_name()); 1813 } else { 1814 Klass* k = current->preempt_init_klass(); 1815 assert(k != nullptr || !at_init, ""); 1816 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at %s(bci:%d) in method %s %s%s", current->monitor_owner_id(), 1817 code_name, bci, m->external_name(), at_init ? "trying to initialize klass " : "", at_init ? k->external_name() : ""); 1818 } 1819 } 1820 #endif // ASSERT 1821 1822 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) { 1823 verify_continuation(cont.continuation()); 1824 assert(!cont.is_empty(), ""); 1825 1826 log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash()); 1827 return freeze_ok; 1828 } 1829 1830 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) { 1831 if (UNLIKELY(res != freeze_ok)) { 1832 JFR_ONLY(thread->set_last_freeze_fail_result(res);) 1833 verify_continuation(cont.continuation()); 1834 log_develop_trace(continuations)("=== end of freeze (fail %d)", res); 1835 return res; 1836 } 1837 1838 JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint 1839 return freeze_epilog(cont); 1840 } 1841 1842 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) { 1843 if (UNLIKELY(res != freeze_ok)) { 1844 verify_continuation(cont.continuation()); 1845 log_develop_trace(continuations)("=== end of freeze (fail %d)", res); 1846 return res; 1847 } 1848 1849 // Set up things so that on return to Java we jump to preempt stub. 1850 patch_return_pc_with_preempt_stub(old_last_frame); 1851 cont.tail()->set_preempted(true); 1852 DEBUG_ONLY(log_preempt_after_freeze(cont);) 1853 return freeze_epilog(cont); 1854 } 1855 1856 template<typename ConfigT, bool preempt> 1857 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) { 1858 assert(!current->has_pending_exception(), ""); 1859 1860 #ifdef ASSERT 1861 log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT "JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current)); 1862 log_frames(current, false); 1863 #endif 1864 1865 CONT_JFR_ONLY(EventContinuationFreeze event;) 1866 1867 ContinuationEntry* entry = current->last_continuation(); 1868 1869 oop oopCont = entry->cont_oop(current); 1870 assert(oopCont == current->last_continuation()->cont_oop(current), ""); 1871 assert(ContinuationEntry::assert_entry_frame_laid_out(current), ""); 1872 1873 verify_continuation(oopCont); 1874 ContinuationWrapper cont(current, oopCont); 1875 log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont)); 1876 1877 assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), ""); 1878 1879 assert((current->held_monitor_count() == 0 && current->jni_monitor_count() == 0), 1880 "Held monitor count should not be used for lightweight locking: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count()); 1881 1882 if (entry->is_pinned() || current->held_monitor_count() > 0) { 1883 log_develop_debug(continuations)("PINNED due to critical section/hold monitor"); 1884 verify_continuation(cont.continuation()); 1885 freeze_result res = entry->is_pinned() ? 
freeze_pinned_cs : freeze_pinned_monitor; 1886 if (!preempt) { 1887 JFR_ONLY(current->set_last_freeze_fail_result(res);) 1888 } 1889 log_develop_trace(continuations)("=== end of freeze (fail %d)", res); 1890 // Avoid Thread.yield() loops without safepoint polls. 1891 if (SafepointMechanism::should_process(current) && !preempt) { 1892 cont.done(); // allow safepoint 1893 ThreadInVMfromJava tivmfj(current); 1894 } 1895 return res; 1896 } 1897 1898 Freeze<ConfigT> freeze(current, cont, sp, preempt); 1899 1900 assert(!current->cont_fastpath() || freeze.check_valid_fast_path(), ""); 1901 bool fast = UseContinuationFastPath && current->cont_fastpath(); 1902 if (fast && freeze.size_if_fast_freeze_available() > 0) { 1903 freeze.freeze_fast_existing_chunk(); 1904 CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);) 1905 return !preempt ? freeze_epilog(cont) : preempt_epilog(cont, freeze_ok, freeze.last_frame()); 1906 } 1907 1908 if (preempt) { 1909 JvmtiSampledObjectAllocEventCollector jsoaec(false); 1910 freeze.set_jvmti_event_collector(&jsoaec); 1911 1912 freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow(); 1913 1914 CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);) 1915 preempt_epilog(cont, res, freeze.last_frame()); 1916 return res; 1917 } 1918 1919 log_develop_trace(continuations)("chunk unavailable; transitioning to VM"); 1920 assert(current == JavaThread::current(), "must be current thread"); 1921 JRT_BLOCK 1922 // delays a possible JvmtiSampledObjectAllocEventCollector in alloc_chunk 1923 JvmtiSampledObjectAllocEventCollector jsoaec(false); 1924 freeze.set_jvmti_event_collector(&jsoaec); 1925 1926 freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow(); 1927 1928 CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);) 1929 freeze_epilog(current, cont, res); 1930 cont.done(); // allow safepoint in the transition back to Java 1931 return res; 1932 JRT_BLOCK_END 1933 } 1934 1935 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) { 1936 ContinuationEntry* entry = thread->last_continuation(); 1937 if (entry == nullptr) { 1938 return freeze_ok; 1939 } 1940 if (entry->is_pinned()) { 1941 return freeze_pinned_cs; 1942 } else if (thread->held_monitor_count() > 0) { 1943 return freeze_pinned_monitor; 1944 } 1945 1946 RegisterMap map(thread, 1947 RegisterMap::UpdateMap::include, 1948 RegisterMap::ProcessFrames::skip, 1949 RegisterMap::WalkContinuation::skip); 1950 map.set_include_argument_oops(false); 1951 frame f = thread->last_frame(); 1952 1953 if (!safepoint) { 1954 f = f.sender(&map); // this is the yield frame 1955 } else { // safepoint yield 1956 #if (defined(X86) || defined(AARCH64) || defined(RISCV64)) && !defined(ZERO) 1957 f.set_fp(f.real_fp()); // Instead of this, maybe in ContinuationWrapper::set_last_frame always use the real_fp? 
1958 #else 1959 Unimplemented(); 1960 #endif 1961 if (!Interpreter::contains(f.pc())) { 1962 assert(ContinuationHelper::Frame::is_stub(f.cb()), "must be"); 1963 assert(f.oop_map() != nullptr, "must be"); 1964 f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case 1965 } 1966 } 1967 1968 while (true) { 1969 if ((f.is_interpreted_frame() && f.interpreter_frame_method()->is_native()) || f.is_native_frame()) { 1970 return freeze_pinned_native; 1971 } 1972 1973 f = f.sender(&map); 1974 if (!Continuation::is_frame_in_continuation(entry, f)) { 1975 oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop(thread)); 1976 if (scope == cont_scope) { 1977 break; 1978 } 1979 intx monitor_count = entry->parent_held_monitor_count(); 1980 entry = entry->parent(); 1981 if (entry == nullptr) { 1982 break; 1983 } 1984 if (entry->is_pinned()) { 1985 return freeze_pinned_cs; 1986 } else if (monitor_count > 0) { 1987 return freeze_pinned_monitor; 1988 } 1989 } 1990 } 1991 return freeze_ok; 1992 } 1993 1994 /////////////// THAW //// 1995 1996 static int thaw_size(stackChunkOop chunk) { 1997 int size = chunk->max_thawing_size(); 1998 size += frame::metadata_words; // For the top pc+fp in push_return_frame or top = stack_sp - frame::metadata_words in thaw_fast 1999 size += 2*frame::align_wiggle; // in case of alignments at the top and bottom 2000 return size; 2001 } 2002 2003 // make room on the stack for thaw 2004 // returns the size in bytes, or 0 on failure 2005 static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier) { 2006 log_develop_trace(continuations)("~~~~ prepare_thaw return_barrier: %d", return_barrier); 2007 2008 assert(thread == JavaThread::current(), ""); 2009 2010 ContinuationEntry* ce = thread->last_continuation(); 2011 assert(ce != nullptr, ""); 2012 oop continuation = ce->cont_oop(thread); 2013 assert(continuation == get_continuation(thread), ""); 2014 verify_continuation(continuation); 2015 2016 stackChunkOop chunk = jdk_internal_vm_Continuation::tail(continuation); 2017 assert(chunk != nullptr, ""); 2018 2019 // The tail can be empty because it might still be available for another freeze. 2020 // However, here we want to thaw, so we get rid of it (it will be GCed). 2021 if (UNLIKELY(chunk->is_empty())) { 2022 chunk = chunk->parent(); 2023 assert(chunk != nullptr, ""); 2024 assert(!chunk->is_empty(), ""); 2025 jdk_internal_vm_Continuation::set_tail(continuation, chunk); 2026 } 2027 2028 // Verification 2029 chunk->verify(); 2030 assert(chunk->max_thawing_size() > 0, "chunk invariant violated; expected to not be empty"); 2031 2032 // Only make space for the last chunk because we only thaw from the last chunk 2033 int size = thaw_size(chunk) << LogBytesPerWord; 2034 2035 const address bottom = (address)thread->last_continuation()->entry_sp(); 2036 // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages 2037 // for the Java frames in the check below. 
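// Illustrative arithmetic, assuming frame::metadata_words == 2 and
// frame::align_wiggle == 1 (typical 64-bit values): max_thawing_size() == 100
// words gives thaw_size() == 104 words, i.e. size == 104 << LogBytesPerWord
// == 832 bytes, and the check below then requires 832 + 300 bytes of headroom.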
2038 if (!stack_overflow_check(thread, size + 300, bottom)) { 2039 return 0; 2040 } 2041 2042 log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d", 2043 p2i(bottom), p2i(bottom - size), size); 2044 return size; 2045 } 2046 2047 class ThawBase : public StackObj { 2048 protected: 2049 JavaThread* _thread; 2050 ContinuationWrapper& _cont; 2051 CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;) 2052 2053 intptr_t* _fastpath; 2054 bool _barriers; 2055 bool _preempted_case; 2056 bool _process_args_at_top; 2057 intptr_t* _top_unextended_sp_before_thaw; 2058 int _align_size; 2059 DEBUG_ONLY(intptr_t* _top_stack_address); 2060 2061 // Only used for some preemption cases. 2062 ObjectMonitor* _monitor; 2063 2064 StackChunkFrameStream<ChunkFrames::Mixed> _stream; 2065 2066 NOT_PRODUCT(int _frames;) 2067 2068 protected: 2069 ThawBase(JavaThread* thread, ContinuationWrapper& cont) : 2070 _thread(thread), _cont(cont), 2071 _fastpath(nullptr) { 2072 DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;) 2073 assert (cont.tail() != nullptr, "no last chunk"); 2074 DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());) 2075 } 2076 2077 void clear_chunk(stackChunkOop chunk); 2078 template<bool check_stub> 2079 int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize); 2080 void copy_from_chunk(intptr_t* from, intptr_t* to, int size); 2081 2082 void thaw_lockstack(stackChunkOop chunk); 2083 2084 // fast path 2085 inline void prefetch_chunk_pd(void* start, int size_words); 2086 void patch_return(intptr_t* sp, bool is_last); 2087 2088 intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case); 2089 inline intptr_t* push_cleanup_continuation(); 2090 inline intptr_t* push_preempt_adapter(); 2091 intptr_t* redo_vmcall(JavaThread* current, frame& top); 2092 void throw_interrupted_exception(JavaThread* current, frame& top); 2093 2094 void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case); 2095 void finish_thaw(frame& f); 2096 2097 private: 2098 template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames); 2099 void finalize_thaw(frame& entry, int argsize); 2100 2101 inline bool seen_by_gc(); 2102 2103 inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame); 2104 inline void after_thaw_java_frame(const frame& f, bool bottom); 2105 inline void patch(frame& f, const frame& caller, bool bottom); 2106 void clear_bitmap_bits(address start, address end); 2107 2108 NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top); 2109 void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller); 2110 void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames); 2111 void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames); 2112 2113 void push_return_frame(frame& f); 2114 inline frame new_entry_frame(); 2115 template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom); 2116 inline void patch_pd(frame& f, const frame& sender); 2117 inline void patch_pd(frame& f, intptr_t* caller_sp); 2118 inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom); 2119 2120 void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; } 2121 2122 static inline void derelativize_interpreted_frame_metadata(const 
frame& hf, const frame& f); 2123 2124 public: 2125 CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; }) 2126 }; 2127 2128 template <typename ConfigT> 2129 class Thaw : public ThawBase { 2130 public: 2131 Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {} 2132 2133 inline bool can_thaw_fast(stackChunkOop chunk) { 2134 return !_barriers 2135 && _thread->cont_fastpath_thread_state() 2136 && !chunk->has_thaw_slowpath_condition() 2137 && !PreserveFramePointer; 2138 } 2139 2140 inline intptr_t* thaw(Continuation::thaw_kind kind); 2141 template<bool check_stub = false> 2142 NOINLINE intptr_t* thaw_fast(stackChunkOop chunk); 2143 NOINLINE intptr_t* thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind); 2144 inline void patch_caller_links(intptr_t* sp, intptr_t* bottom); 2145 }; 2146 2147 template <typename ConfigT> 2148 inline intptr_t* Thaw<ConfigT>::thaw(Continuation::thaw_kind kind) { 2149 verify_continuation(_cont.continuation()); 2150 assert(!jdk_internal_vm_Continuation::done(_cont.continuation()), ""); 2151 assert(!_cont.is_empty(), ""); 2152 2153 stackChunkOop chunk = _cont.tail(); 2154 assert(chunk != nullptr, "guaranteed by prepare_thaw"); 2155 assert(!chunk->is_empty(), "guaranteed by prepare_thaw"); 2156 2157 _barriers = chunk->requires_barriers(); 2158 return (LIKELY(can_thaw_fast(chunk))) ? thaw_fast(chunk) 2159 : thaw_slow(chunk, kind); 2160 } 2161 2162 class ReconstructedStack : public StackObj { 2163 intptr_t* _base; // _cont.entrySP(); // top of the entry frame 2164 int _thaw_size; 2165 int _argsize; 2166 public: 2167 ReconstructedStack(intptr_t* base, int thaw_size, int argsize) 2168 : _base(base), _thaw_size(thaw_size - (argsize == 0 ? frame::metadata_words_at_top : 0)), _argsize(argsize) { 2169 // The only possible source of misalignment is stack-passed arguments b/c compiled frames are 16-byte aligned. 2170 assert(argsize != 0 || (_base - _thaw_size) == ContinuationHelper::frame_align_pointer(_base - _thaw_size), ""); 2171 // We're at most one alignment word away from entrySP 2172 assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame"); 2173 } 2174 2175 int entry_frame_extension() const { return _argsize + (_argsize > 0 ? 
frame::metadata_words_at_top : 0); } 2176 2177 // top and bottom stack pointers 2178 intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); } 2179 intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); } 2180 2181 // Several operations operate on the totality of the stack being reconstructed, 2182 // including the metadata words 2183 intptr_t* top() const { return sp() - frame::metadata_words_at_bottom; } 2184 int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; } 2185 }; 2186 2187 inline void ThawBase::clear_chunk(stackChunkOop chunk) { 2188 chunk->set_sp(chunk->bottom()); 2189 chunk->set_max_thawing_size(0); 2190 } 2191 2192 template<bool check_stub> 2193 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) { 2194 bool empty = false; 2195 StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk); 2196 DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();) 2197 assert(chunk_sp == f.sp(), ""); 2198 assert(chunk_sp == f.unextended_sp(), ""); 2199 2200 int frame_size = f.cb()->frame_size(); 2201 argsize = f.stack_argsize(); 2202 2203 assert(!f.is_stub() || check_stub, ""); 2204 if (check_stub && f.is_stub()) { 2205 // If we don't also thaw the compiled frame under the stub, then after restoring the saved 2206 // registers back in Java we would hit the return barrier to thaw one more 2207 // frame, effectively overwriting the restored registers during that call. 2208 f.next(SmallRegisterMap::instance_no_args(), true /* stop */); 2209 assert(!f.is_done(), ""); 2210 2211 f.get_cb(); 2212 assert(f.is_compiled(), ""); 2213 frame_size += f.cb()->frame_size(); 2214 argsize = f.stack_argsize(); 2215 2216 if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) { 2217 // The caller of the runtime stub when the continuation is preempted is not at a 2218 // Java call instruction, and so cannot rely on nmethod patching for deopt.
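// Deoptimizing the frame here patches its pc instead, so that once thawed it
// resumes in the deopt handler rather than in the stale nmethod (rough sketch
// of the rationale; see frame::deoptimize).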
2219 log_develop_trace(continuations)("Deoptimizing runtime stub caller"); 2220 f.to_frame().deoptimize(nullptr); // passing a null thread simply avoids an assertion in deoptimize() that we're not set up for 2221 } 2222 } 2223 2224 f.next(SmallRegisterMap::instance_no_args(), true /* stop */); 2225 empty = f.is_done(); 2226 assert(!empty || argsize == chunk->argsize(), ""); 2227 2228 if (empty) { 2229 clear_chunk(chunk); 2230 } else { 2231 chunk->set_sp(chunk->sp() + frame_size); 2232 chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size); 2233 // We set chunk->pc to the return pc into the next frame 2234 chunk->set_pc(f.pc()); 2235 #ifdef ASSERT 2236 { 2237 intptr_t* retaddr_slot = (chunk_sp 2238 + frame_size 2239 - frame::sender_sp_ret_address_offset()); 2240 assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot), 2241 "unexpected pc"); 2242 } 2243 #endif 2244 } 2245 assert(empty == chunk->is_empty(), ""); 2246 // returns the size required to store the frame on the stack; because it is a 2247 // compiled frame, it must include a copy of the arguments passed by the caller 2248 return frame_size + argsize + frame::metadata_words_at_top; 2249 } 2250 2251 void ThawBase::thaw_lockstack(stackChunkOop chunk) { 2252 int lockStackSize = chunk->lockstack_size(); 2253 assert(lockStackSize > 0 && lockStackSize <= LockStack::CAPACITY, ""); 2254 2255 oop tmp_lockstack[LockStack::CAPACITY]; 2256 chunk->transfer_lockstack(tmp_lockstack, _barriers); 2257 _thread->lock_stack().move_from_address(tmp_lockstack, lockStackSize); 2258 2259 chunk->set_lockstack_size(0); 2260 chunk->set_has_lockstack(false); 2261 } 2262 2263 void ThawBase::copy_from_chunk(intptr_t* from, intptr_t* to, int size) { 2264 assert(to >= _top_stack_address, "overwrote past thawing space" 2265 " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(to), p2i(_top_stack_address)); 2266 assert(to + size <= _cont.entrySP(), "overwrote past thawing space"); 2267 _cont.tail()->copy_from_chunk_to_stack(from, to, size); 2268 CONT_JFR_ONLY(_jfr_info.record_size_copied(size);) 2269 } 2270 2271 void ThawBase::patch_return(intptr_t* sp, bool is_last) { 2272 log_develop_trace(continuations)("thaw_fast patching -- sp: " INTPTR_FORMAT, p2i(sp)); 2273 2274 address pc = !is_last ? StubRoutines::cont_returnBarrier() : _cont.entryPC(); 2275 ContinuationHelper::patch_return_address_at( 2276 sp - frame::sender_sp_ret_address_offset(), 2277 pc); 2278 } 2279 2280 template <typename ConfigT> 2281 template<bool check_stub> 2282 NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) { 2283 assert(chunk == _cont.tail(), ""); 2284 assert(!chunk->has_mixed_frames(), ""); 2285 assert(!chunk->requires_barriers(), ""); 2286 assert(!chunk->has_bitmap(), ""); 2287 assert(!_thread->is_interp_only_mode(), ""); 2288 2289 LogTarget(Trace, continuations) lt; 2290 if (lt.develop_is_enabled()) { 2291 LogStream ls(lt); 2292 ls.print_cr("thaw_fast"); 2293 chunk->print_on(true, &ls); 2294 } 2295 2296 // Below this threshold we thaw the whole chunk; above it we thaw just one frame.
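// E.g. (illustrative): a chunk with stack_size() == 1000 and sp() == 600 holds
// 400 words of frames and is copied wholesale; with sp() == 100 it holds 900
// words and only the top frame is thawed.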
2297 static const int threshold = 500; // words 2298 2299 const int full_chunk_size = chunk->stack_size() - chunk->sp(); // this initial size could be reduced if it's a partial thaw 2300 int argsize, thaw_size; 2301 2302 intptr_t* const chunk_sp = chunk->start_address() + chunk->sp(); 2303 2304 bool partial, empty; 2305 if (LIKELY(!TEST_THAW_ONE_CHUNK_FRAME && (full_chunk_size < threshold))) { 2306 prefetch_chunk_pd(chunk->start_address(), full_chunk_size); // prefetch anticipating memcpy starting at highest address 2307 2308 partial = false; 2309 argsize = chunk->argsize(); // must be called *before* clearing the chunk 2310 clear_chunk(chunk); 2311 thaw_size = full_chunk_size; 2312 empty = true; 2313 } else { // thaw a single frame 2314 partial = true; 2315 thaw_size = remove_top_compiled_frame_from_chunk<check_stub>(chunk, argsize); 2316 empty = chunk->is_empty(); 2317 } 2318 2319 // Are we thawing the last frame(s) in the continuation 2320 const bool is_last = empty && chunk->parent() == nullptr; 2321 assert(!is_last || argsize == 0, ""); 2322 2323 log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT, 2324 partial, is_last, empty, thaw_size, argsize, p2i(_cont.entrySP())); 2325 2326 ReconstructedStack rs(_cont.entrySP(), thaw_size, argsize); 2327 2328 // also copy metadata words at frame bottom 2329 copy_from_chunk(chunk_sp - frame::metadata_words_at_bottom, rs.top(), rs.total_size()); 2330 2331 // update the ContinuationEntry 2332 _cont.set_argsize(argsize); 2333 log_develop_trace(continuations)("setting entry argsize: %d", _cont.argsize()); 2334 assert(rs.bottom_sp() == _cont.entry()->bottom_sender_sp(), ""); 2335 2336 // install the return barrier if not last frame, or the entry's pc if last 2337 patch_return(rs.bottom_sp(), is_last); 2338 2339 // insert the back links from callee to caller frames 2340 patch_caller_links(rs.top(), rs.top() + rs.total_size()); 2341 2342 assert(is_last == _cont.is_empty(), ""); 2343 assert(_cont.chunk_invariant(), ""); 2344 2345 #if CONT_JFR 2346 EventContinuationThawFast e; 2347 if (e.should_commit()) { 2348 e.set_id(cast_from_oop<u8>(chunk)); 2349 e.set_size(thaw_size << LogBytesPerWord); 2350 e.set_full(!partial); 2351 e.commit(); 2352 } 2353 #endif 2354 2355 #ifdef ASSERT 2356 set_anchor(_thread, rs.sp()); 2357 log_frames(_thread); 2358 if (LoomDeoptAfterThaw) { 2359 do_deopt_after_thaw(_thread); 2360 } 2361 clear_anchor(_thread); 2362 #endif 2363 2364 return rs.sp(); 2365 } 2366 2367 inline bool ThawBase::seen_by_gc() { 2368 return _barriers || _cont.tail()->is_gc_mode(); 2369 } 2370 2371 static inline void relativize_chunk_concurrently(stackChunkOop chunk) { 2372 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC 2373 if (UseZGC || UseShenandoahGC) { 2374 chunk->relativize_derived_pointers_concurrently(); 2375 } 2376 #endif 2377 } 2378 2379 template <typename ConfigT> 2380 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) { 2381 Continuation::preempt_kind preempt_kind; 2382 bool retry_fast_path = false; 2383 2384 _process_args_at_top = false; 2385 _preempted_case = chunk->preempted(); 2386 if (_preempted_case) { 2387 ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread()); 2388 if (waiter != nullptr) { 2389 // Mounted again after preemption. Resume the pending monitor operation, 2390 // which will be either a monitorenter or Object.wait() call. 
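// Possible outcomes of resume_operation() below (rough sketch):
//   - monitor acquired:   continue thawing; _monitor is remembered for
//                         handle_preempted_continuation().
//   - acquisition failed: return to enterSpecial via push_cleanup_continuation()
//                         and unmount the vthread again.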
2391 ObjectMonitor* mon = waiter->monitor(); 2392 preempt_kind = waiter->is_wait() ? Continuation::object_wait : Continuation::monitorenter; 2393 2394 bool mon_acquired = mon->resume_operation(_thread, waiter, _cont); 2395 assert(!mon_acquired || mon->has_owner(_thread), "invariant"); 2396 if (!mon_acquired) { 2397 // Failed to acquire monitor. Return to enterSpecial to unmount again. 2398 log_trace(continuations, tracking)("Failed to acquire monitor, unmounting again"); 2399 return push_cleanup_continuation(); 2400 } 2401 _monitor = mon; // remember monitor since we might need it on handle_preempted_continuation() 2402 chunk = _cont.tail(); // reload oop in case of safepoint in resume_operation (if posting JVMTI events). 2403 JVMTI_ONLY(assert(_thread->contended_entered_monitor() == nullptr || _thread->contended_entered_monitor() == _monitor, "")); 2404 } else { 2405 // Preemption cancelled in the monitorenter or ObjectLocker case. We 2406 // actually acquired the monitor after freezing all frames so no 2407 // need to call resume_operation. If this is the ObjectLocker case 2408 // we released the monitor already at ~ObjectLocker, so here we set 2409 // _monitor to nullptr to indicate there is no need to release it later. 2410 preempt_kind = Continuation::monitorenter; 2411 _monitor = nullptr; 2412 } 2413 2414 // Call this first to avoid racing with GC threads later when modifying the chunk flags. 2415 relativize_chunk_concurrently(chunk); 2416 2417 if (chunk->at_klass_init()) { 2418 preempt_kind = Continuation::object_locker; 2419 chunk->set_at_klass_init(false); 2420 _process_args_at_top = chunk->has_args_at_top(); 2421 if (_process_args_at_top) { 2422 // Only needed for the top frame which will be thawed. 2423 chunk->set_has_args_at_top(false); 2424 } 2425 } 2426 chunk->set_preempted(false); 2427 retry_fast_path = true; 2428 } else { 2429 relativize_chunk_concurrently(chunk); 2430 } 2431 2432 // On the first thaw after freeze, restore oops to the lockstack if any. 2433 assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, ""); 2434 if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) { 2435 thaw_lockstack(chunk); 2436 retry_fast_path = true; 2437 } 2438 2439 // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK 2440 // and FLAG_PREEMPTED flags from the stackChunk. 2441 if (retry_fast_path && can_thaw_fast(chunk)) { 2442 intptr_t* sp = thaw_fast<true>(chunk); 2443 if (_preempted_case) { 2444 return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */); 2445 } 2446 return sp; 2447 } 2448 2449 LogTarget(Trace, continuations) lt; 2450 if (lt.develop_is_enabled()) { 2451 LogStream ls(lt); 2452 ls.print_cr("thaw slow return_barrier: %d " INTPTR_FORMAT, kind, p2i(chunk)); 2453 chunk->print_on(true, &ls); 2454 } 2455 2456 #if CONT_JFR 2457 EventContinuationThawSlow e; 2458 if (e.should_commit()) { 2459 e.set_id(cast_from_oop<u8>(_cont.continuation())); 2460 e.commit(); 2461 } 2462 #endif 2463 2464 DEBUG_ONLY(_frames = 0;) 2465 _align_size = 0; 2466 int num_frames = kind == Continuation::thaw_top ?
2 : 1; 2467 2468 _stream = StackChunkFrameStream<ChunkFrames::Mixed>(chunk); 2469 _top_unextended_sp_before_thaw = _stream.unextended_sp(); 2470 2471 frame heap_frame = _stream.to_frame(); 2472 if (lt.develop_is_enabled()) { 2473 LogStream ls(lt); 2474 ls.print_cr("top hframe before (thaw):"); 2475 assert(heap_frame.is_heap_frame(), "should have created a relative frame"); 2476 heap_frame.print_value_on(&ls); 2477 } 2478 2479 frame caller; // the thawed caller on the stack 2480 recurse_thaw(heap_frame, caller, num_frames, _preempted_case); 2481 finish_thaw(caller); // caller is now the topmost thawed frame 2482 _cont.write(); 2483 2484 assert(_cont.chunk_invariant(), ""); 2485 2486 JVMTI_ONLY(invalidate_jvmti_stack(_thread)); 2487 2488 _thread->set_cont_fastpath(_fastpath); 2489 2490 intptr_t* sp = caller.sp(); 2491 2492 if (_preempted_case) { 2493 return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */); 2494 } 2495 return sp; 2496 } 2497 2498 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) { 2499 log_develop_debug(continuations)("thaw num_frames: %d", num_frames); 2500 assert(!_cont.is_empty(), "no more frames"); 2501 assert(num_frames > 0, ""); 2502 assert(!heap_frame.is_empty(), ""); 2503 2504 if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) { 2505 heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2); 2506 } else if (!heap_frame.is_interpreted_frame()) { 2507 recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false); 2508 } else { 2509 recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case); 2510 } 2511 } 2512 2513 template<typename FKind> 2514 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) { 2515 assert(num_frames > 0, ""); 2516 2517 DEBUG_ONLY(_frames++;) 2518 2519 int argsize = _stream.stack_argsize(); 2520 2521 _stream.next(SmallRegisterMap::instance_no_args()); 2522 assert(_stream.to_frame().is_empty() == _stream.is_done(), ""); 2523 2524 // we never leave a compiled caller of an interpreted frame as the top frame in the chunk 2525 // as it makes detecting that situation and adjusting unextended_sp tricky 2526 if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) { 2527 log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top"); 2528 num_frames++; 2529 } 2530 2531 if (num_frames == 1 || _stream.is_done()) { // end recursion 2532 finalize_thaw(caller, FKind::interpreted ? 
0 : argsize); 2533 return true; // bottom 2534 } else { // recurse 2535 recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */); 2536 return false; 2537 } 2538 } 2539 2540 void ThawBase::finalize_thaw(frame& entry, int argsize) { 2541 stackChunkOop chunk = _cont.tail(); 2542 2543 if (!_stream.is_done()) { 2544 assert(_stream.sp() >= chunk->sp_address(), ""); 2545 chunk->set_sp(chunk->to_offset(_stream.sp())); 2546 chunk->set_pc(_stream.pc()); 2547 } else { 2548 chunk->set_sp(chunk->bottom()); 2549 chunk->set_pc(nullptr); 2550 } 2551 assert(_stream.is_done() == chunk->is_empty(), ""); 2552 2553 int total_thawed = pointer_delta_as_int(_stream.unextended_sp(), _top_unextended_sp_before_thaw); 2554 chunk->set_max_thawing_size(chunk->max_thawing_size() - total_thawed); 2555 2556 _cont.set_argsize(argsize); 2557 entry = new_entry_frame(); 2558 2559 assert(entry.sp() == _cont.entrySP(), ""); 2560 assert(Continuation::is_continuation_enterSpecial(entry), ""); 2561 assert(_cont.is_entry_frame(entry), ""); 2562 } 2563 2564 inline void ThawBase::before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) { 2565 LogTarget(Trace, continuations) lt; 2566 if (lt.develop_is_enabled()) { 2567 LogStream ls(lt); 2568 ls.print_cr("======== THAWING FRAME: %d", num_frame); 2569 assert(hf.is_heap_frame(), "should be"); 2570 hf.print_value_on(&ls); 2571 } 2572 assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf)); 2573 } 2574 2575 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) { 2576 #ifdef ASSERT 2577 LogTarget(Trace, continuations) lt; 2578 if (lt.develop_is_enabled()) { 2579 LogStream ls(lt); 2580 ls.print_cr("thawed frame:"); 2581 print_frame_layout(f, false, &ls); // f.print_on(&ls); 2582 } 2583 #endif 2584 } 2585 2586 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) { 2587 assert(!bottom || caller.fp() == _cont.entryFP(), ""); 2588 if (bottom) { 2589 ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc() 2590 : StubRoutines::cont_returnBarrier()); 2591 } else { 2592 // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap. 2593 // If the caller is not deoptimized, pc is unchanged. 2594 ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc()); 2595 } 2596 2597 patch_pd(f, caller); 2598 2599 if (f.is_interpreted_frame()) { 2600 ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller); 2601 } 2602 2603 assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), ""); 2604 assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), ""); 2605 } 2606 2607 void ThawBase::clear_bitmap_bits(address start, address end) { 2608 assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start)); 2609 assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end)); 2610 2611 // we need to clear the bits that correspond to arguments as they reside in the caller frame 2612 // or they will keep objects that are otherwise unreachable alive. 2613 2614 // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since 2615 // `end` could be at an odd number of stack slots from `start`, i.e might not be oop aligned. 
2616 // If that's the case, the bit range corresponding to the last stack slot should not have bits set 2617 // anyway, and we assert that before returning. 2618 address effective_end = UseCompressedOops ? end : align_down(end, wordSize); 2619 log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end)); 2620 stackChunkOop chunk = _cont.tail(); 2621 chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end)); 2622 assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set"); 2623 } 2624 2625 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) { 2626 frame top(sp); 2627 assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), ""); 2628 DEBUG_ONLY(verify_frame_kind(top, preempt_kind);) 2629 NOT_PRODUCT(int64_t tid = _thread->monitor_owner_id();) 2630 2631 #if INCLUDE_JVMTI 2632 // Finish the VTMS transition. 2633 assert(_thread->is_in_VTMS_transition(), "must be"); 2634 bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope(); 2635 if (is_vthread) { 2636 if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) { 2637 jvmti_mount_end(_thread, _cont, top, preempt_kind); 2638 } else { 2639 _thread->set_is_in_VTMS_transition(false); 2640 java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false); 2641 } 2642 } 2643 #endif 2644 2645 if (fast_case) { 2646 // If we thawed in the slow path the runtime stub/native wrapper frame already 2647 // has the correct fp (see ThawBase::new_stack_frame). On the fast path though, 2648 // we copied the fp patched during freeze, which will now have to be fixed. 2649 assert(top.is_runtime_frame() || top.is_native_frame(), ""); 2650 int fsize = top.cb()->frame_size(); 2651 patch_pd(top, sp + fsize); 2652 } 2653 2654 if (preempt_kind == Continuation::object_wait) { 2655 // Check now if we need to throw an InterruptedException. 2656 bool throw_ie = _thread->pending_interrupted_exception(); 2657 if (throw_ie) { 2658 throw_interrupted_exception(_thread, top); 2659 _thread->set_pending_interrupted_exception(false); 2660 } 2661 log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on Object.wait%s", tid, throw_ie ? " (throwing IE)" : ""); 2662 } else if (preempt_kind == Continuation::monitorenter) { 2663 if (top.is_runtime_frame()) { 2664 // The continuation might now run on a different platform thread than the previous time so 2665 // we need to adjust the current thread saved in the stub frame before restoring registers. 2666 JavaThread** thread_addr = frame::saved_thread_address(top); 2667 if (thread_addr != nullptr) *thread_addr = _thread; 2668 } 2669 log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on monitorenter", tid); 2670 } else { 2671 // We need to redo the original call into the VM. First though, we need 2672 // to exit the monitor we just acquired (except in the preemption-cancelled 2673 // case, where it was already released).
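// Illustrative example: a vthread preempted while blocked on a class's
// init_lock during a <clinit>-triggered bytecode (e.g. _new) first exits the
// monitor it acquired on remount, then re-executes the original interpreter
// runtime call via redo_vmcall() below.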
2674 assert(preempt_kind == Continuation::object_locker, ""); 2675 if (_monitor != nullptr) _monitor->exit(_thread); 2676 sp = redo_vmcall(_thread, top); 2677 } 2678 return sp; 2679 } 2680 2681 intptr_t* ThawBase::redo_vmcall(JavaThread* current, frame& top) { 2682 assert(!current->preempting(), ""); 2683 NOT_PRODUCT(int64_t tid = current->monitor_owner_id();) 2684 intptr_t* sp = top.sp(); 2685 2686 { 2687 HandleMarkCleaner hmc(current); // Cleanup so._conth Handle 2688 ContinuationWrapper::SafepointOp so(current, _cont); 2689 AnchorMark am(current, top); // Set the anchor so that the stack is walkable. 2690 2691 Method* m = top.interpreter_frame_method(); 2692 Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp()); 2693 Bytecodes::Code code = current_bytecode.code(); 2694 log_develop_trace(continuations, preempt)("Redoing InterpreterRuntime::%s for " INT64_FORMAT, code == Bytecodes::Code::_new ? "_new" : "resolve_from_cache", tid); 2695 2696 // These InterpreterRuntime entry points use JRT_ENTRY which uses a HandleMarkCleaner. 2697 // Create a HandleMark to avoid destroying so._conth. 2698 HandleMark hm(current); 2699 DEBUG_ONLY(JavaThread::AtRedoVMCall apvmc(current);) 2700 if (code == Bytecodes::Code::_new) { 2701 InterpreterRuntime::_new(current, m->constants(), current_bytecode.get_index_u2(code)); 2702 } else { 2703 InterpreterRuntime::resolve_from_cache(current, code); 2704 } 2705 } 2706 2707 if (current->preempting()) { 2708 // Preempted again, so we just arrange to return to the preempt stub to unmount. 2709 sp = push_preempt_adapter(); 2710 current->set_preempt_alternate_return(nullptr); 2711 bool cancelled = current->preemption_cancelled(); 2712 if (cancelled) { 2713 // Instead of calling thaw again from the preempt stub just unmount anyway with 2714 // state of YIELDING. This will give a chance for other vthreads to run while 2715 // minimizing repeated loops of "thaw->redo_vmcall->try_preempt->preemption_cancelled->thaw..." 2716 // in case of multiple vthreads contending for the same init_lock(). 2717 current->set_preemption_cancelled(false); 2718 oop vthread = current->vthread(); 2719 assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread"); 2720 java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::YIELDING); 2721 #if INCLUDE_JVMTI 2722 if (current->contended_entered_monitor() != nullptr) { 2723 current->set_contended_entered_monitor(nullptr); 2724 } 2725 #endif 2726 } 2727 log_develop_trace(continuations, preempt)("Preempted " INT64_FORMAT " again%s", tid, cancelled ? " (preemption cancelled, setting state to YIELDING)" : ""); 2728 } else { 2729 log_develop_trace(continuations, preempt)("Call successful, resuming " INT64_FORMAT, tid); 2730 } 2731 return sp; 2732 } 2733 2734 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) { 2735 HandleMarkCleaner hm(current); // Cleanup so._conth Handle 2736 ContinuationWrapper::SafepointOp so(current, _cont); 2737 // Since we might safepoint, set the anchor so that the stack can be walked.
2738   set_anchor(current, top.sp());
2739   JRT_BLOCK
2740     THROW(vmSymbols::java_lang_InterruptedException());
2741   JRT_BLOCK_END
2742   clear_anchor(current);
2743 }
2744 
2745 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top) {
2746   assert(hf.is_interpreted_frame(), "");
2747 
2748   if (UNLIKELY(seen_by_gc())) {
2749     if (is_top && _process_args_at_top) {
2750       log_trace(continuations, tracking)("Processing arguments in recurse_thaw_interpreted_frame");
2751       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_with_args());
2752     } else {
2753       _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2754     }
2755   }
2756 
2757   const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2758 
2759   DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2760 
2761   _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2762 
2763   frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2764 
2765   intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2766   intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2767   intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2768   intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2769 
2770   assert(hf.is_heap_frame(), "should be");
2771   assert(!f.is_heap_frame(), "should not be");
2772 
2773   const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2774   assert((stack_frame_bottom == stack_frame_top + fsize), "");
2775 
2776   // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
2777   // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
2778   copy_from_chunk(heap_frame_top, stack_frame_top, fsize);
2779 
2780   // Make sure the relativized locals pointer is already set.
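  // The frame copy preserved the relativized locals value, so local 0 must land
  // exactly one word below the frame bottom, which the assert below checks.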
2781 assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom"); 2782 2783 derelativize_interpreted_frame_metadata(hf, f); 2784 patch(f, caller, is_bottom_frame); 2785 2786 assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame"); 2787 assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), ""); 2788 2789 CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();) 2790 2791 maybe_set_fastpath(f.sp()); 2792 2793 Method* m = hf.interpreter_frame_method(); 2794 assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame"); 2795 const int locals = m->max_locals(); 2796 2797 if (!is_bottom_frame) { 2798 // can only fix caller once this frame is thawed (due to callee saved regs) 2799 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args()); 2800 } else if (_cont.tail()->has_bitmap() && locals > 0) { 2801 assert(hf.is_heap_frame(), "should be"); 2802 address start = (address)(heap_frame_bottom - locals); 2803 address end = (address)heap_frame_bottom; 2804 clear_bitmap_bits(start, end); 2805 } 2806 2807 DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);) 2808 caller = f; 2809 } 2810 2811 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) { 2812 assert(hf.is_compiled_frame(), ""); 2813 assert(_preempted_case || !stub_caller, "stub caller not at preemption"); 2814 2815 if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap 2816 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args()); 2817 } 2818 2819 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames); 2820 2821 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);) 2822 2823 assert(caller.sp() == caller.unextended_sp(), ""); 2824 2825 if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) { 2826 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame 2827 } 2828 2829 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not 2830 // yet laid out in the stack, and so the original_pc is not stored in it. 2831 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized. 2832 frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame); 2833 intptr_t* const stack_frame_top = f.sp(); 2834 intptr_t* const heap_frame_top = hf.unextended_sp(); 2835 2836 const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0; 2837 int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize; 2838 assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), ""); 2839 2840 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom; 2841 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom; 2842 // copy metadata, except the metadata at the top of the (unextended) entry frame 2843 int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 
0 : frame::metadata_words_at_top);
2844 
2845   // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2846   // (we might have one padding word for alignment)
2847   assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2848   assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz == _cont.entrySP()), "");
2849 
2850   copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2851 
2852   patch(f, caller, is_bottom_frame);
2853 
2854   // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2855   assert(!f.is_deoptimized_frame(), "");
2856   if (hf.is_deoptimized_frame()) {
2857     maybe_set_fastpath(f.sp());
2858   } else if (_thread->is_interp_only_mode()
2859              || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2860     // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2861     // cannot rely on nmethod patching for deopt.
2862     assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2863 
2864     log_develop_trace(continuations)("Deoptimizing thawed frame");
2865     DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2866 
2867     f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize, which we're not set up for
2868     assert(f.is_deoptimized_frame(), "");
2869     assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2870     maybe_set_fastpath(f.sp());
2871   }
2872 
2873   if (!is_bottom_frame) {
2874     // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2875     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2876   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2877     address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2878     int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2879     int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2880     clear_bitmap_bits(start, start + argsize_in_bytes);
2881   }
2882 
2883   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2884   caller = f;
2885 }
2886 
2887 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2888   DEBUG_ONLY(_frames++;)
2889 
2890   if (UNLIKELY(seen_by_gc())) {
2891     // Process the stub's caller here since we might need the full map.
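    // The stub saves registers for its caller, some of which may hold oops. Walking
    // the stream with a full RegisterMap (UpdateMap::include) records those saved
    // locations so the barriers below can also visit the caller's register-held oops.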
2892 RegisterMap map(nullptr, 2893 RegisterMap::UpdateMap::include, 2894 RegisterMap::ProcessFrames::skip, 2895 RegisterMap::WalkContinuation::skip); 2896 map.set_include_argument_oops(false); 2897 _stream.next(&map); 2898 assert(!_stream.is_done(), ""); 2899 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map); 2900 } else { 2901 _stream.next(SmallRegisterMap::instance_no_args()); 2902 assert(!_stream.is_done(), ""); 2903 } 2904 2905 recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true); 2906 2907 assert(caller.is_compiled_frame(), ""); 2908 assert(caller.sp() == caller.unextended_sp(), ""); 2909 2910 DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);) 2911 2912 frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false); 2913 intptr_t* stack_frame_top = f.sp(); 2914 intptr_t* heap_frame_top = hf.sp(); 2915 int fsize = ContinuationHelper::StubFrame::size(hf); 2916 2917 copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words, 2918 fsize + frame::metadata_words); 2919 2920 patch(f, caller, false /*is_bottom_frame*/); 2921 2922 // can only fix caller once this frame is thawed (due to callee saved regs) 2923 RegisterMap map(nullptr, 2924 RegisterMap::UpdateMap::include, 2925 RegisterMap::ProcessFrames::skip, 2926 RegisterMap::WalkContinuation::skip); 2927 map.set_include_argument_oops(false); 2928 f.oop_map()->update_register_map(&f, &map); 2929 ContinuationHelper::update_register_map_with_callee(caller, &map); 2930 _cont.tail()->fix_thawed_frame(caller, &map); 2931 2932 DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);) 2933 caller = f; 2934 } 2935 2936 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) { 2937 assert(hf.is_native_frame(), ""); 2938 assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), ""); 2939 2940 if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap 2941 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args()); 2942 } 2943 2944 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames); 2945 assert(!is_bottom_frame, ""); 2946 2947 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);) 2948 2949 assert(caller.sp() == caller.unextended_sp(), ""); 2950 2951 if (caller.is_interpreted_frame()) { 2952 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame 2953 } 2954 2955 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not 2956 // yet laid out in the stack, and so the original_pc is not stored in it. 2957 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized. 
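  // Deoptimization never applies to the native wrapper (asserted after the copy below),
  // so unlike recurse_thaw_compiled_frame there is no deopt handling here.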
2958 frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */); 2959 intptr_t* const stack_frame_top = f.sp(); 2960 intptr_t* const heap_frame_top = hf.unextended_sp(); 2961 2962 int fsize = ContinuationHelper::NativeFrame::size(hf); 2963 assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), ""); 2964 2965 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom; 2966 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom; 2967 int sz = fsize + frame::metadata_words_at_bottom; 2968 2969 copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above 2970 2971 patch(f, caller, false /* bottom */); 2972 2973 // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above) 2974 assert(!f.is_deoptimized_frame(), ""); 2975 assert(!hf.is_deoptimized_frame(), ""); 2976 assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), ""); 2977 2978 // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack 2979 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args()); 2980 2981 DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);) 2982 caller = f; 2983 } 2984 2985 void ThawBase::finish_thaw(frame& f) { 2986 stackChunkOop chunk = _cont.tail(); 2987 2988 if (chunk->is_empty()) { 2989 // Only remove chunk from list if it can't be reused for another freeze 2990 if (seen_by_gc()) { 2991 _cont.set_tail(chunk->parent()); 2992 } else { 2993 chunk->set_has_mixed_frames(false); 2994 } 2995 chunk->set_max_thawing_size(0); 2996 } else { 2997 chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size); 2998 } 2999 assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), ""); 3000 3001 if (!is_aligned(f.sp(), frame::frame_alignment)) { 3002 assert(f.is_interpreted_frame(), ""); 3003 f.set_sp(align_down(f.sp(), frame::frame_alignment)); 3004 } 3005 push_return_frame(f); 3006 // can only fix caller after push_return_frame (due to callee saved regs) 3007 if (_process_args_at_top) { 3008 log_trace(continuations, tracking)("Processing arguments in finish_thaw"); 3009 chunk->fix_thawed_frame(f, SmallRegisterMap::instance_with_args()); 3010 } else { 3011 chunk->fix_thawed_frame(f, SmallRegisterMap::instance_no_args()); 3012 } 3013 3014 assert(_cont.is_empty() == _cont.last_frame().is_empty(), ""); 3015 3016 log_develop_trace(continuations)("thawed %d frames", _frames); 3017 3018 LogTarget(Trace, continuations) lt; 3019 if (lt.develop_is_enabled()) { 3020 LogStream ls(lt); 3021 ls.print_cr("top hframe after (thaw):"); 3022 _cont.last_frame().print_value_on(&ls); 3023 } 3024 } 3025 3026 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw 3027 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), ""); 3028 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), ""); 3029 3030 LogTarget(Trace, continuations) lt; 3031 if (lt.develop_is_enabled()) { 3032 LogStream ls(lt); 3033 ls.print_cr("push_return_frame"); 3034 f.print_value_on(&ls); 3035 } 3036 3037 assert(f.sp() - frame::metadata_words_at_bottom >= _top_stack_address, "overwrote past thawing space" 3038 " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(f.sp() - frame::metadata_words), p2i(_top_stack_address)); 3039 ContinuationHelper::Frame::patch_pc(f, f.raw_pc()); // in case we want to deopt the frame in a full transition, this is checked. 
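  // push_pd() fills in the platform-dependent part of the frame (e.g. the saved fp),
  // after which the frame must be fully laid out, as asserted below.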
3040 ContinuationHelper::push_pd(f); 3041 3042 assert(ContinuationHelper::Frame::assert_frame_laid_out(f), ""); 3043 } 3044 3045 // returns new top sp 3046 // called after preparations (stack overflow check and making room) 3047 template<typename ConfigT> 3048 static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind) { 3049 assert(thread == JavaThread::current(), "Must be current thread"); 3050 3051 CONT_JFR_ONLY(EventContinuationThaw event;) 3052 3053 log_develop_trace(continuations)("~~~~ thaw kind: %d sp: " INTPTR_FORMAT, kind, p2i(thread->last_continuation()->entry_sp())); 3054 3055 ContinuationEntry* entry = thread->last_continuation(); 3056 assert(entry != nullptr, ""); 3057 oop oopCont = entry->cont_oop(thread); 3058 3059 assert(!jdk_internal_vm_Continuation::done(oopCont), ""); 3060 assert(oopCont == get_continuation(thread), ""); 3061 verify_continuation(oopCont); 3062 3063 assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), ""); 3064 3065 ContinuationWrapper cont(thread, oopCont); 3066 log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont)); 3067 3068 #ifdef ASSERT 3069 set_anchor_to_entry(thread, cont.entry()); 3070 log_frames(thread); 3071 clear_anchor(thread); 3072 #endif 3073 3074 Thaw<ConfigT> thw(thread, cont); 3075 intptr_t* const sp = thw.thaw(kind); 3076 assert(is_aligned(sp, frame::frame_alignment), ""); 3077 DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp);) 3078 3079 CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);) 3080 3081 verify_continuation(cont.continuation()); 3082 log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash()); 3083 3084 return sp; 3085 } 3086 3087 #ifdef ASSERT 3088 static void do_deopt_after_thaw(JavaThread* thread) { 3089 int i = 0; 3090 StackFrameStream fst(thread, true, false); 3091 fst.register_map()->set_include_argument_oops(false); 3092 ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map()); 3093 for (; !fst.is_done(); fst.next()) { 3094 if (fst.current()->cb()->is_nmethod()) { 3095 nmethod* nm = fst.current()->cb()->as_nmethod(); 3096 if (!nm->method()->is_continuation_native_intrinsic()) { 3097 nm->make_deoptimized(); 3098 } 3099 } 3100 } 3101 } 3102 3103 class ThawVerifyOopsClosure: public OopClosure { 3104 intptr_t* _p; 3105 outputStream* _st; 3106 bool is_good_oop(oop o) { 3107 return dbg_is_safe(o, -1) && dbg_is_safe(o->klass(), -1) && oopDesc::is_oop(o) && o->klass()->is_klass(); 3108 } 3109 public: 3110 ThawVerifyOopsClosure(outputStream* st) : _p(nullptr), _st(st) {} 3111 intptr_t* p() { return _p; } 3112 void reset() { _p = nullptr; } 3113 3114 virtual void do_oop(oop* p) { 3115 oop o = *p; 3116 if (o == nullptr || is_good_oop(o)) { 3117 return; 3118 } 3119 _p = (intptr_t*)p; 3120 _st->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(*p), p2i(p)); 3121 } 3122 virtual void do_oop(narrowOop* p) { 3123 oop o = RawAccess<>::oop_load(p); 3124 if (o == nullptr || is_good_oop(o)) { 3125 return; 3126 } 3127 _p = (intptr_t*)p; 3128 _st->print_cr("*** (narrow) non-oop %x found at " PTR_FORMAT, (int)(*p), p2i(p)); 3129 } 3130 }; 3131 3132 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st) { 3133 assert(thread->has_last_Java_frame(), ""); 3134 3135 ResourceMark rm; 3136 ThawVerifyOopsClosure cl(st); 3137 NMethodToOopClosure cf(&cl, false); 3138 3139 
StackFrameStream fst(thread, true, false); 3140 fst.register_map()->set_include_argument_oops(false); 3141 ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map()); 3142 for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) { 3143 if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) { 3144 st->print_cr(">>> do_verify_after_thaw deopt"); 3145 fst.current()->deoptimize(nullptr); 3146 fst.current()->print_on(st); 3147 } 3148 3149 fst.current()->oops_do(&cl, &cf, fst.register_map()); 3150 if (cl.p() != nullptr) { 3151 frame fr = *fst.current(); 3152 st->print_cr("Failed for frame barriers: %d",chunk->requires_barriers()); 3153 fr.print_on(st); 3154 if (!fr.is_interpreted_frame()) { 3155 st->print_cr("size: %d argsize: %d", 3156 ContinuationHelper::NonInterpretedUnknownFrame::size(fr), 3157 ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr)); 3158 } 3159 VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp()); 3160 if (reg != nullptr) { 3161 st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99); 3162 } 3163 cl.reset(); 3164 DEBUG_ONLY(thread->print_frame_layout();) 3165 if (chunk != nullptr) { 3166 chunk->print_on(true, st); 3167 } 3168 return false; 3169 } 3170 } 3171 return true; 3172 } 3173 3174 static void log_frames(JavaThread* thread, bool dolog) { 3175 const static int show_entry_callers = 3; 3176 LogTarget(Trace, continuations, tracking) lt; 3177 if (!lt.develop_is_enabled() || !dolog) { 3178 return; 3179 } 3180 LogStream ls(lt); 3181 3182 ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread)); 3183 if (!thread->has_last_Java_frame()) { 3184 ls.print_cr("NO ANCHOR!"); 3185 } 3186 3187 RegisterMap map(thread, 3188 RegisterMap::UpdateMap::include, 3189 RegisterMap::ProcessFrames::include, 3190 RegisterMap::WalkContinuation::skip); 3191 map.set_include_argument_oops(false); 3192 3193 if (false) { 3194 for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) { 3195 f.print_on(&ls); 3196 } 3197 } else { 3198 map.set_skip_missing(true); 3199 ResetNoHandleMark rnhm; 3200 ResourceMark rm; 3201 HandleMark hm(Thread::current()); 3202 FrameValues values; 3203 3204 int i = 0; 3205 int post_entry = -1; 3206 for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) { 3207 f.describe(values, i, &map, i == 0); 3208 if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f)) 3209 post_entry++; 3210 if (post_entry >= show_entry_callers) 3211 break; 3212 } 3213 values.print_on(thread, &ls); 3214 } 3215 3216 ls.print_cr("======= end frames ========="); 3217 } 3218 3219 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp) { 3220 intptr_t* sp0 = sp; 3221 address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset()); 3222 3223 bool preempted = false; 3224 stackChunkOop tail = cont.tail(); 3225 if (tail != nullptr && tail->preempted()) { 3226 // Still preempted (monitor not acquired) so no frames were thawed. 
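    // Anchor at the continuation entry instead; the entry-frame layout assert below
    // must still hold in this case, hence the preempted flag passed to it.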
3227 set_anchor(thread, cont.entrySP(), cont.entryPC()); 3228 preempted = true; 3229 } else { 3230 set_anchor(thread, sp0); 3231 } 3232 3233 log_frames(thread); 3234 if (LoomVerifyAfterThaw) { 3235 assert(do_verify_after_thaw(thread, cont.tail(), tty), ""); 3236 } 3237 assert(ContinuationEntry::assert_entry_frame_laid_out(thread, preempted), ""); 3238 clear_anchor(thread); 3239 3240 LogTarget(Trace, continuations) lt; 3241 if (lt.develop_is_enabled()) { 3242 LogStream ls(lt); 3243 ls.print_cr("Jumping to frame (thaw):"); 3244 frame(sp).print_value_on(&ls); 3245 } 3246 } 3247 #endif // ASSERT 3248 3249 #include CPU_HEADER_INLINE(continuationFreezeThaw) 3250 3251 #ifdef ASSERT 3252 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) { 3253 ResourceMark rm; 3254 FrameValues values; 3255 assert(f.get_cb() != nullptr, ""); 3256 RegisterMap map(f.is_heap_frame() ? 3257 nullptr : 3258 JavaThread::current(), 3259 RegisterMap::UpdateMap::include, 3260 RegisterMap::ProcessFrames::skip, 3261 RegisterMap::WalkContinuation::skip); 3262 map.set_include_argument_oops(false); 3263 map.set_skip_missing(true); 3264 if (callee_complete) { 3265 frame::update_map_with_saved_link(&map, ContinuationHelper::Frame::callee_link_address(f)); 3266 } 3267 const_cast<frame&>(f).describe(values, 0, &map, true); 3268 values.print_on(static_cast<JavaThread*>(nullptr), st); 3269 } 3270 #endif 3271 3272 static address thaw_entry = nullptr; 3273 static address freeze_entry = nullptr; 3274 static address freeze_preempt_entry = nullptr; 3275 3276 address Continuation::thaw_entry() { 3277 return ::thaw_entry; 3278 } 3279 3280 address Continuation::freeze_entry() { 3281 return ::freeze_entry; 3282 } 3283 3284 address Continuation::freeze_preempt_entry() { 3285 return ::freeze_preempt_entry; 3286 } 3287 3288 class ConfigResolve { 3289 public: 3290 static void resolve() { resolve_compressed(); } 3291 3292 static void resolve_compressed() { 3293 UseCompressedOops ? resolve_gc<true>() 3294 : resolve_gc<false>(); 3295 } 3296 3297 private: 3298 template <bool use_compressed> 3299 static void resolve_gc() { 3300 BarrierSet* bs = BarrierSet::barrier_set(); 3301 assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set"); 3302 switch (bs->kind()) { 3303 #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \ 3304 case BarrierSet::bs_name: { \ 3305 resolve<use_compressed, typename BarrierSet::GetType<BarrierSet::bs_name>::type>(); \ 3306 } \ 3307 break; 3308 FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE) 3309 #undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE 3310 3311 default: 3312 fatal("BarrierSet resolving not implemented"); 3313 }; 3314 } 3315 3316 template <bool use_compressed, typename BarrierSetT> 3317 static void resolve() { 3318 typedef Config<use_compressed ? oop_kind::NARROW : oop_kind::WIDE, BarrierSetT> SelectedConfigT; 3319 3320 freeze_entry = (address)freeze<SelectedConfigT>; 3321 freeze_preempt_entry = (address)SelectedConfigT::freeze_preempt; 3322 3323 // If we wanted, we could templatize by kind and have three different thaw entries 3324 thaw_entry = (address)thaw<SelectedConfigT>; 3325 } 3326 }; 3327 3328 void Continuation::init() { 3329 ConfigResolve::resolve(); 3330 }