1 /*
2 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/javaClasses.inline.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/codeCache.inline.hpp"
28 #include "code/nmethod.inline.hpp"
29 #include "code/vmreg.inline.hpp"
30 #include "compiler/oopMap.inline.hpp"
31 #include "cppstdlib/type_traits.hpp"
32 #include "gc/shared/barrierSet.hpp"
33 #include "gc/shared/continuationGCSupport.inline.hpp"
34 #include "gc/shared/gc_globals.hpp"
35 #include "gc/shared/memAllocator.hpp"
36 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
37 #include "interpreter/bytecodeStream.hpp"
38 #include "interpreter/interpreter.hpp"
39 #include "interpreter/interpreterRuntime.hpp"
40 #include "jfr/jfrEvents.hpp"
41 #include "logging/log.hpp"
42 #include "logging/logStream.hpp"
43 #include "oops/access.inline.hpp"
44 #include "oops/constantPool.inline.hpp"
45 #include "oops/method.inline.hpp"
46 #include "oops/objArrayOop.inline.hpp"
47 #include "oops/oopsHierarchy.hpp"
48 #include "oops/stackChunkOop.inline.hpp"
49 #include "prims/jvmtiThreadState.hpp"
50 #include "runtime/arguments.hpp"
51 #include "runtime/continuation.hpp"
52 #include "runtime/continuationEntry.inline.hpp"
53 #include "runtime/continuationHelper.inline.hpp"
54 #include "runtime/continuationJavaClasses.inline.hpp"
55 #include "runtime/continuationWrapper.inline.hpp"
56 #include "runtime/frame.inline.hpp"
57 #include "runtime/interfaceSupport.inline.hpp"
58 #include "runtime/javaThread.inline.hpp"
59 #include "runtime/jniHandles.inline.hpp"
60 #include "runtime/keepStackGCProcessed.hpp"
61 #include "runtime/mountUnmountDisabler.hpp"
62 #include "runtime/objectMonitor.inline.hpp"
63 #include "runtime/orderAccess.hpp"
64 #include "runtime/prefetch.inline.hpp"
65 #include "runtime/sharedRuntime.hpp"
66 #include "runtime/smallRegisterMap.inline.hpp"
67 #include "runtime/stackChunkFrameStream.inline.hpp"
68 #include "runtime/stackFrameStream.inline.hpp"
69 #include "runtime/stackOverflow.hpp"
70 #include "runtime/stackWatermarkSet.inline.hpp"
71 #include "runtime/vframe.inline.hpp"
72 #include "runtime/vframe_hp.hpp"
73 #include "utilities/debug.hpp"
74 #include "utilities/exceptions.hpp"
75 #include "utilities/macros.hpp"
76 #include "utilities/vmError.hpp"
77 #if INCLUDE_ZGC
78 #include "gc/z/zStackChunkGCData.inline.hpp"
79 #endif
80 #if INCLUDE_JFR
81 #include "jfr/jfr.inline.hpp"
82 #endif
83 #ifdef COMPILER1
84 #include "c1/c1_Runtime1.hpp"
85 #endif
86 #ifdef COMPILER2
87 #include "opto/runtime.hpp"
88 #endif
89
90 /*
91 * This file contains the implementation of continuation freezing (yield) and thawing (run).
92 *
93 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
94 * would likely call these operations many thousands of times per second second, on every core.
95 *
96 * Freeze might be called every time the application performs any I/O operation, every time it
97 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
98 * multiple times in each of those cases, as it is called by the return barrier, which may be
99 * invoked on method return.
100 *
101 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
102 * example, every effort is made to avoid Java-VM transitions as much as possible.
103 *
104 * On the fast path, all frames are known to be compiled, and the chunk requires no barriers
105 * and so frames simply copied, and the bottom-most one is patched.
106 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
107 * and absolute pointers, and barriers invoked.
108 */
109
110 /************************************************
111
112 Thread-stack layout on freeze/thaw.
113 See corresponding stack-chunk layout in instanceStackChunkKlass.hpp
114
115 +----------------------------+
116 | . |
117 | . |
118 | . |
119 | carrier frames |
120 | |
121 |----------------------------|
122 | |
123 | Continuation.run |
124 | |
125 |============================|
126 | enterSpecial frame |
127 | pc |
128 | rbp |
129 | ----- |
130 ^ | int argsize | = ContinuationEntry
131 | | oopDesc* cont |
132 | | oopDesc* chunk |
133 | | ContinuationEntry* parent |
134 | | ... |
135 | |============================| <------ JavaThread::_cont_entry = entry->sp()
136 | | ? alignment word ? |
137 | |----------------------------| <--\
138 | | | |
139 | | ? caller stack args ? | | argsize (might not be 2-word aligned) words
140 Address | | | | Caller is still in the chunk.
141 | |----------------------------| |
142 | | pc (? return barrier ?) | | This pc contains the return barrier when the bottom-most frame
143 | | rbp | | isn't the last one in the continuation.
144 | | | |
145 | | frame | |
146 | | | |
147 +----------------------------| \__ Continuation frames to be frozen/thawed
148 | | /
149 | frame | |
150 | | |
151 |----------------------------| |
152 | | |
153 | frame | |
154 | | |
155 |----------------------------| <--/
156 | |
157 | doYield/safepoint stub | When preempting forcefully, we could have a safepoint stub
158 | | instead of a doYield stub
159 |============================| <- the sp passed to freeze
160 | |
161 | Native freeze/thaw frames |
162 | . |
163 | . |
164 | . |
165 +----------------------------+
166
167 ************************************************/
168
169 static const bool TEST_THAW_ONE_CHUNK_FRAME = false; // force thawing frames one-at-a-time for testing
170
171 #define CONT_JFR false // emit low-level JFR events that count slow/fast path for continuation performance debugging only
172 #if CONT_JFR
173 #define CONT_JFR_ONLY(code) code
174 #else
175 #define CONT_JFR_ONLY(code)
176 #endif
177
178 // TODO: See AbstractAssembler::generate_stack_overflow_check,
179 // Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
180 // when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.
181
182 // Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk
183
184 // Used to just annotatate cold/hot branches
185 #define LIKELY(condition) (condition)
186 #define UNLIKELY(condition) (condition)
187
188 // debugging functions
189 #ifdef ASSERT
190 extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue
191
192 static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }
193
194 static void do_deopt_after_thaw(JavaThread* thread);
195 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
196 static void log_frames(JavaThread* thread);
197 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
198 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
199 static void verify_frame_kind(frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr = nullptr, const char** code_name_ptr = nullptr, int* bci_ptr = nullptr, stackChunkOop chunk = nullptr);
200
201 #define assert_pfl(p, ...) \
202 do { \
203 if (!(p)) { \
204 JavaThread* t = JavaThread::active(); \
205 if (t->has_last_Java_frame()) { \
206 tty->print_cr("assert(" #p ") failed:"); \
207 t->print_frame_layout(); \
208 } \
209 } \
210 vmassert(p, __VA_ARGS__); \
211 } while(0)
212
213 #else
214 static void verify_continuation(oop continuation) { }
215 #define assert_pfl(p, ...)
216 #endif
217
218 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
219 template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);
220
221 static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier);
222 template<typename ConfigT> static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind);
223
224
225 // Entry point to freeze. Transitions are handled manually
226 // Called from gen_continuation_yield() in sharedRuntime_<cpu>.cpp through Continuation::freeze_entry();
227 template<typename ConfigT>
228 static JRT_BLOCK_ENTRY(int, freeze(JavaThread* current, intptr_t* sp))
229 assert(sp == current->frame_anchor()->last_Java_sp(), "");
230
231 if (current->raw_cont_fastpath() > current->last_continuation()->entry_sp() || current->raw_cont_fastpath() < sp) {
232 current->set_cont_fastpath(nullptr);
233 }
234
235 return checked_cast<int>(ConfigT::freeze(current, sp));
236 JRT_END
237
238 JRT_LEAF(int, Continuation::prepare_thaw(JavaThread* thread, bool return_barrier))
239 return prepare_thaw_internal(thread, return_barrier);
240 JRT_END
241
242 template<typename ConfigT>
243 static JRT_LEAF(intptr_t*, thaw(JavaThread* thread, int kind))
244 // TODO: JRT_LEAF and NoHandleMark is problematic for JFR events.
245 // vFrameStreamCommon allocates Handles in RegisterMap for continuations.
246 // Also the preemption case with JVMTI events enabled might safepoint so
247 // undo the NoSafepointVerifier here and rely on handling by ContinuationWrapper.
248 // JRT_ENTRY instead?
249 ResetNoHandleMark rnhm;
250 DEBUG_ONLY(PauseNoSafepointVerifier pnsv(&__nsv);)
251
252 // we might modify the code cache via BarrierSetNMethod::nmethod_entry_barrier
253 MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread));
254 return ConfigT::thaw(thread, (Continuation::thaw_kind)kind);
255 JRT_END
256
257 JVM_ENTRY(jint, CONT_isPinned0(JNIEnv* env, jobject cont_scope)) {
258 JavaThread* thread = JavaThread::thread_from_jni_environment(env);
259 return is_pinned0(thread, JNIHandles::resolve(cont_scope), false);
260 }
261 JVM_END
262
263 ///////////
264
265 enum class oop_kind { NARROW, WIDE };
266 template <oop_kind oops, typename BarrierSetT>
267 class Config {
268 public:
269 typedef Config<oops, BarrierSetT> SelfT;
270 using OopT = std::conditional_t<oops == oop_kind::NARROW, narrowOop, oop>;
271
272 static freeze_result freeze(JavaThread* thread, intptr_t* const sp) {
273 freeze_result res = freeze_internal<SelfT, false>(thread, sp);
274 JFR_ONLY(assert((res == freeze_ok) || (res == thread->last_freeze_fail_result()), "freeze failure not set"));
275 return res;
276 }
277
278 static freeze_result freeze_preempt(JavaThread* thread, intptr_t* const sp) {
279 return freeze_internal<SelfT, true>(thread, sp);
280 }
281
282 static intptr_t* thaw(JavaThread* thread, Continuation::thaw_kind kind) {
283 return thaw_internal<SelfT>(thread, kind);
284 }
285 };
286
287 #ifdef _WINDOWS
288 static void map_stack_pages(JavaThread* thread, size_t size, address sp) {
289 address new_sp = sp - size;
290 address watermark = thread->stack_overflow_state()->shadow_zone_growth_watermark();
291
292 if (new_sp < watermark) {
293 size_t page_size = os::vm_page_size();
294 address last_touched_page = watermark - StackOverflow::stack_shadow_zone_size();
295 size_t pages_to_touch = align_up(watermark - new_sp, page_size) / page_size;
296 while (pages_to_touch-- > 0) {
297 last_touched_page -= page_size;
298 *last_touched_page = 0;
299 }
300 thread->stack_overflow_state()->set_shadow_zone_growth_watermark(new_sp);
301 }
302 }
303 #endif
304
305 static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
306 const size_t page_size = os::vm_page_size();
307 if (size > page_size) {
308 if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
309 return false;
310 }
311 WINDOWS_ONLY(map_stack_pages(thread, size, sp));
312 }
313 return true;
314 }
315
316 #ifdef ASSERT
317 static oop get_continuation(JavaThread* thread) {
318 assert(thread != nullptr, "");
319 assert(thread->threadObj() != nullptr, "");
320 return java_lang_Thread::continuation(thread->threadObj());
321 }
322 #endif // ASSERT
323
324 inline void clear_anchor(JavaThread* thread) {
325 thread->frame_anchor()->clear();
326 }
327
328 static void set_anchor(JavaThread* thread, intptr_t* sp, address pc) {
329 assert(pc != nullptr, "");
330
331 JavaFrameAnchor* anchor = thread->frame_anchor();
332 anchor->set_last_Java_sp(sp);
333 anchor->set_last_Java_pc(pc);
334 ContinuationHelper::set_anchor_pd(anchor, sp);
335
336 assert(thread->has_last_Java_frame(), "");
337 assert(thread->last_frame().cb() != nullptr, "");
338 }
339
340 static void set_anchor(JavaThread* thread, intptr_t* sp) {
341 address pc = ContinuationHelper::return_address_at(
342 sp - frame::sender_sp_ret_address_offset());
343 set_anchor(thread, sp, pc);
344 }
345
346 static void set_anchor_to_entry(JavaThread* thread, ContinuationEntry* entry) {
347 JavaFrameAnchor* anchor = thread->frame_anchor();
348 anchor->set_last_Java_sp(entry->entry_sp());
349 anchor->set_last_Java_pc(entry->entry_pc());
350 ContinuationHelper::set_anchor_to_entry_pd(anchor, entry);
351
352 assert(thread->has_last_Java_frame(), "");
353 assert(thread->last_frame().cb() != nullptr, "");
354 }
355
356 #if CONT_JFR
357 class FreezeThawJfrInfo : public StackObj {
358 short _e_size;
359 short _e_num_interpreted_frames;
360 public:
361
362 FreezeThawJfrInfo() : _e_size(0), _e_num_interpreted_frames(0) {}
363 inline void record_interpreted_frame() { _e_num_interpreted_frames++; }
364 inline void record_size_copied(int size) { _e_size += size << LogBytesPerWord; }
365 template<typename Event> void post_jfr_event(Event *e, oop continuation, JavaThread* jt);
366 };
367
368 template<typename Event> void FreezeThawJfrInfo::post_jfr_event(Event* e, oop continuation, JavaThread* jt) {
369 if (e->should_commit()) {
370 log_develop_trace(continuations)("JFR event: iframes: %d size: %d", _e_num_interpreted_frames, _e_size);
371 e->set_carrierThread(JFR_JVM_THREAD_ID(jt));
372 e->set_continuationClass(continuation->klass());
373 e->set_interpretedFrames(_e_num_interpreted_frames);
374 e->set_size(_e_size);
375 e->commit();
376 }
377 }
378 #endif // CONT_JFR
379
380 /////////////// FREEZE ////
381
382 class FreezeBase : public StackObj {
383 protected:
384 JavaThread* const _thread;
385 ContinuationWrapper& _cont;
386 bool _barriers; // only set when we allocate a chunk
387
388 intptr_t* _bottom_address;
389
390 // Used for preemption only
391 const bool _preempt;
392 frame _last_frame;
393
394 // Used to support freezing with held monitors
395 int _monitors_in_lockstack;
396
397 int _freeze_size; // total size of all frames plus metadata in words.
398 int _total_align_size;
399
400 intptr_t* _cont_stack_top;
401 intptr_t* _cont_stack_bottom;
402
403 CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
404
405 #ifdef ASSERT
406 intptr_t* _orig_chunk_sp;
407 int _fast_freeze_size;
408 bool _empty;
409 #endif
410
411 JvmtiSampledObjectAllocEventCollector* _jvmti_event_collector;
412
413 NOT_PRODUCT(int _frames;)
414 DEBUG_ONLY(intptr_t* _last_write;)
415
416 inline FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempt);
417
418 public:
419 NOINLINE freeze_result freeze_slow();
420 void freeze_fast_existing_chunk();
421
422 CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
423 void set_jvmti_event_collector(JvmtiSampledObjectAllocEventCollector* jsoaec) { _jvmti_event_collector = jsoaec; }
424
425 inline int size_if_fast_freeze_available();
426
427 inline frame& last_frame() { return _last_frame; }
428
429 #ifdef ASSERT
430 bool check_valid_fast_path();
431 #endif
432
433 protected:
434 inline void init_rest();
435 void throw_stack_overflow_on_humongous_chunk();
436
437 // fast path
438 inline void copy_to_chunk(intptr_t* from, intptr_t* to, int size);
439 inline void unwind_frames();
440 inline void patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp);
441
442 // slow path
443 virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) = 0;
444
445 int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }
446
447 private:
448 // slow path
449 frame freeze_start_frame();
450 frame freeze_start_frame_on_preempt();
451 NOINLINE freeze_result recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top);
452 inline frame freeze_start_frame_yield_stub();
453 template<typename FKind>
454 inline freeze_result recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize);
455 inline void before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame);
456 inline void after_freeze_java_frame(const frame& hf, bool is_bottom_frame);
457 freeze_result finalize_freeze(const frame& callee, frame& caller, int argsize);
458 void patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame);
459 NOINLINE freeze_result recurse_freeze_interpreted_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
460 freeze_result recurse_freeze_compiled_frame(frame& f, frame& caller, int callee_argsize, bool callee_interpreted);
461 NOINLINE freeze_result recurse_freeze_stub_frame(frame& f, frame& caller);
462 NOINLINE freeze_result recurse_freeze_native_frame(frame& f, frame& caller);
463 NOINLINE void finish_freeze(const frame& f, const frame& top);
464
465 void freeze_lockstack(stackChunkOop chunk);
466
467 inline bool stack_overflow();
468
469 static frame sender(const frame& f) { return f.is_interpreted_frame() ? sender<ContinuationHelper::InterpretedFrame>(f)
470 : sender<ContinuationHelper::NonInterpretedUnknownFrame>(f); }
471 template<typename FKind> static inline frame sender(const frame& f);
472 template<typename FKind> frame new_heap_frame(frame& f, frame& caller);
473 inline void set_top_frame_metadata_pd(const frame& hf);
474 inline void patch_pd(frame& callee, const frame& caller);
475 inline void patch_pd_unused(intptr_t* sp);
476 void adjust_interpreted_frame_unextended_sp(frame& f);
477 inline void prepare_freeze_interpreted_top_frame(frame& f);
478 static inline void relativize_interpreted_frame_metadata(const frame& f, const frame& hf);
479
480 protected:
481 void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
482 bool freeze_fast_new_chunk(stackChunkOop chunk);
483 };
484
485 template <typename ConfigT>
486 class Freeze : public FreezeBase {
487 private:
488 stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);
489
490 public:
491 inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt)
492 : FreezeBase(thread, cont, frame_sp, preempt) {}
493
494 freeze_result try_freeze_fast();
495
496 protected:
497 virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) override { return allocate_chunk(stack_size, argsize_md); }
498 };
499
500 FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp, bool preempt) :
501 _thread(thread), _cont(cont), _barriers(false), _preempt(preempt), _last_frame(false /* no initialization */) {
502 DEBUG_ONLY(_jvmti_event_collector = nullptr;)
503
504 assert(_thread != nullptr, "");
505 assert(_thread->last_continuation()->entry_sp() == _cont.entrySP(), "");
506
507 DEBUG_ONLY(_cont.entry()->verify_cookie();)
508
509 assert(!Interpreter::contains(_cont.entryPC()), "");
510
511 _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
512 #ifdef _LP64
513 if (((intptr_t)_bottom_address & 0xf) != 0) {
514 _bottom_address--;
515 }
516 assert(is_aligned(_bottom_address, frame::frame_alignment), "");
517 #endif
518
519 log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
520 p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
521 assert(_bottom_address != nullptr, "");
522 assert(_bottom_address <= _cont.entrySP(), "");
523 DEBUG_ONLY(_last_write = nullptr;)
524
525 assert(_cont.chunk_invariant(), "");
526 assert(!Interpreter::contains(_cont.entryPC()), "");
527 #if !defined(PPC64) || defined(ZERO)
528 static const int doYield_stub_frame_size = frame::metadata_words;
529 #else
530 static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
531 #endif
532 // With preemption doYield() might not have been resolved yet
533 assert(_preempt || SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");
534
535 if (preempt) {
536 _last_frame = _thread->last_frame();
537 }
538
539 // properties of the continuation on the stack; all sizes are in words
540 _cont_stack_top = frame_sp + (!preempt ? doYield_stub_frame_size : 0); // we don't freeze the doYield stub frame
541 _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
542 - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw
543
544 log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
545 cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
546 assert(cont_size() > 0, "");
547
548 _monitors_in_lockstack = _thread->lock_stack().monitor_count();
549 }
550
551 void FreezeBase::init_rest() { // we want to postpone some initialization after chunk handling
552 _freeze_size = 0;
553 _total_align_size = 0;
554 NOT_PRODUCT(_frames = 0;)
555 }
556
557 void FreezeBase::freeze_lockstack(stackChunkOop chunk) {
558 assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "no room for lockstack");
559
560 _thread->lock_stack().move_to_address((oop*)chunk->start_address());
561 chunk->set_lockstack_size(checked_cast<uint8_t>(_monitors_in_lockstack));
562 chunk->set_has_lockstack(true);
563 }
564
565 void FreezeBase::copy_to_chunk(intptr_t* from, intptr_t* to, int size) {
566 stackChunkOop chunk = _cont.tail();
567 chunk->copy_from_stack_to_chunk(from, to, size);
568 CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)
569
570 #ifdef ASSERT
571 if (_last_write != nullptr) {
572 assert(_last_write == to + size, "Missed a spot: _last_write: " INTPTR_FORMAT " to+size: " INTPTR_FORMAT
573 " stack_size: %d _last_write offset: " PTR_FORMAT " to+size: " PTR_FORMAT, p2i(_last_write), p2i(to+size),
574 chunk->stack_size(), _last_write-chunk->start_address(), to+size-chunk->start_address());
575 _last_write = to;
576 }
577 #endif
578 }
579
580 static void assert_frames_in_continuation_are_safe(JavaThread* thread) {
581 #ifdef ASSERT
582 StackWatermark* watermark = StackWatermarkSet::get(thread, StackWatermarkKind::gc);
583 if (watermark == nullptr) {
584 return;
585 }
586 ContinuationEntry* ce = thread->last_continuation();
587 RegisterMap map(thread,
588 RegisterMap::UpdateMap::include,
589 RegisterMap::ProcessFrames::skip,
590 RegisterMap::WalkContinuation::skip);
591 map.set_include_argument_oops(false);
592 for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
593 watermark->assert_is_frame_safe(f);
594 }
595 #endif // ASSERT
596 }
597
598 // Called _after_ the last possible safepoint during the freeze operation (chunk allocation)
599 void FreezeBase::unwind_frames() {
600 ContinuationEntry* entry = _cont.entry();
601 entry->flush_stack_processing(_thread);
602 assert_frames_in_continuation_are_safe(_thread);
603 JFR_ONLY(Jfr::check_and_process_sample_request(_thread);)
604 set_anchor_to_entry(_thread, entry);
605 }
606
607 template <typename ConfigT>
608 freeze_result Freeze<ConfigT>::try_freeze_fast() {
609 assert(_thread->thread_state() == _thread_in_vm, "");
610 assert(_thread->cont_fastpath(), "");
611
612 DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
613 assert(_fast_freeze_size == 0, "");
614
615 stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words + _monitors_in_lockstack, _cont.argsize() + frame::metadata_words_at_top);
616 if (freeze_fast_new_chunk(chunk)) {
617 return freeze_ok;
618 }
619 if (_thread->has_pending_exception()) {
620 return freeze_exception;
621 }
622
623 // TODO R REMOVE when deopt change is fixed
624 assert(!_thread->cont_fastpath() || _barriers, "");
625 log_develop_trace(continuations)("-- RETRYING SLOW --");
626 return freeze_slow();
627 }
628
629 // Returns size needed if the continuation fits, otherwise 0.
630 int FreezeBase::size_if_fast_freeze_available() {
631 stackChunkOop chunk = _cont.tail();
632 if (chunk == nullptr || chunk->is_gc_mode() || chunk->requires_barriers() || chunk->has_mixed_frames()) {
633 log_develop_trace(continuations)("chunk available %s", chunk == nullptr ? "no chunk" : "chunk requires barriers");
634 return 0;
635 }
636
637 int total_size_needed = cont_size();
638 const int chunk_sp = chunk->sp();
639
640 // argsize can be nonzero if we have a caller, but the caller could be in a non-empty parent chunk,
641 // so we subtract it only if we overlap with the caller, i.e. the current chunk isn't empty.
642 // Consider leaving the chunk's argsize set when emptying it and removing the following branch,
643 // although that would require changing stackChunkOopDesc::is_empty
644 if (!chunk->is_empty()) {
645 total_size_needed -= _cont.argsize() + frame::metadata_words_at_top;
646 }
647
648 total_size_needed += _monitors_in_lockstack;
649
650 int chunk_free_room = chunk_sp - frame::metadata_words_at_bottom;
651 bool available = chunk_free_room >= total_size_needed;
652 log_develop_trace(continuations)("chunk available: %s size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
653 available ? "yes" : "no" , total_size_needed, _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
654 return available ? total_size_needed : 0;
655 }
656
657 void FreezeBase::freeze_fast_existing_chunk() {
658 stackChunkOop chunk = _cont.tail();
659
660 DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
661 assert(_fast_freeze_size > 0, "");
662
663 if (!chunk->is_empty()) { // we are copying into a non-empty chunk
664 DEBUG_ONLY(_empty = false;)
665 DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)
666 #ifdef ASSERT
667 {
668 intptr_t* retaddr_slot = (chunk->sp_address()
669 - frame::sender_sp_ret_address_offset());
670 assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
671 "unexpected saved return address");
672 }
673 #endif
674
675 // the chunk's sp before the freeze, adjusted to point beyond the stack-passed arguments in the topmost frame
676 // we overlap; we'll overwrite the chunk's top frame's callee arguments
677 const int chunk_start_sp = chunk->sp() + _cont.argsize() + frame::metadata_words_at_top;
678 assert(chunk_start_sp <= chunk->stack_size(), "sp not pointing into stack");
679
680 // increase max_size by what we're freezing minus the overlap
681 chunk->set_max_thawing_size(chunk->max_thawing_size() + cont_size() - _cont.argsize() - frame::metadata_words_at_top);
682
683 intptr_t* const bottom_sp = _cont_stack_bottom - _cont.argsize() - frame::metadata_words_at_top;
684 assert(bottom_sp == _bottom_address, "");
685 // Because the chunk isn't empty, we know there's a caller in the chunk, therefore the bottom-most frame
686 // should have a return barrier (installed back when we thawed it).
687 #ifdef ASSERT
688 {
689 intptr_t* retaddr_slot = (bottom_sp
690 - frame::sender_sp_ret_address_offset());
691 assert(ContinuationHelper::return_address_at(retaddr_slot)
692 == StubRoutines::cont_returnBarrier(),
693 "should be the continuation return barrier");
694 }
695 #endif
696 // We copy the fp from the chunk back to the stack because it contains some caller data,
697 // including, possibly, an oop that might have gone stale since we thawed.
698 patch_stack_pd(bottom_sp, chunk->sp_address());
699 // we don't patch the return pc at this time, so as not to make the stack unwalkable for async walks
700
701 freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
702 } else { // the chunk is empty
703 const int chunk_start_sp = chunk->stack_size();
704
705 DEBUG_ONLY(_empty = true;)
706 DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)
707
708 chunk->set_max_thawing_size(cont_size());
709 chunk->set_bottom(chunk_start_sp - _cont.argsize() - frame::metadata_words_at_top);
710 chunk->set_sp(chunk->bottom());
711
712 freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
713 }
714 }
715
716 bool FreezeBase::freeze_fast_new_chunk(stackChunkOop chunk) {
717 DEBUG_ONLY(_empty = true;)
718
719 // Install new chunk
720 _cont.set_tail(chunk);
721
722 if (UNLIKELY(chunk == nullptr || !_thread->cont_fastpath() || _barriers)) { // OOME/probably humongous
723 log_develop_trace(continuations)("Retrying slow. Barriers: %d", _barriers);
724 return false;
725 }
726
727 chunk->set_max_thawing_size(cont_size());
728
729 // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
730 // They'll then be stored twice: in the chunk and in the parent chunk's top frame
731 const int chunk_start_sp = cont_size() + frame::metadata_words + _monitors_in_lockstack;
732 assert(chunk_start_sp == chunk->stack_size(), "");
733
734 DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)
735
736 freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA true));
737
738 return true;
739 }
740
741 void FreezeBase::freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated)) {
742 assert(chunk != nullptr, "");
743 assert(!chunk->has_mixed_frames(), "");
744 assert(!chunk->is_gc_mode(), "");
745 assert(!chunk->has_bitmap(), "");
746 assert(!chunk->requires_barriers(), "");
747 assert(chunk == _cont.tail(), "");
748
749 // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
750 // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
751 // will either see no continuation on the stack, or a consistent chunk.
752 unwind_frames();
753
754 log_develop_trace(continuations)("freeze_fast start: chunk " INTPTR_FORMAT " size: %d orig sp: %d argsize: %d",
755 p2i((oopDesc*)chunk), chunk->stack_size(), chunk_start_sp, _cont.argsize());
756 assert(chunk_start_sp <= chunk->stack_size(), "");
757 assert(chunk_start_sp >= cont_size(), "no room in the chunk");
758
759 const int chunk_new_sp = chunk_start_sp - cont_size(); // the chunk's new sp, after freeze
760 assert(!(_fast_freeze_size > 0) || (_orig_chunk_sp - (chunk->start_address() + chunk_new_sp)) == (_fast_freeze_size - _monitors_in_lockstack), "");
761
762 intptr_t* chunk_top = chunk->start_address() + chunk_new_sp;
763 #ifdef ASSERT
764 if (!_empty) {
765 intptr_t* retaddr_slot = (_orig_chunk_sp
766 - frame::sender_sp_ret_address_offset());
767 assert(ContinuationHelper::return_address_at(retaddr_slot) == chunk->pc(),
768 "unexpected saved return address");
769 }
770 #endif
771
772 log_develop_trace(continuations)("freeze_fast start: " INTPTR_FORMAT " sp: %d chunk_top: " INTPTR_FORMAT,
773 p2i(chunk->start_address()), chunk_new_sp, p2i(chunk_top));
774
775 int adjust = frame::metadata_words_at_bottom;
776 #if INCLUDE_ASAN && defined(AARCH64)
777 // Reading at offset frame::metadata_words_at_bottom from _cont_stack_top
778 // will accesss memory at the callee frame, which on preemption cases will
779 // be the VM native method being called. The Arm 64-bit ABI doesn't specify
780 // a location where the frame record (returnpc+fp) has to be stored within
781 // a stack frame, and GCC currently chooses to save it at the top of the
782 // frame (lowest address). ASan treats this memory access in the callee as
783 // an overflow access to one of the locals stored in that frame. For these
784 // preemption cases we don't need to read these words anyways so we avoid it.
785 if (_preempt) {
786 adjust = 0;
787 }
788 #endif
789 intptr_t* from = _cont_stack_top - adjust;
790 intptr_t* to = chunk_top - adjust;
791 copy_to_chunk(from, to, cont_size() + adjust);
792 // Because we're not patched yet, the chunk is now in a bad state
793
794 // patch return pc of the bottom-most frozen frame (now in the chunk)
795 // with the actual caller's return address
796 intptr_t* chunk_bottom_retaddr_slot = (chunk_top + cont_size()
797 - _cont.argsize()
798 - frame::metadata_words_at_top
799 - frame::sender_sp_ret_address_offset());
800 #ifdef ASSERT
801 if (!_empty) {
802 assert(ContinuationHelper::return_address_at(chunk_bottom_retaddr_slot)
803 == StubRoutines::cont_returnBarrier(),
804 "should be the continuation return barrier");
805 }
806 #endif
807 ContinuationHelper::patch_return_address_at(chunk_bottom_retaddr_slot,
808 chunk->pc());
809
810 // We're always writing to a young chunk, so the GC can't see it until the next safepoint.
811 chunk->set_sp(chunk_new_sp);
812
813 // set chunk->pc to the return address of the topmost frame in the chunk
814 if (_preempt) {
815 // On aarch64/riscv64, the return pc of the top frame won't necessarily be at sp[-1].
816 // Also, on x64, if the top frame is the native wrapper frame, sp[-1] will not
817 // be the pc we used when creating the oopmap. Get the top's frame last pc from
818 // the anchor instead.
819 address last_pc = _last_frame.pc();
820 ContinuationHelper::patch_return_address_at(chunk_top - frame::sender_sp_ret_address_offset(), last_pc);
821 chunk->set_pc(last_pc);
822 // For stub/native frames the fp is not used while frozen, and will be constructed
823 // again when thawing the frame (see ThawBase::handle_preempted_continuation). We
824 // patch it with a special bad address to help with debugging, particularly when
825 // inspecting frames and identifying invalid accesses.
826 patch_pd_unused(chunk_top);
827 } else {
828 chunk->set_pc(ContinuationHelper::return_address_at(
829 _cont_stack_top - frame::sender_sp_ret_address_offset()));
830 }
831
832 if (_monitors_in_lockstack > 0) {
833 freeze_lockstack(chunk);
834 }
835
836 _cont.write();
837
838 log_develop_trace(continuations)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());
839 LogTarget(Trace, continuations) lt;
840 if (lt.develop_is_enabled()) {
841 LogStream ls(lt);
842 chunk->print_on(true, &ls);
843 }
844
845 // Verification
846 assert(_cont.chunk_invariant(), "");
847 chunk->verify();
848
849 #if CONT_JFR
850 EventContinuationFreezeFast e;
851 if (e.should_commit()) {
852 e.set_id(cast_from_oop<u8>(chunk));
853 DEBUG_ONLY(e.set_allocate(chunk_is_allocated);)
854 e.set_size(cont_size() << LogBytesPerWord);
855 e.commit();
856 }
857 #endif
858 }
859
860 NOINLINE freeze_result FreezeBase::freeze_slow() {
861 #ifdef ASSERT
862 ResourceMark rm;
863 #endif
864
865 log_develop_trace(continuations)("freeze_slow #" INTPTR_FORMAT, _cont.hash());
866 assert(_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");
867
868 #if CONT_JFR
869 EventContinuationFreezeSlow e;
870 if (e.should_commit()) {
871 e.set_id(cast_from_oop<u8>(_cont.continuation()));
872 e.commit();
873 }
874 #endif
875
876 init_rest();
877
878 HandleMark hm(Thread::current());
879
880 frame f = freeze_start_frame();
881
882 LogTarget(Debug, continuations) lt;
883 if (lt.develop_is_enabled()) {
884 LogStream ls(lt);
885 f.print_on(&ls);
886 }
887
888 frame caller; // the frozen caller in the chunk
889 freeze_result res = recurse_freeze(f, caller, 0, false, true);
890
891 if (res == freeze_ok) {
892 finish_freeze(f, caller);
893 _cont.write();
894 }
895
896 return res;
897 }
898
899 frame FreezeBase::freeze_start_frame() {
900 if (LIKELY(!_preempt)) {
901 return freeze_start_frame_yield_stub();
902 } else {
903 return freeze_start_frame_on_preempt();
904 }
905 }
906
907 frame FreezeBase::freeze_start_frame_yield_stub() {
908 frame f = _thread->last_frame();
909 assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
910 f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
911 assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
912 return f;
913 }
914
915 frame FreezeBase::freeze_start_frame_on_preempt() {
916 assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
917 assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
918 return _last_frame;
919 }
920
921 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
922 NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
923 assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
924 assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
925 || ((top && _preempt) == f.is_native_frame()), "");
926
927 if (stack_overflow()) {
928 return freeze_exception;
929 }
930
931 if (f.is_compiled_frame()) {
932 if (UNLIKELY(f.oop_map() == nullptr)) {
933 // special native frame
934 return freeze_pinned_native;
935 }
936 return recurse_freeze_compiled_frame(f, caller, callee_argsize, callee_interpreted);
937 } else if (f.is_interpreted_frame()) {
938 assert(!f.interpreter_frame_method()->is_native() || (top && _preempt), "");
939 return recurse_freeze_interpreted_frame(f, caller, callee_argsize, callee_interpreted);
940 } else if (top && _preempt) {
941 assert(f.is_native_frame() || f.is_runtime_frame(), "");
942 return f.is_native_frame() ? recurse_freeze_native_frame(f, caller) : recurse_freeze_stub_frame(f, caller);
943 } else {
944 // Frame can't be frozen. Most likely the call_stub or upcall_stub
945 // which indicates there are further natives frames up the stack.
946 return freeze_pinned_native;
947 }
948 }
949
950 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
951 // See also StackChunkFrameStream<frame_kind>::frame_size()
952 template<typename FKind>
953 inline freeze_result FreezeBase::recurse_freeze_java_frame(const frame& f, frame& caller, int fsize, int argsize) {
954 assert(FKind::is_instance(f), "");
955
956 assert(fsize > 0, "");
957 assert(argsize >= 0, "");
958 _freeze_size += fsize;
959 NOT_PRODUCT(_frames++;)
960
961 assert(FKind::frame_bottom(f) <= _bottom_address, "");
962
963 // We don't use FKind::frame_bottom(f) == _bottom_address because on x64 there's sometimes an extra word between
964 // enterSpecial and an interpreted frame
965 if (FKind::frame_bottom(f) >= _bottom_address - 1) {
966 return finalize_freeze(f, caller, argsize); // recursion end
967 } else {
968 frame senderf = sender<FKind>(f);
969 assert(FKind::interpreted || senderf.sp() == senderf.unextended_sp(), "");
970 freeze_result result = recurse_freeze(senderf, caller, argsize, FKind::interpreted, false); // recursive call
971 return result;
972 }
973 }
974
975 inline void FreezeBase::before_freeze_java_frame(const frame& f, const frame& caller, int fsize, int argsize, bool is_bottom_frame) {
976 LogTarget(Trace, continuations) lt;
977 if (lt.develop_is_enabled()) {
978 LogStream ls(lt);
979 ls.print_cr("======== FREEZING FRAME interpreted: %d bottom: %d", f.is_interpreted_frame(), is_bottom_frame);
980 ls.print_cr("fsize: %d argsize: %d", fsize, argsize);
981 f.print_value_on(&ls);
982 }
983 assert(caller.is_interpreted_frame() == Interpreter::contains(caller.pc()), "");
984 }
985
986 inline void FreezeBase::after_freeze_java_frame(const frame& hf, bool is_bottom_frame) {
987 LogTarget(Trace, continuations) lt;
988 if (lt.develop_is_enabled()) {
989 LogStream ls(lt);
990 DEBUG_ONLY(hf.print_value_on(&ls);)
991 assert(hf.is_heap_frame(), "should be");
992 DEBUG_ONLY(print_frame_layout(hf, false, &ls);)
993 if (is_bottom_frame) {
994 ls.print_cr("bottom h-frame:");
995 hf.print_on(&ls);
996 }
997 }
998 }
999
1000 // The parameter argsize_md includes metadata that has to be part of caller/callee overlap.
1001 // See also StackChunkFrameStream<frame_kind>::frame_size()
1002 freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, int argsize_md) {
1003 int argsize = argsize_md - frame::metadata_words_at_top;
1004 assert(callee.is_interpreted_frame()
1005 || ContinuationHelper::Frame::is_stub(callee.cb())
1006 || callee.cb()->as_nmethod()->is_osr_method()
1007 || argsize == _cont.argsize(), "argsize: %d cont.argsize: %d", argsize, _cont.argsize());
1008 log_develop_trace(continuations)("bottom: " INTPTR_FORMAT " count %d size: %d argsize: %d",
1009 p2i(_bottom_address), _frames, _freeze_size << LogBytesPerWord, argsize);
1010
1011 LogTarget(Trace, continuations) lt;
1012
1013 #ifdef ASSERT
1014 bool empty = _cont.is_empty();
1015 log_develop_trace(continuations)("empty: %d", empty);
1016 #endif
1017
1018 stackChunkOop chunk = _cont.tail();
1019
1020 assert(chunk == nullptr || (chunk->max_thawing_size() == 0) == chunk->is_empty(), "");
1021
1022 _freeze_size += frame::metadata_words; // for top frame's metadata
1023
1024 int overlap = 0; // the args overlap the caller -- if there is one in this chunk and is of the same kind
1025 int unextended_sp = -1;
1026 if (chunk != nullptr) {
1027 if (!chunk->is_empty()) {
1028 StackChunkFrameStream<ChunkFrames::Mixed> last(chunk);
1029 unextended_sp = chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp());
1030 bool top_interpreted = Interpreter::contains(chunk->pc());
1031 if (callee.is_interpreted_frame() == top_interpreted) {
1032 overlap = argsize_md;
1033 }
1034 } else {
1035 unextended_sp = chunk->stack_size() - frame::metadata_words_at_top;
1036 }
1037 }
1038
1039 log_develop_trace(continuations)("finalize _size: %d overlap: %d unextended_sp: %d", _freeze_size, overlap, unextended_sp);
1040
1041 _freeze_size -= overlap;
1042 assert(_freeze_size >= 0, "");
1043
1044 assert(chunk == nullptr || chunk->is_empty()
1045 || unextended_sp == chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp()), "");
1046 assert(chunk != nullptr || unextended_sp < _freeze_size, "");
1047
1048 _freeze_size += _monitors_in_lockstack;
1049
1050 // _barriers can be set to true by an allocation in freeze_fast, in which case the chunk is available
1051 bool allocated_old_in_freeze_fast = _barriers;
1052 assert(!allocated_old_in_freeze_fast || (unextended_sp >= _freeze_size && chunk->is_empty()),
1053 "Chunk allocated in freeze_fast is of insufficient size "
1054 "unextended_sp: %d size: %d is_empty: %d", unextended_sp, _freeze_size, chunk->is_empty());
1055 assert(!allocated_old_in_freeze_fast || (!UseZGC && !UseG1GC), "Unexpected allocation");
1056
1057 DEBUG_ONLY(bool empty_chunk = true);
1058 if (unextended_sp < _freeze_size || chunk->is_gc_mode() || (!allocated_old_in_freeze_fast && chunk->requires_barriers())) {
1059 // ALLOCATE NEW CHUNK
1060
1061 if (lt.develop_is_enabled()) {
1062 LogStream ls(lt);
1063 if (chunk == nullptr) {
1064 ls.print_cr("no chunk");
1065 } else {
1066 ls.print_cr("chunk barriers: %d _size: %d free size: %d",
1067 chunk->requires_barriers(), _freeze_size, chunk->sp() - frame::metadata_words);
1068 chunk->print_on(&ls);
1069 }
1070 }
1071
1072 _freeze_size += overlap; // we're allocating a new chunk, so no overlap
1073 // overlap = 0;
1074
1075 chunk = allocate_chunk_slow(_freeze_size, argsize_md);
1076 if (chunk == nullptr) {
1077 return freeze_exception;
1078 }
1079
1080 // Install new chunk
1081 _cont.set_tail(chunk);
1082 assert(chunk->is_empty(), "");
1083 } else {
1084 // REUSE EXISTING CHUNK
1085 log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
1086 if (chunk->is_empty()) {
1087 int sp = chunk->stack_size() - argsize_md;
1088 chunk->set_sp(sp);
1089 chunk->set_bottom(sp);
1090 _freeze_size += overlap;
1091 assert(chunk->max_thawing_size() == 0, "");
1092 } DEBUG_ONLY(else empty_chunk = false;)
1093 }
1094 assert(!chunk->is_gc_mode(), "");
1095 assert(!chunk->has_bitmap(), "");
1096 chunk->set_has_mixed_frames(true);
1097
1098 assert(chunk->requires_barriers() == _barriers, "");
1099 assert(!_barriers || chunk->is_empty(), "");
1100
1101 assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
1102 assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
1103
1104 if (_preempt) {
1105 frame top_frame = _thread->last_frame();
1106 if (top_frame.is_interpreted_frame()) {
1107 // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
1108 // We need it so that on resume we can restore the sp to the right place, since
1109 // thawing might add an alignment word to the expression stack (see finish_thaw()).
1110 // We do it now that we know freezing will be successful.
1111 prepare_freeze_interpreted_top_frame(top_frame);
1112 }
1113
1114 // Do this now so should_process_args_at_top() is set before calling finish_freeze
1115 // in case we might need to apply GC barriers to frames in this stackChunk.
1116 if (_thread->at_preemptable_init()) {
1117 assert(top_frame.is_interpreted_frame(), "only InterpreterRuntime::_new/resolve_from_cache allowed");
1118 chunk->set_at_klass_init(true);
1119 methodHandle m(_thread, top_frame.interpreter_frame_method());
1120 Bytecode_invoke call = Bytecode_invoke_check(m, top_frame.interpreter_frame_bci());
1121 assert(!call.is_valid() || call.is_invokestatic(), "only invokestatic allowed");
1122 if (call.is_invokestatic() && call.size_of_parameters() > 0) {
1123 assert(top_frame.interpreter_frame_expression_stack_size() > 0, "should have parameters in exp stack");
1124 chunk->set_has_args_at_top(true);
1125 }
1126 }
1127 }
1128
1129 // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
1130 // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
1131 // will either see no continuation or a consistent chunk.
1132 unwind_frames();
1133
1134 chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);
1135
1136 if (lt.develop_is_enabled()) {
1137 LogStream ls(lt);
1138 ls.print_cr("top chunk:");
1139 chunk->print_on(&ls);
1140 }
1141
1142 if (_monitors_in_lockstack > 0) {
1143 freeze_lockstack(chunk);
1144 }
1145
1146 // The topmost existing frame in the chunk; or an empty frame if the chunk is empty
1147 caller = StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame();
1148
1149 DEBUG_ONLY(_last_write = caller.unextended_sp() + (empty_chunk ? argsize_md : overlap);)
1150
1151 assert(chunk->is_in_chunk(_last_write - _freeze_size),
1152 "last_write-size: " INTPTR_FORMAT " start: " INTPTR_FORMAT, p2i(_last_write-_freeze_size), p2i(chunk->start_address()));
1153 #ifdef ASSERT
1154 if (lt.develop_is_enabled()) {
1155 LogStream ls(lt);
1156 ls.print_cr("top hframe before (freeze):");
1157 assert(caller.is_heap_frame(), "should be");
1158 caller.print_on(&ls);
1159 }
1160
1161 assert(!empty || Continuation::is_continuation_entry_frame(callee, nullptr), "");
1162
1163 frame entry = sender(callee);
1164
1165 assert((!empty && Continuation::is_return_barrier_entry(entry.pc())) || (empty && Continuation::is_continuation_enterSpecial(entry)), "");
1166 assert(callee.is_interpreted_frame() || entry.sp() == entry.unextended_sp(), "");
1167 #endif
1168
1169 return freeze_ok_bottom;
1170 }
1171
1172 // After freezing a frame we need to possibly adjust some values related to the caller frame.
1173 void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_bottom_frame) {
1174 if (is_bottom_frame) {
1175 // If we're the bottom frame, we need to replace the return barrier with the real
1176 // caller's pc.
1177 address last_pc = caller.pc();
1178 assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
1179 ContinuationHelper::Frame::patch_pc(caller, last_pc);
1180 } else {
1181 assert(!caller.is_empty(), "");
1182 }
1183
1184 patch_pd(hf, caller);
1185
1186 if (f.is_interpreted_frame()) {
1187 assert(hf.is_heap_frame(), "should be");
1188 ContinuationHelper::InterpretedFrame::patch_sender_sp(hf, caller);
1189 }
1190
1191 #ifdef ASSERT
1192 if (hf.is_compiled_frame()) {
1193 if (f.is_deoptimized_frame()) { // TODO DEOPT: long term solution: unroll on freeze and patch pc
1194 log_develop_trace(continuations)("Freezing deoptimized frame");
1195 assert(f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
1196 assert(f.cb()->as_nmethod()->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
1197 }
1198 }
1199 #endif
1200 }
1201
1202 #ifdef ASSERT
1203 static void verify_frame_top(const frame& f, intptr_t* top) {
1204 ResourceMark rm;
1205 InterpreterOopMap mask;
1206 f.interpreted_frame_oop_map(&mask);
1207 assert(top <= ContinuationHelper::InterpretedFrame::frame_top(f, &mask),
1208 "frame_top: " INTPTR_FORMAT " Interpreted::frame_top: " INTPTR_FORMAT,
1209 p2i(top), p2i(ContinuationHelper::InterpretedFrame::frame_top(f, &mask)));
1210 }
1211 #endif // ASSERT
1212
1213 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1214 // See also StackChunkFrameStream<frame_kind>::frame_size()
1215 NOINLINE freeze_result FreezeBase::recurse_freeze_interpreted_frame(frame& f, frame& caller,
1216 int callee_argsize /* incl. metadata */,
1217 bool callee_interpreted) {
1218 adjust_interpreted_frame_unextended_sp(f);
1219
1220 // The frame's top never includes the stack arguments to the callee
1221 intptr_t* const stack_frame_top = ContinuationHelper::InterpretedFrame::frame_top(f, callee_argsize, callee_interpreted);
1222 intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
1223 const int fsize = pointer_delta_as_int(stack_frame_bottom, stack_frame_top);
1224
1225 DEBUG_ONLY(verify_frame_top(f, stack_frame_top));
1226
1227 Method* frame_method = ContinuationHelper::Frame::frame_method(f);
1228 // including metadata between f and its args
1229 const int argsize = ContinuationHelper::InterpretedFrame::stack_argsize(f) + frame::metadata_words_at_top;
1230
1231 log_develop_trace(continuations)("recurse_freeze_interpreted_frame %s _size: %d fsize: %d argsize: %d callee_interpreted: %d",
1232 frame_method->name_and_sig_as_C_string(), _freeze_size, fsize, argsize, callee_interpreted);
1233 // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1234 assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1235
1236 freeze_result result = recurse_freeze_java_frame<ContinuationHelper::InterpretedFrame>(f, caller, fsize, argsize);
1237 if (UNLIKELY(result > freeze_ok_bottom)) {
1238 return result;
1239 }
1240
1241 bool is_bottom_frame = result == freeze_ok_bottom;
1242 assert(!caller.is_empty() || is_bottom_frame, "");
1243
1244 DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, is_bottom_frame);)
1245
1246 frame hf = new_heap_frame<ContinuationHelper::InterpretedFrame>(f, caller);
1247 _total_align_size += frame::align_wiggle; // add alignment room for internal interpreted frame alignment on AArch64/PPC64
1248
1249 intptr_t* heap_frame_top = ContinuationHelper::InterpretedFrame::frame_top(hf, callee_argsize, callee_interpreted);
1250 intptr_t* heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
1251 assert(heap_frame_bottom == heap_frame_top + fsize, "");
1252
1253 // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
1254 // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
1255 copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1256 assert(!is_bottom_frame || !caller.is_interpreted_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1257
1258 relativize_interpreted_frame_metadata(f, hf);
1259
1260 patch(f, hf, caller, is_bottom_frame);
1261
1262 CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
1263 DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1264 caller = hf;
1265
1266 // Mark frame_method's GC epoch for class redefinition on_stack calculation.
1267 frame_method->record_gc_epoch();
1268
1269 return freeze_ok;
1270 }
1271
1272 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
1273 // See also StackChunkFrameStream<frame_kind>::frame_size()
1274 freeze_result FreezeBase::recurse_freeze_compiled_frame(frame& f, frame& caller,
1275 int callee_argsize /* incl. metadata */,
1276 bool callee_interpreted) {
1277 // The frame's top never includes the stack arguments to the callee
1278 intptr_t* const stack_frame_top = ContinuationHelper::CompiledFrame::frame_top(f, callee_argsize, callee_interpreted);
1279 intptr_t* const stack_frame_bottom = ContinuationHelper::CompiledFrame::frame_bottom(f);
1280 // including metadata between f and its stackargs
1281 const int argsize = ContinuationHelper::CompiledFrame::stack_argsize(f) + frame::metadata_words_at_top;
1282 const int fsize = pointer_delta_as_int(stack_frame_bottom + argsize, stack_frame_top);
1283
1284 log_develop_trace(continuations)("recurse_freeze_compiled_frame %s _size: %d fsize: %d argsize: %d",
1285 ContinuationHelper::Frame::frame_method(f) != nullptr ?
1286 ContinuationHelper::Frame::frame_method(f)->name_and_sig_as_C_string() : "",
1287 _freeze_size, fsize, argsize);
1288 // we'd rather not yield inside methods annotated with @JvmtiMountTransition
1289 assert(!ContinuationHelper::Frame::frame_method(f)->jvmti_mount_transition(), "");
1290
1291 freeze_result result = recurse_freeze_java_frame<ContinuationHelper::CompiledFrame>(f, caller, fsize, argsize);
1292 if (UNLIKELY(result > freeze_ok_bottom)) {
1293 return result;
1294 }
1295
1296 bool is_bottom_frame = result == freeze_ok_bottom;
1297 assert(!caller.is_empty() || is_bottom_frame, "");
1298
1299 DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, is_bottom_frame);)
1300
1301 frame hf = new_heap_frame<ContinuationHelper::CompiledFrame>(f, caller);
1302
1303 intptr_t* heap_frame_top = ContinuationHelper::CompiledFrame::frame_top(hf, callee_argsize, callee_interpreted);
1304
1305 copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1306 assert(!is_bottom_frame || !caller.is_compiled_frame() || (heap_frame_top + fsize) == (caller.unextended_sp() + argsize), "");
1307
1308 if (caller.is_interpreted_frame()) {
1309 // When thawing the frame we might need to add alignment (see Thaw::align)
1310 _total_align_size += frame::align_wiggle;
1311 }
1312
1313 patch(f, hf, caller, is_bottom_frame);
1314
1315 assert(is_bottom_frame || Interpreter::contains(ContinuationHelper::CompiledFrame::real_pc(caller)) == caller.is_interpreted_frame(), "");
1316
1317 DEBUG_ONLY(after_freeze_java_frame(hf, is_bottom_frame);)
1318 caller = hf;
1319 return freeze_ok;
1320 }
1321
1322 NOINLINE freeze_result FreezeBase::recurse_freeze_stub_frame(frame& f, frame& caller) {
1323 DEBUG_ONLY(frame fsender = sender(f);)
1324 assert(fsender.is_compiled_frame(), "sender should be compiled frame");
1325
1326 intptr_t* const stack_frame_top = ContinuationHelper::StubFrame::frame_top(f);
1327 const int fsize = f.cb()->frame_size();
1328
1329 log_develop_trace(continuations)("recurse_freeze_stub_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1330 f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1331
1332 freeze_result result = recurse_freeze_java_frame<ContinuationHelper::StubFrame>(f, caller, fsize, 0);
1333 if (UNLIKELY(result > freeze_ok_bottom)) {
1334 return result;
1335 }
1336
1337 assert(result == freeze_ok, "should have caller");
1338 DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, 0, false /*is_bottom_frame*/);)
1339
1340 frame hf = new_heap_frame<ContinuationHelper::StubFrame>(f, caller);
1341 intptr_t* heap_frame_top = ContinuationHelper::StubFrame::frame_top(hf);
1342
1343 copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1344
1345 patch(f, hf, caller, false /*is_bottom_frame*/);
1346
1347 DEBUG_ONLY(after_freeze_java_frame(hf, false /*is_bottom_frame*/);)
1348
1349 caller = hf;
1350 return freeze_ok;
1351 }
1352
1353 NOINLINE freeze_result FreezeBase::recurse_freeze_native_frame(frame& f, frame& caller) {
1354 if (!f.cb()->as_nmethod()->method()->is_object_wait0()) {
1355 assert(f.cb()->as_nmethod()->method()->is_synchronized(), "");
1356 // Synchronized native method case. Unlike the interpreter native wrapper, the compiled
1357 // native wrapper tries to acquire the monitor after marshalling the arguments from the
1358 // caller into the native convention. This is so that we have a valid oopMap in case of
1359 // having to block in the slow path. But that would require freezing those registers too
1360 // and then fixing them back on thaw in case of oops. To avoid complicating things and
1361 // given that this would be a rare case anyways just pin the vthread to the carrier.
1362 return freeze_pinned_native;
1363 }
1364
1365 intptr_t* const stack_frame_top = ContinuationHelper::NativeFrame::frame_top(f);
1366 // There are no stackargs but argsize must include the metadata
1367 const int argsize = frame::metadata_words_at_top;
1368 const int fsize = f.cb()->frame_size() + argsize;
1369
1370 log_develop_trace(continuations)("recurse_freeze_native_frame %s _size: %d fsize: %d :: " INTPTR_FORMAT " - " INTPTR_FORMAT,
1371 f.cb()->name(), _freeze_size, fsize, p2i(stack_frame_top), p2i(stack_frame_top+fsize));
1372
1373 freeze_result result = recurse_freeze_java_frame<ContinuationHelper::NativeFrame>(f, caller, fsize, argsize);
1374 if (UNLIKELY(result > freeze_ok_bottom)) {
1375 return result;
1376 }
1377
1378 assert(result == freeze_ok, "should have caller frame");
1379 DEBUG_ONLY(before_freeze_java_frame(f, caller, fsize, argsize, false /* is_bottom_frame */);)
1380
1381 frame hf = new_heap_frame<ContinuationHelper::NativeFrame>(f, caller);
1382 intptr_t* heap_frame_top = ContinuationHelper::NativeFrame::frame_top(hf);
1383
1384 copy_to_chunk(stack_frame_top, heap_frame_top, fsize);
1385
1386 if (caller.is_interpreted_frame()) {
1387 // When thawing the frame we might need to add alignment (see Thaw::align)
1388 _total_align_size += frame::align_wiggle;
1389 }
1390
1391 patch(f, hf, caller, false /* is_bottom_frame */);
1392
1393 DEBUG_ONLY(after_freeze_java_frame(hf, false /* is_bottom_frame */);)
1394
1395 caller = hf;
1396 return freeze_ok;
1397 }
1398
1399 NOINLINE void FreezeBase::finish_freeze(const frame& f, const frame& top) {
1400 stackChunkOop chunk = _cont.tail();
1401
1402 LogTarget(Trace, continuations) lt;
1403 if (lt.develop_is_enabled()) {
1404 LogStream ls(lt);
1405 assert(top.is_heap_frame(), "should be");
1406 top.print_on(&ls);
1407 }
1408
1409 set_top_frame_metadata_pd(top);
1410
1411 chunk->set_sp(chunk->to_offset(top.sp()));
1412 chunk->set_pc(top.pc());
1413
1414 chunk->set_max_thawing_size(chunk->max_thawing_size() + _total_align_size);
1415
1416 assert(chunk->sp_address() - chunk->start_address() >= _monitors_in_lockstack, "clash with lockstack");
1417
1418 // At this point the chunk is consistent
1419
1420 if (UNLIKELY(_barriers)) {
1421 log_develop_trace(continuations)("do barriers on old chunk");
1422 // Serial and Parallel GC can allocate objects directly into the old generation.
1423 // Then we want to relativize the derived pointers eagerly so that
1424 // old chunks are all in GC mode.
1425 assert(!UseG1GC, "G1 can not deal with allocating outside of eden");
1426 assert(!UseZGC, "ZGC can not deal with allocating chunks visible to marking");
1427 if (UseShenandoahGC) {
1428 _cont.tail()->relativize_derived_pointers_concurrently();
1429 } else {
1430 ContinuationGCSupport::transform_stack_chunk(_cont.tail());
1431 }
1432 // For objects in the old generation we must maintain the remembered set
1433 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>();
1434 }
1435
1436 log_develop_trace(continuations)("finish_freeze: has_mixed_frames: %d", chunk->has_mixed_frames());
1437 if (lt.develop_is_enabled()) {
1438 LogStream ls(lt);
1439 chunk->print_on(true, &ls);
1440 }
1441
1442 if (lt.develop_is_enabled()) {
1443 LogStream ls(lt);
1444 ls.print_cr("top hframe after (freeze):");
1445 assert(_cont.last_frame().is_heap_frame(), "should be");
1446 _cont.last_frame().print_on(&ls);
1447 DEBUG_ONLY(print_frame_layout(top, false, &ls);)
1448 }
1449
1450 assert(_cont.chunk_invariant(), "");
1451 }
1452
1453 inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive native code
1454 JavaThread* t = !_preempt ? _thread : JavaThread::current();
1455 assert(t == JavaThread::current(), "");
1456 if (os::current_stack_pointer() < t->stack_overflow_state()->shadow_zone_safe_limit()) {
1457 if (!_preempt) {
1458 ContinuationWrapper::SafepointOp so(t, _cont); // could also call _cont.done() instead
1459 Exceptions::_throw_msg(t, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Stack overflow while freezing");
1460 }
1461 return true;
1462 }
1463 return false;
1464 }
1465
1466 class StackChunkAllocator : public MemAllocator {
1467 const size_t _stack_size;
1468 int _argsize_md;
1469 ContinuationWrapper& _continuation_wrapper;
1470 JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector;
1471 mutable bool _took_slow_path;
1472
1473 // Does the minimal amount of initialization needed for a TLAB allocation.
1474 // We don't need to do a full initialization, as such an allocation need not be immediately walkable.
1475 virtual oop initialize(HeapWord* mem) const override {
1476 assert(_stack_size > 0, "");
1477 assert(_stack_size <= max_jint, "");
1478 assert(_word_size > _stack_size, "");
1479
1480 // zero out fields (but not the stack)
1481 const size_t hs = oopDesc::header_size();
1482 if (oopDesc::has_klass_gap()) {
1483 oopDesc::set_klass_gap(mem, 0);
1484 }
1485 Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);
1486
1487 int bottom = (int)_stack_size - _argsize_md;
1488
1489 jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
1490 jdk_internal_vm_StackChunk::set_bottom(mem, bottom);
1491 jdk_internal_vm_StackChunk::set_sp(mem, bottom);
1492
1493 return finish(mem);
1494 }
1495
1496 stackChunkOop allocate_fast() const {
1497 if (!UseTLAB) {
1498 return nullptr;
1499 }
1500
1501 HeapWord* const mem = MemAllocator::mem_allocate_inside_tlab_fast();
1502 if (mem == nullptr) {
1503 return nullptr;
1504 }
1505
1506 oop obj = initialize(mem);
1507 return stackChunkOopDesc::cast(obj);
1508 }
1509
1510 public:
1511 StackChunkAllocator(Klass* klass,
1512 size_t word_size,
1513 Thread* thread,
1514 size_t stack_size,
1515 int argsize_md,
1516 ContinuationWrapper& continuation_wrapper,
1517 JvmtiSampledObjectAllocEventCollector* jvmti_event_collector)
1518 : MemAllocator(klass, word_size, thread),
1519 _stack_size(stack_size),
1520 _argsize_md(argsize_md),
1521 _continuation_wrapper(continuation_wrapper),
1522 _jvmti_event_collector(jvmti_event_collector),
1523 _took_slow_path(false) {}
1524
1525 // Provides it's own, specialized allocation which skips instrumentation
1526 // if the memory can be allocated without going to a slow-path.
1527 stackChunkOop allocate() const {
1528 // First try to allocate without any slow-paths or instrumentation.
1529 stackChunkOop obj = allocate_fast();
1530 if (obj != nullptr) {
1531 return obj;
1532 }
1533
1534 // Now try full-blown allocation with all expensive operations,
1535 // including potentially safepoint operations.
1536 _took_slow_path = true;
1537
1538 // Protect unhandled Loom oops
1539 ContinuationWrapper::SafepointOp so(_thread, _continuation_wrapper);
1540
1541 // Can safepoint
1542 _jvmti_event_collector->start();
1543
1544 // Can safepoint
1545 return stackChunkOopDesc::cast(MemAllocator::allocate());
1546 }
1547
1548 bool took_slow_path() const {
1549 return _took_slow_path;
1550 }
1551 };
1552
1553 template <typename ConfigT>
1554 stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size, int argsize_md) {
1555 log_develop_trace(continuations)("allocate_chunk allocating new chunk");
1556
1557 InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass());
1558 size_t size_in_words = klass->instance_size(stack_size);
1559
1560 if (CollectedHeap::stack_chunk_max_size() > 0 && size_in_words >= CollectedHeap::stack_chunk_max_size()) {
1561 if (!_preempt) {
1562 throw_stack_overflow_on_humongous_chunk();
1563 }
1564 return nullptr;
1565 }
1566
1567 JavaThread* current = _preempt ? JavaThread::current() : _thread;
1568 assert(current == JavaThread::current(), "should be current");
1569
1570 // Allocate the chunk.
1571 //
1572 // This might safepoint while allocating, but all safepointing due to
1573 // instrumentation have been deferred. This property is important for
1574 // some GCs, as this ensures that the allocated object is in the young
1575 // generation / newly allocated memory.
1576 StackChunkAllocator allocator(klass, size_in_words, current, stack_size, argsize_md, _cont, _jvmti_event_collector);
1577 stackChunkOop chunk = allocator.allocate();
1578
1579 if (chunk == nullptr) {
1580 return nullptr; // OOME
1581 }
1582
1583 // assert that chunk is properly initialized
1584 assert(chunk->stack_size() == (int)stack_size, "");
1585 assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size);
1586 assert(chunk->sp() == chunk->bottom(), "");
1587 assert((intptr_t)chunk->start_address() % 8 == 0, "");
1588 assert(chunk->max_thawing_size() == 0, "");
1589 assert(chunk->pc() == nullptr, "");
1590 assert(chunk->is_empty(), "");
1591 assert(chunk->flags() == 0, "");
1592 assert(chunk->is_gc_mode() == false, "");
1593 assert(chunk->lockstack_size() == 0, "");
1594
1595 // fields are uninitialized
1596 chunk->set_parent_access<IS_DEST_UNINITIALIZED>(_cont.last_nonempty_chunk());
1597 chunk->set_cont_access<IS_DEST_UNINITIALIZED>(_cont.continuation());
1598
1599 #if INCLUDE_ZGC
1600 if (UseZGC) {
1601 ZStackChunkGCData::initialize(chunk);
1602 assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation");
1603 _barriers = false;
1604 } else
1605 #endif
1606 #if INCLUDE_SHENANDOAHGC
1607 if (UseShenandoahGC) {
1608 _barriers = chunk->requires_barriers();
1609 } else
1610 #endif
1611 {
1612 if (!allocator.took_slow_path()) {
1613 // Guaranteed to be in young gen / newly allocated memory
1614 assert(!chunk->requires_barriers(), "Unfamiliar GC requires barriers on TLAB allocation");
1615 _barriers = false;
1616 } else {
1617 // Some GCs could put direct allocations in old gen for slow-path
1618 // allocations; need to explicitly check if that was the case.
1619 _barriers = chunk->requires_barriers();
1620 }
1621 }
1622
1623 if (_barriers) {
1624 log_develop_trace(continuations)("allocation requires barriers");
1625 }
1626
1627 assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1628
1629 return chunk;
1630 }
1631
1632 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1633 ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1634 Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1635 }
1636
1637 class AnchorMark : public StackObj {
1638 JavaThread* _current;
1639 frame& _top_frame;
1640 intptr_t* _last_sp_from_frame;
1641 bool _is_interpreted;
1642
1643 public:
1644 AnchorMark(JavaThread* current, frame& f) : _current(current), _top_frame(f), _is_interpreted(false) {
1645 intptr_t* sp = anchor_mark_set_pd();
1646 set_anchor(_current, sp);
1647 }
1648 ~AnchorMark() {
1649 clear_anchor(_current);
1650 anchor_mark_clear_pd();
1651 }
1652 inline intptr_t* anchor_mark_set_pd();
1653 inline void anchor_mark_clear_pd();
1654 };
1655
1656 #if INCLUDE_JVMTI
1657 static int num_java_frames(ContinuationWrapper& cont) {
1658 ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1659 int count = 0;
1660 for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1661 count += chunk->num_java_frames();
1662 }
1663 return count;
1664 }
1665
1666 static void invalidate_jvmti_stack(JavaThread* thread) {
1667 JvmtiThreadState *state = thread->jvmti_thread_state();
1668 if (state != nullptr) {
1669 state->invalidate_cur_stack_depth();
1670 }
1671 }
1672
1673 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1674 if (!cont.entry()->is_virtual_thread()) {
1675 if (JvmtiExport::has_frame_pops(thread)) {
1676 int num_frames = num_java_frames(cont);
1677
1678 ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1679 JvmtiExport::continuation_yield_cleanup(thread, num_frames);
1680 }
1681 invalidate_jvmti_stack(thread);
1682 }
1683 }
1684
1685 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top, Continuation::preempt_kind pk) {
1686 assert(current->vthread() != nullptr, "must be");
1687
1688 HandleMarkCleaner hm(current); // Cleanup all handles (including so._conth) before returning to Java.
1689 Handle vth(current, current->vthread());
1690 ContinuationWrapper::SafepointOp so(current, cont);
1691 AnchorMark am(current, top); // Set anchor so that the stack is walkable.
1692
1693 JRT_BLOCK
1694 MountUnmountDisabler::end_transition(current, vth(), true /*is_mount*/, false /*is_thread_start*/);
1695
1696 if (current->pending_contended_entered_event()) {
1697 // No monitor JVMTI events for ObjectLocker case.
1698 if (pk != Continuation::object_locker) {
1699 JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1700 }
1701 current->set_contended_entered_monitor(nullptr);
1702 }
1703 JRT_BLOCK_END
1704 }
1705 #endif // INCLUDE_JVMTI
1706
1707 #ifdef ASSERT
1708 // There are no interpreted frames if we're not called from the interpreter and we haven't ancountered an i2c
1709 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1710 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1711 bool FreezeBase::check_valid_fast_path() {
1712 ContinuationEntry* ce = _thread->last_continuation();
1713 RegisterMap map(_thread,
1714 RegisterMap::UpdateMap::skip,
1715 RegisterMap::ProcessFrames::skip,
1716 RegisterMap::WalkContinuation::skip);
1717 map.set_include_argument_oops(false);
1718 bool is_top_frame = true;
1719 for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1720 if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1721 return false;
1722 }
1723 }
1724 return true;
1725 }
1726
1727 static void verify_frame_kind(frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr, const char** code_name_ptr, int* bci_ptr, stackChunkOop chunk) {
1728 Method* m;
1729 const char* code_name;
1730 int bci;
1731 if (preempt_kind == Continuation::monitorenter) {
1732 assert(top.is_interpreted_frame() || top.is_runtime_frame(), "unexpected %sframe",
1733 top.is_compiled_frame() ? "compiled " : top.is_native_frame() ? "native " : "");
1734 bool at_sync_method;
1735 if (top.is_interpreted_frame()) {
1736 m = top.interpreter_frame_method();
1737 assert(!m->is_native() || m->is_synchronized(), "invalid method %s", m->external_name());
1738 address bcp = top.interpreter_frame_bcp();
1739 assert(bcp != 0 || m->is_native(), "");
1740 at_sync_method = m->is_synchronized() && (bcp == 0 || bcp == m->code_base());
1741 // bcp is advanced on monitorenter before making the VM call, adjust for that.
1742 bool at_sync_bytecode = bcp > m->code_base() && Bytecode(m, bcp - 1).code() == Bytecodes::Code::_monitorenter;
1743 assert(at_sync_method || at_sync_bytecode, "");
1744 bci = at_sync_method ? -1 : top.interpreter_frame_bci();
1745 } else {
1746 JavaThread* current = JavaThread::current();
1747 ResourceMark rm(current);
1748 CodeBlob* cb = top.cb();
1749 RegisterMap reg_map(current,
1750 RegisterMap::UpdateMap::skip,
1751 RegisterMap::ProcessFrames::skip,
1752 RegisterMap::WalkContinuation::include);
1753 if (top.is_heap_frame()) {
1754 assert(chunk != nullptr, "");
1755 reg_map.set_stack_chunk(chunk);
1756 top = chunk->relativize(top);
1757 top.set_frame_index(0);
1758 }
1759 frame fr = top.sender(®_map);
1760 vframe* vf = vframe::new_vframe(&fr, ®_map, current);
1761 compiledVFrame* cvf = compiledVFrame::cast(vf);
1762 m = cvf->method();
1763 bci = cvf->scope()->bci();
1764 at_sync_method = bci == SynchronizationEntryBCI;
1765 assert(!at_sync_method || m->is_synchronized(), "bci is %d but method %s is not synchronized", bci, m->external_name());
1766 bool is_c1_monitorenter = false, is_c2_monitorenter = false;
1767 COMPILER1_PRESENT(is_c1_monitorenter = cb == Runtime1::blob_for(StubId::c1_monitorenter_id) ||
1768 cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id);)
1769 COMPILER2_PRESENT(is_c2_monitorenter = cb == CodeCache::find_blob(OptoRuntime::complete_monitor_locking_Java());)
1770 assert(is_c1_monitorenter || is_c2_monitorenter, "wrong runtime stub frame");
1771 }
1772 code_name = at_sync_method ? "synchronized method" : "monitorenter";
1773 } else if (preempt_kind == Continuation::object_wait) {
1774 assert(top.is_interpreted_frame() || top.is_native_frame(), "");
1775 m = top.is_interpreted_frame() ? top.interpreter_frame_method() : top.cb()->as_nmethod()->method();
1776 assert(m->is_object_wait0(), "");
1777 bci = 0;
1778 code_name = "";
1779 } else {
1780 assert(preempt_kind == Continuation::object_locker, "invalid preempt kind");
1781 assert(top.is_interpreted_frame(), "");
1782 m = top.interpreter_frame_method();
1783 Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
1784 Bytecodes::Code code = current_bytecode.code();
1785 assert(code == Bytecodes::Code::_new || code == Bytecodes::Code::_invokestatic ||
1786 (code == Bytecodes::Code::_getstatic || code == Bytecodes::Code::_putstatic), "invalid bytecode");
1787 bci = top.interpreter_frame_bci();
1788 code_name = Bytecodes::name(current_bytecode.code());
1789 }
1790 assert(bci >= 0 || m->is_synchronized(), "invalid bci:%d at method %s", bci, m->external_name());
1791
1792 if (m_ptr != nullptr) {
1793 *m_ptr = m;
1794 *code_name_ptr = code_name;
1795 *bci_ptr = bci;
1796 }
1797 }
1798
1799 static void log_preempt_after_freeze(const ContinuationWrapper& cont) {
1800 JavaThread* current = cont.thread();
1801 int64_t tid = current->monitor_owner_id();
1802
1803 StackChunkFrameStream<ChunkFrames::Mixed> sfs(cont.tail());
1804 frame top_frame = sfs.to_frame();
1805 bool at_init = current->at_preemptable_init();
1806 bool at_enter = current->current_pending_monitor() != nullptr;
1807 bool at_wait = current->current_waiting_monitor() != nullptr;
1808 assert((at_enter && !at_wait) || (!at_enter && at_wait), "");
1809 Continuation::preempt_kind pk = at_init ? Continuation::object_locker : at_enter ? Continuation::monitorenter : Continuation::object_wait;
1810
1811 Method* m = nullptr;
1812 const char* code_name = nullptr;
1813 int bci = InvalidFrameStateBci;
1814 verify_frame_kind(top_frame, pk, &m, &code_name, &bci, cont.tail());
1815 assert(m != nullptr && code_name != nullptr && bci != InvalidFrameStateBci, "should be set");
1816
1817 ResourceMark rm(current);
1818 if (bci < 0) {
1819 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " while synchronizing on %smethod %s", tid, m->is_native() ? "native " : "", m->external_name());
1820 } else if (m->is_object_wait0()) {
1821 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at native method %s", tid, m->external_name());
1822 } else {
1823 Klass* k = current->preempt_init_klass();
1824 assert(k != nullptr || !at_init, "");
1825 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at %s(bci:%d) in method %s %s%s", tid, code_name, bci,
1826 m->external_name(), at_init ? "trying to initialize klass " : "", at_init ? k->external_name() : "");
1827 }
1828 }
1829 #endif // ASSERT
1830
1831 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1832 verify_continuation(cont.continuation());
1833 assert(!cont.is_empty(), "");
1834
1835 log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1836 return freeze_ok;
1837 }
1838
1839 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1840 if (UNLIKELY(res != freeze_ok)) {
1841 JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1842 verify_continuation(cont.continuation());
1843 log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1844 return res;
1845 }
1846
1847 JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1848 return freeze_epilog(cont);
1849 }
1850
1851 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1852 if (UNLIKELY(res != freeze_ok)) {
1853 verify_continuation(cont.continuation());
1854 log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1855 return res;
1856 }
1857
1858 // Set up things so that on return to Java we jump to preempt stub.
1859 patch_return_pc_with_preempt_stub(old_last_frame);
1860 cont.tail()->set_preempted(true);
1861 DEBUG_ONLY(log_preempt_after_freeze(cont);)
1862 return freeze_epilog(cont);
1863 }
1864
1865 template<typename ConfigT, bool preempt>
1866 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1867 assert(!current->has_pending_exception(), "");
1868
1869 #ifdef ASSERT
1870 log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT "JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1871 log_frames(current);
1872 #endif
1873
1874 CONT_JFR_ONLY(EventContinuationFreeze event;)
1875
1876 ContinuationEntry* entry = current->last_continuation();
1877
1878 oop oopCont = entry->cont_oop(current);
1879 assert(oopCont == current->last_continuation()->cont_oop(current), "");
1880 assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1881
1882 verify_continuation(oopCont);
1883 ContinuationWrapper cont(current, oopCont);
1884 log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1885
1886 assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1887
1888 if (entry->is_pinned()) {
1889 log_develop_debug(continuations)("PINNED due to critical section");
1890 verify_continuation(cont.continuation());
1891 const freeze_result res = freeze_pinned_cs;
1892 if (!preempt) {
1893 JFR_ONLY(current->set_last_freeze_fail_result(res);)
1894 }
1895 log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1896 // Avoid Thread.yield() loops without safepoint polls.
1897 if (SafepointMechanism::should_process(current) && !preempt) {
1898 cont.done(); // allow safepoint
1899 ThreadInVMfromJava tivmfj(current);
1900 }
1901 return res;
1902 }
1903
1904 Freeze<ConfigT> freeze(current, cont, sp, preempt);
1905
1906 assert(!current->cont_fastpath() || freeze.check_valid_fast_path(), "");
1907 bool fast = UseContinuationFastPath && current->cont_fastpath();
1908 if (fast && freeze.size_if_fast_freeze_available() > 0) {
1909 freeze.freeze_fast_existing_chunk();
1910 CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1911 return !preempt ? freeze_epilog(cont) : preempt_epilog(cont, freeze_ok, freeze.last_frame());
1912 }
1913
1914 if (preempt) {
1915 JvmtiSampledObjectAllocEventCollector jsoaec(false);
1916 freeze.set_jvmti_event_collector(&jsoaec);
1917
1918 freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1919
1920 CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1921 preempt_epilog(cont, res, freeze.last_frame());
1922 return res;
1923 }
1924
1925 log_develop_trace(continuations)("chunk unavailable; transitioning to VM");
1926 assert(current == JavaThread::current(), "must be current thread");
1927 JRT_BLOCK
1928 // delays a possible JvmtiSampledObjectAllocEventCollector in alloc_chunk
1929 JvmtiSampledObjectAllocEventCollector jsoaec(false);
1930 freeze.set_jvmti_event_collector(&jsoaec);
1931
1932 freeze_result res = fast ? freeze.try_freeze_fast() : freeze.freeze_slow();
1933
1934 CONT_JFR_ONLY(freeze.jfr_info().post_jfr_event(&event, oopCont, current);)
1935 freeze_epilog(current, cont, res);
1936 cont.done(); // allow safepoint in the transition back to Java
1937 return res;
1938 JRT_BLOCK_END
1939 }
1940
1941 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint) {
1942 ContinuationEntry* entry = thread->last_continuation();
1943 if (entry == nullptr) {
1944 return freeze_ok;
1945 }
1946 if (entry->is_pinned()) {
1947 return freeze_pinned_cs;
1948 }
1949
1950 RegisterMap map(thread,
1951 RegisterMap::UpdateMap::include,
1952 RegisterMap::ProcessFrames::skip,
1953 RegisterMap::WalkContinuation::skip);
1954 map.set_include_argument_oops(false);
1955 frame f = thread->last_frame();
1956
1957 if (!safepoint) {
1958 f = f.sender(&map); // this is the yield frame
1959 } else { // safepoint yield
1960 #if (defined(X86) || defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
1961 f.set_fp(f.real_fp()); // Instead of this, maybe in ContinuationWrapper::set_last_frame always use the real_fp?
1962 #else
1963 Unimplemented();
1964 #endif
1965 if (!Interpreter::contains(f.pc())) {
1966 assert(ContinuationHelper::Frame::is_stub(f.cb()), "must be");
1967 assert(f.oop_map() != nullptr, "must be");
1968 f.oop_map()->update_register_map(&f, &map); // we have callee-save registers in this case
1969 }
1970 }
1971
1972 while (true) {
1973 if ((f.is_interpreted_frame() && f.interpreter_frame_method()->is_native()) || f.is_native_frame()) {
1974 return freeze_pinned_native;
1975 }
1976
1977 f = f.sender(&map);
1978 if (!Continuation::is_frame_in_continuation(entry, f)) {
1979 oop scope = jdk_internal_vm_Continuation::scope(entry->cont_oop(thread));
1980 if (scope == cont_scope) {
1981 break;
1982 }
1983 entry = entry->parent();
1984 if (entry == nullptr) {
1985 break;
1986 }
1987 if (entry->is_pinned()) {
1988 return freeze_pinned_cs;
1989 }
1990 }
1991 }
1992 return freeze_ok;
1993 }
1994
1995 /////////////// THAW ////
1996
1997 static int thaw_size(stackChunkOop chunk) {
1998 int size = chunk->max_thawing_size();
1999 size += frame::metadata_words; // For the top pc+fp in push_return_frame or top = stack_sp - frame::metadata_words in thaw_fast
2000 size += 2*frame::align_wiggle; // in case of alignments at the top and bottom
2001 return size;
2002 }
2003
2004 // make room on the stack for thaw
2005 // returns the size in bytes, or 0 on failure
2006 static inline int prepare_thaw_internal(JavaThread* thread, bool return_barrier) {
2007 log_develop_trace(continuations)("~~~~ prepare_thaw return_barrier: %d", return_barrier);
2008
2009 assert(thread == JavaThread::current(), "");
2010
2011 ContinuationEntry* ce = thread->last_continuation();
2012 assert(ce != nullptr, "");
2013 oop continuation = ce->cont_oop(thread);
2014 assert(continuation == get_continuation(thread), "");
2015 verify_continuation(continuation);
2016
2017 stackChunkOop chunk = jdk_internal_vm_Continuation::tail(continuation);
2018 assert(chunk != nullptr, "");
2019
2020 // The tail can be empty because it might still be available for another freeze.
2021 // However, here we want to thaw, so we get rid of it (it will be GCed).
2022 if (UNLIKELY(chunk->is_empty())) {
2023 chunk = chunk->parent();
2024 assert(chunk != nullptr, "");
2025 assert(!chunk->is_empty(), "");
2026 jdk_internal_vm_Continuation::set_tail(continuation, chunk);
2027 }
2028
2029 // Verification
2030 chunk->verify();
2031 assert(chunk->max_thawing_size() > 0, "chunk invariant violated; expected to not be empty");
2032
2033 // Only make space for the last chunk because we only thaw from the last chunk
2034 int size = thaw_size(chunk) << LogBytesPerWord;
2035
2036 const address bottom = (address)thread->last_continuation()->entry_sp();
2037 // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
2038 // for the Java frames in the check below.
2039 if (!stack_overflow_check(thread, size + 300, bottom)) {
2040 return 0;
2041 }
2042
2043 log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
2044 p2i(bottom), p2i(bottom - size), size);
2045 return size;
2046 }
2047
2048 class ThawBase : public StackObj {
2049 protected:
2050 JavaThread* _thread;
2051 ContinuationWrapper& _cont;
2052 CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
2053
2054 intptr_t* _fastpath;
2055 bool _barriers;
2056 bool _preempted_case;
2057 bool _process_args_at_top;
2058 intptr_t* _top_unextended_sp_before_thaw;
2059 int _align_size;
2060 DEBUG_ONLY(intptr_t* _top_stack_address);
2061
2062 // Only used for preemption on ObjectLocker
2063 ObjectMonitor* _init_lock;
2064
2065 StackChunkFrameStream<ChunkFrames::Mixed> _stream;
2066
2067 NOT_PRODUCT(int _frames;)
2068
2069 protected:
2070 ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
2071 _thread(thread), _cont(cont),
2072 _fastpath(nullptr) {
2073 DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
2074 assert (cont.tail() != nullptr, "no last chunk");
2075 DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
2076 }
2077
2078 void clear_chunk(stackChunkOop chunk);
2079 template<bool check_stub>
2080 int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
2081 void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
2082
2083 void thaw_lockstack(stackChunkOop chunk);
2084
2085 // fast path
2086 inline void prefetch_chunk_pd(void* start, int size_words);
2087 void patch_return(intptr_t* sp, bool is_last);
2088
2089 intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
2090 inline intptr_t* push_cleanup_continuation();
2091 inline intptr_t* push_preempt_adapter();
2092 intptr_t* redo_vmcall(JavaThread* current, frame& top);
2093 void throw_interrupted_exception(JavaThread* current, frame& top);
2094
2095 void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
2096 void finish_thaw(frame& f);
2097
2098 private:
2099 template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2100 void finalize_thaw(frame& entry, int argsize);
2101
2102 inline bool seen_by_gc();
2103
2104 inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2105 inline void after_thaw_java_frame(const frame& f, bool bottom);
2106 inline void patch(frame& f, const frame& caller, bool bottom);
2107 void clear_bitmap_bits(address start, address end);
2108
2109 NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
2110 void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2111 void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2112 void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2113
2114 void push_return_frame(const frame& f);
2115 inline frame new_entry_frame();
2116 template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
2117 inline void patch_pd(frame& f, const frame& sender);
2118 inline void patch_pd(frame& f, intptr_t* caller_sp);
2119 inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2120
2121 void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2122
2123 static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2124
2125 public:
2126 CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2127 };
2128
2129 template <typename ConfigT>
2130 class Thaw : public ThawBase {
2131 public:
2132 Thaw(JavaThread* thread, ContinuationWrapper& cont) : ThawBase(thread, cont) {}
2133
2134 inline bool can_thaw_fast(stackChunkOop chunk) {
2135 return !_barriers
2136 && _thread->cont_fastpath_thread_state()
2137 && !chunk->has_thaw_slowpath_condition()
2138 && !PreserveFramePointer;
2139 }
2140
2141 inline intptr_t* thaw(Continuation::thaw_kind kind);
2142 template<bool check_stub = false>
2143 NOINLINE intptr_t* thaw_fast(stackChunkOop chunk);
2144 NOINLINE intptr_t* thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind);
2145 inline void patch_caller_links(intptr_t* sp, intptr_t* bottom);
2146 };
2147
2148 template <typename ConfigT>
2149 inline intptr_t* Thaw<ConfigT>::thaw(Continuation::thaw_kind kind) {
2150 verify_continuation(_cont.continuation());
2151 assert(!jdk_internal_vm_Continuation::done(_cont.continuation()), "");
2152 assert(!_cont.is_empty(), "");
2153
2154 stackChunkOop chunk = _cont.tail();
2155 assert(chunk != nullptr, "guaranteed by prepare_thaw");
2156 assert(!chunk->is_empty(), "guaranteed by prepare_thaw");
2157
2158 _barriers = chunk->requires_barriers();
2159 return (LIKELY(can_thaw_fast(chunk))) ? thaw_fast(chunk)
2160 : thaw_slow(chunk, kind);
2161 }
2162
2163 class ReconstructedStack : public StackObj {
2164 intptr_t* _base; // _cont.entrySP(); // top of the entry frame
2165 int _thaw_size;
2166 int _argsize;
2167 public:
2168 ReconstructedStack(intptr_t* base, int thaw_size, int argsize)
2169 : _base(base), _thaw_size(thaw_size - (argsize == 0 ? frame::metadata_words_at_top : 0)), _argsize(argsize) {
2170 // The only possible source of misalignment is stack-passed arguments b/c compiled frames are 16-byte aligned.
2171 assert(argsize != 0 || (_base - _thaw_size) == ContinuationHelper::frame_align_pointer(_base - _thaw_size), "");
2172 // We're at most one alignment word away from entrySP
2173 assert(_base - 1 <= top() + total_size() + frame::metadata_words_at_bottom, "missed entry frame");
2174 }
2175
2176 int entry_frame_extension() const { return _argsize + (_argsize > 0 ? frame::metadata_words_at_top : 0); }
2177
2178 // top and bottom stack pointers
2179 intptr_t* sp() const { return ContinuationHelper::frame_align_pointer(_base - _thaw_size); }
2180 intptr_t* bottom_sp() const { return ContinuationHelper::frame_align_pointer(_base - entry_frame_extension()); }
2181
2182 // several operations operate on the totality of the stack being reconstructed,
2183 // including the metadata words
2184 intptr_t* top() const { return sp() - frame::metadata_words_at_bottom; }
2185 int total_size() const { return _thaw_size + frame::metadata_words_at_bottom; }
2186 };
2187
2188 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
2189 chunk->set_sp(chunk->bottom());
2190 chunk->set_max_thawing_size(0);
2191 }
2192
2193 template<bool check_stub>
2194 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2195 bool empty = false;
2196 StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2197 DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2198 assert(chunk_sp == f.sp(), "");
2199 assert(chunk_sp == f.unextended_sp(), "");
2200
2201 int frame_size = f.cb()->frame_size();
2202 argsize = f.stack_argsize();
2203
2204 assert(!f.is_stub() || check_stub, "");
2205 if (check_stub && f.is_stub()) {
2206 // If we don't thaw the top compiled frame too, after restoring the saved
2207 // registers back in Java, we would hit the return barrier to thaw one more
2208 // frame effectively overwriting the restored registers during that call.
2209 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2210 assert(!f.is_done(), "");
2211
2212 f.get_cb();
2213 assert(f.is_compiled(), "");
2214 frame_size += f.cb()->frame_size();
2215 argsize = f.stack_argsize();
2216
2217 if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2218 // The caller of the runtime stub when the continuation is preempted is not at a
2219 // Java call instruction, and so cannot rely on nmethod patching for deopt.
2220 log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2221 f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2222 }
2223 }
2224
2225 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2226 empty = f.is_done();
2227 assert(!empty || argsize == chunk->argsize(), "");
2228
2229 if (empty) {
2230 clear_chunk(chunk);
2231 } else {
2232 chunk->set_sp(chunk->sp() + frame_size);
2233 chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2234 // We set chunk->pc to the return pc into the next frame
2235 chunk->set_pc(f.pc());
2236 #ifdef ASSERT
2237 {
2238 intptr_t* retaddr_slot = (chunk_sp
2239 + frame_size
2240 - frame::sender_sp_ret_address_offset());
2241 assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2242 "unexpected pc");
2243 }
2244 #endif
2245 }
2246 assert(empty == chunk->is_empty(), "");
2247 // returns the size required to store the frame on stack, and because it is a
2248 // compiled frame, it must include a copy of the arguments passed by the caller
2249 return frame_size + argsize + frame::metadata_words_at_top;
2250 }
2251
2252 void ThawBase::thaw_lockstack(stackChunkOop chunk) {
2253 int lockStackSize = chunk->lockstack_size();
2254 assert(lockStackSize > 0 && lockStackSize <= LockStack::CAPACITY, "");
2255
2256 oop tmp_lockstack[LockStack::CAPACITY];
2257 chunk->transfer_lockstack(tmp_lockstack, _barriers);
2258 _thread->lock_stack().move_from_address(tmp_lockstack, lockStackSize);
2259
2260 chunk->set_lockstack_size(0);
2261 chunk->set_has_lockstack(false);
2262 }
2263
2264 void ThawBase::copy_from_chunk(intptr_t* from, intptr_t* to, int size) {
2265 assert(to >= _top_stack_address, "overwrote past thawing space"
2266 " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(to), p2i(_top_stack_address));
2267 assert(to + size <= _cont.entrySP(), "overwrote past thawing space");
2268 _cont.tail()->copy_from_chunk_to_stack(from, to, size);
2269 CONT_JFR_ONLY(_jfr_info.record_size_copied(size);)
2270 }
2271
2272 void ThawBase::patch_return(intptr_t* sp, bool is_last) {
2273 log_develop_trace(continuations)("thaw_fast patching -- sp: " INTPTR_FORMAT, p2i(sp));
2274
2275 address pc = !is_last ? StubRoutines::cont_returnBarrier() : _cont.entryPC();
2276 ContinuationHelper::patch_return_address_at(
2277 sp - frame::sender_sp_ret_address_offset(),
2278 pc);
2279 }
2280
2281 template <typename ConfigT>
2282 template<bool check_stub>
2283 NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) {
2284 assert(chunk == _cont.tail(), "");
2285 assert(!chunk->has_mixed_frames(), "");
2286 assert(!chunk->requires_barriers(), "");
2287 assert(!chunk->has_bitmap(), "");
2288 assert(!_thread->is_interp_only_mode(), "");
2289
2290 LogTarget(Trace, continuations) lt;
2291 if (lt.develop_is_enabled()) {
2292 LogStream ls(lt);
2293 ls.print_cr("thaw_fast");
2294 chunk->print_on(true, &ls);
2295 }
2296
2297 // Below this heuristic, we thaw the whole chunk, above it we thaw just one frame.
2298 static const int threshold = 500; // words
2299
2300 const int full_chunk_size = chunk->stack_size() - chunk->sp(); // this initial size could be reduced if it's a partial thaw
2301 int argsize, thaw_size;
2302
2303 intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();
2304
2305 bool partial, empty;
2306 if (LIKELY(!TEST_THAW_ONE_CHUNK_FRAME && (full_chunk_size < threshold))) {
2307 prefetch_chunk_pd(chunk->start_address(), full_chunk_size); // prefetch anticipating memcpy starting at highest address
2308
2309 partial = false;
2310 argsize = chunk->argsize(); // must be called *before* clearing the chunk
2311 clear_chunk(chunk);
2312 thaw_size = full_chunk_size;
2313 empty = true;
2314 } else { // thaw a single frame
2315 partial = true;
2316 thaw_size = remove_top_compiled_frame_from_chunk<check_stub>(chunk, argsize);
2317 empty = chunk->is_empty();
2318 }
2319
2320 // Are we thawing the last frame(s) in the continuation
2321 const bool is_last = empty && chunk->parent() == nullptr;
2322 assert(!is_last || argsize == 0, "");
2323
2324 log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT,
2325 partial, is_last, empty, thaw_size, argsize, p2i(_cont.entrySP()));
2326
2327 ReconstructedStack rs(_cont.entrySP(), thaw_size, argsize);
2328
2329 // also copy metadata words at frame bottom
2330 copy_from_chunk(chunk_sp - frame::metadata_words_at_bottom, rs.top(), rs.total_size());
2331
2332 // update the ContinuationEntry
2333 _cont.set_argsize(argsize);
2334 log_develop_trace(continuations)("setting entry argsize: %d", _cont.argsize());
2335 assert(rs.bottom_sp() == _cont.entry()->bottom_sender_sp(), "");
2336
2337 // install the return barrier if not last frame, or the entry's pc if last
2338 patch_return(rs.bottom_sp(), is_last);
2339
2340 // insert the back links from callee to caller frames
2341 patch_caller_links(rs.top(), rs.top() + rs.total_size());
2342
2343 assert(is_last == _cont.is_empty(), "");
2344 assert(_cont.chunk_invariant(), "");
2345
2346 #if CONT_JFR
2347 EventContinuationThawFast e;
2348 if (e.should_commit()) {
2349 e.set_id(cast_from_oop<u8>(chunk));
2350 e.set_size(thaw_size << LogBytesPerWord);
2351 e.set_full(!partial);
2352 e.commit();
2353 }
2354 #endif
2355
2356 #ifdef ASSERT
2357 if (LoomDeoptAfterThaw) {
2358 frame top(rs.sp());
2359 AnchorMark am(_thread, top);
2360 log_frames(_thread);
2361 do_deopt_after_thaw(_thread);
2362 }
2363 #endif
2364
2365 return rs.sp();
2366 }
2367
2368 inline bool ThawBase::seen_by_gc() {
2369 return _barriers || _cont.tail()->is_gc_mode();
2370 }
2371
2372 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2373 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2374 if (UseZGC || UseShenandoahGC) {
2375 chunk->relativize_derived_pointers_concurrently();
2376 }
2377 #endif
2378 }
2379
2380 template <typename ConfigT>
2381 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2382 Continuation::preempt_kind preempt_kind;
2383 bool retry_fast_path = false;
2384
2385 _process_args_at_top = false;
2386 _preempted_case = chunk->preempted();
2387 if (_preempted_case) {
2388 ObjectMonitor* mon = nullptr;
2389 ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2390 if (waiter != nullptr) {
2391 // Mounted again after preemption. Resume the pending monitor operation,
2392 // which will be either a monitorenter or Object.wait() call.
2393 mon = waiter->monitor();
2394 preempt_kind = waiter->is_wait() ? Continuation::object_wait : Continuation::monitorenter;
2395
2396 bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2397 assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2398 if (!mon_acquired) {
2399 // Failed to acquire monitor. Return to enterSpecial to unmount again.
2400 log_develop_trace(continuations, preempt)("Failed to acquire monitor, unmounting again");
2401 return push_cleanup_continuation();
2402 }
2403 chunk = _cont.tail(); // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2404 JVMTI_ONLY(assert(_thread->contended_entered_monitor() == nullptr || _thread->contended_entered_monitor() == mon, ""));
2405 } else {
2406 // Preemption cancelled on moniterenter or ObjectLocker case. We
2407 // actually acquired the monitor after freezing all frames so no
2408 // need to call resume_operation. If this is the ObjectLocker case
2409 // we released the monitor already at ~ObjectLocker, so _init_lock
2410 // will be set to nullptr below since there is no monitor to release.
2411 preempt_kind = Continuation::monitorenter;
2412 }
2413
2414 // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2415 relativize_chunk_concurrently(chunk);
2416
2417 if (chunk->at_klass_init()) {
2418 preempt_kind = Continuation::object_locker;
2419 chunk->set_at_klass_init(false);
2420 _process_args_at_top = chunk->has_args_at_top();
2421 if (_process_args_at_top) {
2422 // Only needed for the top frame which will be thawed.
2423 chunk->set_has_args_at_top(false);
2424 }
2425 assert(waiter == nullptr || mon != nullptr, "should have a monitor");
2426 _init_lock = mon; // remember monitor since we will need it on handle_preempted_continuation()
2427 }
2428 chunk->set_preempted(false);
2429 retry_fast_path = true;
2430 } else {
2431 relativize_chunk_concurrently(chunk);
2432 }
2433
2434 // On first thaw after freeze restore oops to the lockstack if any.
2435 assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2436 if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2437 thaw_lockstack(chunk);
2438 retry_fast_path = true;
2439 }
2440
2441 // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2442 // and FLAG_PREEMPTED flags from the stackChunk.
2443 if (retry_fast_path && can_thaw_fast(chunk)) {
2444 intptr_t* sp = thaw_fast<true>(chunk);
2445 if (_preempted_case) {
2446 return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2447 }
2448 return sp;
2449 }
2450
2451 LogTarget(Trace, continuations) lt;
2452 if (lt.develop_is_enabled()) {
2453 LogStream ls(lt);
2454 ls.print_cr("thaw slow return_barrier: %d " INTPTR_FORMAT, kind, p2i(chunk));
2455 chunk->print_on(true, &ls);
2456 }
2457
2458 #if CONT_JFR
2459 EventContinuationThawSlow e;
2460 if (e.should_commit()) {
2461 e.set_id(cast_from_oop<u8>(_cont.continuation()));
2462 e.commit();
2463 }
2464 #endif
2465
2466 DEBUG_ONLY(_frames = 0;)
2467 _align_size = 0;
2468 int num_frames = kind == Continuation::thaw_top ? 2 : 1;
2469
2470 _stream = StackChunkFrameStream<ChunkFrames::Mixed>(chunk);
2471 _top_unextended_sp_before_thaw = _stream.unextended_sp();
2472
2473 frame heap_frame = _stream.to_frame();
2474 if (lt.develop_is_enabled()) {
2475 LogStream ls(lt);
2476 ls.print_cr("top hframe before (thaw):");
2477 assert(heap_frame.is_heap_frame(), "should have created a relative frame");
2478 heap_frame.print_value_on(&ls);
2479 }
2480
2481 frame caller; // the thawed caller on the stack
2482 recurse_thaw(heap_frame, caller, num_frames, _preempted_case);
2483 finish_thaw(caller); // caller is now the topmost thawed frame
2484 _cont.write();
2485
2486 assert(_cont.chunk_invariant(), "");
2487
2488 JVMTI_ONLY(if (!_cont.entry()->is_virtual_thread()) invalidate_jvmti_stack(_thread));
2489
2490 _thread->set_cont_fastpath(_fastpath);
2491
2492 intptr_t* sp = caller.sp();
2493
2494 if (_preempted_case) {
2495 return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2496 }
2497 return sp;
2498 }
2499
2500 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2501 log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2502 assert(!_cont.is_empty(), "no more frames");
2503 assert(num_frames > 0, "");
2504 assert(!heap_frame.is_empty(), "");
2505
2506 if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2507 heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2508 } else if (!heap_frame.is_interpreted_frame()) {
2509 recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2510 } else {
2511 recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
2512 }
2513 }
2514
2515 template<typename FKind>
2516 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2517 assert(num_frames > 0, "");
2518
2519 DEBUG_ONLY(_frames++;)
2520
2521 int argsize = _stream.stack_argsize();
2522
2523 _stream.next(SmallRegisterMap::instance_no_args());
2524 assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2525
2526 // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2527 // as it makes detecting that situation and adjusting unextended_sp tricky
2528 if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2529 log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2530 num_frames++;
2531 }
2532
2533 if (num_frames == 1 || _stream.is_done()) { // end recursion
2534 finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2535 return true; // bottom
2536 } else { // recurse
2537 recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2538 return false;
2539 }
2540 }
2541
2542 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2543 stackChunkOop chunk = _cont.tail();
2544
2545 if (!_stream.is_done()) {
2546 assert(_stream.sp() >= chunk->sp_address(), "");
2547 chunk->set_sp(chunk->to_offset(_stream.sp()));
2548 chunk->set_pc(_stream.pc());
2549 } else {
2550 chunk->set_sp(chunk->bottom());
2551 chunk->set_pc(nullptr);
2552 }
2553 assert(_stream.is_done() == chunk->is_empty(), "");
2554
2555 int total_thawed = pointer_delta_as_int(_stream.unextended_sp(), _top_unextended_sp_before_thaw);
2556 chunk->set_max_thawing_size(chunk->max_thawing_size() - total_thawed);
2557
2558 _cont.set_argsize(argsize);
2559 entry = new_entry_frame();
2560
2561 assert(entry.sp() == _cont.entrySP(), "");
2562 assert(Continuation::is_continuation_enterSpecial(entry), "");
2563 assert(_cont.is_entry_frame(entry), "");
2564 }
2565
2566 inline void ThawBase::before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) {
2567 LogTarget(Trace, continuations) lt;
2568 if (lt.develop_is_enabled()) {
2569 LogStream ls(lt);
2570 ls.print_cr("======== THAWING FRAME: %d", num_frame);
2571 assert(hf.is_heap_frame(), "should be");
2572 hf.print_value_on(&ls);
2573 }
2574 assert(bottom == _cont.is_entry_frame(caller), "bottom: %d is_entry_frame: %d", bottom, _cont.is_entry_frame(hf));
2575 }
2576
2577 inline void ThawBase::after_thaw_java_frame(const frame& f, bool bottom) {
2578 #ifdef ASSERT
2579 LogTarget(Trace, continuations) lt;
2580 if (lt.develop_is_enabled()) {
2581 LogStream ls(lt);
2582 ls.print_cr("thawed frame:");
2583 print_frame_layout(f, false, &ls); // f.print_on(&ls);
2584 }
2585 #endif
2586 }
2587
2588 inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
2589 assert(!bottom || caller.fp() == _cont.entryFP(), "");
2590 if (bottom) {
2591 ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
2592 : StubRoutines::cont_returnBarrier());
2593 } else {
2594 // caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
2595 // If the caller is not deoptimized, pc is unchanged.
2596 ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
2597 }
2598
2599 patch_pd(f, caller);
2600
2601 if (f.is_interpreted_frame()) {
2602 ContinuationHelper::InterpretedFrame::patch_sender_sp(f, caller);
2603 }
2604
2605 assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
2606 assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
2607 }
2608
2609 void ThawBase::clear_bitmap_bits(address start, address end) {
2610 assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2611 assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2612
2613 // we need to clear the bits that correspond to arguments as they reside in the caller frame
2614 // or they will keep objects that are otherwise unreachable alive.
2615
2616 // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2617 // `end` could be at an odd number of stack slots from `start`, i.e might not be oop aligned.
2618 // If that's the case the bit range corresponding to the last stack slot should not have bits set
2619 // anyways and we assert that before returning.
2620 address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2621 log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2622 stackChunkOop chunk = _cont.tail();
2623 chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2624 assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2625 }
2626
2627 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2628 frame top(sp);
2629 assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2630 DEBUG_ONLY(verify_frame_kind(top, preempt_kind);)
2631 NOT_PRODUCT(int64_t tid = _thread->monitor_owner_id();)
2632
2633 // Finish the VTMS transition.
2634 assert(_thread->is_in_vthread_transition(), "must be");
2635 bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2636 if (is_vthread) {
2637 #if INCLUDE_JVMTI
2638 if (MountUnmountDisabler::notify_jvmti_events()) {
2639 jvmti_mount_end(_thread, _cont, top, preempt_kind);
2640 } else
2641 #endif
2642 { // Faster version of MountUnmountDisabler::end_transition() to avoid
2643 // unnecessary extra instructions from jvmti_mount_end().
2644 java_lang_Thread::set_is_in_vthread_transition(_thread->vthread(), false);
2645 _thread->set_is_in_vthread_transition(false);
2646 }
2647 }
2648
2649 if (fast_case) {
2650 // If we thawed in the slow path the runtime stub/native wrapper frame already
2651 // has the correct fp (see ThawBase::new_stack_frame). On the fast path though,
2652 // we copied the fp patched during freeze, which will now have to be fixed.
2653 assert(top.is_runtime_frame() || top.is_native_frame(), "");
2654 int fsize = top.cb()->frame_size();
2655 patch_pd(top, sp + fsize);
2656 }
2657
2658 if (preempt_kind == Continuation::object_wait) {
2659 // Check now if we need to throw IE exception.
2660 bool throw_ie = _thread->pending_interrupted_exception();
2661 if (throw_ie) {
2662 throw_interrupted_exception(_thread, top);
2663 _thread->set_pending_interrupted_exception(false);
2664 }
2665 log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT" after preemption on Object.wait%s", tid, throw_ie ? "(throwing IE)" : "");
2666 } else if (preempt_kind == Continuation::monitorenter) {
2667 if (top.is_runtime_frame()) {
2668 // The continuation might now run on a different platform thread than the previous time so
2669 // we need to adjust the current thread saved in the stub frame before restoring registers.
2670 JavaThread** thread_addr = frame::saved_thread_address(top);
2671 if (thread_addr != nullptr) *thread_addr = _thread;
2672 }
2673 log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on monitorenter", tid);
2674 } else {
2675 // We need to redo the original call into the VM. First though, we need
2676 // to exit the monitor we just acquired (except on preemption cancelled
2677 // case where it was already released).
2678 assert(preempt_kind == Continuation::object_locker, "");
2679 if (_init_lock != nullptr) _init_lock->exit(_thread);
2680 sp = redo_vmcall(_thread, top);
2681 }
2682 return sp;
2683 }
2684
2685 intptr_t* ThawBase::redo_vmcall(JavaThread* current, frame& top) {
2686 assert(!current->preempting(), "");
2687 NOT_PRODUCT(int64_t tid = current->monitor_owner_id();)
2688 intptr_t* sp = top.sp();
2689
2690 {
2691 HandleMarkCleaner hmc(current); // Cleanup all handles (including so._conth) before returning to Java.
2692 ContinuationWrapper::SafepointOp so(current, _cont);
2693 AnchorMark am(current, top); // Set the anchor so that the stack is walkable.
2694
2695 Method* m = top.interpreter_frame_method();
2696 Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
2697 Bytecodes::Code code = current_bytecode.code();
2698 log_develop_trace(continuations, preempt)("Redoing InterpreterRuntime::%s for " INT64_FORMAT, code == Bytecodes::Code::_new ? "_new" : "resolve_from_cache", tid);
2699
2700 // These InterpreterRuntime entry points use JRT_ENTRY which uses a HandleMarkCleaner.
2701 // Create a HandeMark to avoid destroying so._conth.
2702 HandleMark hm(current);
2703 DEBUG_ONLY(JavaThread::AtRedoVMCall apvmc(current);)
2704 if (code == Bytecodes::Code::_new) {
2705 InterpreterRuntime::_new(current, m->constants(), current_bytecode.get_index_u2(code));
2706 } else {
2707 InterpreterRuntime::resolve_from_cache(current, code);
2708 }
2709 }
2710
2711 if (current->preempting()) {
2712 // Preempted again so we just arrange to return to preempt stub to unmount.
2713 sp = push_preempt_adapter();
2714 current->set_preempt_alternate_return(nullptr);
2715 bool cancelled = current->preemption_cancelled();
2716 if (cancelled) {
2717 // Since preemption was cancelled, the thread will call thaw again from the preempt
2718 // stub. These retries could happen several times due to contention on the init_lock,
2719 // so just let the vthread umount to give a chance for other vthreads to run.
2720 current->set_preemption_cancelled(false);
2721 oop vthread = current->vthread();
2722 assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2723 java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::YIELDING);
2724 #if INCLUDE_JVMTI
2725 if (current->contended_entered_monitor() != nullptr) {
2726 current->set_contended_entered_monitor(nullptr);
2727 }
2728 #endif
2729 }
2730 log_develop_trace(continuations, preempt)("Preempted " INT64_FORMAT " again%s", tid, cancelled ? "(preemption cancelled, setting state to YIELDING)" : "");
2731 } else {
2732 log_develop_trace(continuations, preempt)("Call succesful, resuming " INT64_FORMAT, tid);
2733 }
2734 return sp;
2735 }
2736
2737 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2738 HandleMarkCleaner hm(current); // Cleanup all handles (including so._conth) before returning to Java.
2739 ContinuationWrapper::SafepointOp so(current, _cont);
2740 AnchorMark am(current, top); // Set the anchor so that the stack is walkable.
2741 JRT_BLOCK
2742 THROW(vmSymbols::java_lang_InterruptedException());
2743 JRT_BLOCK_END
2744 }
2745
2746 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top) {
2747 assert(hf.is_interpreted_frame(), "");
2748
2749 if (UNLIKELY(seen_by_gc())) {
2750 if (is_top && _process_args_at_top) {
2751 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_with_args());
2752 } else {
2753 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2754 }
2755 }
2756
2757 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2758
2759 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2760
2761 _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment om AArch64
2762
2763 frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2764
2765 intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2766 intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2767 intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2768 intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2769
2770 assert(hf.is_heap_frame(), "should be");
2771 assert(!f.is_heap_frame(), "should not be");
2772
2773 const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2774 assert((stack_frame_bottom == stack_frame_top + fsize), "");
2775
2776 // Some architectures (like AArch64/PPC64/RISC-V) add padding between the locals and the fixed_frame to keep the fp 16-byte-aligned.
2777 // On those architectures we freeze the padding in order to keep the same fp-relative offsets in the fixed_frame.
2778 copy_from_chunk(heap_frame_top, stack_frame_top, fsize);
2779
2780 // Make sure the relativized locals is already set.
2781 assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2782
2783 derelativize_interpreted_frame_metadata(hf, f);
2784 patch(f, caller, is_bottom_frame);
2785
2786 assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2787 assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2788
2789 CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2790
2791 maybe_set_fastpath(f.sp());
2792
2793 Method* m = hf.interpreter_frame_method();
2794 assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2795 const int locals = m->max_locals();
2796
2797 if (!is_bottom_frame) {
2798 // can only fix caller once this frame is thawed (due to callee saved regs)
2799 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2800 } else if (_cont.tail()->has_bitmap() && locals > 0) {
2801 assert(hf.is_heap_frame(), "should be");
2802 address start = (address)(heap_frame_bottom - locals);
2803 address end = (address)heap_frame_bottom;
2804 clear_bitmap_bits(start, end);
2805 }
2806
2807 DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2808 caller = f;
2809 }
2810
2811 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2812 assert(hf.is_compiled_frame(), "");
2813 assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2814
2815 if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2816 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2817 }
2818
2819 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2820
2821 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2822
2823 assert(caller.sp() == caller.unextended_sp(), "");
2824
2825 if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2826 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2827 }
2828
2829 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2830 // yet laid out in the stack, and so the original_pc is not stored in it.
2831 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2832 frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2833 intptr_t* const stack_frame_top = f.sp();
2834 intptr_t* const heap_frame_top = hf.unextended_sp();
2835
2836 const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2837 int fsize = ContinuationHelper::CompiledFrame::size(hf) + added_argsize;
2838 assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2839
2840 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2841 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
2842 // copy metadata, except the metadata at the top of the (unextended) entry frame
2843 int sz = fsize + frame::metadata_words_at_bottom + (is_bottom_frame && added_argsize == 0 ? 0 : frame::metadata_words_at_top);
2844
2845 // If we're the bottom-most thawed frame, we're writing to within one word from entrySP
2846 // (we might have one padding word for alignment)
2847 assert(!is_bottom_frame || (_cont.entrySP() - 1 <= to + sz && to + sz <= _cont.entrySP()), "");
2848 assert(!is_bottom_frame || hf.compiled_frame_stack_argsize() != 0 || (to + sz && to + sz == _cont.entrySP()), "");
2849
2850 copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2851
2852 patch(f, caller, is_bottom_frame);
2853
2854 // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2855 assert(!f.is_deoptimized_frame(), "");
2856 if (hf.is_deoptimized_frame()) {
2857 maybe_set_fastpath(f.sp());
2858 } else if (_thread->is_interp_only_mode()
2859 || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2860 // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2861 // cannot rely on nmethod patching for deopt.
2862 assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2863
2864 log_develop_trace(continuations)("Deoptimizing thawed frame");
2865 DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2866
2867 f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2868 assert(f.is_deoptimized_frame(), "");
2869 assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2870 maybe_set_fastpath(f.sp());
2871 }
2872
2873 if (!is_bottom_frame) {
2874 // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2875 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2876 } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2877 address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2878 int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2879 int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2880 clear_bitmap_bits(start, start + argsize_in_bytes);
2881 }
2882
2883 DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2884 caller = f;
2885 }
2886
2887 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2888 DEBUG_ONLY(_frames++;)
2889
2890 if (UNLIKELY(seen_by_gc())) {
2891 // Process the stub's caller here since we might need the full map.
2892 RegisterMap map(nullptr,
2893 RegisterMap::UpdateMap::include,
2894 RegisterMap::ProcessFrames::skip,
2895 RegisterMap::WalkContinuation::skip);
2896 map.set_include_argument_oops(false);
2897 _stream.next(&map);
2898 assert(!_stream.is_done(), "");
2899 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2900 } else {
2901 _stream.next(SmallRegisterMap::instance_no_args());
2902 assert(!_stream.is_done(), "");
2903 }
2904
2905 recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2906
2907 assert(caller.is_compiled_frame(), "");
2908 assert(caller.sp() == caller.unextended_sp(), "");
2909
2910 DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2911
2912 frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2913 intptr_t* stack_frame_top = f.sp();
2914 intptr_t* heap_frame_top = hf.sp();
2915 int fsize = ContinuationHelper::StubFrame::size(hf);
2916
2917 copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2918 fsize + frame::metadata_words);
2919
2920 patch(f, caller, false /*is_bottom_frame*/);
2921
2922 // can only fix caller once this frame is thawed (due to callee saved regs)
2923 RegisterMap map(nullptr,
2924 RegisterMap::UpdateMap::include,
2925 RegisterMap::ProcessFrames::skip,
2926 RegisterMap::WalkContinuation::skip);
2927 map.set_include_argument_oops(false);
2928 f.oop_map()->update_register_map(&f, &map);
2929 ContinuationHelper::update_register_map_with_callee(caller, &map);
2930 _cont.tail()->fix_thawed_frame(caller, &map);
2931
2932 DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2933 caller = f;
2934 }
2935
2936 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2937 assert(hf.is_native_frame(), "");
2938 assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2939
2940 if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2941 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2942 }
2943
2944 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2945 assert(!is_bottom_frame, "");
2946
2947 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2948
2949 assert(caller.sp() == caller.unextended_sp(), "");
2950
2951 if (caller.is_interpreted_frame()) {
2952 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame
2953 }
2954
2955 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2956 // yet laid out in the stack, and so the original_pc is not stored in it.
2957 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2958 frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2959 intptr_t* const stack_frame_top = f.sp();
2960 intptr_t* const heap_frame_top = hf.unextended_sp();
2961
2962 int fsize = ContinuationHelper::NativeFrame::size(hf);
2963 assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2964
2965 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2966 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
2967 int sz = fsize + frame::metadata_words_at_bottom;
2968
2969 copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2970
2971 patch(f, caller, false /* bottom */);
2972
2973 // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2974 assert(!f.is_deoptimized_frame(), "");
2975 assert(!hf.is_deoptimized_frame(), "");
2976 assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2977
2978 // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2979 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2980
2981 DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2982 caller = f;
2983 }
2984
2985 void ThawBase::finish_thaw(frame& f) {
2986 stackChunkOop chunk = _cont.tail();
2987
2988 if (chunk->is_empty()) {
2989 // Only remove chunk from list if it can't be reused for another freeze
2990 if (seen_by_gc()) {
2991 _cont.set_tail(chunk->parent());
2992 } else {
2993 chunk->set_has_mixed_frames(false);
2994 }
2995 chunk->set_max_thawing_size(0);
2996 } else {
2997 chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2998 }
2999 assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
3000
3001 if (!is_aligned(f.sp(), frame::frame_alignment)) {
3002 assert(f.is_interpreted_frame(), "");
3003 f.set_sp(align_down(f.sp(), frame::frame_alignment));
3004 }
3005 push_return_frame(f);
3006 // can only fix caller after push_return_frame (due to callee saved regs)
3007 if (_process_args_at_top) {
3008 chunk->fix_thawed_frame(f, SmallRegisterMap::instance_with_args());
3009 } else {
3010 chunk->fix_thawed_frame(f, SmallRegisterMap::instance_no_args());
3011 }
3012
3013 assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
3014
3015 log_develop_trace(continuations)("thawed %d frames", _frames);
3016
3017 LogTarget(Trace, continuations) lt;
3018 if (lt.develop_is_enabled()) {
3019 LogStream ls(lt);
3020 ls.print_cr("top hframe after (thaw):");
3021 _cont.last_frame().print_value_on(&ls);
3022 }
3023 }
3024
3025 void ThawBase::push_return_frame(const frame& f) { // see generate_cont_thaw
3026 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
3027 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
3028
3029 LogTarget(Trace, continuations) lt;
3030 if (lt.develop_is_enabled()) {
3031 LogStream ls(lt);
3032 ls.print_cr("push_return_frame");
3033 f.print_value_on(&ls);
3034 }
3035
3036 assert(f.sp() - frame::metadata_words_at_bottom >= _top_stack_address, "overwrote past thawing space"
3037 " to: " INTPTR_FORMAT " top_address: " INTPTR_FORMAT, p2i(f.sp() - frame::metadata_words), p2i(_top_stack_address));
3038 ContinuationHelper::Frame::patch_pc(f, f.raw_pc()); // in case we want to deopt the frame in a full transition, this is checked.
3039 ContinuationHelper::push_pd(f);
3040
3041 assert(ContinuationHelper::Frame::assert_frame_laid_out(f), "");
3042 }
3043
3044 // returns new top sp
3045 // called after preparations (stack overflow check and making room)
3046 template<typename ConfigT>
3047 static inline intptr_t* thaw_internal(JavaThread* thread, const Continuation::thaw_kind kind) {
3048 assert(thread == JavaThread::current(), "Must be current thread");
3049
3050 CONT_JFR_ONLY(EventContinuationThaw event;)
3051
3052 log_develop_trace(continuations)("~~~~ thaw kind: %d sp: " INTPTR_FORMAT, kind, p2i(thread->last_continuation()->entry_sp()));
3053
3054 ContinuationEntry* entry = thread->last_continuation();
3055 assert(entry != nullptr, "");
3056 oop oopCont = entry->cont_oop(thread);
3057
3058 assert(!jdk_internal_vm_Continuation::done(oopCont), "");
3059 assert(oopCont == get_continuation(thread), "");
3060 verify_continuation(oopCont);
3061
3062 assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
3063
3064 ContinuationWrapper cont(thread, oopCont);
3065 log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
3066
3067 #ifdef ASSERT
3068 set_anchor_to_entry(thread, cont.entry());
3069 log_frames(thread);
3070 clear_anchor(thread);
3071 #endif
3072
3073 Thaw<ConfigT> thw(thread, cont);
3074 intptr_t* const sp = thw.thaw(kind);
3075 assert(is_aligned(sp, frame::frame_alignment), "");
3076 DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp);)
3077
3078 CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
3079
3080 verify_continuation(cont.continuation());
3081 log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
3082
3083 return sp;
3084 }
3085
3086 #ifdef ASSERT
3087 static void do_deopt_after_thaw(JavaThread* thread) {
3088 int i = 0;
3089 StackFrameStream fst(thread, true, false);
3090 fst.register_map()->set_include_argument_oops(false);
3091 ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3092 for (; !fst.is_done(); fst.next()) {
3093 if (fst.current()->cb()->is_nmethod()) {
3094 nmethod* nm = fst.current()->cb()->as_nmethod();
3095 if (!nm->method()->is_continuation_native_intrinsic()) {
3096 nm->make_deoptimized();
3097 }
3098 }
3099 }
3100 }
3101
3102 class ThawVerifyOopsClosure: public OopClosure {
3103 intptr_t* _p;
3104 outputStream* _st;
3105 bool is_good_oop(oop o) {
3106 return dbg_is_safe(o, -1) && dbg_is_safe(o->klass(), -1) && oopDesc::is_oop(o) && o->klass()->is_klass();
3107 }
3108 public:
3109 ThawVerifyOopsClosure(outputStream* st) : _p(nullptr), _st(st) {}
3110 intptr_t* p() { return _p; }
3111 void reset() { _p = nullptr; }
3112
3113 virtual void do_oop(oop* p) {
3114 oop o = *p;
3115 if (o == nullptr || is_good_oop(o)) {
3116 return;
3117 }
3118 _p = (intptr_t*)p;
3119 _st->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(*p), p2i(p));
3120 }
3121 virtual void do_oop(narrowOop* p) {
3122 oop o = RawAccess<>::oop_load(p);
3123 if (o == nullptr || is_good_oop(o)) {
3124 return;
3125 }
3126 _p = (intptr_t*)p;
3127 _st->print_cr("*** (narrow) non-oop %x found at " PTR_FORMAT, (int)(*p), p2i(p));
3128 }
3129 };
3130
3131 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st) {
3132 assert(thread->has_last_Java_frame(), "");
3133
3134 ResourceMark rm;
3135 ThawVerifyOopsClosure cl(st);
3136 NMethodToOopClosure cf(&cl, false);
3137
3138 StackFrameStream fst(thread, true, false);
3139 fst.register_map()->set_include_argument_oops(false);
3140 ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3141 for (; !fst.is_done() && !Continuation::is_continuation_enterSpecial(*fst.current()); fst.next()) {
3142 if (fst.current()->cb()->is_nmethod() && fst.current()->cb()->as_nmethod()->is_marked_for_deoptimization()) {
3143 st->print_cr(">>> do_verify_after_thaw deopt");
3144 fst.current()->deoptimize(nullptr);
3145 fst.current()->print_on(st);
3146 }
3147
3148 fst.current()->oops_do(&cl, &cf, fst.register_map());
3149 if (cl.p() != nullptr) {
3150 frame fr = *fst.current();
3151 st->print_cr("Failed for frame barriers: %d",chunk->requires_barriers());
3152 fr.print_on(st);
3153 if (!fr.is_interpreted_frame()) {
3154 st->print_cr("size: %d argsize: %d",
3155 ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
3156 ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
3157 }
3158 VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
3159 if (reg != nullptr) {
3160 st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
3161 }
3162 cl.reset();
3163 DEBUG_ONLY(thread->print_frame_layout();)
3164 if (chunk != nullptr) {
3165 chunk->print_on(true, st);
3166 }
3167 return false;
3168 }
3169 }
3170 return true;
3171 }
3172
3173 static void log_frames(JavaThread* thread) {
3174 const static int show_entry_callers = 3;
3175 LogTarget(Trace, continuations) lt;
3176 if (!lt.develop_is_enabled()) {
3177 return;
3178 }
3179 LogStream ls(lt);
3180
3181 ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
3182 if (!thread->has_last_Java_frame()) {
3183 ls.print_cr("NO ANCHOR!");
3184 }
3185
3186 RegisterMap map(thread,
3187 RegisterMap::UpdateMap::include,
3188 RegisterMap::ProcessFrames::include,
3189 RegisterMap::WalkContinuation::skip);
3190 map.set_include_argument_oops(false);
3191
3192 if (false) {
3193 for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
3194 f.print_on(&ls);
3195 }
3196 } else {
3197 map.set_skip_missing(true);
3198 ResetNoHandleMark rnhm;
3199 ResourceMark rm;
3200 HandleMark hm(Thread::current());
3201 FrameValues values;
3202
3203 int i = 0;
3204 int post_entry = -1;
3205 for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
3206 f.describe(values, i, &map, i == 0);
3207 if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
3208 post_entry++;
3209 if (post_entry >= show_entry_callers)
3210 break;
3211 }
3212 values.print_on(thread, &ls);
3213 }
3214
3215 ls.print_cr("======= end frames =========");
3216 }
3217
3218 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp) {
3219 intptr_t* sp0 = sp;
3220 address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
3221
3222 bool preempted = false;
3223 stackChunkOop tail = cont.tail();
3224 if (tail != nullptr && tail->preempted()) {
3225 // Still preempted (monitor not acquired) so no frames were thawed.
3226 set_anchor(thread, cont.entrySP(), cont.entryPC());
3227 preempted = true;
3228 } else {
3229 set_anchor(thread, sp0);
3230 }
3231
3232 log_frames(thread);
3233 if (LoomVerifyAfterThaw) {
3234 assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
3235 }
3236 assert(preempted || ContinuationEntry::assert_entry_frame_laid_out(thread), "");
3237 clear_anchor(thread);
3238
3239 LogTarget(Trace, continuations) lt;
3240 if (lt.develop_is_enabled()) {
3241 LogStream ls(lt);
3242 ls.print_cr("Jumping to frame (thaw):");
3243 frame(sp).print_value_on(&ls);
3244 }
3245 }
3246 #endif // ASSERT
3247
3248 #include CPU_HEADER_INLINE(continuationFreezeThaw)
3249
3250 #ifdef ASSERT
3251 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
3252 ResourceMark rm;
3253 FrameValues values;
3254 assert(f.get_cb() != nullptr, "");
3255 RegisterMap map(f.is_heap_frame() ?
3256 nullptr :
3257 JavaThread::current(),
3258 RegisterMap::UpdateMap::include,
3259 RegisterMap::ProcessFrames::skip,
3260 RegisterMap::WalkContinuation::skip);
3261 map.set_include_argument_oops(false);
3262 map.set_skip_missing(true);
3263 if (callee_complete) {
3264 frame::update_map_with_saved_link(&map, ContinuationHelper::Frame::callee_link_address(f));
3265 }
3266 const_cast<frame&>(f).describe(values, 0, &map, true);
3267 values.print_on(static_cast<JavaThread*>(nullptr), st);
3268 }
3269 #endif
3270
3271 static address thaw_entry = nullptr;
3272 static address freeze_entry = nullptr;
3273 static address freeze_preempt_entry = nullptr;
3274
3275 address Continuation::thaw_entry() {
3276 return ::thaw_entry;
3277 }
3278
3279 address Continuation::freeze_entry() {
3280 return ::freeze_entry;
3281 }
3282
3283 address Continuation::freeze_preempt_entry() {
3284 return ::freeze_preempt_entry;
3285 }
3286
3287 class ConfigResolve {
3288 public:
3289 static void resolve() { resolve_compressed(); }
3290
3291 static void resolve_compressed() {
3292 UseCompressedOops ? resolve_gc<true>()
3293 : resolve_gc<false>();
3294 }
3295
3296 private:
3297 template <bool use_compressed>
3298 static void resolve_gc() {
3299 BarrierSet* bs = BarrierSet::barrier_set();
3300 assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set");
3301 switch (bs->kind()) {
3302 #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \
3303 case BarrierSet::bs_name: { \
3304 resolve<use_compressed, typename BarrierSet::GetType<BarrierSet::bs_name>::type>(); \
3305 } \
3306 break;
3307 FOR_EACH_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
3308 #undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE
3309
3310 default:
3311 fatal("BarrierSet resolving not implemented");
3312 };
3313 }
3314
3315 template <bool use_compressed, typename BarrierSetT>
3316 static void resolve() {
3317 typedef Config<use_compressed ? oop_kind::NARROW : oop_kind::WIDE, BarrierSetT> SelectedConfigT;
3318
3319 freeze_entry = (address)freeze<SelectedConfigT>;
3320 freeze_preempt_entry = (address)SelectedConfigT::freeze_preempt;
3321
3322 // If we wanted, we could templatize by kind and have three different thaw entries
3323 thaw_entry = (address)thaw<SelectedConfigT>;
3324 }
3325 };
3326
3327 void Continuation::init() {
3328 ConfigResolve::resolve();
3329 }