16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/javaClasses.inline.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/codeCache.inline.hpp"
28 #include "code/nmethod.inline.hpp"
29 #include "code/vmreg.inline.hpp"
30 #include "compiler/oopMap.inline.hpp"
31 #include "gc/shared/continuationGCSupport.inline.hpp"
32 #include "gc/shared/gc_globals.hpp"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/memAllocator.hpp"
35 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "jfr/jfrEvents.hpp"
38 #include "logging/log.hpp"
39 #include "logging/logStream.hpp"
40 #include "oops/access.inline.hpp"
41 #include "oops/method.inline.hpp"
42 #include "oops/oopsHierarchy.hpp"
43 #include "oops/objArrayOop.inline.hpp"
44 #include "oops/stackChunkOop.inline.hpp"
45 #include "prims/jvmtiThreadState.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/continuation.hpp"
48 #include "runtime/continuationEntry.inline.hpp"
49 #include "runtime/continuationHelper.inline.hpp"
50 #include "runtime/continuationJavaClasses.inline.hpp"
51 #include "runtime/continuationWrapper.inline.hpp"
52 #include "runtime/frame.inline.hpp"
53 #include "runtime/interfaceSupport.inline.hpp"
54 #include "runtime/javaThread.inline.hpp"
55 #include "runtime/jniHandles.inline.hpp"
56 #include "runtime/keepStackGCProcessed.hpp"
57 #include "runtime/objectMonitor.inline.hpp"
58 #include "runtime/orderAccess.hpp"
59 #include "runtime/prefetch.inline.hpp"
60 #include "runtime/smallRegisterMap.inline.hpp"
61 #include "runtime/sharedRuntime.hpp"
62 #include "runtime/stackChunkFrameStream.inline.hpp"
63 #include "runtime/stackFrameStream.inline.hpp"
64 #include "runtime/stackOverflow.hpp"
65 #include "runtime/stackWatermarkSet.inline.hpp"
66 #include "utilities/debug.hpp"
67 #include "utilities/exceptions.hpp"
68 #include "utilities/macros.hpp"
69 #include "utilities/vmError.hpp"
70 #if INCLUDE_ZGC
71 #include "gc/z/zStackChunkGCData.inline.hpp"
72 #endif
73
74 #include <type_traits>
75
76 /*
77 * This file contains the implementation of continuation freezing (yield) and thawing (run).
78 *
79 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
80 * would likely call these operations many thousands of times per second, on every core.
81 *
82 * Freeze might be called every time the application performs any I/O operation, every time it
83 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
84 * multiple times in each of those cases, as it is called by the return barrier, which may be
85 * invoked on method return.
86 *
87 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
88 * example, every effort is made to avoid Java-VM transitions as much as possible.
89 *
90 * On the fast path, all frames are known to be compiled, and the chunk requires no barriers
91 * and so frames are simply copied, and the bottom-most one is patched.
92 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
162 #endif
163
164 // TODO: See AbstractAssembler::generate_stack_overflow_check,
165 // Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
166 // when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.
167
168 // Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk
169
170 // Used just to annotate cold/hot branches
171 #define LIKELY(condition) (condition)
172 #define UNLIKELY(condition) (condition)
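// Note: both macros currently expand to the bare condition. On compilers that
// support it, they could instead wrap a branch-prediction hint such as
// __builtin_expect, but no hint is applied here.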
173
174 // debugging functions
175 #ifdef ASSERT
176 extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue
177
178 static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }
179
180 static void do_deopt_after_thaw(JavaThread* thread);
181 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
182 static void log_frames(JavaThread* thread);
183 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted);
184 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
185
186 #define assert_pfl(p, ...) \
187 do { \
188 if (!(p)) { \
189 JavaThread* t = JavaThread::active(); \
190 if (t->has_last_Java_frame()) { \
191 tty->print_cr("assert(" #p ") failed:"); \
192 t->print_frame_layout(); \
193 } \
194 } \
195 vmassert(p, __VA_ARGS__); \
196 } while(0)
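// Hypothetical usage: assert_pfl(sp != nullptr, "unexpected null sp"); on failure the
// current thread's frame layout is printed before vmassert fires.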
197
198 #else
199 static void verify_continuation(oop continuation) { }
200 #define assert_pfl(p, ...)
201 #endif
202
203 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
204 template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);
492
493 assert(!Interpreter::contains(_cont.entryPC()), "");
494
495 _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
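// On 64-bit platforms _bottom_address may come out only 8-byte aligned; the block
// below steps it down one word when needed so it meets frame::frame_alignment
// (16 bytes), as the assert requires.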
496 #ifdef _LP64
497 if (((intptr_t)_bottom_address & 0xf) != 0) {
498 _bottom_address--;
499 }
500 assert(is_aligned(_bottom_address, frame::frame_alignment), "");
501 #endif
502
503 log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
504 p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
505 assert(_bottom_address != nullptr, "");
506 assert(_bottom_address <= _cont.entrySP(), "");
507 DEBUG_ONLY(_last_write = nullptr;)
508
509 assert(_cont.chunk_invariant(), "");
510 assert(!Interpreter::contains(_cont.entryPC()), "");
511 #if !defined(PPC64) || defined(ZERO)
512 static const int doYield_stub_frame_size = frame::metadata_words;
513 #else
514 static const int doYield_stub_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
515 #endif
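// On most platforms the doYield stub frame consists only of the frame metadata words
// (saved return pc and, where applicable, fp); on PPC64 the stub frame size is the
// native ABI register-argument save area instead.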
516 // With preemption doYield() might not have been resolved yet
517 assert(_preempt || SharedRuntime::cont_doYield_stub()->frame_size() == doYield_stub_frame_size, "");
518
519 if (preempt) {
520 _last_frame = _thread->last_frame();
521 }
522
523 // properties of the continuation on the stack; all sizes are in words
524 _cont_stack_top = frame_sp + (!preempt ? doYield_stub_frame_size : 0); // we don't freeze the doYield stub frame
525 _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
526 - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw
527
528 log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
529 cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
530 assert(cont_size() > 0, "");
531
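// With lightweight locking (LM_LIGHTWEIGHT) any monitors held by this thread sit in
// its per-thread lock stack; record how many so freeze can copy them into the chunk
// (see freeze_lockstack()) and account for them in the size calculations.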
532 if (LockingMode != LM_LIGHTWEIGHT) {
533 _monitors_in_lockstack = 0;
534 } else {
535 _monitors_in_lockstack = _thread->lock_stack().monitor_count();
536 }
537 }
538
539 void FreezeBase::init_rest() { // we want to postpone some initialization until after chunk handling
540 _freeze_size = 0;
541 _total_align_size = 0;
542 NOT_PRODUCT(_frames = 0;)
543 }
544
837 freeze_result res = recurse_freeze(f, caller, 0, false, true);
838
839 if (res == freeze_ok) {
840 finish_freeze(f, caller);
841 _cont.write();
842 }
843
844 return res;
845 }
846
847 frame FreezeBase::freeze_start_frame() {
848 if (LIKELY(!_preempt)) {
849 return freeze_start_frame_yield_stub();
850 } else {
851 return freeze_start_frame_on_preempt();
852 }
853 }
854
855 frame FreezeBase::freeze_start_frame_yield_stub() {
856 frame f = _thread->last_frame();
857 assert(SharedRuntime::cont_doYield_stub()->contains(f.pc()), "must be");
858 f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
859 assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
860 return f;
861 }
862
863 frame FreezeBase::freeze_start_frame_on_preempt() {
864 assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
865 assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
866 return _last_frame;
867 }
868
869 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
870 NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
871 assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
872 assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
873 || ((top && _preempt) == f.is_native_frame()), "");
874
875 if (stack_overflow()) {
876 return freeze_exception;
877 }
1033 log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
1034 if (chunk->is_empty()) {
1035 int sp = chunk->stack_size() - argsize_md;
1036 chunk->set_sp(sp);
1037 chunk->set_bottom(sp);
1038 _freeze_size += overlap;
1039 assert(chunk->max_thawing_size() == 0, "");
1040 } DEBUG_ONLY(else empty_chunk = false;)
1041 }
1042 assert(!chunk->is_gc_mode(), "");
1043 assert(!chunk->has_bitmap(), "");
1044 chunk->set_has_mixed_frames(true);
1045
1046 assert(chunk->requires_barriers() == _barriers, "");
1047 assert(!_barriers || chunk->is_empty(), "");
1048
1049 assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
1050 assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
1051
1052 if (_preempt) {
1053 frame f = _thread->last_frame();
1054 if (f.is_interpreted_frame()) {
1055 // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
1056 // We need it so that on resume we can restore the sp to the right place, since
1057 // thawing might add an alignment word to the expression stack (see finish_thaw()).
1058 // We do it now that we know freezing will be successful.
1059 prepare_freeze_interpreted_top_frame(f);
1060 }
1061 }
1062
1063 // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
1064 // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
1065 // will either see no continuation or a consistent chunk.
1066 unwind_frames();
1067
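// Note: _freeze_size also counts the lock-stack oops and the chunk metadata words;
// those are not thawed back onto the Java stack, which is presumably why they are
// subtracted from the thawing-size estimate below.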
1068 chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);
1069
1070 if (lt.develop_is_enabled()) {
1071 LogStream ls(lt);
1072 ls.print_cr("top chunk:");
1073 chunk->print_on(&ls);
1074 }
1075
1076 if (_monitors_in_lockstack > 0) {
1077 freeze_lockstack(chunk);
1078 }
1079
1551 // Some GCs could put direct allocations in old gen for slow-path
1552 // allocations; need to explicitly check if that was the case.
1553 _barriers = chunk->requires_barriers();
1554 }
1555 }
1556
1557 if (_barriers) {
1558 log_develop_trace(continuations)("allocation requires barriers");
1559 }
1560
1561 assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1562
1563 return chunk;
1564 }
1565
1566 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1567 ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1568 Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1569 }
1570
1571 #if INCLUDE_JVMTI
1572 static int num_java_frames(ContinuationWrapper& cont) {
1573 ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1574 int count = 0;
1575 for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1576 count += chunk->num_java_frames();
1577 }
1578 return count;
1579 }
1580
1581 static void invalidate_jvmti_stack(JavaThread* thread) {
1582 if (thread->is_interp_only_mode()) {
1583 JvmtiThreadState *state = thread->jvmti_thread_state();
1584 if (state != nullptr)
1585 state->invalidate_cur_stack_depth();
1586 }
1587 }
1588
1589 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1590 if (JvmtiExport::can_post_frame_pop()) {
1591 int num_frames = num_java_frames(cont);
1592
1593 ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1594 JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1595 }
1596 invalidate_jvmti_stack(thread);
1597 }
1598
1599 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top) {
1600 assert(current->vthread() != nullptr, "must be");
1601
1602 HandleMarkCleaner hm(current);
1603 Handle vth(current, current->vthread());
1604
1605 ContinuationWrapper::SafepointOp so(current, cont);
1606
1607 // Since we might safepoint, set the anchor so that the stack can be walked.
1608 set_anchor(current, top.sp());
1609
1610 JRT_BLOCK
1611 JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
1612
1613 if (current->pending_contended_entered_event()) {
1614 JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1615 current->set_contended_entered_monitor(nullptr);
1616 }
1617 JRT_BLOCK_END
1618
1619 clear_anchor(current);
1620 }
1621 #endif // INCLUDE_JVMTI
1622
1623 #ifdef ASSERT
1624 static bool monitors_on_stack(JavaThread* thread) {
1625 ContinuationEntry* ce = thread->last_continuation();
1626 RegisterMap map(thread,
1627 RegisterMap::UpdateMap::include,
1628 RegisterMap::ProcessFrames::include,
1629 RegisterMap::WalkContinuation::skip);
1630 map.set_include_argument_oops(false);
1631 for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
1632 if ((f.is_interpreted_frame() && ContinuationHelper::InterpretedFrame::is_owning_locks(f)) ||
1633 (f.is_compiled_frame() && ContinuationHelper::CompiledFrame::is_owning_locks(map.thread(), &map, f)) ||
1634 (f.is_native_frame() && ContinuationHelper::NativeFrame::is_owning_locks(map.thread(), f))) {
1635 return true;
1636 }
1637 }
1638 return false;
1639 }
1640
1641 // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1642 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1643 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1644 bool FreezeBase::check_valid_fast_path() {
1645 ContinuationEntry* ce = _thread->last_continuation();
1646 RegisterMap map(_thread,
1647 RegisterMap::UpdateMap::skip,
1648 RegisterMap::ProcessFrames::skip,
1649 RegisterMap::WalkContinuation::skip);
1650 map.set_include_argument_oops(false);
1651 bool is_top_frame = true;
1652 for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1653 if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1654 return false;
1655 }
1656 }
1657 return true;
1658 }
1659 #endif // ASSERT
1660
1661 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1662 verify_continuation(cont.continuation());
1663 assert(!cont.is_empty(), "");
1664
1665 log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1666 return freeze_ok;
1667 }
1668
1669 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1670 if (UNLIKELY(res != freeze_ok)) {
1671 JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1672 verify_continuation(cont.continuation());
1673 log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1674 return res;
1675 }
1676
1677 JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1678 return freeze_epilog(cont);
1679 }
1680
1681 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1682 if (UNLIKELY(res != freeze_ok)) {
1683 verify_continuation(cont.continuation());
1684 log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1685 return res;
1686 }
1687
1688 patch_return_pc_with_preempt_stub(old_last_frame);
1689 cont.tail()->set_preempted(true);
1690
1691 return freeze_epilog(cont);
1692 }
1693
1694 template<typename ConfigT, bool preempt>
1695 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1696 assert(!current->has_pending_exception(), "");
1697
1698 #ifdef ASSERT
1699 log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1700 log_frames(current);
1701 #endif
1702
1703 CONT_JFR_ONLY(EventContinuationFreeze event;)
1704
1705 ContinuationEntry* entry = current->last_continuation();
1706
1707 oop oopCont = entry->cont_oop(current);
1708 assert(oopCont == current->last_continuation()->cont_oop(current), "");
1709 assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1710
1711 verify_continuation(oopCont);
1712 ContinuationWrapper cont(current, oopCont);
1713 log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1714
1715 assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1716
1717 assert(LockingMode != LM_LEGACY || (monitors_on_stack(current) == ((current->held_monitor_count() - current->jni_monitor_count()) > 0)),
1718 "Held monitor count and locks on stack invariant: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1719 assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
1720 "Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1876 // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
1877 // for the Java frames in the check below.
1878 if (!stack_overflow_check(thread, size + 300, bottom)) {
1879 return 0;
1880 }
1881
1882 log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
1883 p2i(bottom), p2i(bottom - size), size);
1884 return size;
1885 }
1886
1887 class ThawBase : public StackObj {
1888 protected:
1889 JavaThread* _thread;
1890 ContinuationWrapper& _cont;
1891 CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
1892
1893 intptr_t* _fastpath;
1894 bool _barriers;
1895 bool _preempted_case;
1896 intptr_t* _top_unextended_sp_before_thaw;
1897 int _align_size;
1898 DEBUG_ONLY(intptr_t* _top_stack_address);
1899
1900 StackChunkFrameStream<ChunkFrames::Mixed> _stream;
1901
1902 NOT_PRODUCT(int _frames;)
1903
1904 protected:
1905 ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
1906 _thread(thread), _cont(cont),
1907 _fastpath(nullptr) {
1908 DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
1909 assert (cont.tail() != nullptr, "no last chunk");
1910 DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
1911 }
1912
1913 void clear_chunk(stackChunkOop chunk);
1914 template<bool check_stub>
1915 int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
1916 void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
1917
1918 void thaw_lockstack(stackChunkOop chunk);
1919
1920 // fast path
1921 inline void prefetch_chunk_pd(void* start, int size_words);
1922 void patch_return(intptr_t* sp, bool is_last);
1923
1924 intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
1925 inline intptr_t* push_cleanup_continuation();
1926 void throw_interrupted_exception(JavaThread* current, frame& top);
1927
1928 void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
1929 void finish_thaw(frame& f);
1930
1931 private:
1932 template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
1933 void finalize_thaw(frame& entry, int argsize);
1934
1935 inline bool seen_by_gc();
1936
1937 inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
1938 inline void after_thaw_java_frame(const frame& f, bool bottom);
1939 inline void patch(frame& f, const frame& caller, bool bottom);
1940 void clear_bitmap_bits(address start, address end);
1941
1942 NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
1943 void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
1944 void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
1945 void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
1946
1947 void push_return_frame(frame& f);
1948 inline frame new_entry_frame();
1949 template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
1950 inline void patch_pd(frame& f, const frame& sender);
1951 inline void patch_pd(frame& f, intptr_t* caller_sp);
1952 inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
1953
1954 void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
1955
1956 static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
1957
1958 public:
1959 CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
1960 };
1961
1962 template <typename ConfigT>
2022 chunk->set_sp(chunk->bottom());
2023 chunk->set_max_thawing_size(0);
2024 }
2025
2026 template<bool check_stub>
2027 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2028 bool empty = false;
2029 StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2030 DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2031 assert(chunk_sp == f.sp(), "");
2032 assert(chunk_sp == f.unextended_sp(), "");
2033
2034 int frame_size = f.cb()->frame_size();
2035 argsize = f.stack_argsize();
2036
2037 assert(!f.is_stub() || check_stub, "");
2038 if (check_stub && f.is_stub()) {
2039 // If we don't thaw the top compiled frame too, after restoring the saved
2040 // registers back in Java, we would hit the return barrier to thaw one more
2041 // frame, effectively overwriting the restored registers during that call.
2042 f.next(SmallRegisterMap::instance(), true /* stop */);
2043 assert(!f.is_done(), "");
2044
2045 f.get_cb();
2046 assert(f.is_compiled(), "");
2047 frame_size += f.cb()->frame_size();
2048 argsize = f.stack_argsize();
2049
2050 if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2051 // The caller of the runtime stub when the continuation is preempted is not at a
2052 // Java call instruction, and so cannot rely on nmethod patching for deopt.
2053 log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2054 f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2055 }
2056 }
2057
2058 f.next(SmallRegisterMap::instance(), true /* stop */);
2059 empty = f.is_done();
2060 assert(!empty || argsize == chunk->argsize(), "");
2061
2062 if (empty) {
2063 clear_chunk(chunk);
2064 } else {
2065 chunk->set_sp(chunk->sp() + frame_size);
2066 chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2067 // We set chunk->pc to the return pc into the next frame
2068 chunk->set_pc(f.pc());
2069 #ifdef ASSERT
2070 {
2071 intptr_t* retaddr_slot = (chunk_sp
2072 + frame_size
2073 - frame::sender_sp_ret_address_offset());
2074 assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2075 "unexpected pc");
2076 }
2077 #endif
2078 }
2198 return rs.sp();
2199 }
2200
2201 inline bool ThawBase::seen_by_gc() {
2202 return _barriers || _cont.tail()->is_gc_mode();
2203 }
2204
2205 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2206 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2207 if (UseZGC || UseShenandoahGC) {
2208 chunk->relativize_derived_pointers_concurrently();
2209 }
2210 #endif
2211 }
2212
2213 template <typename ConfigT>
2214 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2215 Continuation::preempt_kind preempt_kind;
2216 bool retry_fast_path = false;
2217
2218 _preempted_case = chunk->preempted();
2219 if (_preempted_case) {
2220 ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2221 if (waiter != nullptr) {
2222 // Mounted again after preemption. Resume the pending monitor operation,
2223 // which will be either a monitorenter or Object.wait() call.
2224 ObjectMonitor* mon = waiter->monitor();
2225 preempt_kind = waiter->is_wait() ? Continuation::freeze_on_wait : Continuation::freeze_on_monitorenter;
2226
2227 bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2228 assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2229 if (!mon_acquired) {
2230 // Failed to acquire monitor. Return to enterSpecial to unmount again.
2231 return push_cleanup_continuation();
2232 }
2233 chunk = _cont.tail(); // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2234 } else {
2235 // Preemption cancelled in the monitorenter case. We actually acquired
2236 // the monitor after freezing all frames so nothing to do.
2237 preempt_kind = Continuation::freeze_on_monitorenter;
2238 }
2239 // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2240 relativize_chunk_concurrently(chunk);
2241 chunk->set_preempted(false);
2242 retry_fast_path = true;
2243 } else {
2244 relativize_chunk_concurrently(chunk);
2245 }
2246
2247 // On first thaw after freeze restore oops to the lockstack if any.
2248 assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2249 if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2250 thaw_lockstack(chunk);
2251 retry_fast_path = true;
2252 }
2253
2254 // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2255 // and FLAG_PREEMPTED flags from the stackChunk.
2256 if (retry_fast_path && can_thaw_fast(chunk)) {
2257 intptr_t* sp = thaw_fast<true>(chunk);
2258 if (_preempted_case) {
2259 return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2260 }
2304
2305 intptr_t* sp = caller.sp();
2306
2307 if (_preempted_case) {
2308 return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2309 }
2310 return sp;
2311 }
2312
2313 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2314 log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2315 assert(!_cont.is_empty(), "no more frames");
2316 assert(num_frames > 0, "");
2317 assert(!heap_frame.is_empty(), "");
2318
2319 if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2320 heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2321 } else if (!heap_frame.is_interpreted_frame()) {
2322 recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2323 } else {
2324 recurse_thaw_interpreted_frame(heap_frame, caller, num_frames);
2325 }
2326 }
2327
2328 template<typename FKind>
2329 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2330 assert(num_frames > 0, "");
2331
2332 DEBUG_ONLY(_frames++;)
2333
2334 int argsize = _stream.stack_argsize();
2335
2336 _stream.next(SmallRegisterMap::instance());
2337 assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2338
2339 // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2340 // as it makes detecting that situation and adjusting unextended_sp tricky
2341 if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2342 log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2343 num_frames++;
2344 }
2345
2346 if (num_frames == 1 || _stream.is_done()) { // end recursion
2347 finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2348 return true; // bottom
2349 } else { // recurse
2350 recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2351 return false;
2352 }
2353 }
2354
2355 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2356 stackChunkOop chunk = _cont.tail();
2421
2422 void ThawBase::clear_bitmap_bits(address start, address end) {
2423 assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2424 assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2425
2426 // we need to clear the bits that correspond to arguments as they reside in the caller frame
2427 // or they will keep objects that are otherwise unreachable alive.
2428
2429 // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2430 // `end` could be at an odd number of stack slots from `start`, i.e. might not be oop aligned.
2431 // If that's the case, the bit range corresponding to the last stack slot should not have bits set
2432 // anyway, and we assert that before returning.
2433 address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2434 log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2435 stackChunkOop chunk = _cont.tail();
2436 chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2437 assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2438 }
2439
2440 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2441 assert(preempt_kind == Continuation::freeze_on_wait || preempt_kind == Continuation::freeze_on_monitorenter, "");
2442 frame top(sp);
2443 assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2444
2445 #if INCLUDE_JVMTI
2446 // Finish the VTMS transition.
2447 assert(_thread->is_in_VTMS_transition(), "must be");
2448 bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2449 if (is_vthread) {
2450 if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
2451 jvmti_mount_end(_thread, _cont, top);
2452 } else {
2453 _thread->set_is_in_VTMS_transition(false);
2454 java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
2455 }
2456 }
2457 #endif
2458
2459 if (fast_case) {
2460 // If we thawed in the slow path the runtime stub/native wrapper frame already
2461 // has the correct fp (see ThawBase::new_stack_frame). On the fast path though,
2462 // we copied the original fp at the time of freeze which now will have to be fixed.
2463 assert(top.is_runtime_frame() || top.is_native_frame(), "");
2464 int fsize = top.cb()->frame_size();
2465 patch_pd(top, sp + fsize);
2466 }
2467
2468 if (preempt_kind == Continuation::freeze_on_wait) {
2469 // Check now if we need to throw an InterruptedException.
2470 if (_thread->pending_interrupted_exception()) {
2471 throw_interrupted_exception(_thread, top);
2472 _thread->set_pending_interrupted_exception(false);
2473 }
2474 } else if (top.is_runtime_frame()) {
2475 // The continuation might now run on a different platform thread than the previous time so
2476 // we need to adjust the current thread saved in the stub frame before restoring registers.
2477 JavaThread** thread_addr = frame::saved_thread_address(top);
2478 if (thread_addr != nullptr) *thread_addr = _thread;
2479 }
2480 return sp;
2481 }
2482
2483 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2484 ContinuationWrapper::SafepointOp so(current, _cont);
2485 // Since we might safepoint, set the anchor so that the stack can be walked.
2486 set_anchor(current, top.sp());
2487 JRT_BLOCK
2488 THROW(vmSymbols::java_lang_InterruptedException());
2489 JRT_BLOCK_END
2490 clear_anchor(current);
2491 }
2492
2493 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
2494 assert(hf.is_interpreted_frame(), "");
2495
2496 if (UNLIKELY(seen_by_gc())) {
2497 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2498 }
2499
2500 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2501
2502 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2503
2504 _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2505
2506 frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2507
2508 intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2509 intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2510 intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2511 intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2512
2513 assert(hf.is_heap_frame(), "should be");
2514 assert(!f.is_heap_frame(), "should not be");
2515
2516 const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2517 assert((stack_frame_bottom == stack_frame_top + fsize), "");
2522
2523 // Make sure the relativized locals pointer is already set.
2524 assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2525
2526 derelativize_interpreted_frame_metadata(hf, f);
2527 patch(f, caller, is_bottom_frame);
2528
2529 assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2530 assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2531
2532 CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2533
2534 maybe_set_fastpath(f.sp());
2535
2536 Method* m = hf.interpreter_frame_method();
2537 assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2538 const int locals = m->max_locals();
2539
2540 if (!is_bottom_frame) {
2541 // can only fix caller once this frame is thawed (due to callee saved regs)
2542 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2543 } else if (_cont.tail()->has_bitmap() && locals > 0) {
2544 assert(hf.is_heap_frame(), "should be");
2545 address start = (address)(heap_frame_bottom - locals);
2546 address end = (address)heap_frame_bottom;
2547 clear_bitmap_bits(start, end);
2548 }
2549
2550 DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2551 caller = f;
2552 }
2553
2554 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2555 assert(hf.is_compiled_frame(), "");
2556 assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2557
2558 if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2559 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2560 }
2561
2562 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2563
2564 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2565
2566 assert(caller.sp() == caller.unextended_sp(), "");
2567
2568 if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2569 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2570 }
2571
2572 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2573 // yet laid out in the stack, and so the original_pc is not stored in it.
2574 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2575 frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2576 intptr_t* const stack_frame_top = f.sp();
2577 intptr_t* const heap_frame_top = hf.unextended_sp();
2578
2579 const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2598 assert(!f.is_deoptimized_frame(), "");
2599 if (hf.is_deoptimized_frame()) {
2600 maybe_set_fastpath(f.sp());
2601 } else if (_thread->is_interp_only_mode()
2602 || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2603 // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2604 // cannot rely on nmethod patching for deopt.
2605 assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2606
2607 log_develop_trace(continuations)("Deoptimizing thawed frame");
2608 DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2609
2610 f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2611 assert(f.is_deoptimized_frame(), "");
2612 assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2613 maybe_set_fastpath(f.sp());
2614 }
2615
2616 if (!is_bottom_frame) {
2617 // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2618 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2619 } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
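// The argument slots of this bottom frame still have bits set in the chunk's oop
// bitmap; clear them so stale entries do not keep otherwise unreachable objects
// alive (see the comment in clear_bitmap_bits()).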
2620 address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2621 int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2622 int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2623 clear_bitmap_bits(start, start + argsize_in_bytes);
2624 }
2625
2626 DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2627 caller = f;
2628 }
2629
2630 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2631 DEBUG_ONLY(_frames++;)
2632
2633 if (UNLIKELY(seen_by_gc())) {
2634 // Process the stub's caller here since we might need the full map.
2635 RegisterMap map(nullptr,
2636 RegisterMap::UpdateMap::include,
2637 RegisterMap::ProcessFrames::skip,
2638 RegisterMap::WalkContinuation::skip);
2639 map.set_include_argument_oops(false);
2640 _stream.next(&map);
2641 assert(!_stream.is_done(), "");
2642 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2643 } else {
2644 _stream.next(SmallRegisterMap::instance());
2645 assert(!_stream.is_done(), "");
2646 }
2647
2648 recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2649
2650 assert(caller.is_compiled_frame(), "");
2651 assert(caller.sp() == caller.unextended_sp(), "");
2652
2653 DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2654
2655 frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2656 intptr_t* stack_frame_top = f.sp();
2657 intptr_t* heap_frame_top = hf.sp();
2658 int fsize = ContinuationHelper::StubFrame::size(hf);
2659
2660 copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2661 fsize + frame::metadata_words);
2662
2663 patch(f, caller, false /*is_bottom_frame*/);
2664
2665 // can only fix caller once this frame is thawed (due to callee saved regs)
2666 RegisterMap map(nullptr,
2667 RegisterMap::UpdateMap::include,
2668 RegisterMap::ProcessFrames::skip,
2669 RegisterMap::WalkContinuation::skip);
2670 map.set_include_argument_oops(false);
2671 f.oop_map()->update_register_map(&f, &map);
2672 ContinuationHelper::update_register_map_with_callee(caller, &map);
2673 _cont.tail()->fix_thawed_frame(caller, &map);
2674
2675 DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2676 caller = f;
2677 }
2678
2679 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2680 assert(hf.is_native_frame(), "");
2681 assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2682
2683 if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2684 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance());
2685 }
2686
2687 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2688 assert(!is_bottom_frame, "");
2689
2690 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2691
2692 assert(caller.sp() == caller.unextended_sp(), "");
2693
2694 if (caller.is_interpreted_frame()) {
2695 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame
2696 }
2697
2698 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2699 // yet laid out in the stack, and so the original_pc is not stored in it.
2700 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2701 frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2702 intptr_t* const stack_frame_top = f.sp();
2703 intptr_t* const heap_frame_top = hf.unextended_sp();
2704
2705 int fsize = ContinuationHelper::NativeFrame::size(hf);
2706 assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2707
2708 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2709 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
2710 int sz = fsize + frame::metadata_words_at_bottom;
2711
2712 copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2713
2714 patch(f, caller, false /* bottom */);
2715
2716 // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2717 assert(!f.is_deoptimized_frame(), "");
2718 assert(!hf.is_deoptimized_frame(), "");
2719 assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2720
2721 // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2722 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance());
2723
2724 DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2725 caller = f;
2726 }
2727
2728 void ThawBase::finish_thaw(frame& f) {
2729 stackChunkOop chunk = _cont.tail();
2730
2731 if (chunk->is_empty()) {
2732 // Only remove chunk from list if it can't be reused for another freeze
2733 if (seen_by_gc()) {
2734 _cont.set_tail(chunk->parent());
2735 } else {
2736 chunk->set_has_mixed_frames(false);
2737 }
2738 chunk->set_max_thawing_size(0);
2739 } else {
2740 chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2741 }
2742 assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
2743
2744 if (!is_aligned(f.sp(), frame::frame_alignment)) {
2745 assert(f.is_interpreted_frame(), "");
2746 f.set_sp(align_down(f.sp(), frame::frame_alignment));
2747 }
2748 push_return_frame(f);
2749 chunk->fix_thawed_frame(f, SmallRegisterMap::instance()); // can only fix caller after push_return_frame (due to callee saved regs)
2750
2751 assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
2752
2753 log_develop_trace(continuations)("thawed %d frames", _frames);
2754
2755 LogTarget(Trace, continuations) lt;
2756 if (lt.develop_is_enabled()) {
2757 LogStream ls(lt);
2758 ls.print_cr("top hframe after (thaw):");
2759 _cont.last_frame().print_value_on(&ls);
2760 }
2761 }
2762
2763 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
2764 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
2765 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
2766
2767 LogTarget(Trace, continuations) lt;
2768 if (lt.develop_is_enabled()) {
2769 LogStream ls(lt);
2791
2792 ContinuationEntry* entry = thread->last_continuation();
2793 assert(entry != nullptr, "");
2794 oop oopCont = entry->cont_oop(thread);
2795
2796 assert(!jdk_internal_vm_Continuation::done(oopCont), "");
2797 assert(oopCont == get_continuation(thread), "");
2798 verify_continuation(oopCont);
2799
2800 assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
2801
2802 ContinuationWrapper cont(thread, oopCont);
2803 log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
2804
2805 #ifdef ASSERT
2806 set_anchor_to_entry(thread, cont.entry());
2807 log_frames(thread);
2808 clear_anchor(thread);
2809 #endif
2810
2811 DEBUG_ONLY(bool preempted = cont.tail()->preempted();)
2812 Thaw<ConfigT> thw(thread, cont);
2813 intptr_t* const sp = thw.thaw(kind);
2814 assert(is_aligned(sp, frame::frame_alignment), "");
2815 DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp, preempted);)
2816
2817 CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
2818
2819 verify_continuation(cont.continuation());
2820 log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
2821
2822 return sp;
2823 }
2824
2825 #ifdef ASSERT
2826 static void do_deopt_after_thaw(JavaThread* thread) {
2827 int i = 0;
2828 StackFrameStream fst(thread, true, false);
2829 fst.register_map()->set_include_argument_oops(false);
2830 ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
2831 for (; !fst.is_done(); fst.next()) {
2832 if (fst.current()->cb()->is_nmethod()) {
2833 nmethod* nm = fst.current()->cb()->as_nmethod();
2834 if (!nm->method()->is_continuation_native_intrinsic()) {
2835 nm->make_deoptimized();
2892 if (!fr.is_interpreted_frame()) {
2893 st->print_cr("size: %d argsize: %d",
2894 ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
2895 ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
2896 }
2897 VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
2898 if (reg != nullptr) {
2899 st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
2900 }
2901 cl.reset();
2902 DEBUG_ONLY(thread->print_frame_layout();)
2903 if (chunk != nullptr) {
2904 chunk->print_on(true, st);
2905 }
2906 return false;
2907 }
2908 }
2909 return true;
2910 }
2911
2912 static void log_frames(JavaThread* thread) {
2913 const static int show_entry_callers = 3;
2914 LogTarget(Trace, continuations) lt;
2915 if (!lt.develop_is_enabled()) {
2916 return;
2917 }
2918 LogStream ls(lt);
2919
2920 ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
2921 if (!thread->has_last_Java_frame()) {
2922 ls.print_cr("NO ANCHOR!");
2923 }
2924
2925 RegisterMap map(thread,
2926 RegisterMap::UpdateMap::include,
2927 RegisterMap::ProcessFrames::include,
2928 RegisterMap::WalkContinuation::skip);
2929 map.set_include_argument_oops(false);
2930
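// The first branch is compile-time disabled; flip the condition to get a plain
// frame-by-frame print instead of the annotated FrameValues dump below.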
2931 if (false) {
2932 for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
2933 f.print_on(&ls);
2934 }
2935 } else {
2937 ResetNoHandleMark rnhm;
2938 ResourceMark rm;
2939 HandleMark hm(Thread::current());
2940 FrameValues values;
2941
2942 int i = 0;
2943 int post_entry = -1;
2944 for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
2945 f.describe(values, i, &map, i == 0);
2946 if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
2947 post_entry++;
2948 if (post_entry >= show_entry_callers)
2949 break;
2950 }
2951 values.print_on(thread, &ls);
2952 }
2953
2954 ls.print_cr("======= end frames =========");
2955 }
2956
2957 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp, bool preempted) {
2958 intptr_t* sp0 = sp;
2959 address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
2960
2961 if (preempted && sp0 == cont.entrySP()) {
2962 // Still preempted (monitor not acquired) so no frames were thawed.
2963 assert(cont.tail()->preempted(), "");
2964 set_anchor(thread, cont.entrySP(), cont.entryPC());
2965 } else {
2966 set_anchor(thread, sp0);
2967 }
2968
2969 log_frames(thread);
2970 if (LoomVerifyAfterThaw) {
2971 assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
2972 }
2973 assert(ContinuationEntry::assert_entry_frame_laid_out(thread), "");
2974 clear_anchor(thread);
2975
2976 LogTarget(Trace, continuations) lt;
2977 if (lt.develop_is_enabled()) {
2978 LogStream ls(lt);
2979 ls.print_cr("Jumping to frame (thaw):");
2980 frame(sp).print_value_on(&ls);
2981 }
2982 }
2983 #endif // ASSERT
2984
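// Pulls in the CPU-specific parts of freeze/thaw (e.g. patch_pd and the frame
// construction/alignment helpers) from continuationFreezeThaw_<cpu>.inline.hpp.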
2985 #include CPU_HEADER_INLINE(continuationFreezeThaw)
2986
2987 #ifdef ASSERT
2988 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
2989 ResourceMark rm;
2990 FrameValues values;
2991 assert(f.get_cb() != nullptr, "");
2992 RegisterMap map(f.is_heap_frame() ?
2993 nullptr :
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/javaClasses.inline.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "code/codeCache.inline.hpp"
28 #include "code/nmethod.inline.hpp"
29 #include "code/vmreg.inline.hpp"
30 #include "compiler/oopMap.inline.hpp"
31 #include "gc/shared/continuationGCSupport.inline.hpp"
32 #include "gc/shared/gc_globals.hpp"
33 #include "gc/shared/barrierSet.hpp"
34 #include "gc/shared/memAllocator.hpp"
35 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
36 #include "interpreter/bytecodeStream.hpp"
37 #include "interpreter/interpreter.hpp"
38 #include "interpreter/interpreterRuntime.hpp"
39 #include "jfr/jfrEvents.hpp"
40 #include "logging/log.hpp"
41 #include "logging/logStream.hpp"
42 #include "oops/access.inline.hpp"
43 #include "oops/constantPool.inline.hpp"
44 #include "oops/method.inline.hpp"
45 #include "oops/oopsHierarchy.hpp"
46 #include "oops/objArrayOop.inline.hpp"
47 #include "oops/stackChunkOop.inline.hpp"
48 #include "prims/jvmtiThreadState.hpp"
49 #include "runtime/arguments.hpp"
50 #include "runtime/continuation.hpp"
51 #include "runtime/continuationEntry.inline.hpp"
52 #include "runtime/continuationHelper.inline.hpp"
53 #include "runtime/continuationJavaClasses.inline.hpp"
54 #include "runtime/continuationWrapper.inline.hpp"
55 #include "runtime/frame.inline.hpp"
56 #include "runtime/interfaceSupport.inline.hpp"
57 #include "runtime/javaThread.inline.hpp"
58 #include "runtime/jniHandles.inline.hpp"
59 #include "runtime/keepStackGCProcessed.hpp"
60 #include "runtime/objectMonitor.inline.hpp"
61 #include "runtime/orderAccess.hpp"
62 #include "runtime/prefetch.inline.hpp"
63 #include "runtime/smallRegisterMap.inline.hpp"
64 #include "runtime/sharedRuntime.hpp"
65 #include "runtime/stackChunkFrameStream.inline.hpp"
66 #include "runtime/stackFrameStream.inline.hpp"
67 #include "runtime/stackOverflow.hpp"
68 #include "runtime/stackWatermarkSet.inline.hpp"
69 #include "runtime/vframe.inline.hpp"
70 #include "runtime/vframe_hp.hpp"
71 #include "utilities/debug.hpp"
72 #include "utilities/exceptions.hpp"
73 #include "utilities/macros.hpp"
74 #include "utilities/vmError.hpp"
75 #if INCLUDE_ZGC
76 #include "gc/z/zStackChunkGCData.inline.hpp"
77 #endif
78 #ifdef COMPILER1
79 #include "c1/c1_Runtime1.hpp"
80 #endif
81 #ifdef COMPILER2
82 #include "opto/runtime.hpp"
83 #endif
84
85 #include <type_traits>
86
87 /*
88 * This file contains the implementation of continuation freezing (yield) and thawing (run).
89 *
90 * This code is very latency-critical and very hot. An ordinary and well-behaved server application
90 * would likely call these operations many thousands of times per second, on every core.
92 *
93 * Freeze might be called every time the application performs any I/O operation, every time it
94 * acquires a j.u.c. lock, every time it takes a message from a queue, and thaw can be called
95 * multiple times in each of those cases, as it is called by the return barrier, which may be
96 * invoked on method return.
97 *
98 * The amortized budget for each of those two operations is ~100-150ns. That is why, for
99 * example, every effort is made to avoid Java-VM transitions as much as possible.
100 *
101 * On the fast path, all frames are known to be compiled, and the chunk requires no barriers
102 * and so frames are simply copied, and the bottom-most one is patched.
103 * On the slow path, internal pointers in interpreted frames are de/relativized to/from offsets
173 #endif
174
175 // TODO: See AbstractAssembler::generate_stack_overflow_check,
176 // Compile::bang_size_in_bytes(), m->as_SafePoint()->jvms()->interpreter_frame_size()
177 // when we stack-bang, we need to update a thread field with the lowest (farthest) bang point.
178
179 // Data invariants are defined by Continuation::debug_verify_continuation and Continuation::debug_verify_stack_chunk
180
181 // Used just to annotate cold/hot branches
182 #define LIKELY(condition) (condition)
183 #define UNLIKELY(condition) (condition)
184
185 // debugging functions
186 #ifdef ASSERT
187 extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); // address p is readable and *(intptr_t*)p != errvalue
188
189 static void verify_continuation(oop continuation) { Continuation::debug_verify_continuation(continuation); }
190
191 static void do_deopt_after_thaw(JavaThread* thread);
192 static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
193 static void log_frames(JavaThread* thread, bool dolog = false);
194 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
195 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
196 static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr = nullptr, const char** code_name_ptr = nullptr, int* bci_ptr = nullptr);
197
198 #define assert_pfl(p, ...) \
199 do { \
200 if (!(p)) { \
201 JavaThread* t = JavaThread::active(); \
202 if (t->has_last_Java_frame()) { \
203 tty->print_cr("assert(" #p ") failed:"); \
204 t->print_frame_layout(); \
205 } \
206 } \
207 vmassert(p, __VA_ARGS__); \
208 } while(0)
209
210 #else
211 static void verify_continuation(oop continuation) { }
212 #define assert_pfl(p, ...)
213 #endif
214
215 static freeze_result is_pinned0(JavaThread* thread, oop cont_scope, bool safepoint);
216 template<typename ConfigT, bool preempt> static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp);
504
505 assert(!Interpreter::contains(_cont.entryPC()), "");
506
507 _bottom_address = _cont.entrySP() - _cont.entry_frame_extension();
508 #ifdef _LP64
509 if (((intptr_t)_bottom_address & 0xf) != 0) {
510 _bottom_address--;
511 }
512 assert(is_aligned(_bottom_address, frame::frame_alignment), "");
513 #endif
514
515 log_develop_trace(continuations)("bottom_address: " INTPTR_FORMAT " entrySP: " INTPTR_FORMAT " argsize: " PTR_FORMAT,
516 p2i(_bottom_address), p2i(_cont.entrySP()), (_cont.entrySP() - _bottom_address) << LogBytesPerWord);
517 assert(_bottom_address != nullptr, "");
518 assert(_bottom_address <= _cont.entrySP(), "");
519 DEBUG_ONLY(_last_write = nullptr;)
520
521 assert(_cont.chunk_invariant(), "");
522 assert(!Interpreter::contains(_cont.entryPC()), "");
523 #if !defined(PPC64) || defined(ZERO)
524 static const int do_yield_frame_size = frame::metadata_words;
525 #else
526 static const int do_yield_frame_size = frame::native_abi_reg_args_size >> LogBytesPerWord;
527 #endif
528 // With preemption doYield() might not have been resolved yet
529 assert(_preempt || ContinuationEntry::do_yield_nmethod()->frame_size() == do_yield_frame_size, "");
530
531 if (preempt) {
532 _last_frame = _thread->last_frame();
533 }
534
535 // properties of the continuation on the stack; all sizes are in words
536 _cont_stack_top = frame_sp + (!preempt ? do_yield_frame_size : 0); // we don't freeze the doYield stub frame
537 _cont_stack_bottom = _cont.entrySP() + (_cont.argsize() == 0 ? frame::metadata_words_at_top : 0)
538 - ContinuationHelper::frame_align_words(_cont.argsize()); // see alignment in thaw
539
540 log_develop_trace(continuations)("freeze size: %d argsize: %d top: " INTPTR_FORMAT " bottom: " INTPTR_FORMAT,
541 cont_size(), _cont.argsize(), p2i(_cont_stack_top), p2i(_cont_stack_bottom));
542 assert(cont_size() > 0, "");
543
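  // With lightweight locking, monitors held by this thread are recorded in its lock stack;
  // count them here so they can be frozen into the chunk (see freeze_lockstack()) and
  // restored again on thaw.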
544 if (LockingMode != LM_LIGHTWEIGHT) {
545 _monitors_in_lockstack = 0;
546 } else {
547 _monitors_in_lockstack = _thread->lock_stack().monitor_count();
548 }
549 }
550
551 void FreezeBase::init_rest() { // we want to postpone some initialization until after chunk handling
552 _freeze_size = 0;
553 _total_align_size = 0;
554 NOT_PRODUCT(_frames = 0;)
555 }
556
849 freeze_result res = recurse_freeze(f, caller, 0, false, true);
850
851 if (res == freeze_ok) {
852 finish_freeze(f, caller);
853 _cont.write();
854 }
855
856 return res;
857 }
858
859 frame FreezeBase::freeze_start_frame() {
860 if (LIKELY(!_preempt)) {
861 return freeze_start_frame_yield_stub();
862 } else {
863 return freeze_start_frame_on_preempt();
864 }
865 }
866
867 frame FreezeBase::freeze_start_frame_yield_stub() {
868 frame f = _thread->last_frame();
869 assert(ContinuationEntry::do_yield_nmethod()->contains(f.pc()), "must be");
870 f = sender<ContinuationHelper::NonInterpretedUnknownFrame>(f);
871 assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), f), "");
872 return f;
873 }
874
875 frame FreezeBase::freeze_start_frame_on_preempt() {
876 assert(_last_frame.sp() == _thread->last_frame().sp(), "_last_frame should be already initialized");
877 assert(Continuation::is_frame_in_continuation(_thread->last_continuation(), _last_frame), "");
878 return _last_frame;
879 }
880
881 // The parameter callee_argsize includes metadata that has to be part of caller/callee overlap.
882 NOINLINE freeze_result FreezeBase::recurse_freeze(frame& f, frame& caller, int callee_argsize, bool callee_interpreted, bool top) {
883 assert(f.unextended_sp() < _bottom_address, ""); // see recurse_freeze_java_frame
884 assert(f.is_interpreted_frame() || ((top && _preempt) == ContinuationHelper::Frame::is_stub(f.cb()))
885 || ((top && _preempt) == f.is_native_frame()), "");
886
887 if (stack_overflow()) {
888 return freeze_exception;
889 }
1045 log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
1046 if (chunk->is_empty()) {
1047 int sp = chunk->stack_size() - argsize_md;
1048 chunk->set_sp(sp);
1049 chunk->set_bottom(sp);
1050 _freeze_size += overlap;
1051 assert(chunk->max_thawing_size() == 0, "");
1052 } DEBUG_ONLY(else empty_chunk = false;)
1053 }
1054 assert(!chunk->is_gc_mode(), "");
1055 assert(!chunk->has_bitmap(), "");
1056 chunk->set_has_mixed_frames(true);
1057
1058 assert(chunk->requires_barriers() == _barriers, "");
1059 assert(!_barriers || chunk->is_empty(), "");
1060
1061 assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
1062 assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
1063
1064 if (_preempt) {
1065 frame top_frame = _thread->last_frame();
1066 if (top_frame.is_interpreted_frame()) {
1067 // Some platforms do not save the last_sp in the top interpreter frame on VM calls.
1068 // We need it so that on resume we can restore the sp to the right place, since
1069 // thawing might add an alignment word to the expression stack (see finish_thaw()).
1070 // We do it now that we know freezing will be successful.
1071 prepare_freeze_interpreted_top_frame(top_frame);
1072 }
1073
1074 // Do this now so should_process_args_at_top() is set before calling finish_freeze
1075 // in case we might need to apply GC barriers to frames in this stackChunk.
1076 if (_thread->at_preemptable_init()) {
1077 assert(top_frame.is_interpreted_frame(), "only InterpreterRuntime::_new/resolve_from_cache allowed");
1078 chunk->set_at_klass_init(true);
1079 Method* m = top_frame.interpreter_frame_method();
1080 Bytecode current_bytecode = Bytecode(m, top_frame.interpreter_frame_bcp());
1081 Bytecodes::Code code = current_bytecode.code();
1082 int exp_size = top_frame.interpreter_frame_expression_stack_size();
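      // For _invokestatic the outgoing arguments are still live on the expression stack;
      // flag the chunk so those slots are also processed when thawing (see _process_args_at_top).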
1083 if (code == Bytecodes::Code::_invokestatic && exp_size > 0) {
1084 chunk->set_has_args_at_top(true);
1085 }
1086 }
1087 }
1088
1089 // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
1090 // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here
1091 // will either see no continuation or a consistent chunk.
1092 unwind_frames();
1093
1094 chunk->set_max_thawing_size(chunk->max_thawing_size() + _freeze_size - _monitors_in_lockstack - frame::metadata_words);
1095
1096 if (lt.develop_is_enabled()) {
1097 LogStream ls(lt);
1098 ls.print_cr("top chunk:");
1099 chunk->print_on(&ls);
1100 }
1101
1102 if (_monitors_in_lockstack > 0) {
1103 freeze_lockstack(chunk);
1104 }
1105
1577 // Some GCs could put direct allocations in old gen for slow-path
1578 // allocations; need to explicitly check if that was the case.
1579 _barriers = chunk->requires_barriers();
1580 }
1581 }
1582
1583 if (_barriers) {
1584 log_develop_trace(continuations)("allocation requires barriers");
1585 }
1586
1587 assert(chunk->parent() == nullptr || chunk->parent()->is_stackChunk(), "");
1588
1589 return chunk;
1590 }
1591
1592 void FreezeBase::throw_stack_overflow_on_humongous_chunk() {
1593 ContinuationWrapper::SafepointOp so(_thread, _cont); // could also call _cont.done() instead
1594 Exceptions::_throw_msg(_thread, __FILE__, __LINE__, vmSymbols::java_lang_StackOverflowError(), "Humongous stack chunk");
1595 }
1596
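// RAII helper that publishes a last-Java-frame anchor for the given top frame, making the
// stack walkable (e.g. across a safepoint), and clears the anchor again on destruction.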
1597 class AnchorMark : public StackObj {
1598 JavaThread* _current;
1599 frame& _top_frame;
1600 intptr_t* _last_sp_from_frame;
1601 bool _is_interpreted;
1602
1603 public:
1604 AnchorMark(JavaThread* current, frame& f) : _current(current), _top_frame(f), _is_interpreted(false) {
1605 intptr_t* sp = anchor_mark_set_pd();
1606 set_anchor(_current, sp);
1607 }
1608 ~AnchorMark() {
1609 clear_anchor(_current);
1610 anchor_mark_clear_pd();
1611 }
1612 inline intptr_t* anchor_mark_set_pd();
1613 inline void anchor_mark_clear_pd();
1614 };
1615
1616 #if INCLUDE_JVMTI
1617 static int num_java_frames(ContinuationWrapper& cont) {
1618 ResourceMark rm; // used for scope traversal in num_java_frames(nmethod*, address)
1619 int count = 0;
1620 for (stackChunkOop chunk = cont.tail(); chunk != nullptr; chunk = chunk->parent()) {
1621 count += chunk->num_java_frames();
1622 }
1623 return count;
1624 }
1625
1626 static void invalidate_jvmti_stack(JavaThread* thread) {
1627 if (thread->is_interp_only_mode()) {
1628 JvmtiThreadState *state = thread->jvmti_thread_state();
1629 if (state != nullptr)
1630 state->invalidate_cur_stack_depth();
1631 }
1632 }
1633
1634 static void jvmti_yield_cleanup(JavaThread* thread, ContinuationWrapper& cont) {
1635 if (JvmtiExport::can_post_frame_pop()) {
1636 int num_frames = num_java_frames(cont);
1637
1638 ContinuationWrapper::SafepointOp so(Thread::current(), cont);
1639 JvmtiExport::continuation_yield_cleanup(JavaThread::current(), num_frames);
1640 }
1641 invalidate_jvmti_stack(thread);
1642 }
1643
1644 static void jvmti_mount_end(JavaThread* current, ContinuationWrapper& cont, frame top, Continuation::preempt_kind pk) {
1645 assert(current->vthread() != nullptr, "must be");
1646
1647 HandleMarkCleaner hm(current); // Cleanup vth and so._conth Handles
1648 Handle vth(current, current->vthread());
1649 ContinuationWrapper::SafepointOp so(current, cont);
1650
1651 AnchorMark am(current, top); // Set anchor so that the stack is walkable.
1652
1653 JRT_BLOCK
1654 JvmtiVTMSTransitionDisabler::VTMS_vthread_mount((jthread)vth.raw_value(), false);
1655
1656 if (current->pending_contended_entered_event()) {
1657 // No monitor JVMTI events for ObjectLocker case.
1658 if (pk != Continuation::object_locker) {
1659 JvmtiExport::post_monitor_contended_entered(current, current->contended_entered_monitor());
1660 }
1661 current->set_contended_entered_monitor(nullptr);
1662 }
1663 JRT_BLOCK_END
1664 }
1665 #endif // INCLUDE_JVMTI
1666
1667 #ifdef ASSERT
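// Walks the frames of the current continuation and returns true if any of them owns a monitor.
// Used below to cross-check the thread's held monitor count under LM_LEGACY.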
1668 static bool monitors_on_stack(JavaThread* thread) {
1669 ContinuationEntry* ce = thread->last_continuation();
1670 RegisterMap map(thread,
1671 RegisterMap::UpdateMap::include,
1672 RegisterMap::ProcessFrames::include,
1673 RegisterMap::WalkContinuation::skip);
1674 map.set_include_argument_oops(false);
1675 for (frame f = thread->last_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map)) {
1676 if ((f.is_interpreted_frame() && ContinuationHelper::InterpretedFrame::is_owning_locks(f)) ||
1677 (f.is_compiled_frame() && ContinuationHelper::CompiledFrame::is_owning_locks(map.thread(), &map, f)) ||
1678 (f.is_native_frame() && ContinuationHelper::NativeFrame::is_owning_locks(map.thread(), f))) {
1679 return true;
1680 }
1681 }
1682 return false;
1683 }
1684
1685 // There are no interpreted frames if we're not called from the interpreter and we haven't encountered an i2c
1686 // adapter or called Deoptimization::unpack_frames. As for native frames, upcalls from JNI also go through the
1687 // interpreter (see JavaCalls::call_helper), while the UpcallLinker explicitly sets cont_fastpath.
1688 bool FreezeBase::check_valid_fast_path() {
1689 ContinuationEntry* ce = _thread->last_continuation();
1690 RegisterMap map(_thread,
1691 RegisterMap::UpdateMap::skip,
1692 RegisterMap::ProcessFrames::skip,
1693 RegisterMap::WalkContinuation::skip);
1694 map.set_include_argument_oops(false);
1695 bool is_top_frame = true;
1696 for (frame f = freeze_start_frame(); Continuation::is_frame_in_continuation(ce, f); f = f.sender(&map), is_top_frame = false) {
1697 if (!((f.is_compiled_frame() && !f.is_deoptimized_frame()) || (is_top_frame && (f.is_runtime_frame() || f.is_native_frame())))) {
1698 return false;
1699 }
1700 }
1701 return true;
1702 }
1703
1704 static void verify_frame_kind(const frame& top, Continuation::preempt_kind preempt_kind, Method** m_ptr, const char** code_name_ptr, int* bci_ptr) {
1705 JavaThread* current = JavaThread::current();
1706 ResourceMark rm(current);
1707
1708 Method* m;
1709 const char* code_name;
1710 int bci;
1711 if (preempt_kind == Continuation::monitorenter) {
1712 assert(top.is_interpreted_frame() || top.is_runtime_frame(), "");
1713 bool at_sync_method;
1714 if (top.is_interpreted_frame()) {
1715 m = top.interpreter_frame_method();
1716 assert(!m->is_native() || m->is_synchronized(), "invalid method %s", m->external_name());
1717 address bcp = top.interpreter_frame_bcp();
1718 assert(bcp != 0 || m->is_native(), "");
1719 at_sync_method = m->is_synchronized() && (bcp == 0 || bcp == m->code_base());
1720 // bcp is advanced on monitorenter before making the VM call, adjust for that.
1721 bool at_sync_bytecode = bcp > m->code_base() && Bytecode(m, bcp - 1).code() == Bytecodes::Code::_monitorenter;
1722 assert(at_sync_method || at_sync_bytecode, "");
1723 bci = at_sync_method ? -1 : top.interpreter_frame_bci();
1724 } else {
1725 CodeBlob* cb = top.cb();
1726 RegisterMap reg_map(current,
1727 RegisterMap::UpdateMap::skip,
1728 RegisterMap::ProcessFrames::skip,
1729 RegisterMap::WalkContinuation::skip);
1730       frame fr = top.sender(&reg_map);
1731       vframe* vf = vframe::new_vframe(&fr, &reg_map, current);
1732 compiledVFrame* cvf = compiledVFrame::cast(vf);
1733 m = cvf->method();
1734 bci = cvf->scope()->bci();
1735 at_sync_method = bci == SynchronizationEntryBCI;
1736 assert(!at_sync_method || m->is_synchronized(), "bci is %d but method %s is not synchronized", bci, m->external_name());
1737 bool is_c1_monitorenter = false, is_c2_monitorenter = false;
1738 COMPILER1_PRESENT(is_c1_monitorenter = cb == Runtime1::blob_for(C1StubId::monitorenter_id) ||
1739 cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id);)
1740 COMPILER2_PRESENT(is_c2_monitorenter = cb == CodeCache::find_blob(OptoRuntime::complete_monitor_locking_Java());)
1741 assert(is_c1_monitorenter || is_c2_monitorenter, "wrong runtime stub frame");
1742 }
1743 code_name = at_sync_method ? "synchronized method" : "monitorenter";
1744 } else if (preempt_kind == Continuation::object_wait) {
1745 assert(top.is_interpreted_frame() || top.is_native_frame(), "");
1746 m = top.is_interpreted_frame() ? top.interpreter_frame_method() : top.cb()->as_nmethod()->method();
1747 assert(m->is_object_wait0(), "");
1748 bci = 0;
1749 code_name = "";
1750 } else {
1751 assert(preempt_kind == Continuation::object_locker, "invalid preempt kind");
1752 assert(top.is_interpreted_frame(), "");
1753 m = top.interpreter_frame_method();
1754 Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
1755 Bytecodes::Code code = current_bytecode.code();
1756 assert(code == Bytecodes::Code::_new || code == Bytecodes::Code::_invokestatic ||
1757 (code == Bytecodes::Code::_getstatic || code == Bytecodes::Code::_putstatic), "invalid bytecode");
1758 bci = top.interpreter_frame_bci();
1759 code_name = Bytecodes::name(current_bytecode.code());
1760 }
1761 assert(bci >= 0 || m->is_synchronized(), "invalid bci:%d at method %s", bci, m->external_name());
1762
1763 if (m_ptr != nullptr) {
1764 *m_ptr = m;
1765 *code_name_ptr = code_name;
1766 *bci_ptr = bci;
1767 }
1768 }
1769
1770 static void log_preempt_after_freeze(ContinuationWrapper& cont) {
1771 JavaThread* current = cont.thread();
1772 StackChunkFrameStream<ChunkFrames::Mixed> sfs(cont.tail());
1773 frame top_frame = sfs.to_frame();
1774 bool at_init = current->at_preemptable_init();
1775 bool at_enter = current->current_pending_monitor() != nullptr;
1776 bool at_wait = current->current_waiting_monitor() != nullptr;
1777 assert((at_enter && !at_wait) || (!at_enter && at_wait), "");
1778 Continuation::preempt_kind pk = at_init ? Continuation::object_locker : at_enter ? Continuation::monitorenter : Continuation::object_wait;
1779
1780 Method* m = nullptr;
1781 const char* code_name = nullptr;
1782 int bci = InvalidFrameStateBci;
1783 verify_frame_kind(top_frame, pk, &m, &code_name, &bci);
1784 assert(m != nullptr && code_name != nullptr && bci != InvalidFrameStateBci, "should be set");
1785
1786 ResourceMark rm(current);
1787 if (bci < 0) {
1788 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " while synchronizing on %smethod %s", current->monitor_owner_id(), m->is_native() ? "native " : "", m->external_name());
1789 } else if (m->is_object_wait0()) {
1790 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at native method %s", current->monitor_owner_id(), m->external_name());
1791 } else {
1792 Klass* k = current->preempt_init_klass();
1793 assert(k != nullptr || !at_init, "");
1794 log_trace(continuations, preempt)("Preempted " INT64_FORMAT " at %s(bci:%d) in method %s %s%s", current->monitor_owner_id(),
1795 code_name, bci, m->external_name(), at_init ? "trying to initialize klass " : "", at_init ? k->external_name() : "");
1796 }
1797 }
1798 #endif // ASSERT
1799
1800 static inline freeze_result freeze_epilog(ContinuationWrapper& cont) {
1801 verify_continuation(cont.continuation());
1802 assert(!cont.is_empty(), "");
1803
1804 log_develop_debug(continuations)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());
1805 return freeze_ok;
1806 }
1807
1808 static freeze_result freeze_epilog(JavaThread* thread, ContinuationWrapper& cont, freeze_result res) {
1809 if (UNLIKELY(res != freeze_ok)) {
1810 JFR_ONLY(thread->set_last_freeze_fail_result(res);)
1811 verify_continuation(cont.continuation());
1812 log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1813 return res;
1814 }
1815
1816 JVMTI_ONLY(jvmti_yield_cleanup(thread, cont)); // can safepoint
1817 return freeze_epilog(cont);
1818 }
1819
1820 static freeze_result preempt_epilog(ContinuationWrapper& cont, freeze_result res, frame& old_last_frame) {
1821 if (UNLIKELY(res != freeze_ok)) {
1822 verify_continuation(cont.continuation());
1823 log_develop_trace(continuations)("=== end of freeze (fail %d)", res);
1824 return res;
1825 }
1826
1827 // Set up things so that on return to Java we jump to preempt stub.
1828 patch_return_pc_with_preempt_stub(old_last_frame);
1829 cont.tail()->set_preempted(true);
1830 DEBUG_ONLY(log_preempt_after_freeze(cont);)
1831 return freeze_epilog(cont);
1832 }
1833
1834 template<typename ConfigT, bool preempt>
1835 static inline freeze_result freeze_internal(JavaThread* current, intptr_t* const sp) {
1836 assert(!current->has_pending_exception(), "");
1837
1838 #ifdef ASSERT
1839   log_trace(continuations)("~~~~ freeze sp: " INTPTR_FORMAT " JavaThread: " INTPTR_FORMAT, p2i(current->last_continuation()->entry_sp()), p2i(current));
1840 log_frames(current, false);
1841 #endif
1842
1843 CONT_JFR_ONLY(EventContinuationFreeze event;)
1844
1845 ContinuationEntry* entry = current->last_continuation();
1846
1847 oop oopCont = entry->cont_oop(current);
1848 assert(oopCont == current->last_continuation()->cont_oop(current), "");
1849 assert(ContinuationEntry::assert_entry_frame_laid_out(current), "");
1850
1851 verify_continuation(oopCont);
1852 ContinuationWrapper cont(current, oopCont);
1853 log_develop_debug(continuations)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
1854
1855 assert(entry->is_virtual_thread() == (entry->scope(current) == java_lang_VirtualThread::vthread_scope()), "");
1856
1857 assert(LockingMode != LM_LEGACY || (monitors_on_stack(current) == ((current->held_monitor_count() - current->jni_monitor_count()) > 0)),
1858 "Held monitor count and locks on stack invariant: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
1859 assert(LockingMode == LM_LEGACY || (current->held_monitor_count() == 0 && current->jni_monitor_count() == 0),
1860 "Held monitor count should only be used for LM_LEGACY: " INT64_FORMAT " JNI: " INT64_FORMAT, (int64_t)current->held_monitor_count(), (int64_t)current->jni_monitor_count());
2016 // 300 is an estimate for stack size taken for this native code, in addition to StackShadowPages
2017 // for the Java frames in the check below.
2018 if (!stack_overflow_check(thread, size + 300, bottom)) {
2019 return 0;
2020 }
2021
2022 log_develop_trace(continuations)("prepare_thaw bottom: " INTPTR_FORMAT " top: " INTPTR_FORMAT " size: %d",
2023 p2i(bottom), p2i(bottom - size), size);
2024 return size;
2025 }
2026
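// Common state and logic for thawing frames from a stack chunk back onto the platform thread's
// stack. The ConfigT-templated Thaw class built on top of this selects between the fast and
// slow paths (see thaw_slow() below).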
2027 class ThawBase : public StackObj {
2028 protected:
2029 JavaThread* _thread;
2030 ContinuationWrapper& _cont;
2031 CONT_JFR_ONLY(FreezeThawJfrInfo _jfr_info;)
2032
2033 intptr_t* _fastpath;
2034 bool _barriers;
2035 bool _preempted_case;
2036 bool _process_args_at_top;
2037 intptr_t* _top_unextended_sp_before_thaw;
2038 int _align_size;
2039 DEBUG_ONLY(intptr_t* _top_stack_address);
2040
2041 // Only used for some preemption cases.
2042 ObjectMonitor* _monitor;
2043
2044 StackChunkFrameStream<ChunkFrames::Mixed> _stream;
2045
2046 NOT_PRODUCT(int _frames;)
2047
2048 protected:
2049 ThawBase(JavaThread* thread, ContinuationWrapper& cont) :
2050 _thread(thread), _cont(cont),
2051 _fastpath(nullptr) {
2052 DEBUG_ONLY(_top_unextended_sp_before_thaw = nullptr;)
2053 assert (cont.tail() != nullptr, "no last chunk");
2054 DEBUG_ONLY(_top_stack_address = _cont.entrySP() - thaw_size(cont.tail());)
2055 }
2056
2057 void clear_chunk(stackChunkOop chunk);
2058 template<bool check_stub>
2059 int remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize);
2060 void copy_from_chunk(intptr_t* from, intptr_t* to, int size);
2061
2062 void thaw_lockstack(stackChunkOop chunk);
2063
2064 // fast path
2065 inline void prefetch_chunk_pd(void* start, int size_words);
2066 void patch_return(intptr_t* sp, bool is_last);
2067
2068 intptr_t* handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case);
2069 inline intptr_t* push_cleanup_continuation();
2070 inline intptr_t* push_preempt_adapter();
2071 intptr_t* redo_vmcall(JavaThread* current, frame& top);
2072 void throw_interrupted_exception(JavaThread* current, frame& top);
2073
2074 void recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case);
2075 void finish_thaw(frame& f);
2076
2077 private:
2078 template<typename FKind> bool recurse_thaw_java_frame(frame& caller, int num_frames);
2079 void finalize_thaw(frame& entry, int argsize);
2080
2081 inline bool seen_by_gc();
2082
2083 inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
2084 inline void after_thaw_java_frame(const frame& f, bool bottom);
2085 inline void patch(frame& f, const frame& caller, bool bottom);
2086 void clear_bitmap_bits(address start, address end);
2087
2088 NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top);
2089 void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
2090 void recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames);
2091 void recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames);
2092
2093 void push_return_frame(frame& f);
2094 inline frame new_entry_frame();
2095 template<typename FKind> frame new_stack_frame(const frame& hf, frame& caller, bool bottom);
2096 inline void patch_pd(frame& f, const frame& sender);
2097 inline void patch_pd(frame& f, intptr_t* caller_sp);
2098 inline intptr_t* align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom);
2099
2100 void maybe_set_fastpath(intptr_t* sp) { if (sp > _fastpath) _fastpath = sp; }
2101
2102 static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
2103
2104 public:
2105 CONT_JFR_ONLY(FreezeThawJfrInfo& jfr_info() { return _jfr_info; })
2106 };
2107
2108 template <typename ConfigT>
2168 chunk->set_sp(chunk->bottom());
2169 chunk->set_max_thawing_size(0);
2170 }
2171
2172 template<bool check_stub>
2173 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
2174 bool empty = false;
2175 StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
2176 DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)
2177 assert(chunk_sp == f.sp(), "");
2178 assert(chunk_sp == f.unextended_sp(), "");
2179
2180 int frame_size = f.cb()->frame_size();
2181 argsize = f.stack_argsize();
2182
2183 assert(!f.is_stub() || check_stub, "");
2184 if (check_stub && f.is_stub()) {
2185 // If we don't thaw the top compiled frame too, after restoring the saved
2186 // registers back in Java, we would hit the return barrier to thaw one more
2187     // frame, effectively overwriting the restored registers during that call.
2188 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2189 assert(!f.is_done(), "");
2190
2191 f.get_cb();
2192 assert(f.is_compiled(), "");
2193 frame_size += f.cb()->frame_size();
2194 argsize = f.stack_argsize();
2195
2196 if (f.cb()->as_nmethod()->is_marked_for_deoptimization()) {
2197 // The caller of the runtime stub when the continuation is preempted is not at a
2198 // Java call instruction, and so cannot rely on nmethod patching for deopt.
2199 log_develop_trace(continuations)("Deoptimizing runtime stub caller");
2200 f.to_frame().deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2201 }
2202 }
2203
2204 f.next(SmallRegisterMap::instance_no_args(), true /* stop */);
2205 empty = f.is_done();
2206 assert(!empty || argsize == chunk->argsize(), "");
2207
2208 if (empty) {
2209 clear_chunk(chunk);
2210 } else {
2211 chunk->set_sp(chunk->sp() + frame_size);
2212 chunk->set_max_thawing_size(chunk->max_thawing_size() - frame_size);
2213 // We set chunk->pc to the return pc into the next frame
2214 chunk->set_pc(f.pc());
2215 #ifdef ASSERT
2216 {
2217 intptr_t* retaddr_slot = (chunk_sp
2218 + frame_size
2219 - frame::sender_sp_ret_address_offset());
2220 assert(f.pc() == ContinuationHelper::return_address_at(retaddr_slot),
2221 "unexpected pc");
2222 }
2223 #endif
2224 }
2344 return rs.sp();
2345 }
2346
2347 inline bool ThawBase::seen_by_gc() {
2348 return _barriers || _cont.tail()->is_gc_mode();
2349 }
2350
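// For concurrent collectors (ZGC, Shenandoah), derived pointers in the chunk are converted to
// offsets (relativized) before we start modifying the chunk for thawing.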
2351 static inline void relativize_chunk_concurrently(stackChunkOop chunk) {
2352 #if INCLUDE_ZGC || INCLUDE_SHENANDOAHGC
2353 if (UseZGC || UseShenandoahGC) {
2354 chunk->relativize_derived_pointers_concurrently();
2355 }
2356 #endif
2357 }
2358
2359 template <typename ConfigT>
2360 NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::thaw_kind kind) {
2361 Continuation::preempt_kind preempt_kind;
2362 bool retry_fast_path = false;
2363
2364 _process_args_at_top = false;
2365 _preempted_case = chunk->preempted();
2366 if (_preempted_case) {
2367 ObjectWaiter* waiter = java_lang_VirtualThread::objectWaiter(_thread->vthread());
2368 if (waiter != nullptr) {
2369 // Mounted again after preemption. Resume the pending monitor operation,
2370 // which will be either a monitorenter or Object.wait() call.
2371 ObjectMonitor* mon = waiter->monitor();
2372 preempt_kind = waiter->is_wait() ? Continuation::object_wait : Continuation::monitorenter;
2373
2374 bool mon_acquired = mon->resume_operation(_thread, waiter, _cont);
2375 assert(!mon_acquired || mon->has_owner(_thread), "invariant");
2376 if (!mon_acquired) {
2377 // Failed to acquire monitor. Return to enterSpecial to unmount again.
2378 log_trace(continuations, tracking)("Failed to acquire monitor, unmounting again");
2379 return push_cleanup_continuation();
2380 }
2381 _monitor = mon; // remember monitor since we might need it on handle_preempted_continuation()
2382 chunk = _cont.tail(); // reload oop in case of safepoint in resume_operation (if posting JVMTI events).
2383 } else {
2384       // Preemption cancelled in the monitorenter case. We actually acquired
2385       // the monitor after freezing all frames, so there is nothing to do. In case
2386       // of preemption on ObjectLocker during klass init, we already released the
2387       // monitor at ~ObjectLocker, so here we just set _monitor to nullptr so we
2388       // know there is no need to release it later.
2389 preempt_kind = Continuation::monitorenter;
2390 _monitor = nullptr;
2391 }
2392
2393 // Call this first to avoid racing with GC threads later when modifying the chunk flags.
2394 relativize_chunk_concurrently(chunk);
2395
2396 if (chunk->at_klass_init()) {
2397 preempt_kind = Continuation::object_locker;
2398 chunk->set_at_klass_init(false);
2399 _process_args_at_top = chunk->has_args_at_top();
2400 if (_process_args_at_top) chunk->set_has_args_at_top(false);
2401 }
2402 chunk->set_preempted(false);
2403 retry_fast_path = true;
2404 } else {
2405 relativize_chunk_concurrently(chunk);
2406 }
2407
2408   // On the first thaw after freeze, restore oops to the lockstack, if any.
2409 assert(chunk->lockstack_size() == 0 || kind == Continuation::thaw_top, "");
2410 if (kind == Continuation::thaw_top && chunk->lockstack_size() > 0) {
2411 thaw_lockstack(chunk);
2412 retry_fast_path = true;
2413 }
2414
2415 // Retry the fast path now that we possibly cleared the FLAG_HAS_LOCKSTACK
2416 // and FLAG_PREEMPTED flags from the stackChunk.
2417 if (retry_fast_path && can_thaw_fast(chunk)) {
2418 intptr_t* sp = thaw_fast<true>(chunk);
2419 if (_preempted_case) {
2420 return handle_preempted_continuation(sp, preempt_kind, true /* fast_case */);
2421 }
2465
2466 intptr_t* sp = caller.sp();
2467
2468 if (_preempted_case) {
2469 return handle_preempted_continuation(sp, preempt_kind, false /* fast_case */);
2470 }
2471 return sp;
2472 }
2473
2474 void ThawBase::recurse_thaw(const frame& heap_frame, frame& caller, int num_frames, bool top_on_preempt_case) {
2475 log_develop_debug(continuations)("thaw num_frames: %d", num_frames);
2476 assert(!_cont.is_empty(), "no more frames");
2477 assert(num_frames > 0, "");
2478 assert(!heap_frame.is_empty(), "");
2479
2480 if (top_on_preempt_case && (heap_frame.is_native_frame() || heap_frame.is_runtime_frame())) {
2481 heap_frame.is_native_frame() ? recurse_thaw_native_frame(heap_frame, caller, 2) : recurse_thaw_stub_frame(heap_frame, caller, 2);
2482 } else if (!heap_frame.is_interpreted_frame()) {
2483 recurse_thaw_compiled_frame(heap_frame, caller, num_frames, false);
2484 } else {
2485 recurse_thaw_interpreted_frame(heap_frame, caller, num_frames, top_on_preempt_case);
2486 }
2487 }
2488
2489 template<typename FKind>
2490 bool ThawBase::recurse_thaw_java_frame(frame& caller, int num_frames) {
2491 assert(num_frames > 0, "");
2492
2493 DEBUG_ONLY(_frames++;)
2494
2495 int argsize = _stream.stack_argsize();
2496
2497 _stream.next(SmallRegisterMap::instance_no_args());
2498 assert(_stream.to_frame().is_empty() == _stream.is_done(), "");
2499
2500 // we never leave a compiled caller of an interpreted frame as the top frame in the chunk
2501 // as it makes detecting that situation and adjusting unextended_sp tricky
2502 if (num_frames == 1 && !_stream.is_done() && FKind::interpreted && _stream.is_compiled()) {
2503 log_develop_trace(continuations)("thawing extra compiled frame to not leave a compiled interpreted-caller at top");
2504 num_frames++;
2505 }
2506
2507 if (num_frames == 1 || _stream.is_done()) { // end recursion
2508 finalize_thaw(caller, FKind::interpreted ? 0 : argsize);
2509 return true; // bottom
2510 } else { // recurse
2511 recurse_thaw(_stream.to_frame(), caller, num_frames - 1, false /* top_on_preempt_case */);
2512 return false;
2513 }
2514 }
2515
2516 void ThawBase::finalize_thaw(frame& entry, int argsize) {
2517 stackChunkOop chunk = _cont.tail();
2582
2583 void ThawBase::clear_bitmap_bits(address start, address end) {
2584 assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
2585 assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
2586
2587   // We need to clear the bits that correspond to arguments, as they reside in the caller frame,
2588   // or they will keep objects that are otherwise unreachable alive.
2589
2590 // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
2591 // `end` could be at an odd number of stack slots from `start`, i.e might not be oop aligned.
2592 // If that's the case the bit range corresponding to the last stack slot should not have bits set
2593 // anyways and we assert that before returning.
2594 address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
2595 log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
2596 stackChunkOop chunk = _cont.tail();
2597 chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
2598 assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
2599 }
2600
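// Called after thawing frames for a continuation that was frozen due to preemption. Finishes
// the JVMTI VTMS transition if needed, fixes up the top frame when we came through the fast
// path, and then resumes or redoes the operation the thread was preempted in (monitorenter,
// Object.wait, or the klass-init/ObjectLocker case).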
2601 intptr_t* ThawBase::handle_preempted_continuation(intptr_t* sp, Continuation::preempt_kind preempt_kind, bool fast_case) {
2602 frame top(sp);
2603 assert(top.pc() == *(address*)(sp - frame::sender_sp_ret_address_offset()), "");
2604 DEBUG_ONLY(verify_frame_kind(top, preempt_kind);)
2605 NOT_PRODUCT(int64_t tid = _thread->monitor_owner_id();)
2606
2607 #if INCLUDE_JVMTI
2608 // Finish the VTMS transition.
2609 assert(_thread->is_in_VTMS_transition(), "must be");
2610 bool is_vthread = Continuation::continuation_scope(_cont.continuation()) == java_lang_VirtualThread::vthread_scope();
2611 if (is_vthread) {
2612 if (JvmtiVTMSTransitionDisabler::VTMS_notify_jvmti_events()) {
2613 jvmti_mount_end(_thread, _cont, top, preempt_kind);
2614 } else {
2615 _thread->set_is_in_VTMS_transition(false);
2616 java_lang_Thread::set_is_in_VTMS_transition(_thread->vthread(), false);
2617 }
2618 }
2619 #endif
2620
2621 if (fast_case) {
2622 // If we thawed in the slow path the runtime stub/native wrapper frame already
2623 // has the correct fp (see ThawBase::new_stack_frame). On the fast path though,
2624 // we copied the original fp at the time of freeze which now will have to be fixed.
2625 assert(top.is_runtime_frame() || top.is_native_frame(), "");
2626 int fsize = top.cb()->frame_size();
2627 patch_pd(top, sp + fsize);
2628 }
2629
2630 if (preempt_kind == Continuation::object_wait) {
2631     // Check now if we need to throw an InterruptedException (IE).
2632 bool throw_ie = _thread->pending_interrupted_exception();
2633 if (throw_ie) {
2634 throw_interrupted_exception(_thread, top);
2635 _thread->set_pending_interrupted_exception(false);
2636 }
2637     log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on Object.wait%s", tid, throw_ie ? " (throwing IE)" : "");
2638 } else if (preempt_kind == Continuation::monitorenter) {
2639 if (top.is_runtime_frame()) {
2640 // The continuation might now run on a different platform thread than the previous time so
2641 // we need to adjust the current thread saved in the stub frame before restoring registers.
2642 JavaThread** thread_addr = frame::saved_thread_address(top);
2643 if (thread_addr != nullptr) *thread_addr = _thread;
2644 }
2645 log_develop_trace(continuations, preempt)("Resuming " INT64_FORMAT " after preemption on monitorenter", tid);
2646 } else {
2647     // We need to redo the original call into the VM. First though, we need
2648     // to exit the monitor we just acquired (except in the preemption-cancelled
2649     // case, where it was already released).
2650 assert(preempt_kind == Continuation::object_locker, "");
2651 if (_monitor != nullptr) _monitor->exit(_thread);
2652 sp = redo_vmcall(_thread, top);
2653 }
2654 return sp;
2655 }
2656
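// ObjectLocker/klass-init case: re-execute the original call into the VM (InterpreterRuntime::_new
// or resolve_from_cache) for the bytecode at the top interpreted frame. Returns the sp to continue
// at, or the sp of a preempt adapter if we got preempted yet again.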
2657 intptr_t* ThawBase::redo_vmcall(JavaThread* current, frame& top) {
2658 assert(!current->preempting(), "");
2659 NOT_PRODUCT(int64_t tid = current->monitor_owner_id();)
2660 intptr_t* sp = top.sp();
2661
2662 {
2663 HandleMarkCleaner hmc(current); // Cleanup so._conth Handle
2664 ContinuationWrapper::SafepointOp so(current, _cont);
2665 AnchorMark am(current, top); // Set the anchor so that the stack is walkable.
2666
2667 Method* m = top.interpreter_frame_method();
2668 Bytecode current_bytecode = Bytecode(m, top.interpreter_frame_bcp());
2669 Bytecodes::Code code = current_bytecode.code();
2670 log_develop_trace(continuations, preempt)("Redoing InterpreterRuntime::%s for " INT64_FORMAT, code == Bytecodes::Code::_new ? "_new" : "resolve_from_cache", tid);
2671
2672 // These InterpreterRuntime entry points use JRT_ENTRY which uses a HandleMarkCleaner.
2673     // Create a HandleMark to avoid destroying so._conth.
2674 HandleMark hm(current);
2675 if (code == Bytecodes::Code::_new) {
2676 InterpreterRuntime::_new(current, m->constants(), current_bytecode.get_index_u2(code));
2677 } else {
2678 InterpreterRuntime::resolve_from_cache(current, code);
2679 }
2680 }
2681
2682 if (current->preempting()) {
2683 // Preempted again so we just arrange to return to preempt stub to unmount.
2684 sp = push_preempt_adapter();
2685 current->set_preempt_alternate_return(nullptr);
2686 bool cancelled = current->preemption_cancelled();
2687 if (cancelled) {
2688       // Instead of calling thaw again from the preempt stub, just unmount anyway with
2689 // state of YIELDING. This will give a chance for other vthreads to run while
2690 // minimizing repeated loops of "thaw->redo_vmcall->try_preempt->preemption_cancelled->thaw..."
2691 // in case of multiple vthreads contending for the same init_lock().
2692 current->set_preemption_cancelled(false);
2693 oop vthread = current->vthread();
2694 assert(java_lang_VirtualThread::state(vthread) == java_lang_VirtualThread::RUNNING, "wrong state for vthread");
2695 java_lang_VirtualThread::set_state(vthread, java_lang_VirtualThread::YIELDING);
2696 }
2697 log_develop_trace(continuations, preempt)("Preempted " INT64_FORMAT " again%s", tid, cancelled ? "(preemption cancelled, setting state to YIELDING)" : "");
2698 } else {
2699     log_develop_trace(continuations, preempt)("Call successful, resuming " INT64_FORMAT, tid);
2700 }
2701 return sp;
2702 }
2703
2704 void ThawBase::throw_interrupted_exception(JavaThread* current, frame& top) {
2705 HandleMarkCleaner hm(current); // Cleanup so._conth Handle
2706 ContinuationWrapper::SafepointOp so(current, _cont);
2707   // Since we might safepoint, set the anchor so that the stack can be walked.
2708 set_anchor(current, top.sp());
2709 JRT_BLOCK
2710 THROW(vmSymbols::java_lang_InterruptedException());
2711 JRT_BLOCK_END
2712 clear_anchor(current);
2713 }
2714
2715 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames, bool is_top) {
2716 assert(hf.is_interpreted_frame(), "");
2717
2718 if (UNLIKELY(seen_by_gc())) {
2719 if (is_top && _process_args_at_top) {
2720 log_trace(continuations, tracking)("Processing arguments in recurse_thaw_interpreted_frame");
2721 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_with_args());
2722 } else {
2723 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2724 }
2725 }
2726
2727 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::InterpretedFrame>(caller, num_frames);
2728
2729 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2730
2731   _align_size += frame::align_wiggle; // possible added alignment for internal interpreted frame alignment on AArch64
2732
2733 frame f = new_stack_frame<ContinuationHelper::InterpretedFrame>(hf, caller, is_bottom_frame);
2734
2735 intptr_t* const stack_frame_top = f.sp() + frame::metadata_words_at_top;
2736 intptr_t* const stack_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(f);
2737 intptr_t* const heap_frame_top = hf.unextended_sp() + frame::metadata_words_at_top;
2738 intptr_t* const heap_frame_bottom = ContinuationHelper::InterpretedFrame::frame_bottom(hf);
2739
2740 assert(hf.is_heap_frame(), "should be");
2741 assert(!f.is_heap_frame(), "should not be");
2742
2743 const int fsize = pointer_delta_as_int(heap_frame_bottom, heap_frame_top);
2744 assert((stack_frame_bottom == stack_frame_top + fsize), "");
2749
2750   // Make sure the relativized locals pointer is already set.
2751 assert(f.interpreter_frame_local_at(0) == stack_frame_bottom - 1, "invalid frame bottom");
2752
2753 derelativize_interpreted_frame_metadata(hf, f);
2754 patch(f, caller, is_bottom_frame);
2755
2756 assert(f.is_interpreted_frame_valid(_cont.thread()), "invalid thawed frame");
2757 assert(stack_frame_bottom <= ContinuationHelper::Frame::frame_top(caller), "");
2758
2759 CONT_JFR_ONLY(_jfr_info.record_interpreted_frame();)
2760
2761 maybe_set_fastpath(f.sp());
2762
2763 Method* m = hf.interpreter_frame_method();
2764 assert(!m->is_native() || !is_bottom_frame, "should be top frame of thaw_top case; missing caller frame");
2765 const int locals = m->max_locals();
2766
2767 if (!is_bottom_frame) {
2768 // can only fix caller once this frame is thawed (due to callee saved regs)
2769 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2770 } else if (_cont.tail()->has_bitmap() && locals > 0) {
2771 assert(hf.is_heap_frame(), "should be");
2772 address start = (address)(heap_frame_bottom - locals);
2773 address end = (address)heap_frame_bottom;
2774 clear_bitmap_bits(start, end);
2775 }
2776
2777 DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2778 caller = f;
2779 }
2780
2781 void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller) {
2782 assert(hf.is_compiled_frame(), "");
2783 assert(_preempted_case || !stub_caller, "stub caller not at preemption");
2784
2785 if (!stub_caller && UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2786 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2787 }
2788
2789 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::CompiledFrame>(caller, num_frames);
2790
2791 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2792
2793 assert(caller.sp() == caller.unextended_sp(), "");
2794
2795 if ((!is_bottom_frame && caller.is_interpreted_frame()) || (is_bottom_frame && Interpreter::contains(_cont.tail()->pc()))) {
2796 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_compiled_frame
2797 }
2798
2799 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2800 // yet laid out in the stack, and so the original_pc is not stored in it.
2801 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2802 frame f = new_stack_frame<ContinuationHelper::CompiledFrame>(hf, caller, is_bottom_frame);
2803 intptr_t* const stack_frame_top = f.sp();
2804 intptr_t* const heap_frame_top = hf.unextended_sp();
2805
2806 const int added_argsize = (is_bottom_frame || caller.is_interpreted_frame()) ? hf.compiled_frame_stack_argsize() : 0;
2825 assert(!f.is_deoptimized_frame(), "");
2826 if (hf.is_deoptimized_frame()) {
2827 maybe_set_fastpath(f.sp());
2828 } else if (_thread->is_interp_only_mode()
2829 || (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
2830 // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
2831 // cannot rely on nmethod patching for deopt.
2832 assert(_thread->is_interp_only_mode() || stub_caller, "expected a stub-caller");
2833
2834 log_develop_trace(continuations)("Deoptimizing thawed frame");
2835 DEBUG_ONLY(ContinuationHelper::Frame::patch_pc(f, nullptr));
2836
2837 f.deoptimize(nullptr); // the null thread simply avoids the assertion in deoptimize which we're not set up for
2838 assert(f.is_deoptimized_frame(), "");
2839 assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
2840 maybe_set_fastpath(f.sp());
2841 }
2842
2843 if (!is_bottom_frame) {
2844 // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2845 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2846 } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
2847 address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
2848 int stack_args_slots = f.cb()->as_nmethod()->num_stack_arg_slots(false /* rounded */);
2849 int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
2850 clear_bitmap_bits(start, start + argsize_in_bytes);
2851 }
2852
2853 DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
2854 caller = f;
2855 }
2856
2857 void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_frames) {
2858 DEBUG_ONLY(_frames++;)
2859
2860 if (UNLIKELY(seen_by_gc())) {
2861 // Process the stub's caller here since we might need the full map.
2862 RegisterMap map(nullptr,
2863 RegisterMap::UpdateMap::include,
2864 RegisterMap::ProcessFrames::skip,
2865 RegisterMap::WalkContinuation::skip);
2866 map.set_include_argument_oops(false);
2867 _stream.next(&map);
2868 assert(!_stream.is_done(), "");
2869 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, &map);
2870 } else {
2871 _stream.next(SmallRegisterMap::instance_no_args());
2872 assert(!_stream.is_done(), "");
2873 }
2874
2875 recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames, true);
2876
2877 assert(caller.is_compiled_frame(), "");
2878 assert(caller.sp() == caller.unextended_sp(), "");
2879
2880 DEBUG_ONLY(before_thaw_java_frame(hf, caller, false /*is_bottom_frame*/, num_frames);)
2881
2882 frame f = new_stack_frame<ContinuationHelper::StubFrame>(hf, caller, false);
2883 intptr_t* stack_frame_top = f.sp();
2884 intptr_t* heap_frame_top = hf.sp();
2885 int fsize = ContinuationHelper::StubFrame::size(hf);
2886
2887 copy_from_chunk(heap_frame_top - frame::metadata_words, stack_frame_top - frame::metadata_words,
2888 fsize + frame::metadata_words);
2889
2890 patch(f, caller, false /*is_bottom_frame*/);
2891
2892 // can only fix caller once this frame is thawed (due to callee saved regs)
2893 RegisterMap map(nullptr,
2894 RegisterMap::UpdateMap::include,
2895 RegisterMap::ProcessFrames::skip,
2896 RegisterMap::WalkContinuation::skip);
2897 map.set_include_argument_oops(false);
2898 f.oop_map()->update_register_map(&f, &map);
2899 ContinuationHelper::update_register_map_with_callee(caller, &map);
2900 _cont.tail()->fix_thawed_frame(caller, &map);
2901
2902 DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
2903 caller = f;
2904 }
2905
2906 void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num_frames) {
2907 assert(hf.is_native_frame(), "");
2908 assert(_preempted_case && hf.cb()->as_nmethod()->method()->is_object_wait0(), "");
2909
2910 if (UNLIKELY(seen_by_gc())) { // recurse_thaw_stub_frame already invoked our barriers with a full regmap
2911 _cont.tail()->do_barriers<stackChunkOopDesc::BarrierType::Store>(_stream, SmallRegisterMap::instance_no_args());
2912 }
2913
2914 const bool is_bottom_frame = recurse_thaw_java_frame<ContinuationHelper::NativeFrame>(caller, num_frames);
2915 assert(!is_bottom_frame, "");
2916
2917 DEBUG_ONLY(before_thaw_java_frame(hf, caller, is_bottom_frame, num_frames);)
2918
2919 assert(caller.sp() == caller.unextended_sp(), "");
2920
2921 if (caller.is_interpreted_frame()) {
2922 _align_size += frame::align_wiggle; // we add one whether or not we've aligned because we add it in recurse_freeze_native_frame
2923 }
2924
2925 // new_stack_frame must construct the resulting frame using hf.pc() rather than hf.raw_pc() because the frame is not
2926 // yet laid out in the stack, and so the original_pc is not stored in it.
2927 // As a result, f.is_deoptimized_frame() is always false and we must test hf to know if the frame is deoptimized.
2928 frame f = new_stack_frame<ContinuationHelper::NativeFrame>(hf, caller, false /* bottom */);
2929 intptr_t* const stack_frame_top = f.sp();
2930 intptr_t* const heap_frame_top = hf.unextended_sp();
2931
2932 int fsize = ContinuationHelper::NativeFrame::size(hf);
2933 assert(fsize <= (int)(caller.unextended_sp() - f.unextended_sp()), "");
2934
2935 intptr_t* from = heap_frame_top - frame::metadata_words_at_bottom;
2936 intptr_t* to = stack_frame_top - frame::metadata_words_at_bottom;
2937 int sz = fsize + frame::metadata_words_at_bottom;
2938
2939 copy_from_chunk(from, to, sz); // copying good oops because we invoked barriers above
2940
2941 patch(f, caller, false /* bottom */);
2942
2943 // f.is_deoptimized_frame() is always false and we must test hf.is_deoptimized_frame() (see comment above)
2944 assert(!f.is_deoptimized_frame(), "");
2945 assert(!hf.is_deoptimized_frame(), "");
2946 assert(!f.cb()->as_nmethod()->is_marked_for_deoptimization(), "");
2947
2948 // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
2949 _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
2950
2951 DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
2952 caller = f;
2953 }
2954
2955 void ThawBase::finish_thaw(frame& f) {
2956 stackChunkOop chunk = _cont.tail();
2957
2958 if (chunk->is_empty()) {
2959 // Only remove chunk from list if it can't be reused for another freeze
2960 if (seen_by_gc()) {
2961 _cont.set_tail(chunk->parent());
2962 } else {
2963 chunk->set_has_mixed_frames(false);
2964 }
2965 chunk->set_max_thawing_size(0);
2966 } else {
2967 chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
2968 }
2969 assert(chunk->is_empty() == (chunk->max_thawing_size() == 0), "");
2970
2971 if (!is_aligned(f.sp(), frame::frame_alignment)) {
2972 assert(f.is_interpreted_frame(), "");
2973 f.set_sp(align_down(f.sp(), frame::frame_alignment));
2974 }
2975 push_return_frame(f);
2976 // can only fix caller after push_return_frame (due to callee saved regs)
2977 if (_process_args_at_top) {
2978 log_trace(continuations, tracking)("Processing arguments in finish_thaw");
2979 chunk->fix_thawed_frame(f, SmallRegisterMap::instance_with_args());
2980 } else {
2981 chunk->fix_thawed_frame(f, SmallRegisterMap::instance_no_args());
2982 }
2983
2984 assert(_cont.is_empty() == _cont.last_frame().is_empty(), "");
2985
2986 log_develop_trace(continuations)("thawed %d frames", _frames);
2987
2988 LogTarget(Trace, continuations) lt;
2989 if (lt.develop_is_enabled()) {
2990 LogStream ls(lt);
2991 ls.print_cr("top hframe after (thaw):");
2992 _cont.last_frame().print_value_on(&ls);
2993 }
2994 }
2995
2996 void ThawBase::push_return_frame(frame& f) { // see generate_cont_thaw
2997 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
2998 assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
2999
3000 LogTarget(Trace, continuations) lt;
3001 if (lt.develop_is_enabled()) {
3002 LogStream ls(lt);
3024
3025 ContinuationEntry* entry = thread->last_continuation();
3026 assert(entry != nullptr, "");
3027 oop oopCont = entry->cont_oop(thread);
3028
3029 assert(!jdk_internal_vm_Continuation::done(oopCont), "");
3030 assert(oopCont == get_continuation(thread), "");
3031 verify_continuation(oopCont);
3032
3033 assert(entry->is_virtual_thread() == (entry->scope(thread) == java_lang_VirtualThread::vthread_scope()), "");
3034
3035 ContinuationWrapper cont(thread, oopCont);
3036 log_develop_debug(continuations)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));
3037
3038 #ifdef ASSERT
3039 set_anchor_to_entry(thread, cont.entry());
3040 log_frames(thread);
3041 clear_anchor(thread);
3042 #endif
3043
3044 Thaw<ConfigT> thw(thread, cont);
3045 intptr_t* const sp = thw.thaw(kind);
3046 assert(is_aligned(sp, frame::frame_alignment), "");
3047 DEBUG_ONLY(log_frames_after_thaw(thread, cont, sp);)
3048
3049 CONT_JFR_ONLY(thw.jfr_info().post_jfr_event(&event, cont.continuation(), thread);)
3050
3051 verify_continuation(cont.continuation());
3052 log_develop_debug(continuations)("=== End of thaw #" INTPTR_FORMAT, cont.hash());
3053
3054 return sp;
3055 }
3056
3057 #ifdef ASSERT
3058 static void do_deopt_after_thaw(JavaThread* thread) {
3059 int i = 0;
3060 StackFrameStream fst(thread, true, false);
3061 fst.register_map()->set_include_argument_oops(false);
3062 ContinuationHelper::update_register_map_with_callee(*fst.current(), fst.register_map());
3063 for (; !fst.is_done(); fst.next()) {
3064 if (fst.current()->cb()->is_nmethod()) {
3065 nmethod* nm = fst.current()->cb()->as_nmethod();
3066 if (!nm->method()->is_continuation_native_intrinsic()) {
3067 nm->make_deoptimized();
3124 if (!fr.is_interpreted_frame()) {
3125 st->print_cr("size: %d argsize: %d",
3126 ContinuationHelper::NonInterpretedUnknownFrame::size(fr),
3127 ContinuationHelper::NonInterpretedUnknownFrame::stack_argsize(fr));
3128 }
3129 VMReg reg = fst.register_map()->find_register_spilled_here(cl.p(), fst.current()->sp());
3130 if (reg != nullptr) {
3131 st->print_cr("Reg %s %d", reg->name(), reg->is_stack() ? (int)reg->reg2stack() : -99);
3132 }
3133 cl.reset();
3134 DEBUG_ONLY(thread->print_frame_layout();)
3135 if (chunk != nullptr) {
3136 chunk->print_on(true, st);
3137 }
3138 return false;
3139 }
3140 }
3141 return true;
3142 }
3143
3144 static void log_frames(JavaThread* thread, bool dolog) {
3145 const static int show_entry_callers = 3;
3146 LogTarget(Trace, continuations, tracking) lt;
3147 if (!lt.develop_is_enabled() || !dolog) {
3148 return;
3149 }
3150 LogStream ls(lt);
3151
3152 ls.print_cr("------- frames --------- for thread " INTPTR_FORMAT, p2i(thread));
3153 if (!thread->has_last_Java_frame()) {
3154 ls.print_cr("NO ANCHOR!");
3155 }
3156
3157 RegisterMap map(thread,
3158 RegisterMap::UpdateMap::include,
3159 RegisterMap::ProcessFrames::include,
3160 RegisterMap::WalkContinuation::skip);
3161 map.set_include_argument_oops(false);
3162
3163 if (false) {
3164 for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
3165 f.print_on(&ls);
3166 }
3167 } else {
3169 ResetNoHandleMark rnhm;
3170 ResourceMark rm;
3171 HandleMark hm(Thread::current());
3172 FrameValues values;
3173
3174 int i = 0;
3175 int post_entry = -1;
3176 for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map), i++) {
3177 f.describe(values, i, &map, i == 0);
3178 if (post_entry >= 0 || Continuation::is_continuation_enterSpecial(f))
3179 post_entry++;
3180 if (post_entry >= show_entry_callers)
3181 break;
3182 }
3183 values.print_on(thread, &ls);
3184 }
3185
3186 ls.print_cr("======= end frames =========");
3187 }
3188
3189 static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp) {
3190 intptr_t* sp0 = sp;
3191 address pc0 = *(address*)(sp - frame::sender_sp_ret_address_offset());
3192
3193 bool preempted = false;
3194 stackChunkOop tail = cont.tail();
3195 if (tail != nullptr && tail->preempted()) {
3196 // Still preempted (monitor not acquired) so no frames were thawed.
3197 set_anchor(thread, cont.entrySP(), cont.entryPC());
3198 preempted = true;
3199 } else {
3200 set_anchor(thread, sp0);
3201 }
3202
3203 log_frames(thread);
3204 if (LoomVerifyAfterThaw) {
3205 assert(do_verify_after_thaw(thread, cont.tail(), tty), "");
3206 }
3207 assert(ContinuationEntry::assert_entry_frame_laid_out(thread, preempted), "");
3208 clear_anchor(thread);
3209
3210 LogTarget(Trace, continuations) lt;
3211 if (lt.develop_is_enabled()) {
3212 LogStream ls(lt);
3213 ls.print_cr("Jumping to frame (thaw):");
3214 frame(sp).print_value_on(&ls);
3215 }
3216 }
3217 #endif // ASSERT
3218
3219 #include CPU_HEADER_INLINE(continuationFreezeThaw)
3220
3221 #ifdef ASSERT
3222 static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st) {
3223 ResourceMark rm;
3224 FrameValues values;
3225 assert(f.get_cb() != nullptr, "");
3226 RegisterMap map(f.is_heap_frame() ?
3227 nullptr :
|