48 // copy the spilled rbp from the heap to the stack
49 *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
50 }
51
52 // Slow path
53
// Returns the sender (caller) frame of f during a freeze.
// For interpreted frames the sender is reconstructed from the frame's own
// saved sender-sp/fp/pc slots; for compiled/stub frames it is derived from
// the callee's link address (the saved-rbp slot).
template<typename FKind>
inline frame FreezeBase::sender(const frame& f) {
  assert(FKind::is_instance(f), "");
  if (FKind::interpreted) {
    return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
  }
  intptr_t** link_addr = link_address<FKind>(f);

  // The sender's sp sits sender_sp_offset words above the saved link slot;
  // the return pc is the word immediately below the sender's sp.
  intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); // f.unextended_sp() + (fsize/wordSize); //
  address sender_pc = (address) *(sender_sp-1);
  assert(sender_sp != f.sp(), "must have changed");

  int slot = 0;
  // slot == -1 after the lookup means no oop map exists for sender_pc.
  CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);
  return sender_cb != nullptr
    ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
            slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc), false)
    : frame(sender_sp, sender_sp, *link_addr, sender_pc);
}
73
// Computes where f's frozen copy will live inside the heap stack chunk and
// returns a heap ("on_heap") frame object pointing there. Also bumps the
// caller's sp so the next frozen frame is laid out above this one.
template<typename FKind>
frame FreezeBase::new_heap_frame(frame& f, frame& caller) {
  assert(FKind::is_instance(f), "");
  assert(!caller.is_interpreted_frame()
    || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");

  intptr_t *sp, *fp; // sp is really our unextended_sp
  if (FKind::interpreted) {
    assert((intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset) == nullptr
      || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
    intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
    // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
    // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
    bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
    // Place fp so that the locals (relativized via locals_offset) end just below
    // the caller's unextended_sp, overlapping the caller's outgoing args when allowed.
    fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
    // Preserve the fp-to-unextended_sp distance of the stack frame f.
    sp = fp - (f.fp() - f.unextended_sp());
    assert(sp <= fp, "");
    assert(fp <= caller.unextended_sp(), "");
    caller.set_sp(fp + frame::sender_sp_offset);

    assert(_cont.tail()->is_in_chunk(sp), "");

    frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
    // copy relativized locals from the stack frame
    *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
    return hf;
  } else {
    // For a compiled frame we need to re-read fp out of the frame because it may be an
    // oop and we might have had a safepoint in finalize_freeze, after constructing f.
    // For stub/native frames the value is not used while frozen, and will be constructed again
    // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
    // help with debugging, particularly when inspecting frames and identifying invalid accesses.
    fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;

    int fsize = FKind::size(f);
    sp = caller.unextended_sp() - fsize;
    if (caller.is_interpreted_frame()) {
      // If the caller is interpreted, our stackargs are not supposed to overlap with it
      // so we make more room by moving sp down by argsize
      int argsize = FKind::stack_argsize(f);
      sp -= argsize;
    }
    caller.set_sp(sp + fsize);

    assert(_cont.tail()->is_in_chunk(sp), "");

    return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
  }
}
123
124 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
125 assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
126 intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
127 if (real_unextended_sp != nullptr) {
128 f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
129 }
130 }
157 assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
158 assert(hf.fp() > (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
159 assert(hf.fp() <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
160 }
161
// Writes the return-pc and saved-fp words just below the top heap frame's sp,
// completing the chunk's top-of-stack metadata.
inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
  stackChunkOop chunk = _cont.tail();
  assert(chunk->is_in_chunk(hf.sp() - 1), "");
  assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");

  address frame_pc = hf.pc();

  // The return pc goes in the word immediately below sp.
  *(hf.sp() - 1) = (intptr_t)hf.pc();

  // For interpreted frames fp is stored relativized (offset from the slot
  // itself); for other frames it is stored as an absolute value.
  intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
  *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
                                       : (intptr_t)hf.fp();
  assert(frame_pc == ContinuationHelper::Frame::real_pc(hf), "");
}
176
// Patches the frozen callee's saved-link slot so it refers to the caller's fp.
inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
  if (caller.is_interpreted_frame()) {
    assert(!caller.is_empty(), "");
    // Interpreted caller: the link is stored relativized.
    patch_callee_link_relative(caller, caller.fp());
  } else {
    // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
    // and its oop-containing fp fixed. We've now just overwritten it, so we must patch it back to its value
    // as read from the chunk.
    patch_callee_link(caller, caller.fp());
  }
}
188
189 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
190 intptr_t* fp_addr = sp - frame::sender_sp_offset;
191 *fp_addr = badAddressVal;
192 }
193
194 inline intptr_t* AnchorMark::anchor_mark_set_pd() {
195 intptr_t* sp = _top_frame.sp();
196 if (_top_frame.is_interpreted_frame()) {
197 // In case the top frame is interpreted we need to set up the anchor using
198 // the last_sp saved in the frame (remove possible alignment added while
199 // thawing, see ThawBase::finish_thaw()). We also clear last_sp to match
200 // the behavior when calling the VM from the interpreter (we check for this
201 // in FreezeBase::prepare_freeze_interpreted_top_frame, which can be reached
232
233 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
234 size <<= LogBytesPerWord;
235 Prefetch::read(start, size);
236 Prefetch::read(start, size - 64);
237 }
238
// No-op on the fast thaw path: caller links only need fixing when frame
// pointers are preserved, which the fast path excludes (asserted below).
template <typename ConfigT>
inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
  // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
  assert(!PreserveFramePointer, "Frame pointers need to be fixed");
}
244
245 // Slow path
246
// Reconstructs the continuation entry frame from the sp/fp/pc values saved
// in the ContinuationEntry (accessed through _cont).
inline frame ThawBase::new_entry_frame() {
  intptr_t* sp = _cont.entrySP();
  return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
}
251
// Computes where the heap frame hf will land on the thread stack and returns
// a stack frame object pointing there. Also moves the caller's sp to account
// for argument overlap and 16-byte alignment.
template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
  assert(FKind::is_instance(hf), "");
  // The values in the returned frame object will be written into the callee's stack in patch.

  if (FKind::interpreted) {
    intptr_t* heap_sp = hf.unextended_sp();
    // If caller is interpreted it already made room for the callee arguments
    int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
    const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
    intptr_t* frame_sp = caller.unextended_sp() - fsize;
    // Preserve the fp-to-sp distance of the heap copy.
    intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
    DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
    assert(frame_sp == unextended_sp, "");
    caller.set_sp(fp + frame::sender_sp_offset);
    frame f(frame_sp, frame_sp, fp, hf.pc());
    // we need to set the locals so that the caller of new_stack_frame() can call
    // ContinuationHelper::InterpretedFrame::frame_bottom
    intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
    DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
    // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
    DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
    assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
    // copy relativized locals from the heap frame
    *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
    return f;
  } else {
    int fsize = FKind::size(hf);
    intptr_t* frame_sp = caller.unextended_sp() - fsize;
    if (bottom || caller.is_interpreted_frame()) {
      int argsize = FKind::stack_argsize(hf);

      // Stack args must not overlap the caller here, so grow the frame by
      // argsize and move the caller's sp down with it.
      fsize += argsize;
      frame_sp -= argsize;
      caller.set_sp(caller.sp() - argsize);
      assert(caller.sp() == frame_sp + (fsize-argsize), "");

      frame_sp = align(hf, frame_sp, caller, bottom);
    }

    assert(hf.cb() != nullptr, "");
    assert(hf.oop_map() != nullptr, "");
    intptr_t* fp;
    if (PreserveFramePointer) {
      // we need to recreate a "real" frame pointer, pointing into the stack
      fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
    } else {
      fp = FKind::stub || FKind::native
        ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
        : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
    }
    return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
  }
}
305
// Aligns frame_sp down to 16 bytes if needed, moving the caller's sp along
// with it so the two stay consistent.
inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
  if (((intptr_t)frame_sp & 0xf) != 0) {
    assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
    frame_sp--;
    caller.set_sp(caller.sp() - 1);
  }
  assert(is_aligned(frame_sp, frame::frame_alignment), "");
  return frame_sp;
}
315
// Patches the thawed callee's saved-link slot with the caller's fp.
inline void ThawBase::patch_pd(frame& f, const frame& caller) {
  patch_callee_link(caller, caller.fp());
}
319
// Variant taking only the caller's sp: derives the caller's fp location
// (sender_sp_offset words below it) and patches f's saved-link slot with it.
inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
  intptr_t* fp = caller_sp - frame::sender_sp_offset;
  patch_callee_link(f, fp);
}
324
// Prepares the entry frame so that returning resumes at the cleanup stub.
inline intptr_t* ThawBase::push_cleanup_continuation() {
  frame enterSpecial = new_entry_frame();
  intptr_t* sp = enterSpecial.sp();

  // We only need to set the return pc. rbp will be restored back in gen_continuation_enter().
  sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
  return sp;
}
333
334 inline intptr_t* ThawBase::push_preempt_adapter() {
335 frame enterSpecial = new_entry_frame();
336 intptr_t* sp = enterSpecial.sp();
337
|
48 // copy the spilled rbp from the heap to the stack
49 *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
50 }
51
52 // Slow path
53
54 template<typename FKind>
55 inline frame FreezeBase::sender(const frame& f) {
56 assert(FKind::is_instance(f), "");
57 if (FKind::interpreted) {
58 return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
59 }
60 intptr_t** link_addr = link_address<FKind>(f);
61
62 intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); // f.unextended_sp() + (fsize/wordSize); //
63 address sender_pc = (address) *(sender_sp-1);
64 assert(sender_sp != f.sp(), "must have changed");
65
66 int slot = 0;
67 CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);
68
69 // Repair the sender sp if the frame has been extended
70 if (sender_cb->is_nmethod()) {
71 sender_sp = f.repair_sender_sp(sender_sp, link_addr);
72 }
73
74 return sender_cb != nullptr
75 ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
76 slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc), false)
77 : frame(sender_sp, sender_sp, *link_addr, sender_pc);
78 }
79
// Computes where f's frozen copy will live inside the heap stack chunk and
// returns a heap ("on_heap") frame object pointing there. Also bumps the
// caller's sp so the next frozen frame is laid out above this one.
// size_adjust: extra words reserved below the caller's unextended_sp for
// compiled frames — presumably for an extended frame; confirm with callers.
template<typename FKind>
frame FreezeBase::new_heap_frame(frame& f, frame& caller, int size_adjust) {
  assert(FKind::is_instance(f), "");
  assert(!caller.is_interpreted_frame()
    || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");

  intptr_t *sp, *fp; // sp is really our unextended_sp
  if (FKind::interpreted) {
    assert((intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset) == nullptr
      || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
    intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
    // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
    // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
    bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
    // Place fp so that the locals (relativized via locals_offset) end just below
    // the caller's unextended_sp, overlapping the caller's outgoing args when allowed.
    fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
    // Preserve the fp-to-unextended_sp distance of the stack frame f.
    sp = fp - (f.fp() - f.unextended_sp());
    assert(sp <= fp, "");
    assert(fp <= caller.unextended_sp(), "");
    caller.set_sp(fp + frame::sender_sp_offset);

    assert(_cont.tail()->is_in_chunk(sp), "");

    frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
    // copy relativized locals from the stack frame
    *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
    return hf;
  } else {
    // For a compiled frame we need to re-read fp out of the frame because it may be an
    // oop and we might have had a safepoint in finalize_freeze, after constructing f.
    // For stub/native frames the value is not used while frozen, and will be constructed again
    // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
    // help with debugging, particularly when inspecting frames and identifying invalid accesses.
    fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;

    int fsize = FKind::size(f);
    sp = caller.unextended_sp() - fsize - size_adjust;
    // Only make room for stackargs when no size_adjust was applied —
    // NOTE(review): assumes size_adjust already covers that room; confirm.
    if (caller.is_interpreted_frame() && size_adjust == 0) {
      // If the caller is interpreted, our stackargs are not supposed to overlap with it
      // so we make more room by moving sp down by argsize
      int argsize = FKind::stack_argsize(f);
      sp -= argsize;
    }
    caller.set_sp(sp + fsize);

    assert(_cont.tail()->is_in_chunk(sp), "");

    return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
  }
}
129
130 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
131 assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
132 intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
133 if (real_unextended_sp != nullptr) {
134 f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
135 }
136 }
163 assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
164 assert(hf.fp() > (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
165 assert(hf.fp() <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
166 }
167
// Writes the return-pc and saved-fp words just below the top heap frame's sp,
// completing the chunk's top-of-stack metadata.
inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
  stackChunkOop chunk = _cont.tail();
  assert(chunk->is_in_chunk(hf.sp() - 1), "");
  assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");

  address frame_pc = hf.pc();

  // The return pc goes in the word immediately below sp.
  *(hf.sp() - 1) = (intptr_t)hf.pc();

  // For interpreted frames fp is stored relativized (offset from the slot
  // itself); for other frames it is stored as an absolute value.
  intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
  *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
                                       : (intptr_t)hf.fp();
  assert(frame_pc == ContinuationHelper::Frame::real_pc(hf), "");
}
182
// Patches the frozen callee's saved-link slot so it refers to the caller's fp.
// Non-interpreted callers are only patched for the bottom-most frozen frame
// with a known pc; other cases need no patching here.
inline void FreezeBase::patch_pd(frame& hf, const frame& caller, bool is_bottom_frame) {
  if (caller.is_interpreted_frame()) {
    assert(!caller.is_empty(), "");
    // Interpreted caller: the link is stored relativized.
    patch_callee_link_relative(caller, caller.fp());
  } else if (is_bottom_frame && caller.pc() != nullptr) {
    assert(caller.is_compiled_frame(), "");
    // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
    // and its oop-containing fp fixed. We've now just overwritten it, so we must patch it back to its value
    // as read from the chunk.
    patch_callee_link(caller, caller.fp());
  }
}
195
196 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
197 intptr_t* fp_addr = sp - frame::sender_sp_offset;
198 *fp_addr = badAddressVal;
199 }
200
201 inline intptr_t* AnchorMark::anchor_mark_set_pd() {
202 intptr_t* sp = _top_frame.sp();
203 if (_top_frame.is_interpreted_frame()) {
204 // In case the top frame is interpreted we need to set up the anchor using
205 // the last_sp saved in the frame (remove possible alignment added while
206 // thawing, see ThawBase::finish_thaw()). We also clear last_sp to match
207 // the behavior when calling the VM from the interpreter (we check for this
208 // in FreezeBase::prepare_freeze_interpreted_top_frame, which can be reached
239
240 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
241 size <<= LogBytesPerWord;
242 Prefetch::read(start, size);
243 Prefetch::read(start, size - 64);
244 }
245
// No-op on the fast thaw path: caller links only need fixing when frame
// pointers are preserved, which the fast path excludes (asserted below).
template <typename ConfigT>
inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
  // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
  assert(!PreserveFramePointer, "Frame pointers need to be fixed");
}
251
252 // Slow path
253
// Reconstructs the continuation entry frame from the sp/fp/pc values saved
// in the ContinuationEntry (accessed through _cont).
inline frame ThawBase::new_entry_frame() {
  intptr_t* sp = _cont.entrySP();
  return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
}
258
// Computes where the heap frame hf will land on the thread stack and returns
// a stack frame object pointing there. Also moves the caller's sp to account
// for argument overlap and 16-byte alignment.
// size_adjust: extra words reserved below the caller's unextended_sp for
// compiled frames — presumably for an extended frame; confirm with callers.
template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust) {
  assert(FKind::is_instance(hf), "");
  // The values in the returned frame object will be written into the callee's stack in patch.

  if (FKind::interpreted) {
    intptr_t* heap_sp = hf.unextended_sp();
    // If caller is interpreted it already made room for the callee arguments
    int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
    const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
    intptr_t* frame_sp = caller.unextended_sp() - fsize;
    // Preserve the fp-to-sp distance of the heap copy.
    intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
    DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
    assert(frame_sp == unextended_sp, "");
    caller.set_sp(fp + frame::sender_sp_offset);
    frame f(frame_sp, frame_sp, fp, hf.pc());
    // we need to set the locals so that the caller of new_stack_frame() can call
    // ContinuationHelper::InterpretedFrame::frame_bottom
    intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
    DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
    // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
    DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
    assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
    // copy relativized locals from the heap frame
    *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
    return f;
  } else {
    int fsize = FKind::size(hf);
    intptr_t* frame_sp = caller.unextended_sp() - fsize - size_adjust;
    if (bottom || caller.is_interpreted_frame()) {
      // Only make room for stackargs when no size_adjust was applied —
      // NOTE(review): assumes size_adjust already covers that room; confirm.
      if (size_adjust == 0) {
        int argsize = FKind::stack_argsize(hf);
        frame_sp -= argsize;
      }
      frame_sp = align(hf, frame_sp, caller, bottom);
    }
    // Caller sp tracks the (possibly aligned) frame_sp plus the frame size.
    caller.set_sp(frame_sp + fsize);
    assert(is_aligned(frame_sp, frame::frame_alignment), "");

    assert(hf.cb() != nullptr, "");
    assert(hf.oop_map() != nullptr, "");
    intptr_t* fp;
    if (PreserveFramePointer) {
      // we need to recreate a "real" frame pointer, pointing into the stack
      fp = frame_sp + fsize - frame::sender_sp_offset;
    } else {
      fp = FKind::stub || FKind::native
        ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
        : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
    }
    return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
  }
}
311
// Aligns frame_sp down to 16 bytes if needed. The caller's sp is not touched
// here; callers of align() set it afterwards from the returned value.
inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
  if (((intptr_t)frame_sp & 0xf) != 0) {
    assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
    frame_sp--;
  }
  assert(is_aligned(frame_sp, frame::frame_alignment), "");
  return frame_sp;
}
320
// Patches the thawed callee's saved-link slot with the caller's fp, but only
// when something will actually read it: an interpreted caller, or any caller
// when frame pointers are preserved.
inline void ThawBase::patch_pd(frame& f, const frame& caller) {
  if (caller.is_interpreted_frame() || PreserveFramePointer) {
    patch_callee_link(caller, caller.fp());
  }
}
326
// Variant taking only the caller's sp: derives the caller's fp location
// (sender_sp_offset words below it) and patches f's saved-link slot with it.
inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
  intptr_t* fp = caller_sp - frame::sender_sp_offset;
  patch_callee_link(f, fp);
}
331
// Prepares the entry frame so that returning resumes at the cleanup stub.
inline intptr_t* ThawBase::push_cleanup_continuation() {
  frame enterSpecial = new_entry_frame();
  intptr_t* sp = enterSpecial.sp();

  // We only need to set the return pc. rbp will be restored back in gen_continuation_enter().
  sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
  return sp;
}
340
341 inline intptr_t* ThawBase::push_preempt_adapter() {
342 frame enterSpecial = new_entry_frame();
343 intptr_t* sp = enterSpecial.sp();
344
|