src/hotspot/cpu/x86/continuationFreezeThaw_x86.inline.hpp


 48   // copy the spilled rbp from the heap to the stack
 49   *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
 50 }
 51 
 52 // Slow path
 53 
 54 template<typename FKind>
 55 inline frame FreezeBase::sender(const frame& f) {
 56   assert(FKind::is_instance(f), "");
 57   if (FKind::interpreted) {
 58     return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
 59   }
 60   intptr_t** link_addr = link_address<FKind>(f);
 61 
 62   intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); //  f.unextended_sp() + (fsize/wordSize); //
 63   address sender_pc = (address) *(sender_sp-1);
 64   assert(sender_sp != f.sp(), "must have changed");
 65 
 66   int slot = 0;
 67   CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);
 68   return sender_cb != nullptr
 69     ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
 70             slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc), false)
 71     : frame(sender_sp, sender_sp, *link_addr, sender_pc);
 72 }
 73 
 74 template<typename FKind>
 75 frame FreezeBase::new_heap_frame(frame& f, frame& caller) {
 76   assert(FKind::is_instance(f), "");
 77   assert(!caller.is_interpreted_frame()
 78     || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");
 79 
 80   intptr_t *sp, *fp; // sp is really our unextended_sp
 81   if (FKind::interpreted) {
 82     assert((intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset) == nullptr
 83       || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
 84     intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
 85     // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
 86     // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
 87     bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
 88     fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
 89     sp = fp - (f.fp() - f.unextended_sp());
 90     assert(sp <= fp, "");
 91     assert(fp <= caller.unextended_sp(), "");
 92     caller.set_sp(fp + frame::sender_sp_offset);
 93 
 94     assert(_cont.tail()->is_in_chunk(sp), "");
 95 
 96     frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
 97     // copy relativized locals from the stack frame
 98     *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
 99     return hf;
100   } else {
101     // For a compiled frame we need to re-read fp out of the frame because it may be an
102     // oop and we might have had a safepoint in finalize_freeze, after constructing f.
103     // For stub/native frames the value is not used while frozen, and will be constructed again
104     // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
105     // help with debugging, particularly when inspecting frames and identifying invalid accesses.
106     fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
107 
108     int fsize = FKind::size(f);
109     sp = caller.unextended_sp() - fsize;
110     if (caller.is_interpreted_frame()) {
 111       // If the caller is interpreted, our stackargs are not supposed to overlap with it,
112       // so we make more room by moving sp down by argsize
113       int argsize = FKind::stack_argsize(f);
114       sp -= argsize;
115     }
116     caller.set_sp(sp + fsize);
117 
118     assert(_cont.tail()->is_in_chunk(sp), "");
119 
120     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
121   }
122 }
123 
124 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
125   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
126   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
127   if (real_unextended_sp != nullptr) {
128     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
129   }
130 }

157   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
158   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
159   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
160 }
161 
162 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
163   stackChunkOop chunk = _cont.tail();
164   assert(chunk->is_in_chunk(hf.sp() - 1), "");
165   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
166 
167   address frame_pc = hf.pc();
168 
169   *(hf.sp() - 1) = (intptr_t)hf.pc();
170 
171   intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
172   *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
173                                        : (intptr_t)hf.fp();
174   assert(frame_pc == ContinuationHelper::Frame::real_pc(hf), "");
175 }
176 
177 inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
178   if (caller.is_interpreted_frame()) {
179     assert(!caller.is_empty(), "");
180     patch_callee_link_relative(caller, caller.fp());
181   } else {

182     // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
 183     // with its oop-containing fp already fixed. We've now just overwritten it, so we must patch it back to its value
184     // as read from the chunk.
185     patch_callee_link(caller, caller.fp());
186   }
187 }
188 
189 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
190   intptr_t* fp_addr = sp - frame::sender_sp_offset;
191   *fp_addr = badAddressVal;
192 }
193 
194 //////// Thaw
195 
196 // Fast path
197 
198 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
199   size <<= LogBytesPerWord;
200   Prefetch::read(start, size);
201   Prefetch::read(start, size - 64);
202 }
203 
204 template <typename ConfigT>
205 inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
206   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
207   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
208 }
209 
210 // Slow path
211 
212 inline frame ThawBase::new_entry_frame() {
213   intptr_t* sp = _cont.entrySP();
214   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
215 }
216 
217 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
218   assert(FKind::is_instance(hf), "");
219   // The values in the returned frame object will be written into the callee's stack in patch.
220 
221   if (FKind::interpreted) {
222     intptr_t* heap_sp = hf.unextended_sp();
223     // If caller is interpreted it already made room for the callee arguments
224     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
225     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
226     intptr_t* frame_sp = caller.unextended_sp() - fsize;
227     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
228     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
229     assert(frame_sp == unextended_sp, "");
230     caller.set_sp(fp + frame::sender_sp_offset);
231     frame f(frame_sp, frame_sp, fp, hf.pc());
232     // we need to set the locals so that the caller of new_stack_frame() can call
233     // ContinuationHelper::InterpretedFrame::frame_bottom
234     intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
235     DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
236     // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
237     DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
238     assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
239     // copy relativized locals from the heap frame
240     *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
241     return f;
242   } else {
243     int fsize = FKind::size(hf);
244     intptr_t* frame_sp = caller.unextended_sp() - fsize;
245     if (bottom || caller.is_interpreted_frame()) {
246       int argsize = FKind::stack_argsize(hf);
247 
248       fsize += argsize;
249       frame_sp   -= argsize;
250       caller.set_sp(caller.sp() - argsize);
251       assert(caller.sp() == frame_sp + (fsize-argsize), "");
252 
253       frame_sp = align(hf, frame_sp, caller, bottom);
254     }
255 
256     assert(hf.cb() != nullptr, "");
257     assert(hf.oop_map() != nullptr, "");
258     intptr_t* fp;
259     if (PreserveFramePointer) {
260       // we need to recreate a "real" frame pointer, pointing into the stack
261       fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
262     } else {
263       fp = FKind::stub || FKind::native
 264         ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need the correct address.
265         : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
266     }
267     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
268   }
269 }
270 
271 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
272   if (((intptr_t)frame_sp & 0xf) != 0) {
273     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
274     frame_sp--;
275     caller.set_sp(caller.sp() - 1);
276   }
277   assert(is_aligned(frame_sp, frame::frame_alignment), "");
278   return frame_sp;
279 }
280 
281 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
282   patch_callee_link(caller, caller.fp());
283 }
284 
285 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
286   intptr_t* fp = caller_sp - frame::sender_sp_offset;
287   patch_callee_link(f, fp);
288 }
289 
290 inline intptr_t* ThawBase::push_cleanup_continuation() {
291   frame enterSpecial = new_entry_frame();
292   intptr_t* sp = enterSpecial.sp();
293 
294   sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
295   sp[-2] = (intptr_t)enterSpecial.fp();
296 
297   log_develop_trace(continuations, preempt)("push_cleanup_continuation initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
298   return sp;
299 }
300 
301 inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
302   // Make sure that last_sp is kept relativized.

 48   // copy the spilled rbp from the heap to the stack
 49   *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
 50 }
 51 
 52 // Slow path
 53 
 54 template<typename FKind>
 55 inline frame FreezeBase::sender(const frame& f) {
 56   assert(FKind::is_instance(f), "");
 57   if (FKind::interpreted) {
 58     return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
 59   }
 60   intptr_t** link_addr = link_address<FKind>(f);
 61 
 62   intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); //  f.unextended_sp() + (fsize/wordSize); //
 63   address sender_pc = (address) *(sender_sp-1);
 64   assert(sender_sp != f.sp(), "must have changed");
 65 
 66   int slot = 0;
 67   CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);
 68 
 69   // Repair the sender sp if the frame has been extended
 70   if (sender_cb != nullptr && sender_cb->is_nmethod()) {
 71     sender_sp = f.repair_sender_sp(sender_sp, link_addr);
 72   }
 73 
 74   return sender_cb != nullptr
 75     ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
 76             slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc), false)
 77     : frame(sender_sp, sender_sp, *link_addr, sender_pc);
 78 }
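As a side note on the arithmetic above: on x86-64 the callee's link slot (the saved rbp) sits one word below the pushed return pc, and the sender's sp is frame::sender_sp_offset (2) words above the link slot, which is exactly what sender() exploits. A minimal standalone sketch of that layout, using hand-built values rather than real HotSpot frames:

#include <cassert>
#include <cstdint>

// Illustrative sketch only, not HotSpot code: a stand-in for the top words of
// a callee frame, laid out the way FreezeBase::sender reads them.
int main() {
  intptr_t words[4] = {};
  intptr_t** link_addr = (intptr_t**)&words[0];      // saved rbp (the link slot)
  words[1] = (intptr_t)0x1234;                       // pretend return pc
  intptr_t* sender_sp = (intptr_t*)(link_addr + 2);  // frame::sender_sp_offset == 2
  intptr_t  sender_pc = *(sender_sp - 1);            // the pc sits one word below the sender sp
  assert(sender_sp == &words[2]);
  assert(sender_pc == (intptr_t)0x1234);
  return 0;
}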
 79 
 80 template<typename FKind>
 81 frame FreezeBase::new_heap_frame(frame& f, frame& caller, int size_adjust) {
 82   assert(FKind::is_instance(f), "");
 83   assert(!caller.is_interpreted_frame()
 84     || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");
 85 
 86   intptr_t *sp, *fp; // sp is really our unextended_sp
 87   if (FKind::interpreted) {
 88     assert((intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset) == nullptr
 89       || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
 90     intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
 91     // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
 92     // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
 93     bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
 94     fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
 95     sp = fp - (f.fp() - f.unextended_sp());
 96     assert(sp <= fp, "");
 97     assert(fp <= caller.unextended_sp(), "");
 98     caller.set_sp(fp + frame::sender_sp_offset);
 99 
100     assert(_cont.tail()->is_in_chunk(sp), "");
101 
102     frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
103     // copy relativized locals from the stack frame
104     *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
105     return hf;
106   } else {
107     // For a compiled frame we need to re-read fp out of the frame because it may be an
108     // oop and we might have had a safepoint in finalize_freeze, after constructing f.
109     // For stub/native frames the value is not used while frozen, and will be constructed again
110     // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
111     // help with debugging, particularly when inspecting frames and identifying invalid accesses.
112     fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
113 
114     int fsize = FKind::size(f);
115     sp = caller.unextended_sp() - fsize - size_adjust;
116     if (caller.is_interpreted_frame() && size_adjust == 0) {
 117       // If the caller is interpreted, our stackargs are not supposed to overlap with it,
118       // so we make more room by moving sp down by argsize
119       int argsize = FKind::stack_argsize(f);
120       sp -= argsize;
121     }
122     caller.set_sp(sp + fsize);
123 
124     assert(_cont.tail()->is_in_chunk(sp), "");
125 
126     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
127   }
128 }
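The compiled-frame branch above sizes the heap copy based on whether the caller already reserved room for the callee's outgoing stack arguments: a compiled caller did, so the frames may overlap, while an interpreted caller did not, so sp drops by argsize as well. A small self-contained sketch of just that sp arithmetic, ignoring the size_adjust parameter; the names and sizes are invented for illustration:

#include <cassert>
#include <cstdint>

// Illustrative sketch only, not HotSpot code: how far the callee sp drops
// depends on whether the caller already made room for the stack arguments.
static intptr_t* callee_sp(intptr_t* caller_unextended_sp, int fsize,
                           int argsize, bool caller_is_interpreted) {
  intptr_t* sp = caller_unextended_sp - fsize;  // room for the callee frame itself
  if (caller_is_interpreted) {
    sp -= argsize;                              // no overlap: extra room for the stack args
  }
  return sp;
}

int main() {
  intptr_t stack[64];
  intptr_t* caller_sp = stack + 64;
  assert(callee_sp(caller_sp, 10, 2, /*caller_is_interpreted=*/false) == caller_sp - 10);
  assert(callee_sp(caller_sp, 10, 2, /*caller_is_interpreted=*/true)  == caller_sp - 12);
  return 0;
}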
129 
130 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
131   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
132   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
133   if (real_unextended_sp != nullptr) {
134     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
135   }
136 }

163   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
164   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
165   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
166 }
167 
168 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
169   stackChunkOop chunk = _cont.tail();
170   assert(chunk->is_in_chunk(hf.sp() - 1), "");
171   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
172 
173   address frame_pc = hf.pc();
174 
175   *(hf.sp() - 1) = (intptr_t)hf.pc();
176 
177   intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
178   *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
179                                        : (intptr_t)hf.fp();
180   assert(frame_pc == ContinuationHelper::Frame::real_pc(hf), "");
181 }
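set_top_frame_metadata_pd stores the fp slot of an interpreted frame as a self-relative word offset (hf.fp() - fp_addr) instead of an absolute address, so the stored value remains meaningful if the stack chunk is moved by the GC; compiled frames keep the raw value. A minimal sketch of that relativize/derelativize round trip, with invented helper names:

#include <cassert>
#include <cstdint>

// Illustrative sketch only, not HotSpot code: store a slot either as a
// self-relative word offset (survives the chunk moving) or as a raw address.
static void store_fp_slot(intptr_t* slot, intptr_t* fp, bool relativize) {
  *slot = relativize ? (intptr_t)(fp - slot) : (intptr_t)fp;
}

static intptr_t* load_fp_slot(intptr_t* slot, bool relativized) {
  return relativized ? slot + *slot : (intptr_t*)*slot;
}

int main() {
  intptr_t chunk[8] = {};
  intptr_t* slot = &chunk[1];
  intptr_t* fp   = &chunk[5];
  store_fp_slot(slot, fp, /*relativize=*/true);
  assert(load_fp_slot(slot, /*relativized=*/true) == fp);
  return 0;
}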
182 
183 inline void FreezeBase::patch_pd(frame& hf, const frame& caller, bool is_bottom_frame) {
184   if (caller.is_interpreted_frame()) {
185     assert(!caller.is_empty(), "");
186     patch_callee_link_relative(caller, caller.fp());
187   } else if (is_bottom_frame && caller.pc() != nullptr) {
188     assert(caller.is_compiled_frame(), "");
189     // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
 190     // with its oop-containing fp already fixed. We've now just overwritten it, so we must patch it back to its value
191     // as read from the chunk.
192     patch_callee_link(caller, caller.fp());
193   }
194 }
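patch_callee_link, as used above and in the thaw path below, is assumed here to store the caller's fp into the callee's saved-rbp slot, i.e. the word frame::sender_sp_offset below the caller's sp (patch_callee_link_relative stores the same thing as a relative offset). A standalone sketch of that single store, with hypothetical names:

#include <cassert>
#include <cstdint>

// Illustrative sketch only, not HotSpot code: the assumed effect of
// patch_callee_link is one store into the callee's saved-rbp slot.
static void patch_callee_link_sketch(intptr_t* caller_sp, intptr_t* caller_fp) {
  intptr_t** link_slot = (intptr_t**)(caller_sp - 2);  // frame::sender_sp_offset == 2
  *link_slot = caller_fp;
}

int main() {
  intptr_t words[4] = {};
  intptr_t* caller_sp = &words[3];
  intptr_t* caller_fp = &words[2];
  patch_callee_link_sketch(caller_sp, caller_fp);
  assert(*(intptr_t**)(caller_sp - 2) == caller_fp);
  return 0;
}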
195 
196 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
197   intptr_t* fp_addr = sp - frame::sender_sp_offset;
198   *fp_addr = badAddressVal;
199 }
200 
201 //////// Thaw
202 
203 // Fast path
204 
205 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
206   size <<= LogBytesPerWord;
207   Prefetch::read(start, size);
208   Prefetch::read(start, size - 64);
209 }
210 
211 template <typename ConfigT>
212 inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
213   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
214   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
215 }
216 
217 // Slow path
218 
219 inline frame ThawBase::new_entry_frame() {
220   intptr_t* sp = _cont.entrySP();
221   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
222 }
223 
224 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust) {
225   assert(FKind::is_instance(hf), "");
226   // The values in the returned frame object will be written into the callee's stack in patch.
227 
228   if (FKind::interpreted) {
229     intptr_t* heap_sp = hf.unextended_sp();
230     // If caller is interpreted it already made room for the callee arguments
231     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
232     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
233     intptr_t* frame_sp = caller.unextended_sp() - fsize;
234     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
235     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
236     assert(frame_sp == unextended_sp, "");
237     caller.set_sp(fp + frame::sender_sp_offset);
238     frame f(frame_sp, frame_sp, fp, hf.pc());
239     // we need to set the locals so that the caller of new_stack_frame() can call
240     // ContinuationHelper::InterpretedFrame::frame_bottom
241     intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
242     DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
243     // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
244     DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
245     assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
246     // copy relativized locals from the heap frame
247     *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
248     return f;
249   } else {
250     int fsize = FKind::size(hf);
251     intptr_t* frame_sp = caller.unextended_sp() - fsize - size_adjust;
252     if (bottom || caller.is_interpreted_frame()) {
253       if (size_adjust == 0) {
254         int argsize = FKind::stack_argsize(hf);
255         frame_sp -= argsize;
256       }
257       frame_sp = align(hf, frame_sp, caller, bottom);
258     }
259     caller.set_sp(frame_sp + fsize);
260     assert(is_aligned(frame_sp, frame::frame_alignment), "");
261 
262     assert(hf.cb() != nullptr, "");
263     assert(hf.oop_map() != nullptr, "");
264     intptr_t* fp;
265     if (PreserveFramePointer) {
266       // we need to recreate a "real" frame pointer, pointing into the stack
267       fp = frame_sp + fsize - frame::sender_sp_offset;
268     } else {
269       fp = FKind::stub || FKind::native
 270         ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need the correct address.
271         : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
272     }
273     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
274   }
275 }
276 
277 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
278   if (((intptr_t)frame_sp & 0xf) != 0) {
279     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
280     frame_sp--;

281   }
282   assert(is_aligned(frame_sp, frame::frame_alignment), "");
283   return frame_sp;
284 }
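align() above drops sp by one word whenever the tentative value is not 16-byte aligned (the & 0xf test); since x86-64 sp values move in 8-byte words, one word is always enough. A tiny standalone version of that check:

#include <cassert>
#include <cstdint>

// Illustrative sketch only, not HotSpot code: drop sp by one 8-byte word when
// it is not 16-byte aligned, mirroring the "& 0xf" check in ThawBase::align.
static intptr_t* align_down_to_16(intptr_t* sp) {
  if (((intptr_t)sp & 0xf) != 0) {
    sp--;
  }
  return sp;
}

int main() {
  alignas(16) intptr_t buf[4];
  assert(align_down_to_16(buf + 1) == buf);      // misaligned by one word: pulled down
  assert(align_down_to_16(buf + 2) == buf + 2);  // already 16-byte aligned: unchanged
  return 0;
}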
285 
286 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
287   if (caller.is_interpreted_frame() || PreserveFramePointer) {
288     patch_callee_link(caller, caller.fp());
289   }
290 }
291 
292 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
293   intptr_t* fp = caller_sp - frame::sender_sp_offset;
294   patch_callee_link(f, fp);
295 }
296 
297 inline intptr_t* ThawBase::push_cleanup_continuation() {
298   frame enterSpecial = new_entry_frame();
299   intptr_t* sp = enterSpecial.sp();
300 
301   sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
302   sp[-2] = (intptr_t)enterSpecial.fp();
303 
304   log_develop_trace(continuations, preempt)("push_cleanup_continuation initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
305   return sp;
306 }
307 
308 inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
309   // Make sure that last_sp is kept relativized.