< prev index next >

src/hotspot/cpu/x86/continuationFreezeThaw_x86.inline.hpp

Print this page

 40   *la = new_value;
 41 }
 42 
 43 ////// Freeze
 44 
 45 // Fast path
 46 
 47 inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
      // Freeze fast path: the word at sp - sender_sp_offset is the spilled rbp slot
      // of the callee; mirror the value from the heap chunk onto the thread stack.
 48   // copy the spilled rbp from the heap to the stack
 49   *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
 50 }
 51 
 52 // Slow path
 53 
 54 template<typename FKind>
 55 inline frame FreezeBase::sender(const frame& f) {
      // Constructs the sender (caller) frame of f during the freeze walk.
      // Interpreted frames carry their sender info in the frame itself; for
      // compiled/stub frames we derive it from the saved-link slot: on x86 the
      // saved rbp sits sender_sp_offset words below the sender's sp, and the
      // return pc is the word immediately below sender_sp.
 56   assert(FKind::is_instance(f), "");
 57   if (FKind::interpreted) {
 58     return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
 59   }
 60   intptr_t** link_addr = link_address<FKind>(f);
 61 
 62   intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); //  f.unextended_sp() + (fsize/wordSize); //
 63   address sender_pc = (address) *(sender_sp-1);
 64   assert(sender_sp != f.sp(), "must have changed");
 65 
      // Look up the code blob containing the sender pc (and its oop-map slot, if
      // any) so the returned frame carries its oop map when one exists.
 66   int slot = 0;
 67   CodeCache* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);

 68   return sender_cb != nullptr
 69     ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
 70             slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc), false)
 71     : frame(sender_sp, sender_sp, *link_addr, sender_pc);
 72 }
 73 
 74 template<typename FKind>
 75 frame FreezeBase::new_heap_frame(frame& f, frame& caller) {
      // Computes where frame f will live in the heap stackChunk (directly below
      // the already-frozen caller) and returns an on-heap frame object for it.
      // As a side effect, moves caller.sp() down so the caller now ends where
      // f's fixed part begins.
 76   assert(FKind::is_instance(f), "");
 77   assert(!caller.is_interpreted_frame()
 78     || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");
 79 
 80   intptr_t *sp, *fp; // sp is really our unextended_sp
 81   if (FKind::interpreted) {
 82     assert((intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset) == nullptr
 83       || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
 84     intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
 85     // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
 86     // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
 87     bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
      // Position fp so that the locals area (locals_offset words above fp) ends
      // just below the caller; the argsize term lets the stack arguments overlap
      // the caller's area in the overlap cases above.
 88     fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
 89     sp = fp - (f.fp() - f.unextended_sp()); // keep the same fp-to-sp distance as the stack frame
 90     assert(sp <= fp, "");
 91     assert(fp <= caller.unextended_sp(), "");
 92     caller.set_sp(fp + frame::sender_sp_offset);
 93 
 94     assert(_cont.tail()->is_in_chunk(sp), "");
 95 
 96     frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
 97     // copy relativized locals from the stack frame
 98     *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
 99     return hf;
100   } else {
101     // For a compiled frame we need to re-read fp out of the frame because it may be an
102     // oop and we might have had a safepoint in finalize_freeze, after constructing f.
103     // For stub/native frames the value is not used while frozen, and will be constructed again
104     // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
105     // help with debugging, particularly when inspecting frames and identifying invalid accesses.
106     fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
107 
108     int fsize = FKind::size(f);
109     sp = caller.unextended_sp() - fsize;
110     if (caller.is_interpreted_frame()) {
111       // If the caller is interpreted, our stackargs are not supposed to overlap with it
112       // so we make more room by moving sp down by argsize
113       int argsize = FKind::stack_argsize(f);
114       sp -= argsize;

115     }
116     caller.set_sp(sp + fsize);
117 
118     assert(_cont.tail()->is_in_chunk(sp), "");
119 
120     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
121   }
122 }
123 
124 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
      // If the interpreted frame recorded a last_sp, use it as the frame's real
      // unextended_sp (the sp may have been extended, e.g. for outgoing args).
125   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
126   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
127   if (real_unextended_sp != nullptr) {
128     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
129   }
130 }
131 
132 inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
      // The top interpreted frame has no saved last_sp; record the current
      // unextended_sp so the freeze code can treat it like any other frame.
133   assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
134   f.interpreter_frame_set_last_sp(f.unextended_sp());
135 }
136 

157   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
158   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
159   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
160 }
161 
162 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
      // Writes the two metadata words below the topmost heap frame's sp:
      // the return pc (at sp - 1) and the saved fp (at sp - sender_sp_offset).
      // For interpreted frames the fp is stored relativized (as an offset from
      // the slot itself) so it stays valid if the chunk moves.
163   stackChunkOop chunk = _cont.tail();
164   assert(chunk->is_in_chunk(hf.sp() - 1), "");
165   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
166 
167   address frame_pc = hf.pc();
168 
169   *(hf.sp() - 1) = (intptr_t)hf.pc();
170 
171   intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
172   *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
173                                        : (intptr_t)hf.fp();
174   assert(frame_pc == ContinuationHelper::Frame::real_pc(hf), "");
175 }
176 
177 inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
      // Patches the heap callee's saved-link slot to point at its caller's fp.
      // Interpreted callers get a relativized link (chunk may move); compiled
      // callers get the absolute value re-read from the chunk.
178   if (caller.is_interpreted_frame()) {
179     assert(!caller.is_empty(), "");
180     patch_callee_link_relative(caller, caller.fp());
181   } else {

182     // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
183     // and its oop-containing fp fixed. We've now just overwritten it, so we must patch it back to its value
184     // as read from the chunk.
185     patch_callee_link(caller, caller.fp());
186   }
187 }
188 
189 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
      // Poison the unused saved-fp slot with a recognizable bad address so that
      // accidental uses are easy to spot while debugging.
190   intptr_t* fp_addr = sp - frame::sender_sp_offset;
191   *fp_addr = badAddressVal;
192 }
193 
194 inline intptr_t* AnchorMark::anchor_mark_set_pd() {
195   intptr_t* sp = _top_frame.sp();
196   if (_top_frame.is_interpreted_frame()) {
197     // In case the top frame is interpreted we need to set up the anchor using
198     // the last_sp saved in the frame (remove possible alignment added while
199     // thawing, see ThawBase::finish_thaw()). We also clear last_sp to match
200     // the behavior when calling the VM from the interpreter (we check for this
201     // in FreezeBase::prepare_freeze_interpreted_top_frame, which can be reached

232 
233 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
      // size is in words; convert to bytes and issue read prefetches for the
      // chunk data about to be copied (two hints near the start of the range).
234   size <<= LogBytesPerWord;
235   Prefetch::read(start, size);
236   Prefetch::read(start, size - 64);
237 }
238 
239 template <typename ConfigT>
240 inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
      // Intentionally a no-op on x86: the fast thaw path never runs with
      // PreserveFramePointer, so no frame-pointer chain needs rebuilding.
241   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
242   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
243 }
244 
245 // Slow path
246 
247 inline frame ThawBase::new_entry_frame() {
      // Reconstructs the continuation-entry (enterSpecial) frame from the
      // sp/fp/pc recorded in the ContinuationEntry.
248   intptr_t* sp = _cont.entrySP();
249   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
250 }
251 
252 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
      // Inverse of FreezeBase::new_heap_frame: given the heap frame hf, computes
      // where it will live on the thread stack (below its already-thawed caller)
      // and returns a stack frame object for it. Moves caller.sp() down to make
      // room for the callee.
253   assert(FKind::is_instance(hf), "");
254   // The values in the returned frame object will be written into the callee's stack in patch.
255 
256   if (FKind::interpreted) {
257     intptr_t* heap_sp = hf.unextended_sp();
258     // If caller is interpreted it already made room for the callee arguments
259     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
260     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
261     intptr_t* frame_sp = caller.unextended_sp() - fsize;
      // Keep the heap frame's fp-to-sp distance on the stack as well.
262     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
263     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
264     assert(frame_sp == unextended_sp, "");
265     caller.set_sp(fp + frame::sender_sp_offset);
266     frame f(frame_sp, frame_sp, fp, hf.pc());
267     // we need to set the locals so that the caller of new_stack_frame() can call
268     // ContinuationHelper::InterpretedFrame::frame_bottom
269     intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
270     DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
271     // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
272     DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
273     assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
274     // copy relativized locals from the heap frame
275     *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
276     return f;
277   } else {
278     int fsize = FKind::size(hf);
279     intptr_t* frame_sp = caller.unextended_sp() - fsize;
280     if (bottom || caller.is_interpreted_frame()) {
      // The stack args cannot overlap the caller here, so make extra room for
      // them and keep the resulting sp 16-byte aligned (see align()).
281       int argsize = FKind::stack_argsize(hf);
282 
283       fsize += argsize;
284       frame_sp   -= argsize;
285       caller.set_sp(caller.sp() - argsize);
286       assert(caller.sp() == frame_sp + (fsize-argsize), "");
287 
288       frame_sp = align(hf, frame_sp, caller, bottom);

289     }

290 
291     assert(hf.cb() != nullptr, "");
292     assert(hf.oop_map() != nullptr, "");
293     intptr_t* fp;
294     if (PreserveFramePointer) {
295       // we need to recreate a "real" frame pointer, pointing into the stack
296       fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
297     } else {
298       fp = FKind::stub || FKind::native
299         ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
300         : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
301     }
302     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
303   }
304 }
305 
306 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
      // Rounds frame_sp down to 16-byte alignment (at most one word of padding),
      // moving the caller's sp down with it so the frames stay contiguous.
307   if (((intptr_t)frame_sp & 0xf) != 0) {
308     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
309     frame_sp--;
310     caller.set_sp(caller.sp() - 1);
311   }
312   assert(is_aligned(frame_sp, frame::frame_alignment), "");
313   return frame_sp;
314 }
315 
316 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
      // Patch the thawed callee's saved-link slot to point at its caller's fp.
317   patch_callee_link(caller, caller.fp());


318 }
319 
320 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
      // Caller is known only by its sp: its fp slot is sender_sp_offset words
      // below that sp; patch f's saved link with that address.
321   intptr_t* fp = caller_sp - frame::sender_sp_offset;
322   patch_callee_link(f, fp);
323 }
324 
325 inline intptr_t* ThawBase::push_cleanup_continuation() {
      // Arranges for the continuation cleanup stub to run on return into the
      // enterSpecial frame, by planting its pc in the return-address slot.
326   frame enterSpecial = new_entry_frame();
327   intptr_t* sp = enterSpecial.sp();
328 
329   // We only need to set the return pc. rbp will be restored back in gen_continuation_enter().
330   sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
331   return sp;
332 }
333 
334 inline intptr_t* ThawBase::push_preempt_adapter() {
335   frame enterSpecial = new_entry_frame();
336   intptr_t* sp = enterSpecial.sp();
337 

 40   *la = new_value;
 41 }
 42 
 43 ////// Freeze
 44 
 45 // Fast path
 46 
 47 inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
      // Freeze fast path: the word at sp - sender_sp_offset is the spilled rbp slot
      // of the callee; mirror the value from the heap chunk onto the thread stack.
 48   // copy the spilled rbp from the heap to the stack
 49   *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
 50 }
 51 
 52 // Slow path
 53 
 54 template<typename FKind>
 55 inline frame FreezeBase::sender(const frame& f) {
      // Constructs the sender (caller) frame of f during the freeze walk.
      // Interpreted frames carry their sender info in the frame itself; for
      // compiled/stub frames the sender sp, saved fp and return pc are obtained
      // together via frame::compiled_frame_details().
 56   assert(FKind::is_instance(f), "");
 57   if (FKind::interpreted) {
 58     return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
 59   }

 60 
 61   frame::CompiledFramePointers cfp = f.compiled_frame_details();


 62 
      // Look up the code blob containing the sender pc (and its oop-map slot, if
      // any) so the returned frame carries its oop map when one exists.
 63   int slot = 0;
 64   CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(*cfp.sender_pc_addr, slot);
 65 
 66   return sender_cb != nullptr
 67     ? frame(cfp.sender_sp, cfp.sender_sp, *cfp.saved_fp_addr, *cfp.sender_pc_addr, sender_cb,
 68             slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, *cfp.sender_pc_addr), false)
 69     : frame(cfp.sender_sp, cfp.sender_sp, *cfp.saved_fp_addr, *cfp.sender_pc_addr);
 70 }
 71 
 72 template<typename FKind>
 73 frame FreezeBase::new_heap_frame(frame& f, frame& caller, int size_adjust) {
      // Computes where frame f will live in the heap stackChunk (directly below
      // the already-frozen caller) and returns an on-heap frame object for it.
      // size_adjust adds extra words below the compiled frame; when non-zero the
      // usual interpreted-caller argsize adjustment is skipped (presumably the
      // adjustment already accounts for the args — TODO confirm at call sites).
      // As a side effect, moves caller.sp() down to where f begins.
 74   assert(FKind::is_instance(f), "");
 75   assert(!caller.is_interpreted_frame()
 76     || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");
 77 
 78   intptr_t *sp, *fp; // sp is really our unextended_sp
 79   if (FKind::interpreted) {
 80     assert((intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset) == nullptr
 81       || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
 82     intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
 83     // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
 84     // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
 85     bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
      // Position fp so that the locals area (locals_offset words above fp) ends
      // just below the caller; the argsize term lets the stack arguments overlap
      // the caller's area in the overlap cases above.
 86     fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
 87     sp = fp - (f.fp() - f.unextended_sp()); // keep the same fp-to-sp distance as the stack frame
 88     assert(sp <= fp, "");
 89     assert(fp <= caller.unextended_sp(), "");
 90     caller.set_sp(fp + frame::sender_sp_offset);
 91 
 92     assert(_cont.tail()->is_in_chunk(sp), "");
 93 
 94     frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
 95     // copy relativized locals from the stack frame
 96     *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
 97     return hf;
 98   } else {
 99     // For a compiled frame we need to re-read fp out of the frame because it may be an
100     // oop and we might have had a safepoint in finalize_freeze, after constructing f.
101     // For stub/native frames the value is not used while frozen, and will be constructed again
102     // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
103     // help with debugging, particularly when inspecting frames and identifying invalid accesses.
104     fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
105 
106     int fsize = FKind::size(f);
107     sp = caller.unextended_sp() - fsize - size_adjust;
108     if (caller.is_interpreted_frame() && size_adjust == 0) {
109       // If the caller is interpreted, our stackargs are not supposed to overlap with it
110       // so we make more room by moving sp down by argsize
111       int argsize = FKind::stack_argsize(f);
112       sp -= argsize;
113       caller.set_sp(sp + fsize);
114     }

115 
116     assert(_cont.tail()->is_in_chunk(sp), "");
117 
118     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
119   }
120 }
121 
122 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
      // If the interpreted frame recorded a last_sp, use it as the frame's real
      // unextended_sp (the sp may have been extended, e.g. for outgoing args).
123   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
124   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
125   if (real_unextended_sp != nullptr) {
126     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
127   }
128 }
129 
130 inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
      // The top interpreted frame has no saved last_sp; record the current
      // unextended_sp so the freeze code can treat it like any other frame.
131   assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
132   f.interpreter_frame_set_last_sp(f.unextended_sp());
133 }
134 

155   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
156   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
157   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
158 }
159 
160 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
      // Writes the two metadata words below the topmost heap frame's sp:
      // the return pc (at sp - 1) and the saved fp (at sp - sender_sp_offset).
      // For interpreted frames the fp is stored relativized (as an offset from
      // the slot itself) so it stays valid if the chunk moves.
161   stackChunkOop chunk = _cont.tail();
162   assert(chunk->is_in_chunk(hf.sp() - 1), "");
163   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
164 
165   address frame_pc = hf.pc();
166 
167   *(hf.sp() - 1) = (intptr_t)hf.pc();
168 
169   intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
170   *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
171                                        : (intptr_t)hf.fp();
172   assert(frame_pc == ContinuationHelper::Frame::real_pc(hf), "");
173 }
174 
175 inline void FreezeBase::patch_pd(frame& hf, const frame& caller, bool is_bottom_frame) {
      // Patches the heap callee's saved-link slot to point at its caller's fp.
      // Interpreted callers get a relativized link (chunk may move). For compiled
      // callers the patch is only needed for the bottom-most frozen frame (whose
      // caller may have stayed frozen in the chunk); otherwise nothing is done.
176   if (caller.is_interpreted_frame()) {
177     assert(!caller.is_empty(), "");
178     patch_callee_link_relative(caller, caller.fp());
179   } else if (is_bottom_frame && caller.pc() != nullptr) {
180     assert(caller.is_compiled_frame(), "");
181     // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
182     // and its oop-containing fp fixed. We've now just overwritten it, so we must patch it back to its value
183     // as read from the chunk.
184     patch_callee_link(caller, caller.fp());
185   }
186 }
187 
188 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
      // Poison the unused saved-fp slot with a recognizable bad address so that
      // accidental uses are easy to spot while debugging.
189   intptr_t* fp_addr = sp - frame::sender_sp_offset;
190   *fp_addr = badAddressVal;
191 }
192 
193 inline intptr_t* AnchorMark::anchor_mark_set_pd() {
194   intptr_t* sp = _top_frame.sp();
195   if (_top_frame.is_interpreted_frame()) {
196     // In case the top frame is interpreted we need to set up the anchor using
197     // the last_sp saved in the frame (remove possible alignment added while
198     // thawing, see ThawBase::finish_thaw()). We also clear last_sp to match
199     // the behavior when calling the VM from the interpreter (we check for this
200     // in FreezeBase::prepare_freeze_interpreted_top_frame, which can be reached

231 
232 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
      // size is in words; convert to bytes and issue read prefetches for the
      // chunk data about to be copied (two hints near the start of the range).
233   size <<= LogBytesPerWord;
234   Prefetch::read(start, size);
235   Prefetch::read(start, size - 64);
236 }
237 
238 template <typename ConfigT>
239 inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
      // Intentionally a no-op on x86: the fast thaw path never runs with
      // PreserveFramePointer, so no frame-pointer chain needs rebuilding.
240   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
241   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
242 }
243 
244 // Slow path
245 
246 inline frame ThawBase::new_entry_frame() {
      // Reconstructs the continuation-entry (enterSpecial) frame from the
      // sp/fp/pc recorded in the ContinuationEntry.
247   intptr_t* sp = _cont.entrySP();
248   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
249 }
250 
251 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust) {
      // Inverse of FreezeBase::new_heap_frame: given the heap frame hf, computes
      // where it will live on the thread stack (below its already-thawed caller)
      // and returns a stack frame object for it. size_adjust adds extra words
      // below the compiled frame; when non-zero the usual argsize adjustment is
      // skipped (mirrors FreezeBase::new_heap_frame — TODO confirm at call sites).
      // Moves caller.sp() down to make room for the callee.
252   assert(FKind::is_instance(hf), "");
253   // The values in the returned frame object will be written into the callee's stack in patch.
254 
255   if (FKind::interpreted) {
256     intptr_t* heap_sp = hf.unextended_sp();
257     // If caller is interpreted it already made room for the callee arguments
258     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
259     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
260     intptr_t* frame_sp = caller.unextended_sp() - fsize;
      // Keep the heap frame's fp-to-sp distance on the stack as well.
261     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
262     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
263     assert(frame_sp == unextended_sp, "");
264     caller.set_sp(fp + frame::sender_sp_offset);
265     frame f(frame_sp, frame_sp, fp, hf.pc());
266     // we need to set the locals so that the caller of new_stack_frame() can call
267     // ContinuationHelper::InterpretedFrame::frame_bottom
268     intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
269     DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
270     // Frames for native methods have 2 extra words (temp oop/result handler) before fixed part of frame.
271     DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
272     assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
273     // copy relativized locals from the heap frame
274     *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
275     return f;
276   } else {
277     int fsize = FKind::size(hf);
278     intptr_t* frame_sp = caller.unextended_sp() - fsize - size_adjust;
279     if (bottom || caller.is_interpreted_frame()) {
      // The stack args cannot overlap the caller here, so make extra room for
      // them (unless size_adjust already accounts for the space), align sp to
      // 16 bytes, and only then fix up the caller's sp.
280       if (size_adjust == 0) {
281         int argsize = FKind::stack_argsize(hf);
282         frame_sp -= argsize;
283       }



284       frame_sp = align(hf, frame_sp, caller, bottom);
285       caller.set_sp(frame_sp + fsize + size_adjust);
286     }
287     assert(is_aligned(frame_sp, frame::frame_alignment), "");
288 
289     assert(hf.cb() != nullptr, "");
290     assert(hf.oop_map() != nullptr, "");
291     intptr_t* fp;
292     if (PreserveFramePointer) {
293       // we need to recreate a "real" frame pointer, pointing into the stack
294       fp = frame_sp + fsize - frame::sender_sp_offset;
295     } else {
296       fp = FKind::stub || FKind::native
297         ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
298         : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
299     }
300     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
301   }
302 }
303 
304 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
      // Rounds frame_sp down to 16-byte alignment (at most one word of padding).
      // Unlike the pre-change version, the caller's sp is not adjusted here;
      // callers fix up their sp after alignment (see new_stack_frame).
305   if (((intptr_t)frame_sp & 0xf) != 0) {
306     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
307     frame_sp--;

308   }
309   assert(is_aligned(frame_sp, frame::frame_alignment), "");
310   return frame_sp;
311 }
312 
313 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
      // Patch the thawed callee's saved-link slot with the caller's fp, but only
      // when the link will actually be read: interpreted callers use it, and
      // with PreserveFramePointer the rbp chain must stay intact.
314   if (caller.is_interpreted_frame() || PreserveFramePointer) {
315     patch_callee_link(caller, caller.fp());
316   }
317 }
318 
319 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
      // Caller is known only by its sp: its fp slot is sender_sp_offset words
      // below that sp; patch f's saved link with that address.
320   intptr_t* fp = caller_sp - frame::sender_sp_offset;
321   patch_callee_link(f, fp);
322 }
323 
324 inline intptr_t* ThawBase::push_cleanup_continuation() {
      // Arranges for the continuation cleanup stub to run on return into the
      // enterSpecial frame, by planting its pc in the return-address slot.
325   frame enterSpecial = new_entry_frame();
326   intptr_t* sp = enterSpecial.sp();
327 
328   // We only need to set the return pc. rbp will be restored back in gen_continuation_enter().
329   sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
330   return sp;
331 }
332 
333 inline intptr_t* ThawBase::push_preempt_adapter() {
334   frame enterSpecial = new_entry_frame();
335   intptr_t* sp = enterSpecial.sp();
336 
< prev index next >