< prev index next >

src/hotspot/cpu/aarch64/continuationFreezeThaw_aarch64.inline.hpp

Print this page

 42   *la = new_value;
 43 }
 44 
 45 ////// Freeze
 46 
 47 // Fast path
 48 
 // Fast-path freeze fix-up: after the stack has been bulk-copied into the heap
 // chunk, make the saved-fp slot on the stack side match the value recorded in
 // the chunk copy (one word at frame::sender_sp_offset below sp).
 49 inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
 50   // copy the spilled fp from the heap to the stack
 51   *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
 52 }
 53 
 54 // Slow path
 55 
 // Slow-path freeze: compute the sender (caller) frame of f.
 56 template<typename FKind>
 57 inline frame FreezeBase::sender(const frame& f) {
 58   assert(FKind::is_instance(f), "");
 59   if (FKind::interpreted) {
    // Interpreted frames expose their sender directly through frame accessors.
 60     return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
 61   }
    // Compiled/stub frame: derive the sender from the saved-link slot. The
    // sender's sp sits frame::sender_sp_offset words above the link address,
    // and the return pc is read from the word just below the sender's sp.
 62   intptr_t** link_addr = link_address<FKind>(f);
 63 
 64   intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); //  f.unextended_sp() + (fsize/wordSize); //
 65   address sender_pc = ContinuationHelper::return_address_at(sender_sp - 1);
 66   assert(sender_sp != f.sp(), "must have changed");
 67 
    // Resolve the sender's code blob and oopmap slot so the returned frame is
    // fully initialized when the sender is compiled code; otherwise fall back
    // to the minimal frame constructor.
 68   int slot = 0;
 69   CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);

 70   return sender_cb != nullptr
 71     ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
 72             slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc),
 73             false /* on_heap ? */)
 74     : frame(sender_sp, sender_sp, *link_addr, sender_pc);
 75 }
 76 
 // Computes where the copy of stack frame f will live inside the heap chunk,
 // given the (already placed) heap copy of its caller, and returns an on-heap
 // frame describing that location. Also lowers caller's sp to sit just above
 // the new callee frame.
 77 template<typename FKind>
 78 frame FreezeBase::new_heap_frame(frame& f, frame& caller) {
 79   assert(FKind::is_instance(f), "");
 80   assert(!caller.is_interpreted_frame()
 81     || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");
 82 
 83   intptr_t *sp, *fp; // sp is really our unextended_sp
 84   if (FKind::interpreted) {
 85     assert((intptr_t*)f.at(frame::interpreter_frame_last_sp_offset) == nullptr
 86       || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
    // locals_offset is already relativized (an offset from fp, not an address),
    // so it can be copied verbatim into the heap frame below.
 87     intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
 88     // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
 89     // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
 90     bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
    // Place fp so the callee's locals overlap the caller's outgoing args when allowed.
 91     fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
 92     sp = fp - (f.fp() - f.unextended_sp());
 93     assert(sp <= fp, "");
 94     assert(fp <= caller.unextended_sp(), "");
 95     caller.set_sp(fp + frame::sender_sp_offset);
 96 
 97     assert(_cont.tail()->is_in_chunk(sp), "");
 98 
 99     frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
100     // copy relativized locals from the stack frame
101     *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
102     return hf;
103   } else {
104     // For a compiled frame we need to re-read fp out of the frame because it may be an
105     // oop and we might have had a safepoint in finalize_freeze, after constructing f.
106     // For stub/native frames the value is not used while frozen, and will be constructed again
107     // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
108     // help with debugging, particularly when inspecting frames and identifying invalid accesses.
109     fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
110 
111     int fsize = FKind::size(f);
112     sp = caller.unextended_sp() - fsize;
113     if (caller.is_interpreted_frame()) {
114       // If the caller is interpreted, our stackargs are not supposed to overlap with it
115       // so we make more room by moving sp down by argsize
116       int argsize = FKind::stack_argsize(f);
117       sp -= argsize;

118     }
119     caller.set_sp(sp + fsize);
120 
121     assert(_cont.tail()->is_in_chunk(sp), "");
122 
123     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
124   }
125 }
126 
 // Normalizes an interpreted frame's unextended_sp to the last_sp recorded in
 // the frame, when one is present (it can be null at a safepoint).
127 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
128   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
129   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
130   if (real_unextended_sp != nullptr) {
131     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
132   }
133 }
134 
 // The top interpreted frame has no last_sp recorded yet; record its
 // unextended_sp so freezing can treat it like any other interpreted frame.
135 inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
136   assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
137   f.interpreter_frame_set_last_sp(f.unextended_sp());
138 }
139 

166   assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
167   assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
168   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
169   assert(hf.unextended_sp() + extra_space >  (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
170   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
171   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
172 }
173 
 // Writes the return pc and saved fp words below the topmost heap frame's sp,
 // completing the frozen frame's metadata inside the chunk.
174 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
175   stackChunkOop chunk = _cont.tail();
176   assert(chunk->is_in_chunk(hf.sp() - 1), "");
177   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
178 
179   *(hf.sp() - 1) = (intptr_t)hf.pc();
180 
    // Interpreted frames store fp relativized (offset from the slot) so the
    // chunk can move; compiled frames store the raw value.
181   intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
182   *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
183                                        : (intptr_t)hf.fp();
184 }
185 
 // Patches the callee's saved-link slot in the heap copy so it refers to the
 // caller's fp: relativized when the caller is interpreted, absolute otherwise.
186 inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
187   if (caller.is_interpreted_frame()) {
188     assert(!caller.is_empty(), "");
189     patch_callee_link_relative(caller, caller.fp());
190   } else {

191     // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
192     // and its oop-containing fp fixed. We've now just overwritten it, so we must patch it back to its value
193     // as read from the chunk.
194     patch_callee_link(caller, caller.fp());
195   }
196 }
197 
 // Fills the saved-fp slot below sp with a recognizable bad value for frames
 // whose fp is unused while frozen (debugging aid).
198 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
199   intptr_t* fp_addr = sp - frame::sender_sp_offset;
200   *fp_addr = badAddressVal;
201 }
202 
203 inline intptr_t* AnchorMark::anchor_mark_set_pd() {
204   intptr_t* sp = _top_frame.sp();
205   if (_top_frame.is_interpreted_frame()) {
206     // In case the top frame is interpreted we need to set up the anchor using
207     // the last_sp saved in the frame (remove possible alignment added while
208     // thawing, see ThawBase::finish_thaw()). We also clear last_sp to match
209     // the behavior when calling the VM from the interpreter (we check for this
210     // in FreezeBase::prepare_freeze_interpreted_top_frame, which can be reached

241 
 // Prefetches the chunk memory about to be copied; size is given in words and
 // converted to bytes here. The two reads presumably touch both ends of the
 // region (offsets in bytes) -- confirm against Prefetch::read's contract.
242 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
243   size <<= LogBytesPerWord;
244   Prefetch::read(start, size);
245   Prefetch::read(start, size - 64);
246 }
247 
 // Fast-path thaw never needs caller-link fixing on this platform unless
 // PreserveFramePointer is set, which the fast path excludes (see can_thaw_fast()).
248 template <typename ConfigT>
249 inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
250   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
251   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
252 }
253 
254 // Slow path
255 
 // Reconstructs the continuation-entry frame from the sp/fp/pc recorded in the
 // ContinuationWrapper at entry time.
256 inline frame ThawBase::new_entry_frame() {
257   intptr_t* sp = _cont.entrySP();
258   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
259 }
260 
 // Computes where heap frame hf will be placed on the thread stack, given the
 // already-placed caller, and returns a frame describing that location. Also
 // lowers caller's sp to sit just above the new callee frame.
261 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
262   assert(FKind::is_instance(hf), "");
263   // The values in the returned frame object will be written into the callee's stack in patch.
264 
265   if (FKind::interpreted) {
266     intptr_t* heap_sp = hf.unextended_sp();
267     // If caller is interpreted it already made room for the callee arguments
268     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
269     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
270     intptr_t* frame_sp = caller.unextended_sp() - fsize;
271     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
    // Keep fp aligned to frame::frame_alignment; shift the whole frame down one
    // word if needed.
272     if ((intptr_t)fp % frame::frame_alignment != 0) {
273       fp--;
274       frame_sp--;
275       log_develop_trace(continuations)("Adding internal interpreted frame alignment");
276     }
277     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
278     assert(frame_sp == unextended_sp, "");
279     caller.set_sp(fp + frame::sender_sp_offset);
280     frame f(frame_sp, frame_sp, fp, hf.pc());
281     // we need to set the locals so that the caller of new_stack_frame() can call
282     // ContinuationHelper::InterpretedFrame::frame_bottom
283     // copy relativized locals from the heap frame
284     *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset);
285     assert((intptr_t)f.fp() % frame::frame_alignment == 0, "");
286     return f;
287   } else {
288     int fsize = FKind::size(hf);
289     intptr_t* frame_sp = caller.unextended_sp() - fsize;
    // When bottom (or caller interpreted), stack args must not overlap the
    // caller, so grow the frame by argsize and pull the caller's sp down too.
290     if (bottom || caller.is_interpreted_frame()) {
291       int argsize = FKind::stack_argsize(hf);
292 
293       fsize += argsize;
294       frame_sp   -= argsize;
295       caller.set_sp(caller.sp() - argsize);
296       assert(caller.sp() == frame_sp + (fsize-argsize), "");
297 
298       frame_sp = align(hf, frame_sp, caller, bottom);

299     }

300 
301     assert(hf.cb() != nullptr, "");
302     assert(hf.oop_map() != nullptr, "");
303     intptr_t* fp;
304     if (PreserveFramePointer) {
305       // we need to recreate a "real" frame pointer, pointing into the stack
306       fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
307     } else {
308       fp = FKind::stub || FKind::native
309         ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
310         : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
311     }
312     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
313   }
314 }
315 
 // Ensures frame_sp is 16-byte aligned (LP64 only), shifting both the new
 // frame and the caller's sp down one word when needed. No-op on 32-bit.
316 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
317 #ifdef _LP64
318   if (((intptr_t)frame_sp & 0xf) != 0) {
    // Misalignment can only come from an interpreted caller or an odd-sized
    // argarea of the bottom compiled frame.
319     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
320     frame_sp--;
321     caller.set_sp(caller.sp() - 1);
322   }
323   assert(is_aligned(frame_sp, frame::frame_alignment), "");
324 #endif
325 
326   return frame_sp;
327 }
328 
 // Thaw-side link patching: point the callee's saved-link slot at the caller's fp.
329 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
330   patch_callee_link(caller, caller.fp());


331 }
332 
 // Overload used when only the caller's sp is known: the saved fp lives
 // frame::sender_sp_offset words below it.
333 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
334   intptr_t* fp = caller_sp - frame::sender_sp_offset;
335   patch_callee_link(f, fp);
336 }
337 
 // Installs the cleanup pc as the return address of the enterSpecial frame and
 // returns its sp, so unwinding runs the continuation-cleanup stub.
338 inline intptr_t* ThawBase::push_cleanup_continuation() {
339   frame enterSpecial = new_entry_frame();
340   intptr_t* sp = enterSpecial.sp();
341 
342   // We only need to set the return pc. rfp will be restored back in gen_continuation_enter().
343   sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
344   return sp;
345 }
346 
347 inline intptr_t* ThawBase::push_preempt_adapter() {
348   frame enterSpecial = new_entry_frame();
349   intptr_t* sp = enterSpecial.sp();
350 

 42   *la = new_value;
 43 }
 44 
 45 ////// Freeze
 46 
 47 // Fast path
 48 
 // Fast-path freeze fix-up: make the saved-fp slot on the stack side match the
 // value recorded in the heap-chunk copy (one word below sp on both sides).
 49 inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
 50   // copy the spilled fp from the heap to the stack
 51   *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
 52 }
 53 
 54 // Slow path
 55 
 // Slow-path freeze: compute the sender (caller) frame of f. Compiled/stub
 // frames now obtain sender sp, saved-fp address and return-pc address in one
 // shot via frame::compiled_frame_details().
 56 template<typename FKind>
 57 inline frame FreezeBase::sender(const frame& f) {
 58   assert(FKind::is_instance(f), "");
 59   if (FKind::interpreted) {
    // Interpreted frames expose their sender directly through frame accessors.
 60     return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
 61   }

 62 
 63   frame::CompiledFramePointers cfp = f.compiled_frame_details();


 64 
    // Resolve the sender's code blob (and oopmap slot) so the returned frame is
    // fully initialized when the sender is compiled code.
 65   int slot = 0;
 66   CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(*cfp.sender_pc_addr, slot);
 67 
 68   return sender_cb != nullptr
 69     ? frame(cfp.sender_sp, cfp.sender_sp, *cfp.saved_fp_addr, *cfp.sender_pc_addr, sender_cb,
 70             slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, *cfp.sender_pc_addr), false)
 71     : frame(cfp.sender_sp, cfp.sender_sp, *cfp.saved_fp_addr, *cfp.sender_pc_addr);

 72 }
 73 
 // Computes where the copy of stack frame f will live inside the heap chunk,
 // given the (already placed) heap copy of its caller, and returns an on-heap
 // frame describing that location. size_adjust adds extra words below the
 // compiled frame (0 preserves the previous behavior).
 74 template<typename FKind>
 75 frame FreezeBase::new_heap_frame(frame& f, frame& caller, int size_adjust) {
 76   assert(FKind::is_instance(f), "");
 77   assert(!caller.is_interpreted_frame()
 78     || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");
 79 
 80   intptr_t *sp, *fp; // sp is really our unextended_sp
 81   if (FKind::interpreted) {
 82     assert((intptr_t*)f.at(frame::interpreter_frame_last_sp_offset) == nullptr
 83       || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
    // locals_offset is already relativized (an offset from fp, not an address),
    // so it can be copied verbatim into the heap frame below.
 84     intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
 85     // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
 86     // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
 87     bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
 88     fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
 89     sp = fp - (f.fp() - f.unextended_sp());
 90     assert(sp <= fp, "");
 91     assert(fp <= caller.unextended_sp(), "");
 92     caller.set_sp(fp + frame::sender_sp_offset);
 93 
 94     assert(_cont.tail()->is_in_chunk(sp), "");
 95 
 96     frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
 97     // copy relativized locals from the stack frame
 98     *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
 99     return hf;
100   } else {
101     // For a compiled frame we need to re-read fp out of the frame because it may be an
102     // oop and we might have had a safepoint in finalize_freeze, after constructing f.
103     // For stub/native frames the value is not used while frozen, and will be constructed again
104     // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
105     // help with debugging, particularly when inspecting frames and identifying invalid accesses.
106     fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
107 
108     int fsize = FKind::size(f);
109     sp = caller.unextended_sp() - fsize - size_adjust;
110     if (caller.is_interpreted_frame() && size_adjust == 0) {
111       // If the caller is interpreted, our stackargs are not supposed to overlap with it
112       // so we make more room by moving sp down by argsize
113       int argsize = FKind::stack_argsize(f);
114       sp -= argsize;
    // NOTE(review): caller.set_sp is now only updated on this branch; for
    // compiled callers (or size_adjust != 0) the caller's sp is presumably
    // maintained elsewhere -- confirm against the callers of new_heap_frame.
115       caller.set_sp(sp + fsize);
116     }

117 
118     assert(_cont.tail()->is_in_chunk(sp), "");
119 
120     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
121   }
122 }
123 
 // Normalizes an interpreted frame's unextended_sp to the last_sp recorded in
 // the frame, when one is present (it can be null at a safepoint).
124 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
125   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
126   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
127   if (real_unextended_sp != nullptr) {
128     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
129   }
130 }
131 
 // The top interpreted frame has no last_sp recorded yet; record its
 // unextended_sp so freezing can treat it like any other interpreted frame.
132 inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
133   assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
134   f.interpreter_frame_set_last_sp(f.unextended_sp());
135 }
136 

163   assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
164   assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
165   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
166   assert(hf.unextended_sp() + extra_space >  (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
167   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
168   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
169 }
170 
 // Writes the return pc and saved fp words below the topmost heap frame's sp,
 // completing the frozen frame's metadata inside the chunk.
171 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
172   stackChunkOop chunk = _cont.tail();
173   assert(chunk->is_in_chunk(hf.sp() - 1), "");
174   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
175 
176   *(hf.sp() - 1) = (intptr_t)hf.pc();
177 
    // Interpreted frames store fp relativized (offset from the slot) so the
    // chunk can move; compiled frames store the raw value.
178   intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
179   *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
180                                        : (intptr_t)hf.fp();
181 }
182 
 // Patches the callee's saved-link slot in the heap copy: relativized when the
 // caller is interpreted; absolute only for the bottom-most frame whose
 // compiled caller stayed frozen in the chunk. Other cases need no patching.
183 inline void FreezeBase::patch_pd(frame& hf, const frame& caller, bool is_bottom_frame) {
184   if (caller.is_interpreted_frame()) {
185     assert(!caller.is_empty(), "");
186     patch_callee_link_relative(caller, caller.fp());
187   } else if (is_bottom_frame && caller.pc() != nullptr) {
188     assert(caller.is_compiled_frame(), "");
189     // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
190     // and its oop-containing fp fixed. We've now just overwritten it, so we must patch it back to its value
191     // as read from the chunk.
192     patch_callee_link(caller, caller.fp());
193   }
194 }
195 
 // Fills the saved-fp slot below sp with a recognizable bad value for frames
 // whose fp is unused while frozen (debugging aid).
196 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
197   intptr_t* fp_addr = sp - frame::sender_sp_offset;
198   *fp_addr = badAddressVal;
199 }
200 
201 inline intptr_t* AnchorMark::anchor_mark_set_pd() {
202   intptr_t* sp = _top_frame.sp();
203   if (_top_frame.is_interpreted_frame()) {
204     // In case the top frame is interpreted we need to set up the anchor using
205     // the last_sp saved in the frame (remove possible alignment added while
206     // thawing, see ThawBase::finish_thaw()). We also clear last_sp to match
207     // the behavior when calling the VM from the interpreter (we check for this
208     // in FreezeBase::prepare_freeze_interpreted_top_frame, which can be reached

239 
 // Prefetches the chunk memory about to be copied; size is given in words and
 // converted to bytes here. The two reads presumably touch both ends of the
 // region (offsets in bytes) -- confirm against Prefetch::read's contract.
240 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
241   size <<= LogBytesPerWord;
242   Prefetch::read(start, size);
243   Prefetch::read(start, size - 64);
244 }
245 
 // Fast-path thaw never needs caller-link fixing on this platform unless
 // PreserveFramePointer is set, which the fast path excludes (see can_thaw_fast()).
246 template <typename ConfigT>
247 inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
248   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
249   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
250 }
251 
252 // Slow path
253 
 // Reconstructs the continuation-entry frame from the sp/fp/pc recorded in the
 // ContinuationWrapper at entry time.
254 inline frame ThawBase::new_entry_frame() {
255   intptr_t* sp = _cont.entrySP();
256   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
257 }
258 
 // Computes where heap frame hf will be placed on the thread stack, given the
 // already-placed caller, and returns a frame describing that location.
 // size_adjust adds extra words below the compiled frame and suppresses the
 // usual argsize extension (0 preserves the previous behavior).
259 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust) {
260   assert(FKind::is_instance(hf), "");
261   // The values in the returned frame object will be written into the callee's stack in patch.
262 
263   if (FKind::interpreted) {
264     intptr_t* heap_sp = hf.unextended_sp();
265     // If caller is interpreted it already made room for the callee arguments
266     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
267     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
268     intptr_t* frame_sp = caller.unextended_sp() - fsize;
269     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
    // Keep fp aligned to frame::frame_alignment; shift the whole frame down one
    // word if needed.
270     if ((intptr_t)fp % frame::frame_alignment != 0) {
271       fp--;
272       frame_sp--;
273       log_develop_trace(continuations)("Adding internal interpreted frame alignment");
274     }
275     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
276     assert(frame_sp == unextended_sp, "");
277     caller.set_sp(fp + frame::sender_sp_offset);
278     frame f(frame_sp, frame_sp, fp, hf.pc());
279     // we need to set the locals so that the caller of new_stack_frame() can call
280     // ContinuationHelper::InterpretedFrame::frame_bottom
281     // copy relativized locals from the heap frame
282     *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset);
283     assert((intptr_t)f.fp() % frame::frame_alignment == 0, "");
284     return f;
285   } else {
286     int fsize = FKind::size(hf);
287     intptr_t* frame_sp = caller.unextended_sp() - fsize - size_adjust;
    // When bottom (or caller interpreted), stack args must not overlap the
    // caller: move frame_sp down by argsize (unless size_adjust already made
    // room), then align and pull the caller's sp down to just above us.
288     if (bottom || caller.is_interpreted_frame()) {
289       if (size_adjust == 0) {
290         int argsize = FKind::stack_argsize(hf);
291         frame_sp -= argsize;
292       }



293       frame_sp = align(hf, frame_sp, caller, bottom);
294       caller.set_sp(frame_sp + fsize + size_adjust);
295     }
296     assert(is_aligned(frame_sp, frame::frame_alignment), "");
297 
298     assert(hf.cb() != nullptr, "");
299     assert(hf.oop_map() != nullptr, "");
300     intptr_t* fp;
301     if (PreserveFramePointer) {
302       // we need to recreate a "real" frame pointer, pointing into the stack
303       fp = frame_sp + fsize - frame::sender_sp_offset;
304     } else {
305       fp = FKind::stub || FKind::native
306         ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
307         : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
308     }
309     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
310   }
311 }
312 
 // Ensures frame_sp is 16-byte aligned (LP64 only). Unlike the previous
 // version, the caller's sp is NOT adjusted here; the caller of align() sets
 // it from the returned frame_sp. No-op on 32-bit.
313 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
314 #ifdef _LP64
315   if (((intptr_t)frame_sp & 0xf) != 0) {
    // Misalignment can only come from an interpreted caller or an odd-sized
    // argarea of the bottom compiled frame.
316     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
317     frame_sp--;

318   }
319   assert(is_aligned(frame_sp, frame::frame_alignment), "");
320 #endif

321   return frame_sp;
322 }
323 
 // Thaw-side link patching: only needed when the caller is interpreted or
 // when PreserveFramePointer requires a real fp chain.
324 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
325   if (caller.is_interpreted_frame() || PreserveFramePointer) {
326     patch_callee_link(caller, caller.fp());
327   }
328 }
329 
 // Overload used when only the caller's sp is known: the saved fp lives
 // frame::sender_sp_offset words below it.
330 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
331   intptr_t* fp = caller_sp - frame::sender_sp_offset;
332   patch_callee_link(f, fp);
333 }
334 
 // Installs the cleanup pc as the return address of the enterSpecial frame and
 // returns its sp, so unwinding runs the continuation-cleanup stub.
335 inline intptr_t* ThawBase::push_cleanup_continuation() {
336   frame enterSpecial = new_entry_frame();
337   intptr_t* sp = enterSpecial.sp();
338 
339   // We only need to set the return pc. rfp will be restored back in gen_continuation_enter().
340   sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
341   return sp;
342 }
343 
344 inline intptr_t* ThawBase::push_preempt_adapter() {
345   frame enterSpecial = new_entry_frame();
346   intptr_t* sp = enterSpecial.sp();
347 
< prev index next >