src/hotspot/cpu/x86/continuationFreezeThaw_x86.inline.hpp

109       // so we make more room by moving sp down by argsize
110       int argsize = FKind::stack_argsize(f);
111       sp -= argsize;
112     }
113     caller.set_sp(sp + fsize);
114 
115     assert(_cont.tail()->is_in_chunk(sp), "");
116 
117     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
118   }
119 }
120 
121 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
122   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
123   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
124   if (real_unextended_sp != nullptr) {
125     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
126   }
127 }
128 
129 inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
130   assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
131   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
132     || (f.unextended_sp() == f.sp()), "");
133   assert(f.fp() > (intptr_t*)f.at_relative(frame::interpreter_frame_initial_sp_offset), "");
134 
135   // Make sure that last_sp is already relativized.
136   assert((intptr_t*)hf.at_relative(frame::interpreter_frame_last_sp_offset) == hf.unextended_sp(), "");
137 
138   // Make sure that locals is already relativized.
139   assert((*hf.addr_at(frame::interpreter_frame_locals_offset) == frame::sender_sp_offset + f.interpreter_frame_method()->max_locals() - 1), "");
140 
141   // Make sure that monitor_block_top is already relativized.
142   assert(hf.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");
143 
144   assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
145   assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
146   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
147   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
148   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
149 }
150 
151 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
152   stackChunkOop chunk = _cont.tail();
153   assert(chunk->is_in_chunk(hf.sp() - 1), "");
154   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
155 
156   address frame_pc = hf.pc();
157 
158   *(hf.sp() - 1) = (intptr_t)hf.pc();
159 

190   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
191   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
192 }
193 
194 // Slow path
195 
196 inline frame ThawBase::new_entry_frame() {
197   intptr_t* sp = _cont.entrySP();
198   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
199 }
200 
201 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
202   assert(FKind::is_instance(hf), "");
203   // The values in the returned frame object will be written into the callee's stack in patch.
204 
205   if (FKind::interpreted) {
206     intptr_t* heap_sp = hf.unextended_sp();
207     // If caller is interpreted it already made room for the callee arguments
208     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
209     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
210     const int locals = hf.interpreter_frame_method()->max_locals();
211     intptr_t* frame_sp = caller.unextended_sp() - fsize;
212     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
213     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
214     assert(frame_sp == unextended_sp, "");
215     caller.set_sp(fp + frame::sender_sp_offset);
216     frame f(frame_sp, frame_sp, fp, hf.pc());
217     // we need to set the locals so that the caller of new_stack_frame() can call
218     // ContinuationHelper::InterpretedFrame::frame_bottom
219     intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
220     assert((int)locals_offset == frame::sender_sp_offset + locals - 1, "");
221     // copy relativized locals from the heap frame
222     *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
223     return f;
224   } else {
225     int fsize = FKind::size(hf);
226     intptr_t* frame_sp = caller.unextended_sp() - fsize;
227     if (bottom || caller.is_interpreted_frame()) {
228       int argsize = hf.compiled_frame_stack_argsize();
229 
230       fsize += argsize;
231       frame_sp   -= argsize;
232       caller.set_sp(caller.sp() - argsize);
233       assert(caller.sp() == frame_sp + (fsize-argsize), "");
234 
235       frame_sp = align(hf, frame_sp, caller, bottom);
236     }
237 
238     assert(hf.cb() != nullptr, "");
239     assert(hf.oop_map() != nullptr, "");
240     intptr_t* fp;
241     if (PreserveFramePointer) {
242       // we need to recreate a "real" frame pointer, pointing into the stack
243       fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
244     } else {
245        // we need to re-read fp because it may be an oop and we might have fixed the frame.
246       fp = *(intptr_t**)(hf.sp() - frame::sender_sp_offset);
247     }
248     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
249   }
250 }
251 
252 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
253 #ifdef _LP64
254   if (((intptr_t)frame_sp & 0xf) != 0) {
255     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
256     frame_sp--;
257     caller.set_sp(caller.sp() - 1);
258   }
259   assert(is_aligned(frame_sp, frame::frame_alignment), "");
260 #endif
261 
262   return frame_sp;
263 }
264 
265 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
266   patch_callee_link(caller, caller.fp());
267 }
268 
269 inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
270   // Make sure that last_sp is kept relativized.
271   assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");
272 
273   // Make sure that monitor_block_top is still relativized.
274   assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");
275 }
276 
277 #endif // CPU_X86_CONTINUATIONFREEZE_THAW_X86_INLINE_HPP

109       // so we make more room by moving sp down by argsize
110       int argsize = FKind::stack_argsize(f);
111       sp -= argsize;
112     }
113     caller.set_sp(sp + fsize);
114 
115     assert(_cont.tail()->is_in_chunk(sp), "");
116 
117     return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
118   }
119 }
120 
121 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
122   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
123   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
124   if (real_unextended_sp != nullptr) {
125     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
126   }
127 }
128 
129 inline void FreezeBase::prepare_freeze_interpreted_top_frame(const frame& f) {
130   assert(*f.addr_at(frame::interpreter_frame_last_sp_offset) == 0, "should be null for top frame");
131   intptr_t* lspp = f.addr_at(frame::interpreter_frame_last_sp_offset);
132   *lspp = f.unextended_sp() - f.fp();
133 }
134 
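For context on the change above: interpreted-frame metadata slots such as last_sp normally hold absolute addresses into the thread stack. Before a frame is copied into a heap stack chunk it is "relativized", i.e. those slots are rewritten as word offsets from fp, so the copy stays self-describing wherever it is later placed. prepare_freeze_interpreted_top_frame does this for the top frame, whose last_sp slot is still null at that point; the value it stores is unextended_sp() - fp(), which is negative on x86 since the stack grows toward lower addresses. A minimal sketch of the idea, with illustrative helper names that are not HotSpot APIs:

    #include <cstdint>

    // Store an absolute in-frame pointer as a word offset from fp ("relativize"),
    // and recover it later against whatever fp the copied frame ends up with.
    static void relativize_slot(intptr_t* fp, int slot, intptr_t* absolute) {
      fp[slot] = absolute - fp;   // pointer difference in words, may be negative
    }

    static intptr_t* derelativize_slot(intptr_t* fp, int slot) {
      return fp + fp[slot];
    }
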
135 inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
136   assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
137   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
138     || (f.unextended_sp() == f.sp()), "");
139   assert(f.fp() > (intptr_t*)f.at_relative(frame::interpreter_frame_initial_sp_offset), "");
140 
141   // Make sure that last_sp is already relativized.
142   assert((intptr_t*)hf.at_relative(frame::interpreter_frame_last_sp_offset) == hf.unextended_sp(), "");
143 
144   // Make sure that locals is already relativized.
145   DEBUG_ONLY(Method* m = f.interpreter_frame_method();)
146   DEBUG_ONLY(int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
147   assert((*hf.addr_at(frame::interpreter_frame_locals_offset) == frame::sender_sp_offset + max_locals - 1), "");
148 
149   // Make sure that monitor_block_top is already relativized.
150   assert(hf.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");
151 
152   assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
153   assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
154   assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
155   assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
156   assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
157 }
158 
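The locals assert above encodes where the heap frame keeps local #0: frame::sender_sp_offset words above fp (past the saved fp and return pc) plus the rest of the locals area. The updated version also covers native method wrappers, whose locals area is sized by size_of_parameters() + 2 rather than max_locals. A small compile-time check of that arithmetic, with sender_sp_offset hard-coded to the x86 value of 2 purely for illustration:

    // Relativized (fp-based) word index of local #0 in a frozen interpreted frame;
    // local #i is then read as fp[locals_offset - i]. Illustrative only.
    constexpr int relativized_locals_offset(int sender_sp_offset, int max_locals) {
      return sender_sp_offset + max_locals - 1;
    }

    static_assert(relativized_locals_offset(2, 3) == 4,
                  "with 3 locals, local #0 sits 4 words above fp");
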
159 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
160   stackChunkOop chunk = _cont.tail();
161   assert(chunk->is_in_chunk(hf.sp() - 1), "");
162   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
163 
164   address frame_pc = hf.pc();
165 
166   *(hf.sp() - 1) = (intptr_t)hf.pc();
167 

198   // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
199   assert(!PreserveFramePointer, "Frame pointers need to be fixed");
200 }
201 
202 // Slow path
203 
204 inline frame ThawBase::new_entry_frame() {
205   intptr_t* sp = _cont.entrySP();
206   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
207 }
208 
209 template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom) {
210   assert(FKind::is_instance(hf), "");
211   // The values in the returned frame object will be written into the callee's stack in patch.
212 
213   if (FKind::interpreted) {
214     intptr_t* heap_sp = hf.unextended_sp();
215     // If caller is interpreted it already made room for the callee arguments
216     int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
217     const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
218     intptr_t* frame_sp = caller.unextended_sp() - fsize;
219     intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
220     DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
221     assert(frame_sp == unextended_sp, "");
222     caller.set_sp(fp + frame::sender_sp_offset);
223     frame f(frame_sp, frame_sp, fp, hf.pc());
224     // we need to set the locals so that the caller of new_stack_frame() can call
225     // ContinuationHelper::InterpretedFrame::frame_bottom
226     intptr_t locals_offset = *hf.addr_at(frame::interpreter_frame_locals_offset);
227     DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
228     DEBUG_ONLY(const int max_locals = !m->is_native() ? m->max_locals() : m->size_of_parameters() + 2;)
229     assert((int)locals_offset == frame::sender_sp_offset + max_locals - 1, "");
230     // copy relativized locals from the heap frame
231     *f.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
232     return f;
233   } else {
234     int fsize = FKind::size(hf);
235     intptr_t* frame_sp = caller.unextended_sp() - fsize;
236     if (bottom || caller.is_interpreted_frame()) {
237       int argsize = FKind::stack_argsize(hf);
238 
239       fsize += argsize;
240       frame_sp   -= argsize;
241       caller.set_sp(caller.sp() - argsize);
242       assert(caller.sp() == frame_sp + (fsize-argsize), "");
243 
244       frame_sp = align(hf, frame_sp, caller, bottom);
245     }
246 
247     assert(hf.cb() != nullptr, "");
248     assert(hf.oop_map() != nullptr, "");
249     intptr_t* fp;
250     if (PreserveFramePointer) {
251       // we need to recreate a "real" frame pointer, pointing into the stack
252       fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
253     } else {
254       fp = FKind::stub || FKind::native
255         ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need the correct address.
256         : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
257     }
258     return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
259   }
260 }
261 
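A note on the compiled-frame branch above: the frame is normally laid out immediately below the caller's unextended sp, but when it is the bottom frame of the thawed chunk, or its caller is interpreted, the caller has not reserved the outgoing stack-argument area, so the frame is grown downward by argsize and the caller's sp is pulled down to match. A rough sketch of that sp arithmetic, in words, with illustrative names rather than HotSpot APIs:

    #include <cstdint>

    // Compute where a thawed compiled frame starts on the thread stack.
    static intptr_t* place_compiled_frame(intptr_t* caller_unextended_sp,
                                          int frame_size_words,      // FKind::size(hf)
                                          int stack_argsize_words,   // args passed on the stack
                                          bool caller_reserved_args) {
      intptr_t* frame_sp = caller_unextended_sp - frame_size_words;
      if (!caller_reserved_args) {
        // Bottom frame or interpreted caller: also cover the argument area
        // the caller would normally have provided.
        frame_sp -= stack_argsize_words;
      }
      return frame_sp;
    }
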
262 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
263 #ifdef _LP64
264   if (((intptr_t)frame_sp & 0xf) != 0) {
265     assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
266     frame_sp--;
267     caller.set_sp(caller.sp() - 1);
268   }
269   assert(is_aligned(frame_sp, frame::frame_alignment), "");
270 #endif
271 
272   return frame_sp;
273 }
274 
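align() enforces the x86-64 requirement that sp be 16-byte aligned at frame boundaries: if the computed frame_sp ends up only 8-byte aligned (which the assert allows only for an interpreted caller or an odd argsize in the bottom frame), both frame_sp and the caller's sp are moved down by one extra word. A minimal sketch of that fix-up, assuming 8-byte stack words:

    #include <cstdint>

    static intptr_t* align_down_to_16(intptr_t* sp) {
      if ((reinterpret_cast<intptr_t>(sp) & 0xf) != 0) {
        sp--;   // drop one 8-byte word to reach the next 16-byte boundary
      }
      return sp;
    }
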
275 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
276   patch_callee_link(caller, caller.fp());
277 }
278 
279 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
280   intptr_t* fp = caller_sp - frame::sender_sp_offset;
281   patch_callee_link(f, fp);
282 }
283 
284 inline void ThawBase::fix_native_wrapper_return_pc_pd(frame& top) {
285   bool from_interpreted = top.is_interpreted_frame();
286   address resume_address = from_interpreted ? Interpreter::native_frame_resume_entry() : SharedRuntime::native_frame_resume_entry();
287   DEBUG_ONLY(Method* method = from_interpreted ? top.interpreter_frame_method() : CodeCache::find_blob(resume_address)->as_nmethod()->method();)
288   assert(method->is_object_wait0(), "");
289   ContinuationHelper::Frame::patch_pc(top, resume_address);
290 }
291 
292 inline intptr_t* ThawBase::push_resume_adapter(frame& top) {
293   intptr_t* sp = top.sp();
294 
295 #ifdef ASSERT
296   RegisterMap map(JavaThread::current(),
297                   RegisterMap::UpdateMap::skip,
298                   RegisterMap::ProcessFrames::skip,
299                   RegisterMap::WalkContinuation::skip);
300   frame caller = top.sender(&map);
301   intptr_t link_addr = (intptr_t)ContinuationHelper::Frame::callee_link_address(caller);
302   assert(sp[-2] == link_addr, "wrong link address: " INTPTR_FORMAT " != " INTPTR_FORMAT, sp[-2], link_addr);
303 #endif
304 
305   intptr_t* fp = sp - frame::sender_sp_offset;
306   address pc = top.is_interpreted_frame() ? Interpreter::cont_resume_interpreter_adapter()
307                                           : StubRoutines::cont_resume_compiler_adapter();
308 
309   sp -= frame::metadata_words;
310   *(address*)(sp - frame::sender_sp_ret_address_offset()) = pc;
311   *(intptr_t**)(sp - frame::sender_sp_offset) = fp;
312 
313   log_develop_trace(continuations, preempt)("push_resume_%s_adapter() initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT,
314     top.is_interpreted_frame() ? "interpreter" : "compiler", p2i(sp + frame::metadata_words), p2i(sp), p2i(fp));
315   return sp;
316 }
317 
318 inline intptr_t* ThawBase::push_resume_monitor_operation(stackChunkOop chunk) {
319   frame enterSpecial = new_entry_frame();
320   intptr_t* sp = enterSpecial.sp();
321 
322   // First push the return barrier frame
323   sp -= frame::metadata_words;
324   sp[1] = (intptr_t)StubRoutines::cont_returnBarrier();
325   sp[0] = (intptr_t)enterSpecial.fp();
326 
327   // Now push the ObjectWaiter*
328   sp -= frame::metadata_words;
329   sp[1] = (intptr_t)chunk->object_waiter(); // alignment
330   sp[0] = (intptr_t)chunk->object_waiter();
331 
332   // Finally arrange to return to the resume_monitor_operation stub
333   sp[-1] = (intptr_t)StubRoutines::cont_resume_monitor_operation();
334   sp[-2] = (intptr_t)enterSpecial.fp();
335 
336   log_develop_trace(continuations, preempt)("push_resume_monitor_operation initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
337   return sp;
338 }
339 
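The two push_resume_* helpers above build small hand-made frames on the thread stack so that, after a preemption, execution returns through the right resume stub: each pushed pair is a saved fp with the return pc just above it, which is what a call followed by the standard prologue would have left behind (frame::metadata_words is 2 on x86). A minimal sketch of that pattern, with illustrative names rather than HotSpot APIs:

    #include <cstdint>

    // Push a fake [saved fp | return pc] pair onto a downward-growing word stack.
    static intptr_t* push_fake_frame(intptr_t* sp, void* return_pc, intptr_t* saved_fp) {
      sp -= 2;                       // assumes frame::metadata_words == 2 (x86)
      sp[1] = (intptr_t)return_pc;   // a 'ret' in the frame below lands here
      sp[0] = (intptr_t)saved_fp;    // restored into rbp by the standard epilogue
      return sp;
    }
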
340 inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
341   // Make sure that last_sp is kept relativized.
342   assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");
343 
344   // Make sure that monitor_block_top is still relativized.
345   assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");
346 }
347 
348 #endif // CPU_X86_CONTINUATIONFREEZE_THAW_X86_INLINE_HPP