112 // so we make more room by moving sp down by argsize
113 int argsize = FKind::stack_argsize(f);
114 sp -= argsize;
115 }
116 caller.set_sp(sp + fsize);
117
118 assert(_cont.tail()->is_in_chunk(sp), "");
119
120 return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
121 }
122 }
123
124 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
125 assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
126 intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
127 if (real_unextended_sp != nullptr) {
128 f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
129 }
130 }
131
132 inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
133 assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
134 assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
135 || (f.unextended_sp() == f.sp()), "");
136 assert(f.fp() > (intptr_t*)f.at_relative(frame::interpreter_frame_initial_sp_offset), "");
137
138 // on AARCH64, we may insert padding between the locals and the rest of the frame
139 // (see TemplateInterpreterGenerator::generate_normal_entry, and AbstractInterpreter::layout_activation)
140 // because we freeze the padding word (see recurse_freeze_interpreted_frame) in order to keep the same relativized
141 // locals value, we don't need to change the locals value here.
142
143 // Make sure that last_sp is already relativized.
144 assert((intptr_t*)hf.at_relative(frame::interpreter_frame_last_sp_offset) == hf.unextended_sp(), "");
145
146 // Make sure that monitor_block_top is already relativized.
147 assert(hf.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");
148
149 // extended_sp is already relativized by TemplateInterpreterGenerator::generate_normal_entry or
150 // AbstractInterpreter::layout_activation
151
218 intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
219 if ((intptr_t)fp % frame::frame_alignment != 0) {
220 fp--;
221 frame_sp--;
222 log_develop_trace(continuations)("Adding internal interpreted frame alignment");
223 }
224 DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
225 assert(frame_sp == unextended_sp, "");
226 caller.set_sp(fp + frame::sender_sp_offset);
227 frame f(frame_sp, frame_sp, fp, hf.pc());
228 // we need to set the locals so that the caller of new_stack_frame() can call
229 // ContinuationHelper::InterpretedFrame::frame_bottom
230 // copy relativized locals from the heap frame
231 *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset);
232 assert((intptr_t)f.fp() % frame::frame_alignment == 0, "");
233 return f;
234 } else {
235 int fsize = FKind::size(hf);
236 intptr_t* frame_sp = caller.unextended_sp() - fsize;
237 if (bottom || caller.is_interpreted_frame()) {
238 int argsize = hf.compiled_frame_stack_argsize();
239
240 fsize += argsize;
241 frame_sp -= argsize;
242 caller.set_sp(caller.sp() - argsize);
243 assert(caller.sp() == frame_sp + (fsize-argsize), "");
244
245 frame_sp = align(hf, frame_sp, caller, bottom);
246 }
247
248 assert(hf.cb() != nullptr, "");
249 assert(hf.oop_map() != nullptr, "");
250 intptr_t* fp;
251 if (PreserveFramePointer) {
252 // we need to recreate a "real" frame pointer, pointing into the stack
253 fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
254 } else {
255 fp = FKind::stub
256 ? frame_sp + fsize - frame::sender_sp_offset // on AArch64, this value is used for the safepoint stub
257 : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
258 }
259 return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
260 }
261 }
262
263 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
264 #ifdef _LP64
265 if (((intptr_t)frame_sp & 0xf) != 0) {
266 assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
267 frame_sp--;
268 caller.set_sp(caller.sp() - 1);
269 }
270 assert(is_aligned(frame_sp, frame::frame_alignment), "");
271 #endif
272
273 return frame_sp;
274 }
275
276 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
277 patch_callee_link(caller, caller.fp());
278 }
279
inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
  // Sanity checks that the interpreter-frame metadata stored relativized in the
  // heap frame hf is still relativized in the thawed stack frame f.

  // Make sure that last_sp is kept relativized.
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");

  // Make sure that monitor_block_top is still relativized.
  assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

  // Make sure that extended_sp is kept relativized.
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp(), "");
}
290
291 #endif // CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP
|
112 // so we make more room by moving sp down by argsize
113 int argsize = FKind::stack_argsize(f);
114 sp -= argsize;
115 }
116 caller.set_sp(sp + fsize);
117
118 assert(_cont.tail()->is_in_chunk(sp), "");
119
120 return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
121 }
122 }
123
124 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
125 assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
126 intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
127 if (real_unextended_sp != nullptr) {
128 f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
129 }
130 }
131
132 inline void FreezeBase::prepare_freeze_interpreted_top_frame(const frame& f) {
133 assert(*f.addr_at(frame::interpreter_frame_last_sp_offset) == 0, "should be null for top frame");
134 intptr_t* lspp = f.addr_at(frame::interpreter_frame_last_sp_offset);
135 *lspp = f.unextended_sp() - f.fp();
136 }
137
138 inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
139 assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
140 assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
141 || (f.unextended_sp() == f.sp()), "");
142 assert(f.fp() > (intptr_t*)f.at_relative(frame::interpreter_frame_initial_sp_offset), "");
143
144 // on AARCH64, we may insert padding between the locals and the rest of the frame
145 // (see TemplateInterpreterGenerator::generate_normal_entry, and AbstractInterpreter::layout_activation)
146 // because we freeze the padding word (see recurse_freeze_interpreted_frame) in order to keep the same relativized
147 // locals value, we don't need to change the locals value here.
148
149 // Make sure that last_sp is already relativized.
150 assert((intptr_t*)hf.at_relative(frame::interpreter_frame_last_sp_offset) == hf.unextended_sp(), "");
151
152 // Make sure that monitor_block_top is already relativized.
153 assert(hf.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");
154
155 // extended_sp is already relativized by TemplateInterpreterGenerator::generate_normal_entry or
156 // AbstractInterpreter::layout_activation
157
224 intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
225 if ((intptr_t)fp % frame::frame_alignment != 0) {
226 fp--;
227 frame_sp--;
228 log_develop_trace(continuations)("Adding internal interpreted frame alignment");
229 }
230 DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
231 assert(frame_sp == unextended_sp, "");
232 caller.set_sp(fp + frame::sender_sp_offset);
233 frame f(frame_sp, frame_sp, fp, hf.pc());
234 // we need to set the locals so that the caller of new_stack_frame() can call
235 // ContinuationHelper::InterpretedFrame::frame_bottom
236 // copy relativized locals from the heap frame
237 *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset);
238 assert((intptr_t)f.fp() % frame::frame_alignment == 0, "");
239 return f;
240 } else {
241 int fsize = FKind::size(hf);
242 intptr_t* frame_sp = caller.unextended_sp() - fsize;
243 if (bottom || caller.is_interpreted_frame()) {
244 int argsize = FKind::stack_argsize(hf);
245
246 fsize += argsize;
247 frame_sp -= argsize;
248 caller.set_sp(caller.sp() - argsize);
249 assert(caller.sp() == frame_sp + (fsize-argsize), "");
250
251 frame_sp = align(hf, frame_sp, caller, bottom);
252 }
253
254 assert(hf.cb() != nullptr, "");
255 assert(hf.oop_map() != nullptr, "");
256 intptr_t* fp;
257 if (PreserveFramePointer) {
258 // we need to recreate a "real" frame pointer, pointing into the stack
259 fp = frame_sp + FKind::size(hf) - frame::sender_sp_offset;
260 } else {
261 fp = FKind::stub
262 ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
263 : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
264 }
265 return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
266 }
267 }
268
269 inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
270 #ifdef _LP64
271 if (((intptr_t)frame_sp & 0xf) != 0) {
272 assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
273 frame_sp--;
274 caller.set_sp(caller.sp() - 1);
275 }
276 assert(is_aligned(frame_sp, frame::frame_alignment), "");
277 #endif
278
279 return frame_sp;
280 }
281
282 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
283 patch_callee_link(caller, caller.fp());
284 }
285
inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
  // This overload is not expected to be reached on this platform; only the
  // (frame&, const frame&) overload above is implemented here.
  Unimplemented();
}
289
// Pushes an adapter frame on top of the thawed frames so that, when the thawed
// continuation resumes, control first passes through the rerun adapter stub
// (interpreter or compiler variant) chosen below. Returns the new sp.
inline intptr_t* ThawBase::push_preempt_rerun_adapter(frame top, bool is_interpreted_frame) {
  intptr_t* sp = top.sp();
  CodeBlob* cb = top.cb();
  if (!is_interpreted_frame && cb->frame_size() == 2) {
    // C2 runtime stub case. For aarch64 the real size of the c2 runtime stub is 2 words bigger
    // than what we think, i.e. size is 4. This is because the _last_Java_sp is not set to the
    // sp right before making the call to the VM, but rather it is artificially set 2 words above
    // this real sp so that we can store the return address at last_Java_sp[-1], and keep this
    // property where we can retrieve the last_Java_pc from the last_Java_sp. But that means that
    // once we return to the runtime stub, the code will adjust sp according to this real size.
    // So we must adjust the frame size back here. We just copy lr/rfp again. These 2 top words
    // will be the ones popped in generate_cont_preempt_rerun_compiler_adapter(). The other 2 words
    // will just be discarded once back in the runtime stub (add sp, sp, #0x10).
    sp -= 2;
    sp[-2] = sp[0];  // duplicate the saved rfp two words down
    sp[-1] = sp[1];  // duplicate the saved lr two words down
  }

  // fp for the adapter frame; on aarch64 sender_sp_offset words below sp.
  intptr_t* fp = sp - frame::sender_sp_offset;
  address pc = is_interpreted_frame ? Interpreter::cont_preempt_rerun_interpreter_adapter()
                                    : StubRoutines::cont_preempt_rerun_compiler_adapter();

  // Push the adapter frame's metadata: return pc and saved fp.
  sp -= frame::metadata_words;
  *(address*)(sp - frame::sender_sp_ret_address_offset()) = pc;
  *(intptr_t**)(sp - frame::sender_sp_offset) = fp;

  log_develop_trace(continuations, preempt)("push_preempt_rerun_%s_adapter() initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT,
    is_interpreted_frame ? "interpreter" : "safepointblob", p2i(sp + frame::metadata_words), p2i(sp), p2i(fp));
  return sp;
}
320
// Arranges the stack, after a preempted monitorenter, so that resuming the
// continuation first runs the monitorenter_redo stub (to retry acquiring the
// monitor) and then falls through to the return barrier. Returns the new sp.
inline intptr_t* ThawBase::push_preempt_monitorenter_redo(stackChunkOop chunk) {
  frame enterSpecial = new_entry_frame();
  intptr_t* sp = enterSpecial.sp();

  // First push the return barrier frame
  sp -= frame::metadata_words;
  sp[1] = (intptr_t)StubRoutines::cont_returnBarrier();
  sp[0] = (intptr_t)enterSpecial.fp();

  // Now push the ObjectMonitor*
  sp -= frame::metadata_words;
  sp[1] = (intptr_t)chunk->objectMonitor(); // alignment slot; also holds the monitor
  sp[0] = (intptr_t)chunk->objectMonitor();

  // Finally arrange to return to the monitorenter_redo stub
  sp[-1] = (intptr_t)StubRoutines::cont_preempt_monitorenter_redo();
  sp[-2] = (intptr_t)enterSpecial.fp();
  log_develop_trace(continuations, preempt)("push_preempt_monitorenter_redo initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
  return sp;
}
343
inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
  // Sanity checks that the interpreter-frame metadata stored relativized in the
  // heap frame hf is still relativized in the thawed stack frame f.

  // Make sure that last_sp is kept relativized.
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");

  // Make sure that monitor_block_top is still relativized.
  assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

  // Make sure that extended_sp is kept relativized.
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp(), "");
}
354
355 #endif // CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP
|