1 /*
  2  * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP
 26 #define CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP
 27 
 28 #include "code/codeBlob.inline.hpp"
 29 #include "oops/stackChunkOop.inline.hpp"
 30 #include "runtime/frame.hpp"
 31 #include "runtime/frame.inline.hpp"
 32 
 33 
// Overwrite the callee's saved-fp slot (the "link") of frame f with fp.
// Used when re-attaching a frame to its caller during freeze/thaw.
inline void patch_callee_link(const frame& f, intptr_t* fp) {
  // 'orig' exists only in debug builds; it captures the previous link value
  // so it can be inspected in a debugger (intentionally otherwise unused).
  DEBUG_ONLY(intptr_t* orig = *ContinuationHelper::Frame::callee_link_address(f));
  *ContinuationHelper::Frame::callee_link_address(f) = fp;
}
 38 
 39 inline void patch_callee_link_relative(const frame& f, intptr_t* fp) {
 40   intptr_t* la = (intptr_t*)ContinuationHelper::Frame::callee_link_address(f);
 41   intptr_t new_value = fp - la;
 42   *la = new_value;
 43 }
 44 
 45 ////// Freeze
 46 
 47 // Fast path
 48 
 49 inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
 50   // copy the spilled fp from the heap to the stack
 51   *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
 52 }
 53 
 54 // Slow path
 55 
 56 template<typename FKind>
 57 inline frame FreezeBase::sender(const frame& f) {
 58   assert(FKind::is_instance(f), "");
 59   if (FKind::interpreted) {
 60     return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
 61   }
 62   intptr_t** link_addr = link_address<FKind>(f);
 63 
 64   intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); //  f.unextended_sp() + (fsize/wordSize); //
 65   address sender_pc = ContinuationHelper::return_address_at(sender_sp - 1);
 66   assert(sender_sp != f.sp(), "must have changed");
 67 
 68   int slot = 0;
 69   CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);
 70 
 71   // Repair the sender sp if the frame has been extended
 72   if (sender_cb->is_nmethod()) {
 73     sender_sp = f.repair_sender_sp(sender_sp, link_addr);
 74   }
 75 
 76   return sender_cb != nullptr
 77     ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
 78             slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc),
 79             false /* on_heap ? */)
 80     : frame(sender_sp, sender_sp, *link_addr, sender_pc);
 81 }
 82 
// Build the heap (chunk) frame 'hf' into which the stack frame 'f' will be
// copied, positioning it below 'caller' in the chunk. Returns the new heap
// frame; also moves caller's sp down to abut the new frame.
template<typename FKind>
frame FreezeBase::new_heap_frame(frame& f, frame& caller, int size_adjust) {
  assert(FKind::is_instance(f), "");
  assert(!caller.is_interpreted_frame()
    || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");

  intptr_t *sp, *fp; // sp is really our unextended_sp
  if (FKind::interpreted) {
    assert((intptr_t*)f.at(frame::interpreter_frame_last_sp_offset) == nullptr
      || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
    // The relativized locals offset, preserved verbatim in the heap frame below.
    intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
    // If the caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
    // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
    bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
    // Place fp so the callee's locals overlap the caller's outgoing args when allowed.
    fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
    // Keep the same fp-to-unextended_sp distance as the stack frame.
    sp = fp - (f.fp() - f.unextended_sp());
    assert(sp <= fp, "");
    assert(fp <= caller.unextended_sp(), "");
    caller.set_sp(fp + frame::sender_sp_offset);

    assert(_cont.tail()->is_in_chunk(sp), "");

    frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
    // copy relativized locals from the stack frame
    *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
    return hf;
  } else {
    // For a compiled frame we need to re-read fp out of the frame because it may be an
    // oop and we might have had a safepoint in finalize_freeze, after constructing f.
    // For stub/native frames the value is not used while frozen, and will be constructed again
    // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
    // help with debugging, particularly when inspecting frames and identifying invalid accesses.
    fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;

    int fsize = FKind::size(f);
    sp = caller.unextended_sp() - fsize - size_adjust;
    if (caller.is_interpreted_frame() && size_adjust == 0) {
      // If the caller is interpreted, our stackargs are not supposed to overlap with it
      // so we make more room by moving sp down by argsize
      int argsize = FKind::stack_argsize(f);
      sp -= argsize;
    }
    caller.set_sp(sp + fsize);

    assert(_cont.tail()->is_in_chunk(sp), "");

    return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
  }
}
132 
133 void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
134   assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
135   intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
136   if (real_unextended_sp != nullptr) {
137     f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
138   }
139 }
140 
// Before freezing an interpreted top frame, record its unextended_sp into the
// frame's last_sp slot (which must be null for a top frame), so the frozen
// frame carries a valid last_sp like any non-top interpreted frame.
inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
  assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
  f.interpreter_frame_set_last_sp(f.unextended_sp());
}
145 
// Verify that the interpreted heap frame hf (copied from stack frame f) has
// all of its interpreter metadata (last_sp, monitor_block_top, extended_sp,
// locals) properly relativized. On AArch64 this is assert-only: the values
// are already stored relativized by the interpreter, so no rewriting is done.
inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
  assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
  assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
    || (f.unextended_sp() == f.sp()), "");
  assert(f.fp() > (intptr_t*)f.at_relative(frame::interpreter_frame_initial_sp_offset), "");

  // on AARCH64, we may insert padding between the locals and the rest of the frame
  // (see TemplateInterpreterGenerator::generate_normal_entry, and AbstractInterpreter::layout_activation)
  // because we freeze the padding word (see recurse_freeze_interpreted_frame) in order to keep the same relativized
  // locals value, we don't need to change the locals value here.

  // Make sure that last_sp is already relativized.
  assert((intptr_t*)hf.at_relative(frame::interpreter_frame_last_sp_offset) == hf.unextended_sp(), "");

  // Make sure that monitor_block_top is already relativized.
  assert(hf.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

  // extended_sp is already relativized by TemplateInterpreterGenerator::generate_normal_entry or
  // AbstractInterpreter::layout_activation

  // The interpreter native wrapper code adds space in the stack equal to size_of_parameters()
  // after the fixed part of the frame. For wait0 this is equal to 3 words (this + long parameter).
  // We adjust by this size since otherwise the saved last sp will be less than the extended_sp.
  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;)

  assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
  assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
  assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
  assert(hf.unextended_sp() + extra_space >  (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
  assert(hf.fp()            >  (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
  assert(hf.fp()            <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
}
179 
180 inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
181   stackChunkOop chunk = _cont.tail();
182   assert(chunk->is_in_chunk(hf.sp() - 1), "");
183   assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");
184 
185   *(hf.sp() - 1) = (intptr_t)hf.pc();
186 
187   intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
188   *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
189                                        : (intptr_t)hf.fp();
190 }
191 
// Patch the callee link of the (heap) caller after hf has been frozen below it.
// Interpreted callers get a relativized link; a compiled caller only needs
// re-patching when hf is the bottom-most frame of this freeze (see below).
inline void FreezeBase::patch_pd(frame& hf, const frame& caller, bool is_bottom_frame) {
  if (caller.is_interpreted_frame()) {
    assert(!caller.is_empty(), "");
    patch_callee_link_relative(caller, caller.fp());
  } else if (is_bottom_frame && caller.pc() != nullptr) {
    assert(caller.is_compiled_frame(), "");
    // If we're the bottom-most frame frozen in this freeze, the caller might have stayed frozen in the chunk,
    // and its oop-containing fp fixed. We've now just overwritten it, so we must patch it back to its value
    // as read from the chunk.
    patch_callee_link(caller, caller.fp());
  }
}
204 
205 inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
206   intptr_t* fp_addr = sp - frame::sender_sp_offset;
207   *fp_addr = badAddressVal;
208 }
209 
// Compute the sp to publish in the JavaFrameAnchor for the top frame.
// For compiled top frames this is just the frame's sp; interpreted top frames
// need extra massaging described inline below. Returns the anchor sp.
inline intptr_t* AnchorMark::anchor_mark_set_pd() {
  intptr_t* sp = _top_frame.sp();
  if (_top_frame.is_interpreted_frame()) {
    // In case the top frame is interpreted we need to set up the anchor using
    // the last_sp saved in the frame (remove possible alignment added while
    // thawing, see ThawBase::finish_thaw()). We also clear last_sp to match
    // the behavior when calling the VM from the interpreter (we check for this
    // in FreezeBase::prepare_freeze_interpreted_top_frame, which can be reached
    // if preempting again at redo_vmcall()).
    _last_sp_from_frame = _top_frame.interpreter_frame_last_sp();
    assert(_last_sp_from_frame != nullptr, "");
    _top_frame.interpreter_frame_set_last_sp(nullptr);
    if (sp != _last_sp_from_frame) {
      // We need to move up return pc and fp. They will be read next in
      // set_anchor() and set as _last_Java_pc and _last_Java_fp respectively.
      _last_sp_from_frame[-1] = (intptr_t)_top_frame.pc();
      _last_sp_from_frame[-2] = (intptr_t)_top_frame.fp();
    }
    _is_interpreted = true;    // remembered so anchor_mark_clear_pd() can undo
    sp = _last_sp_from_frame;
  }
  return sp;
}
233 
// Undo the interpreted-frame adjustments made by anchor_mark_set_pd():
// restore the frame's last_sp slot and re-write the pc at sp[-1].
// NOTE(review): only the pc word is restored here, not the fp word written
// at _last_sp_from_frame[-2] in anchor_mark_set_pd() — presumably the stack
// slot at sp[-2] was not clobbered by that write; verify against set_pd.
inline void AnchorMark::anchor_mark_clear_pd() {
  if (_is_interpreted) {
    // Restore last_sp_from_frame and possibly overwritten pc.
    _top_frame.interpreter_frame_set_last_sp(_last_sp_from_frame);
    intptr_t* sp = _top_frame.sp();
    if (sp != _last_sp_from_frame) {
      sp[-1] = (intptr_t)_top_frame.pc();
    }
  }
}
244 
245 //////// Thaw
246 
247 // Fast path
248 
249 inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
250   size <<= LogBytesPerWord;
251   Prefetch::read(start, size);
252   Prefetch::read(start, size - 64);
253 }
254 
// Fast-path thaw needs no caller-link fixups on AArch64: without
// PreserveFramePointer the saved fp slots are not relied upon, so this is
// assert-only. (With PreserveFramePointer the fast path is disabled; see
// can_thaw_fast().)
template <typename ConfigT>
inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
  // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
  assert(!PreserveFramePointer, "Frame pointers need to be fixed");
}
260 
261 // Slow path
262 
263 inline frame ThawBase::new_entry_frame() {
264   intptr_t* sp = _cont.entrySP();
265   return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
266 }
267 
// Build the stack frame into which heap frame hf will be thawed, placed below
// 'caller' on the real stack. Also moves caller's sp down to abut the new
// frame. Returns the new (not yet filled-in) stack frame.
template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust) {
  assert(FKind::is_instance(hf), "");
  // The values in the returned frame object will be written into the callee's stack in patch.

  if (FKind::interpreted) {
    intptr_t* heap_sp = hf.unextended_sp();
    // If caller is interpreted it already made room for the callee arguments
    int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
    const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
    intptr_t* frame_sp = caller.unextended_sp() - fsize;
    // Keep the same fp-to-sp distance as in the heap frame, then fix alignment.
    intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
    if ((intptr_t)fp % frame::frame_alignment != 0) {
      // Shift the whole frame down one word to realign fp (and sp with it).
      fp--;
      frame_sp--;
      log_develop_trace(continuations)("Adding internal interpreted frame alignment");
    }
    DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
    assert(frame_sp == unextended_sp, "");
    caller.set_sp(fp + frame::sender_sp_offset);
    frame f(frame_sp, frame_sp, fp, hf.pc());
    // we need to set the locals so that the caller of new_stack_frame() can call
    // ContinuationHelper::InterpretedFrame::frame_bottom
    // copy relativized locals from the heap frame
    *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset);
    assert((intptr_t)f.fp() % frame::frame_alignment == 0, "");
    return f;
  } else {
    int fsize = FKind::size(hf);
    intptr_t* frame_sp = caller.unextended_sp() - fsize - size_adjust;
    if (bottom || caller.is_interpreted_frame()) {
      if (size_adjust == 0) {
        // Stack args must not overlap the interpreted caller (or the entry
        // frame when bottom), so make extra room below.
        int argsize = FKind::stack_argsize(hf);
        frame_sp -= argsize;
      }
      frame_sp = align(hf, frame_sp, caller, bottom);
    }
    caller.set_sp(frame_sp + fsize);
    assert(is_aligned(frame_sp, frame::frame_alignment), "");

    assert(hf.cb() != nullptr, "");
    assert(hf.oop_map() != nullptr, "");
    intptr_t* fp;
    if (PreserveFramePointer) {
      // we need to recreate a "real" frame pointer, pointing into the stack
      fp = frame_sp + fsize - frame::sender_sp_offset;
    } else {
      fp = FKind::stub || FKind::native
        ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need correct address.
        : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
    }
    return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF : this computes deopt state; is it necessary?
  }
}
321 
// On 64-bit, ensure frame_sp is 16-byte aligned by moving it down one word
// when needed. Misalignment can only come from an interpreted caller or from
// an odd-sized stack-arg area at the bottom frame (asserted below).
inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
#ifdef _LP64
  if (((intptr_t)frame_sp & 0xf) != 0) {
    assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
    frame_sp--;
  }
  assert(is_aligned(frame_sp, frame::frame_alignment), "");
#endif
  return frame_sp;
}
332 
333 inline void ThawBase::patch_pd(frame& f, const frame& caller) {
334   if (caller.is_interpreted_frame() || PreserveFramePointer) {
335     patch_callee_link(caller, caller.fp());
336   }
337 }
338 
339 inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
340   intptr_t* fp = caller_sp - frame::sender_sp_offset;
341   patch_callee_link(f, fp);
342 }
343 
344 inline intptr_t* ThawBase::push_cleanup_continuation() {
345   frame enterSpecial = new_entry_frame();
346   intptr_t* sp = enterSpecial.sp();
347 
348   // We only need to set the return pc. rfp will be restored back in gen_continuation_enter().
349   sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
350   return sp;
351 }
352 
353 inline intptr_t* ThawBase::push_preempt_adapter() {
354   frame enterSpecial = new_entry_frame();
355   intptr_t* sp = enterSpecial.sp();
356 
357   // We only need to set the return pc. rfp will be restored back in generate_cont_preempt_stub().
358   sp[-1] = (intptr_t)StubRoutines::cont_preempt_stub();
359   return sp;
360 }
361 
// Verify interpreter metadata of the thawed frame f (built from heap frame
// hf). On AArch64 the interpreter keeps last_sp, monitor_block_top and
// extended_sp relativized even on the stack, so no rewriting is required —
// this is assert-only.
inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
  // Make sure that last_sp is kept relativized.
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");

  // Make sure that monitor_block_top is still relativized.
  assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

  // Make sure that extended_sp is kept relativized.
  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;) // see comment in relativize_interpreted_frame_metadata()
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp() + extra_space, "");
}
374 
375 #endif // CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP