/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_FRAME_AARCH64_INLINE_HPP
#define CPU_AARCH64_FRAME_AARCH64_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/sharedRuntime.hpp"
#include "pauth_aarch64.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Inline functions for AArch64 frames:

#if INCLUDE_JFR

// Static helper routines
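// These helpers are only built when JFR is included. They let JFR's stack
// sampling code peek at raw interpreter frame slots given nothing but an
// fp/sp, without constructing a full frame object first.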

inline address frame::interpreter_bcp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}

inline address frame::interpreter_return_address(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}

inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}

inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
  assert(fp != nullptr, "invariant");
  assert(sp != nullptr, "invariant");
  return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}

inline intptr_t* frame::sender_sp(intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return fp + frame::sender_sp_offset;
}

inline intptr_t* frame::link(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}

inline address frame::return_address(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<address>(sp[-1]);
}

inline intptr_t* frame::fp(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(sp[-2]);
}

#endif // INCLUDE_JFR

// Constructors:

inline frame::frame() {
  _pc = nullptr;
  _sp = nullptr;
  _unextended_sp = nullptr;
  _fp = nullptr;
  _cb = nullptr;
  _deopt_state = unknown;
  _sp_is_trusted = false;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

static int spin;

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc);
  setup(pc);
}

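// setup() determines whether this frame has been deoptimized: if the pc has
// been patched to the deoptimization handler, get_deopt_original_pc() returns
// the original pc, which is restored into _pc while the frame is marked
// is_deoptimized.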
inline void frame::setup(address pc) {
  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
  _sp_is_trusted = false;
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr || allow_cb_null, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _sp_is_trusted = false;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes fire for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump)
  // We might freeze deoptimized frame in slow mode
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

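// Build a frame from a stack pointer alone, using the standard AArch64 frame
// layout: the return pc is the word at sp[-1] and the saved fp sits
// sender_sp_offset words below sp. The return pc may carry a ROP-protection
// (PAC) signature, so pauth_strip_verifiable() strips it before use.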
inline frame::frame(intptr_t* sp)
  : frame(sp, sp,
          *(intptr_t**)(sp - frame::sender_sp_offset),
          pauth_strip_verifiable(*(address*)(sp - 1))) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However, this assert is of somewhat dubious
  // value.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _sp_is_trusted = false;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != nullptr && id != nullptr, "null frame id");
                                                    return this->id() > id ; }

inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

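// Frames that live in a continuation's heap-allocated stack chunk store
// _offset_unextended_sp as an offset rather than an absolute pointer, so the
// chunk can be moved by the GC; the assert_absolute()/assert_offset()/
// assert_on_heap() checks guard against mixing the two representations.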
inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != nullptr) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? pointer_delta_as_int(sender_sp(), sp())
    : cb()->frame_size();
}

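// Size, in words, of this compiled frame's stack argument area:
// num_stack_arg_slots() counts VMReg stack slots, so convert
// slots -> bytes (via stack_slot_size) and then bytes -> words.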
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_nmethod(), "");
  return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

inline address* frame::sender_pc_addr()         const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc_maybe_signed() const { return *sender_pc_addr(); }
inline address  frame::sender_pc()              const { return pauth_strip_pointer(sender_pc_maybe_signed()); }

inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }

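// Interpreter frame slots that may move with the frame (such as locals and
// last_sp) are stored relativized, i.e. as word offsets from fp, so that a
// frame copied into or out of a continuation stack chunk stays valid; the
// accessors below turn those offsets back into absolute addresses.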
inline intptr_t* frame::interpreter_frame_locals() const {
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
  assert(n <= 0, "n: " INTPTR_FORMAT, n);
  return n != 0 ? &fp()[n] : nullptr;
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == nullptr) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
 return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

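// r0 is the register in which compiled code returns an oop result on AArch64.
// These accessors use the RegisterMap to find where a runtime stub saved r0 on
// the stack, so the result oop can be read and, if the GC moved the object,
// written back.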
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

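// Distance, in words, between the sender's sp and the slot that holds the
// return address (sender_sp_offset and return_addr_offset are both word
// offsets from the saved fp).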
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

//------------------------------------------------------------------------------
// frame::sender
inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}
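
// A minimal usage sketch (illustration only, not part of this file; it assumes
// the enum-based RegisterMap constructor and a JavaThread* 'thread' with a
// walkable last Java frame). Callers typically walk a stack by repeatedly
// asking each frame for its sender:
//
//   RegisterMap map(thread, RegisterMap::UpdateMap::skip,
//                   RegisterMap::ProcessFrames::include,
//                   RegisterMap::WalkContinuation::skip);
//   for (frame fr = thread->last_frame(); !fr.is_first_frame(); fr = fr.sender(&map)) {
//     // inspect fr here
//   }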

inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.

  // Native code may or may not have signed the return address; we have no way to
  // be sure which signing method, if any, was used. Instead, just ensure the
  // stripped value is used.

  return frame(sender_sp(), link(), sender_pc());
}

inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // We cannot rely on the last fp having been saved to the thread in C2 code,
  // but it will have been pushed onto the stack, so we have to find it relative
  // to the unextended sp.

  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* l_sender_sp = (!PreserveFramePointer || _sp_is_trusted) ? unextended_sp() + _cb->frame_size()
                                                                    : sender_sp();
#ifdef ASSERT
  address sender_pc_copy = pauth_strip_verifiable((address) *(l_sender_sp-1));
#endif

  assert(!_sp_is_trusted || l_sender_sp == real_fp(), "");

  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  l_sender_sp = repair_sender_sp(l_sender_sp, saved_fp_addr);

  // The return address is always the word just below the sender sp on the stack.
  // For ROP protection, C1/C2 will have signed the sender_pc,
  // but there is no requirement to authenticate it here.
  address sender_pc = pauth_strip_verifiable((address) *(l_sender_sp - 1));

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != nullptr && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // TODO 8284443 Can't we do that by not passing 'dont_gc_arguments' in case 'StubId::c1_buffer_inline_args_id' in 'Runtime1::generate_code_for'?
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
    }
#endif
    if (!_cb->is_nmethod() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of FP, there is no oopmap
    // entry for it, so we must fill in its location as if there were one:
    // if our caller was compiled code, there could be live JVM state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp);
    }
  }

  intptr_t* unextended_sp = l_sender_sp;
  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save FP in a known
  // location on entry. C2-compiled code uses FP as an allocatable
  // callee-saved register. We must record where that location is so
  // that if FP was live on callout from c2 we can find the saved copy.

  map->set_location(rfp->as_VMReg(), (address) link_addr);
  // This is weird: "H" ought to be at a higher address, however the oopMaps
  // seem to have the "H" regs at the same address as the vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
  }
}
#endif // CPU_AARCH64_FRAME_AARCH64_INLINE_HPP