/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_FRAME_AARCH64_INLINE_HPP
#define CPU_AARCH64_FRAME_AARCH64_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/sharedRuntime.hpp"
#include "pauth_aarch64.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Inline functions for AArch64 frames:

// Constructors:

inline frame::frame() {
  _pc = nullptr;
  _sp = nullptr;
  _unextended_sp = nullptr;
  _fp = nullptr;
  _cb = nullptr;
  _deopt_state = unknown;
  _sp_is_trusted = false;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

static int spin;

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc);
  setup(pc);
}

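// Shared tail of the constructors: adjust the unextended sp, then derive the
// deopt state. If get_deopt_original_pc() finds that the return address was
// patched for deoptimization, the original pc is restored and the frame is
// marked is_deoptimized; a frame executing in the deopt blob itself is
// likewise treated as deoptimized.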
inline void frame::setup(address pc) {
  adjust_unextended_sp();

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
  _sp_is_trusted = false;
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr || allow_cb_null, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _sp_is_trusted = false;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes fire for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump).
  // We might also freeze a deoptimized frame in slow mode.
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

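// Construct a frame given only the sender sp: the saved fp and the (possibly
// PAC-signed) return address are read from the words just below sp, and the
// return address is stripped with pauth_strip_verifiable().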
inline frame::frame(intptr_t* sp)
  : frame(sp, sp,
          *(intptr_t**)(sp - frame::sender_sp_offset),
          pauth_strip_verifiable(*(address*)(sp - 1))) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However, this assert is of somewhat dubious
  // value.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _sp_is_trusted = false;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return a unique id for this frame. The id must be a value from which we can
// distinguish both identity and the younger/older relationship of frames. null
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != nullptr && id != nullptr, "null frame id");
                                                    return this->id() > id ; }

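// link() returns the sender's saved fp, read from this frame's link slot
// (addr_at() is fp-relative); link_or_null() additionally checks that the slot
// is readable before dereferencing it.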
inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != nullptr) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? pointer_delta_as_int(sender_sp(), sp())
    : cb()->frame_size();
}

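// Size, in words, of the stack-passed arguments of this compiled frame:
// num_stack_arg_slots() counts VMReg stack slots of stack_slot_size bytes each,
// and the shift by LogBytesPerWord converts bytes back to words.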
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_nmethod(), "");
  return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

inline address* frame::sender_pc_addr()         const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc_maybe_signed() const { return *sender_pc_addr(); }
inline address  frame::sender_pc()              const { return pauth_strip_pointer(sender_pc_maybe_signed()); }

inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }

inline intptr_t* frame::interpreter_frame_locals() const {
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}

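// last_sp is stored in the interpreter frame as a non-positive word offset
// relative to fp; a value of 0 means it has not been set.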
inline intptr_t* frame::interpreter_frame_last_sp() const {
  intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
  assert(n <= 0, "n: " INTPTR_FORMAT, n);
  return n != 0 ? &fp()[n] : nullptr;
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == nullptr) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

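// Word distance from the sender sp down to the slot holding the (possibly
// signed) return address; with the usual AArch64 layout (sender_sp_offset == 2,
// return_addr_offset == 1) this works out to one word.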
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

//------------------------------------------------------------------------------
// frame::sender
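// Public variant of sender_raw(): additionally notifies the StackWatermarkSet
// about the returned frame when this walk processes frames and we are not
// inside a continuation.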
inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}

inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.

  // Native code may or may not have signed the return address; we have no way to be sure,
  // nor do we know which signing method was used. Instead, just ensure the stripped value is used.

  return frame(sender_sp(), link(), sender_pc());
}

inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // We cannot rely upon the last fp having been saved to the thread
  // in C2 code, but it will have been pushed onto the stack, so we
  // have to find it relative to the unextended sp.

  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* l_sender_sp = (!PreserveFramePointer || _sp_is_trusted) ? unextended_sp() + _cb->frame_size()
                                                                    : sender_sp();
#ifdef ASSERT
  address sender_pc_copy = pauth_strip_verifiable((address) *(l_sender_sp-1));
#endif

  assert(!_sp_is_trusted || l_sender_sp == real_fp(), "");

  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  l_sender_sp = repair_sender_sp(l_sender_sp, saved_fp_addr);

  // The return address is always the word on the stack just below the sender sp.
  // For ROP protection, C1/C2 will have signed the sender_pc,
  // but there is no requirement to authenticate it here.
  address sender_pc = pauth_strip_verifiable((address) *(l_sender_sp - 1));

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != nullptr && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
    }
#endif
    if (!_cb->is_nmethod() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of FP, there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry,
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp);
    }
  }

  intptr_t* unextended_sp = l_sender_sp;
  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save FP in a known
  // location on entry. C2-compiled code uses FP as an allocatable
  // callee-saved register. We must record where that location is so
  // that if FP was live on a callout from C2 we can find the saved copy.

  map->set_location(rfp->as_VMReg(), (address) link_addr);
  // This is weird: "H" ought to be at a higher address, however the
  // oopMaps seem to have the "H" regs at the same address as the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
  }
}
#endif // CPU_AARCH64_FRAME_AARCH64_INLINE_HPP