/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_FRAME_AARCH64_INLINE_HPP
#define CPU_AARCH64_FRAME_AARCH64_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/sharedRuntime.hpp"
#include "pauth_aarch64.hpp"

// Inline functions for AArch64 frames:

// Constructors:

inline frame::frame() {
  _pc = nullptr;
  _sp = nullptr;
  _unextended_sp = nullptr;
  _fp = nullptr;
  _cb = nullptr;
  _deopt_state = unknown;
  _sp_is_trusted = false;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

static int spin;

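// Shared initializer for the raw (sp, fp, pc) constructors. The pc must already
// be a raw, unsigned pointer (see pauth_aarch64.hpp); the code blob is looked up
// from the pc and the deoptimization state is derived in setup().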
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc);
  setup(pc);
}

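// Common tail of frame construction: adjust the unextended sp and derive the
// deoptimization state. If the frame's pc has been patched for deoptimization,
// get_deopt_original_pc() returns the original (pre-patch) pc, which is restored
// here so the frame still reports its original code location.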
inline void frame::setup(address pc) {
  adjust_unextended_sp();

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
  _sp_is_trusted = false;
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr || allow_cb_null, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _sp_is_trusted = false;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes fire
  // for Continuation.run, which is not *in* a continuation and therefore does not
  // clear the _cont_fastpath flag, but this is benign even in fast mode (see
  // Freeze::setup_jump). We might also freeze a deoptimized frame in slow mode.
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

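// Construct a frame from a bare sp: the saved fp and the (possibly signed) return
// address are read from the frame record just below sp, and the return address is
// stripped of any ROP-protection signature before use.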
inline frame::frame(intptr_t* sp)
  : frame(sp, sp,
          *(intptr_t**)(sp - frame::sender_sp_offset),
          pauth_strip_verifiable(*(address*)(sp - 1))) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which
  // could call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However, this assert is of somewhat
  // dubious value.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _sp_is_trusted = false;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return a unique id for this frame. The id must have a value such that we can
// distinguish both identity and the younger/older relationship between frames.
// null represents an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (a less recent activation) than the frame
// represented by id.
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != nullptr && id != nullptr, "null frame id");
                                                    return this->id() > id ; }

inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

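// Heap frames (frames stored in a continuation's stack chunk) keep their
// unextended sp as a relative offset rather than an absolute address; the
// assert_absolute()/assert_offset()/assert_on_heap() checks guard which
// representation a given frame is using.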
inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != nullptr) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? pointer_delta_as_int(sender_sp(), sp())
    : cb()->frame_size();
}

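// Size, in words, of the stack-passed arguments of a compiled frame: the slot
// count is scaled to bytes (a stack slot is VMRegImpl::stack_slot_size bytes)
// and then converted down to words.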
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_nmethod(), "");
  return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

inline address* frame::sender_pc_addr()         const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc_maybe_signed() const { return *sender_pc_addr(); }
inline address  frame::sender_pc()              const { return pauth_strip_pointer(sender_pc_maybe_signed()); }

inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }

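// Interpreter frame fields such as locals and last_sp are stored relativized,
// i.e. as word offsets from fp rather than absolute addresses, so that frames can
// be copied into and out of continuation stack chunks; the accessors below
// de-relativize them against the current fp.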
inline intptr_t* frame::interpreter_frame_locals() const {
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
  assert(n <= 0, "n: " INTPTR_FORMAT, n);
  return n != 0 ? &fp()[n] : nullptr;
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == nullptr) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For the top deoptimized frame, last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

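// The expression stack starts immediately below the monitor block and grows
// towards lower addresses, so its base is one word below monitor_end.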
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

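// An oop result is returned in r0. The RegisterMap tells us where r0 was spilled
// (e.g. across a safepoint), so the saved value can be read or updated in place.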
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

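// Distance, in words, from the sender sp down to the slot holding the return address.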
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

//------------------------------------------------------------------------------
// frame::sender
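// Returns the sender frame. When frames are being processed and we are not
// walking a continuation, the StackWatermarkSet is notified so that, with
// concurrent stack processing, the sender is fixed up before it is exposed to
// the caller.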
inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}

inline frame frame::sender_raw(RegisterMap* map) const {
  // By default we don't have to follow argument oops; the sender_for_xxx
  // methods will update the map accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be a native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.

  // Native code may or may not have signed the return address, and we have no
  // way to know which signing scheme was used. Just use the stripped value.

  return frame(sender_sp(), link(), sender_pc());
}

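// Note on the sender sp computation below: normally the sender sp is
// unextended_sp() plus the fixed frame size recorded in the code blob; only with
// PreserveFramePointer (and an untrusted sp) do we fall back to the fp-based
// sender_sp().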
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // We cannot rely on the last FP having been saved to the thread in C2 code,
  // but it will have been pushed onto the stack, so we have to find it
  // relative to the unextended sp.

  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* l_sender_sp = (!PreserveFramePointer || _sp_is_trusted) ? unextended_sp() + _cb->frame_size()
                                                                    : sender_sp();
  assert(!_sp_is_trusted || l_sender_sp == real_fp(), "");

  // The return address is always the word immediately below the sender sp.
  // For ROP protection, C1/C2 will have signed the sender_pc, but there is no
  // requirement to authenticate it here.
  address sender_pc = pauth_strip_verifiable((address) *(l_sender_sp - 1));

  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of FP, there is no oopmap
    // entry for it, so we must fill in its location as if there were one:
    // if our caller was compiled code, there could be live JVM state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp);
    }
  }

  intptr_t* unextended_sp = l_sender_sp;
  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save FP in a known
  // location on entry. C2-compiled code uses FP as an allocatable
  // callee-saved register. We must record where that location is so
  // that if FP was live on a callout from C2 we can find the saved copy.

  map->set_location(rfp->as_VMReg(), (address) link_addr);
  // This is odd: the "H" (high) half ought to live at a higher address, but the
  // oopMaps seem to record the "H" reg at the same address as the vanilla
  // register.
  // XXXX make this go away
  map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
}
#endif // CPU_AARCH64_FRAME_AARCH64_INLINE_HPP