/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_FRAME_AARCH64_INLINE_HPP
#define CPU_AARCH64_FRAME_AARCH64_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "pauth_aarch64.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Inline functions for AArch64 frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
  _sp_is_trusted = false;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

static int spin;

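// Shared initialization used by the (sp, fp, pc) constructor: record the
// frame pointers, look up the owning code blob for pc in the code cache,
// and let setup() determine the deoptimization state.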
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = NULL;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  setup(pc);
}

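// Finish construction: if the pc has been patched with a deoptimization
// trampoline, restore the original pc recorded by the compiled method and
// mark the frame as deoptimized; otherwise the frame is deoptimized only if
// it is sitting in the shared deopt blob.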
inline void frame::setup(address pc) {
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
  _sp_is_trusted = false;
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = cb;
  _oop_map = NULL;
  assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _sp_is_trusted = false;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != NULL) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes fail for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump).
  // We might also freeze a deoptimized frame in slow mode.
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = NULL;
  assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

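// Construct a frame from a bare stack pointer, assuming the standard AArch64
// frame linkage: the saved fp lives at sp - sender_sp_offset and the return
// address is the word at sp - 1.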
inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the fetched pc is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However, this assert is of somewhat dubious
  // value.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _sp_is_trusted = false;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }

inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

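// Like link(), but returns NULL instead of faulting when the saved-fp slot is
// not a readable address (useful when the frame being walked may be bogus).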
inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : NULL;
}

inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }

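// The "real" frame pointer: for frames whose code blob reports a valid frame
// size it is computed from the unextended sp; otherwise we fall back on the
// saved fp register.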
inline intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

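// Frame size in words: interpreted frames are variable-sized, so measure the
// distance between this frame's sp and the sender's sp; compiled frames have
// a fixed size recorded in the code blob.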
inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? sender_sp() - sp()
    : cb()->frame_size();
}

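// Size of the stack-argument area of the frame's compiled method: the number
// of stack argument slots times the slot size gives bytes, which the shift
// then converts to words.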
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_compiled(), "");
  return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != NULL, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

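// With ROP protection (pointer authentication) the return address stored on
// the stack may carry a PAC signature; sender_pc() always returns the
// stripped (raw) address, while sender_pc_maybe_signed() returns the word as
// stored.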
inline address* frame::sender_pc_addr()         const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc_maybe_signed() const { return *sender_pc_addr(); }
inline address  frame::sender_pc()              const { return pauth_strip_pointer(sender_pc_maybe_signed()); }

inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  return (intptr_t*)at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

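// Read or update the oop result register (r0) of a callee, using the save
// location recorded in the RegisterMap during stack walking.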
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

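// Distance, in words, from the return address slot to the sender sp.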
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

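// Find the oop map for this frame's pc. If the post-call nop after the call
// site encodes an oop map slot in its displacement, use it to look the map up
// directly; otherwise fall back to searching the code blob's oop map set.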
inline const ImmutableOopMap* frame::get_oop_map() const {
  if (_cb == NULL) return NULL;
  if (_cb->oop_maps() != NULL) {
    NativePostCallNop* nop = nativePostCallNop_at(_pc);
    if (nop != NULL && nop->displacement() != 0) {
      int slot = ((nop->displacement() >> 24) & 0xff);
      return _cb->oop_map_for_slot(slot, _pc);
    }
    const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
    return oop_map;
  }
  return NULL;
}

//------------------------------------------------------------------------------
// frame::sender
inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}

inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is that we don't have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != NULL) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.

  // Native code may or may not have signed the return address; we have no way to be sure
  // which signing method, if any, was used. Instead, just ensure the stripped value is used.

  return frame(sender_sp(), link(), sender_pc());
}

410 
411 inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
412   // we cannot rely upon the last fp having been saved to the thread
413   // in C2 code but it will have been pushed onto the stack. so we
414   // have to find it relative to the unextended sp
415 
416   assert(_cb->frame_size() > 0, "must have non-zero frame size");
417   intptr_t* l_sender_sp = (!PreserveFramePointer || _sp_is_trusted) ? unextended_sp() + _cb->frame_size()
418                                                                     : sender_sp();
419 #ifdef ASSERT
420    address sender_pc_copy = pauth_strip_verifiable((address) *(l_sender_sp-1), (address) *(l_sender_sp-2));
421 #endif
422 
423   assert(!_sp_is_trusted || l_sender_sp == real_fp(), "");
424 
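  // Location of the sender's saved fp, relative to the sender sp.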
  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  l_sender_sp = repair_sender_sp(l_sender_sp, saved_fp_addr);

  // The return address is always the word just below the sender sp.
  // For ROP protection, C1/C2 will have signed the sender_pc, but there is no requirement to authenticate it here.
  address sender_pc = pauth_strip_verifiable((address) *(l_sender_sp-1), (address) *(l_sender_sp-2));

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != NULL && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
    }
#endif
    if (!_cb->is_compiled() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != NULL) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of FP, there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry:
    // if our caller was compiled code, there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

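  // A return-barrier sender pc means the actual sender belongs to a continuation:
  // either walk into its stack chunk (walk_cont) or ask the continuation for the
  // frame below its entry.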
  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp);
    }
  }

  intptr_t* unextended_sp = l_sender_sp;
  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save FP in a known
  // location on entry. C2-compiled code uses FP as an allocatable
  // callee-saved register. We must record where that location is so
  // that if FP was live on callout from c2 we can find the saved copy.

  map->set_location(rfp->as_VMReg(), (address) link_addr);
  // This is weird: the "H" half ought to be at a higher address, however the
  // oopMaps seem to have the "H" regs at the same address as the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
  }
}
#endif // CPU_AARCH64_FRAME_AARCH64_INLINE_HPP