/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_FRAME_AARCH64_INLINE_HPP
#define CPU_AARCH64_FRAME_AARCH64_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "pauth_aarch64.hpp"

// Inline functions for AArch64 frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
  _sp_is_trusted = false;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

static int spin;

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = NULL;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  setup(pc);
}

inline void frame::setup(address pc) {
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
  _sp_is_trusted = false;
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  assert(pauth_ptr_is_raw(pc), "cannot be signed");
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = cb;
  _oop_map = NULL;
  assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _sp_is_trusted = false;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != NULL) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump).
  // We might freeze a deoptimized frame in slow mode.
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = NULL;
  assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

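// Construct a frame from its stack pointer alone: the frame's resume pc and
// saved fp are read from the frame record its callee pushed immediately below
// sp (at sp - 1 and sp - frame::sender_sp_offset respectively).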
inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  intptr_t a = intptr_t(sp);
  intptr_t b = intptr_t(fp);
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _sp_is_trusted = false;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }

inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : NULL;
}

inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? sender_sp() - sp()
    : cb()->frame_size();
}

inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_compiled(), "");
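  // num_stack_arg_slots() counts VMReg stack slots; multiplying by
  // VMRegImpl::stack_slot_size yields bytes, and the shift by LogBytesPerWord
  // converts that byte count into machine words.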
  return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != NULL, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

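// The frame record at [fp, fp + 1] holds the saved fp (link) and the return
// address; sender_sp_offset (2 words) is the distance from fp back to the
// caller's sp. The stored return pc may carry a PAC signature when ROP
// protection is enabled, so sender_pc() strips it.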
inline address* frame::sender_pc_addr()         const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc_maybe_signed() const { return *sender_pc_addr(); }
inline address  frame::sender_pc()              const { return pauth_strip_pointer(sender_pc_maybe_signed()); }

inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  return (intptr_t*)at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

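// The expression stack starts one word below the innermost monitor
// (interpreter_frame_monitor_end()) and grows toward lower addresses.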
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
 return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

inline const ImmutableOopMap* frame::get_oop_map() const {
  if (_cb == NULL) return NULL;
  if (_cb->oop_maps() != NULL) {
    NativePostCallNop* nop = nativePostCallNop_at(_pc);
    if (nop != NULL && nop->displacement() != 0) {
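      // The post-call nop encodes the oop map slot in the high byte of its
      // displacement, letting us fetch the map directly instead of searching
      // the blob's OopMapSet by pc.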
      int slot = ((nop->displacement() >> 24) & 0xff);
      return _cb->oop_map_for_slot(slot, _pc);
    }
    const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
    return oop_map;
  }
  return NULL;
}

//------------------------------------------------------------------------------
// frame::sender
inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}

inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != NULL) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.

  // Native code may or may not have signed the return address; we have no way to be sure
  // which signing method was used. Instead, just ensure the stripped value is used.

  return frame(sender_sp(), link(), sender_pc());
}

inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // We cannot rely upon the last fp having been saved to the thread
  // in C2 code, but it will have been pushed onto the stack, so we
  // have to find it relative to the unextended sp.

  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* l_sender_sp = (!PreserveFramePointer || _sp_is_trusted) ? unextended_sp() + _cb->frame_size()
                                                                    : sender_sp();
  assert(!_sp_is_trusted || l_sender_sp == real_fp(), "");

  // The return address is always the word just below the sender sp.
  // For ROP protection, C1/C2 will have signed the sender_pc, but there is no requirement to authenticate it here.
  address sender_pc = pauth_strip_verifiable((address) *(l_sender_sp-1), (address) *(l_sender_sp-2));

  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (oop_map() != NULL) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of FP, there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry:
    // if our caller was compiled code, there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp);
    }
  }

  intptr_t* unextended_sp = l_sender_sp;
  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save FP in a known
  // location on entry. C2-compiled code uses FP as an allocatable
  // callee-saved register. We must record where that location is so
  // that if FP was live on callout from c2 we can find the saved copy.

  map->set_location(rfp->as_VMReg(), (address) link_addr);
  // This is weird: "H" ought to be at a higher address, however the
  // oopMaps seem to have the "H" regs at the same address as the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
  }
}
#endif // CPU_AARCH64_FRAME_AARCH64_INLINE_HPP