1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_FRAME_AARCH64_INLINE_HPP
27 #define CPU_AARCH64_FRAME_AARCH64_INLINE_HPP
28
29 #include "code/codeBlob.inline.hpp"
30 #include "code/codeCache.inline.hpp"
31 #include "code/vmreg.inline.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "runtime/sharedRuntime.hpp"
34 #include "pauth_aarch64.hpp"
35
36 // Inline functions for AArch64 frames:
37
38 #if INCLUDE_JFR
39
40 // Static helper routines
41
42 inline address frame::interpreter_bcp(const intptr_t* fp) {
43 assert(fp != nullptr, "invariant");
44 return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
45 }
46
47 inline address frame::interpreter_return_address(const intptr_t* fp) {
48 assert(fp != nullptr, "invariant");
49 return reinterpret_cast<address>(fp[frame::return_addr_offset]);
50 }
51
52 inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
53 assert(fp != nullptr, "invariant");
54 return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
55 }
56
57 inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
58 assert(fp != nullptr, "invariant");
59 assert(sp != nullptr, "invariant");
60 return sp <= fp + frame::interpreter_frame_initial_sp_offset;
61 }
62
63 inline intptr_t* frame::sender_sp(intptr_t* fp) {
64 assert(fp != nullptr, "invariant");
65 return fp + frame::sender_sp_offset;
66 }
67
68 inline intptr_t* frame::link(const intptr_t* fp) {
69 assert(fp != nullptr, "invariant");
70 return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
71 }
72
73 inline address frame::return_address(const intptr_t* sp) {
74 assert(sp != nullptr, "invariant");
75 return reinterpret_cast<address>(sp[-1]);
76 }
77
78 inline intptr_t* frame::fp(const intptr_t* sp) {
79 assert(sp != nullptr, "invariant");
80 return reinterpret_cast<intptr_t*>(sp[-2]);
81 }
82
83 #endif // INCLUDE_JFR
84
85 // Constructors:
86
87 inline frame::frame() {
88 _pc = nullptr;
89 _sp = nullptr;
90 _unextended_sp = nullptr;
91 _fp = nullptr;
92 _cb = nullptr;
93 _deopt_state = unknown;
94 _sp_is_trusted = false;
95 _on_heap = false;
96 DEBUG_ONLY(_frame_index = -1;)
97 }
98
99 static int spin;
100
101 inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
102 assert(pauth_ptr_is_raw(pc), "cannot be signed");
103 intptr_t a = intptr_t(sp);
104 intptr_t b = intptr_t(fp);
105 _sp = sp;
106 _unextended_sp = sp;
107 _fp = fp;
108 _pc = pc;
109 _oop_map = nullptr;
110 _on_heap = false;
111 DEBUG_ONLY(_frame_index = -1;)
112
113 assert(pc != nullptr, "no pc?");
114 _cb = CodeCache::find_blob(pc);
115 setup(pc);
116 }
117
// Resolve this frame's deoptimization state from its pc.
// Must run after _pc and _cb have been set by the constructor/init().
inline void frame::setup(address pc) {
  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    // The nmethod was deoptimized: the pc on the stack points at the deopt
    // handler, so report the stashed original pc instead.
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    // Frames sitting in the deopt blob itself also count as deoptimized.
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
  _sp_is_trusted = false;
}
134
// Standard constructor for a fully known frame: delegates to init(), which
// locates the owning CodeBlob and resolves any deoptimization state.
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}
138
139 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null) {
140 assert(pauth_ptr_is_raw(pc), "cannot be signed");
141 intptr_t a = intptr_t(sp);
142 intptr_t b = intptr_t(fp);
143 _sp = sp;
144 _unextended_sp = unextended_sp;
145 _fp = fp;
146 _pc = pc;
147 assert(pc != nullptr, "no pc?");
148 _cb = cb;
149 _oop_map = nullptr;
150 assert(_cb != nullptr || allow_cb_null, "pc: " INTPTR_FORMAT, p2i(pc));
151 _on_heap = false;
152 DEBUG_ONLY(_frame_index = -1;)
153
154 setup(pc);
155 }
156
// Constructor used mainly for heap (continuation) frames where the oop map
// is supplied directly; bypasses setup() when no CodeBlob is present.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _sp_is_trusted = false;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    // setup() may overwrite _pc/_deopt_state for deoptimized nmethods.
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometime trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump)
  // We might freeze deoptimized frame in slow mode
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}
182
183 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
184 intptr_t a = intptr_t(sp);
185 intptr_t b = intptr_t(fp);
186 _sp = sp;
187 _unextended_sp = unextended_sp;
188 _fp = fp;
189 _pc = pc;
190 _cb = CodeCache::find_blob_fast(pc);
191 _oop_map = nullptr;
192 assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
193 _on_heap = false;
194 DEBUG_ONLY(_frame_index = -1;)
195
196 setup(pc);
197 }
198
// Construct a frame from just sp: the saved fp and return address are read
// from their conventional slots below sp. The return address may carry a
// PAC signature (ROP protection), so strip and verify it first.
inline frame::frame(intptr_t* sp)
  : frame(sp, sp,
          *(intptr_t**)(sp - frame::sender_sp_offset),
          pauth_strip_verifiable(*(address*)(sp - 1))) {}
203
204 inline frame::frame(intptr_t* sp, intptr_t* fp) {
205 intptr_t a = intptr_t(sp);
206 intptr_t b = intptr_t(fp);
207 _sp = sp;
208 _unextended_sp = sp;
209 _fp = fp;
210 _pc = (address)(sp[-1]);
211 _on_heap = false;
212 DEBUG_ONLY(_frame_index = -1;)
213
214 // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
215 // when last_Java_sp is non-null but the pc fetched is junk.
216 // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
217 // -> pd_last_frame should use a specialized version of pd_last_frame which could
218 // call a specilaized frame constructor instead of this one.
219 // Then we could use the assert below. However this assert is of somewhat dubious
220 // value.
221 // assert(_pc != nullptr, "no pc?");
222
223 _cb = CodeCache::find_blob(_pc);
224
225 address original_pc = get_deopt_original_pc();
226 if (original_pc != nullptr) {
227 _pc = original_pc;
228 _deopt_state = is_deoptimized;
229 } else {
230 _deopt_state = not_deoptimized;
231 }
232 _sp_is_trusted = false;
233 }
234
235 // Accessors
236
237 inline bool frame::equal(frame other) const {
238 bool ret = sp() == other.sp()
239 && unextended_sp() == other.unextended_sp()
240 && fp() == other.fp()
241 && pc() == other.pc();
242 assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
243 return ret;
244 }
245
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
// The unextended sp serves as the id: stacks grow down, so a larger value
// means an older activation.
inline intptr_t* frame::id(void) const { return unextended_sp(); }
250
251 // Return true if the frame is older (less recent activation) than the frame represented by id
252 inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
253 return this->id() > id ; }
254
255 inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
256
257 inline intptr_t* frame::link_or_null() const {
258 intptr_t** ptr = (intptr_t **)addr_at(link_offset);
259 return os::is_readable_pointer(ptr) ? *ptr : nullptr;
260 }
261
// Absolute unextended sp; only valid for non-heap (stack) frames.
inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value) { _unextended_sp = value; }
// Relative (offset) form of the unextended sp, used by heap/chunk frames.
inline int frame::offset_unextended_sp() const { assert_offset(); return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }
266
267 inline intptr_t* frame::real_fp() const {
268 if (_cb != nullptr) {
269 // use the frame size if valid
270 int size = _cb->frame_size();
271 if (size > 0) {
272 return unextended_sp() + size;
273 }
274 }
275 // else rely on fp()
276 assert(! is_compiled_frame(), "unknown compiled frame size");
277 return fp();
278 }
279
280 inline int frame::frame_size() const {
281 return is_interpreted_frame()
282 ? pointer_delta_as_int(sender_sp(), sp())
283 : cb()->frame_size();
284 }
285
286 inline int frame::compiled_frame_stack_argsize() const {
287 assert(cb()->is_nmethod(), "");
288 return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
289 }
290
291 inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
292 assert(mask != nullptr, "");
293 Method* m = interpreter_frame_method();
294 int bci = interpreter_frame_bci();
295 m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
296 }
297
// Return address:

// Location of the sender pc within this frame.
inline address* frame::sender_pc_addr() const { return (address*) addr_at( return_addr_offset); }
// Raw sender pc; may still carry a PAC signature.
inline address frame::sender_pc_maybe_signed() const { return *sender_pc_addr(); }
// Sender pc with any PAC signature stripped.
inline address frame::sender_pc() const { return pauth_strip_pointer(sender_pc_maybe_signed()); }

// Sp of the caller, a fixed offset above this frame's fp.
inline intptr_t* frame::sender_sp() const { return addr_at( sender_sp_offset); }
305
306 inline intptr_t* frame::interpreter_frame_locals() const {
307 intptr_t n = *addr_at(interpreter_frame_locals_offset);
308 return &fp()[n]; // return relativized locals
309 }
310
311 inline intptr_t* frame::interpreter_frame_last_sp() const {
312 intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
313 assert(n <= 0, "n: " INTPTR_FORMAT, n);
314 return n != 0 ? &fp()[n] : nullptr;
315 }
316
// Address of the bytecode-pointer slot in this interpreter frame.
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

// Address of the method-data-pointer slot in this interpreter frame.
inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}
324
325
// Constant pool cache

// Address of the constant-pool-cache slot in this interpreter frame.
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}
331
// Method

// Address of the Method* slot in this interpreter frame.
inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}
337
// Mirror

// Address of the class-mirror oop slot in this interpreter frame.
inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}
343
344 // top of expression stack
345 inline intptr_t* frame::interpreter_frame_tos_address() const {
346 intptr_t* last_sp = interpreter_frame_last_sp();
347 if (last_sp == nullptr) {
348 return sp();
349 } else {
350 // sp() may have been extended or shrunk by an adapter. At least
351 // check that we don't fall behind the legal region.
352 // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
353 assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
354 return last_sp;
355 }
356 }
357
// Address of the scratch oop slot in this interpreter frame.
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

// Size in words of one monitor (BasicObjectLock) in an interpreter frame.
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}
365
366
367 // expression stack
368 // (the max_stack arguments are used by the GC; see class FrameClosure)
369
370 inline intptr_t* frame::interpreter_frame_expression_stack() const {
371 intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
372 return monitor_end-1;
373 }
374
375
// Entry frames

// Address of the JavaCallWrapper slot in this entry frame.
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
381
382
383 // Compiled frames
384
385 inline oop frame::saved_oop_result(RegisterMap* map) const {
386 oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
387 guarantee(result_adr != nullptr, "bad register save location");
388 return *result_adr;
389 }
390
391 inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
392 oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
393 guarantee(result_adr != nullptr, "bad register save location");
394
395 *result_adr = obj;
396 }
397
// A frame is interpreted iff its pc lies within the interpreter's code.
inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}
401
// Distance in words from the sender sp down to the return address slot.
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}
405
//------------------------------------------------------------------------------
// frame::sender
// Public entry point for walking to the caller frame. Wraps sender_raw()
// and, for frame-processing walks outside a continuation, notifies the
// stack watermark machinery about the newly reached frame.
inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}
417
// Compute the caller frame, dispatching on this frame's kind.
// (Comment typo fixed below: "we done have" -> "we don't have".)
inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame()) return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  // _cb was resolved at construction; verify it still matches the pc.
  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.

  // Native code may or may not have signed the return address, we have no way to be sure or what
  // signing methods they used. Instead, just ensure the stripped value is used.

  return frame(sender_sp(), link(), sender_pc());
}
442
// Compute the caller of a compiled frame, updating 'map' with register save
// locations so the GC can find oops in the sender.
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  // we cannot rely upon the last fp having been saved to the thread
  // in C2 code but it will have been pushed onto the stack. so we
  // have to find it relative to the unextended sp

  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  // Use the blob's fixed frame size unless PreserveFramePointer forces a
  // walk via the fp chain (and sp is not known-good).
  intptr_t* l_sender_sp = (!PreserveFramePointer || _sp_is_trusted) ? unextended_sp() + _cb->frame_size()
                                                                    : sender_sp();
  assert(!_sp_is_trusted || l_sender_sp == real_fp(), "");

  // The return_address is always the word on the stack.
  // For ROP protection, C1/C2 will have signed the sender_pc,
  // but there is no requirement to authenticate it here.
  address sender_pc = pauth_strip_verifiable((address) *(l_sender_sp - 1));

  // Location of the sender's saved frame pointer.
  intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of FP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  // A return barrier means the sender frames live in a continuation stack
  // chunk rather than directly below us on the thread stack.
  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, l_sender_sp);
    }
  }

  intptr_t* unextended_sp = l_sender_sp;
  return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
492
493 template <typename RegisterMapT>
494 void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
495 // The interpreter and compiler(s) always save FP in a known
496 // location on entry. C2-compiled code uses FP as an allocatable
497 // callee-saved register. We must record where that location is so
498 // that if FP was live on callout from c2 we can find the saved copy.
499
500 map->set_location(rfp->as_VMReg(), (address) link_addr);
501 // this is weird "H" ought to be at a higher address however the
502 // oopMaps seems to have the "H" regs at the same address and the
503 // vanilla register.
504 // XXXX make this go away
505 if (true) {
506 map->set_location(rfp->as_VMReg()->next(), (address) link_addr);
507 }
508 }
509 #endif // CPU_AARCH64_FRAME_AARCH64_INLINE_HPP