1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_FRAME_X86_INLINE_HPP
26 #define CPU_X86_FRAME_X86_INLINE_HPP
27
28 #include "code/codeBlob.inline.hpp"
29 #include "code/codeCache.inline.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.inline.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "runtime/sharedRuntime.hpp"
34 #include "runtime/registerMap.hpp"
35 #ifdef COMPILER1
36 #include "c1/c1_Runtime1.hpp"
37 #endif
38
39 // Inline functions for Intel frames:
40
41 #if INCLUDE_JFR
42
43 // Static helper routines
44
// These JFR-only static helpers decode a frame directly from raw fp/sp values
// without constructing a frame object (the surrounding #if INCLUDE_JFR guard
// compiles them only when JFR support is built in).

// Byte code pointer stored in the interpreter frame anchored at fp.
inline address frame::interpreter_bcp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}

// Return address slot of the interpreter frame anchored at fp.
inline address frame::interpreter_return_address(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}

// Caller's sp as saved in the interpreter frame anchored at fp.
inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}

// True once sp has moved at or below fp + interpreter_frame_initial_sp_offset,
// i.e. the interpreter has pushed enough of its fixed frame that the slots
// between sp and fp can be trusted.
inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
  assert(fp != nullptr, "invariant");
  assert(sp != nullptr, "invariant");
  return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}

// Sender's sp for a frame with the standard saved-fp/return-address layout.
inline intptr_t* frame::sender_sp(intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return fp + frame::sender_sp_offset;
}

// Saved caller fp (the link) stored in the frame anchored at fp.
inline intptr_t* frame::link(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}

// On x86 the return address is the word immediately below sp.
inline address frame::return_address(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<address>(sp[-1]);
}

// The saved fp is the second word below sp (just below the return address).
inline intptr_t* frame::fp(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(sp[-2]);
}
85
86 #endif // INCLUDE_JFR
87
88 // Constructors:
89
// Default constructor: produces an invalid/empty frame with every field
// cleared and the deopt state explicitly unknown.
inline frame::frame() {
  _pc = nullptr;
  _sp = nullptr;
  _unextended_sp = nullptr;
  _fp = nullptr;
  _cb = nullptr;
  _deopt_state = unknown;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}
101
// (Re)initialize this frame from sp/fp/pc. Looks up the code blob covering pc
// and then runs setup() to detect and repair deoptimized frames.
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
  setup(pc);
}
115
116 inline void frame::setup(address pc) {
117 adjust_unextended_sp();
118
119 address original_pc = get_deopt_original_pc();
120 if (original_pc != nullptr) {
121 _pc = original_pc;
122 _deopt_state = is_deoptimized;
123 assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
124 "original PC must be in the main code section of the compiled method (or must be immediately following it)");
125 } else {
126 if (_cb == SharedRuntime::deopt_blob()) {
127 _deopt_state = is_deoptimized;
128 } else {
129 _deopt_state = not_deoptimized;
130 }
131 }
132 }
133
// Construct a frame from sp/fp/pc; delegates to init(), which also performs
// the code blob lookup for pc.
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}
137
// Construct a frame whose code blob is already known (cb must be non-null),
// avoiding a code cache lookup.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}
152
// Construct a frame with an explicit oop map and on-heap flag. Used for
// frames living in (or being thawed from) continuation stack chunks.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    // setup() may overwrite _pc/_deopt_state for deoptimized frames.
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometime trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump)
  // We might freeze deoptimized frame in slow mode
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}
178
// Construct a frame from sp/unextended_sp/fp/pc, resolving the code blob via
// the fast lookup; pc must be covered by a blob (asserted below).
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}
193
// Construct a frame from sp alone: the saved fp (at sp - sender_sp_offset)
// and the return address (at sp - 1) are read from the stack itself.
inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}
195
// Construct a frame from sp/fp, reading the pc from the stack (sp[-1]).
// Unlike the pc-taking constructors this tolerates a junk pc (no assert).
inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  // Recover the original pc if deoptimization has patched this frame.
  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = nullptr;
}
226
227 // Accessors
228
229 inline bool frame::equal(frame other) const {
230 bool ret = sp() == other.sp()
231 && unextended_sp() == other.unextended_sp()
232 && fp() == other.fp()
233 && pc() == other.pc();
234 assert(!ret || (ret && cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
235 return ret;
236 }
237
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id.
// The stack grows downward on x86, so an older activation has a higher address.
inline bool frame::is_older(intptr_t* id) const { assert(this->id() != nullptr && id != nullptr, "null frame id");
                                                  return this->id() > id ; }
246
// Saved caller fp (the link) of this frame.
inline intptr_t* frame::link() const { return *(intptr_t **)addr_at(link_offset); }

// As link(), but returns nullptr instead of faulting when the link slot's
// memory is not readable (useful when probing possibly-bogus frames).
inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}
253
// _unextended_sp is held either as an absolute pointer (frame on a thread
// stack) or as a relative offset (frame in a heap stack chunk); the
// assert_absolute/assert_offset/assert_on_heap checks guard which
// representation this frame uses.
inline intptr_t* frame::unextended_sp() const { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value) { _unextended_sp = value; }
inline int frame::offset_unextended_sp() const { assert_offset(); return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }
258
// "Real" frame pointer: top of this frame's fixed area, computed from the
// code blob's frame size when available; otherwise fall back to the saved fp
// (which is only guaranteed meaningful for non-compiled frames — asserted).
inline intptr_t* frame::real_fp() const {
  if (_cb != nullptr) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}
271
272 inline int frame::frame_size() const {
273 return is_interpreted_frame()
274 ? pointer_delta_as_int(sender_sp(), sp())
275 : cb()->frame_size();
276 }
277
// Number of words of stack-passed arguments for this compiled (nmethod)
// frame, converted from VMReg stack slots to words.
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_nmethod(), "");
  return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}
282
// Fill in mask with the oop map of this interpreted frame's method at its
// current bci (which slots hold oops at that point).
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}
289
// Return address:

// Location of the return address slot in this frame (holds the sender's pc).
inline address* frame::sender_pc_addr() const { return (address*) addr_at(return_addr_offset); }
inline address frame::sender_pc() const { return *sender_pc_addr(); }

// Sender's sp: the word just above this frame's saved fp / return address pair.
inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); }
296
// Locals array of this interpreted frame. The slot stores an fp-relative
// offset, so rebase it against fp() to produce the absolute address.
inline intptr_t* frame::interpreter_frame_locals() const {
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}
301
// last_sp of this interpreted frame, stored as a non-positive fp-relative
// offset; zero means "not set" and is reported as nullptr.
inline intptr_t* frame::interpreter_frame_last_sp() const {
  intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
  assert(n <= 0, "n: " INTPTR_FORMAT, n);
  return n != 0 ? &fp()[n] : nullptr;
}
307
// Address of the byte code pointer slot in this interpreted frame.
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

// Address of the method data pointer (mdp) slot in this interpreted frame.
inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}
315
316
317
// Constant pool cache

// Address of the ConstantPoolCache* slot in this interpreted frame.
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

// Address of the Method* slot in this interpreted frame.
inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

// Address of the mirror oop slot in this interpreted frame.
inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}
335
// top of expression stack
// Returns last_sp when it is set; otherwise the expression stack top is sp().
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == nullptr) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}
349
// Address of the interpreter's temporary oop slot in this frame.
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

// Size of one monitor entry, as reported by BasicObjectLock::size().
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}
357
358
359 // expression stack
360 // (the max_stack arguments are used by the GC; see class FrameClosure)
361
// Base of the expression stack: the word immediately below the monitor area
// (the expression stack grows downward from there).
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}
366
// Entry frames

// Address of the JavaCallWrapper* slot stored in an entry frame.
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
372
// Compiled frames

// Read the saved oop result from rax's save location recorded in the register map.
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}

// Overwrite the saved oop result at rax's save location recorded in the register map.
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}
387
// A frame is interpreted iff its pc lies within the interpreter's code range.
inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}
391
// Distance in words from the return address slot to the sender's sp.
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}
395
396 //------------------------------------------------------------------------------
397 // frame::sender
398
399 inline frame frame::sender(RegisterMap* map) const {
400 frame result = sender_raw(map);
401
402 if (map->process_frames() && !map->in_cont()) {
403 StackWatermarkSet::on_iteration(map->thread(), result);
404 }
405
406 return result;
407 }
408
// Compute the sender frame without any stack watermark notification,
// dispatching on the kind of the current frame.
inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame()) return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
429
// Compute the sender of a compiled frame: derive the sender's sp from the
// code blob's frame size, read the saved fp and return address from the
// stack, update the register map for GC if requested, and handle the case
// where the sender lives in a continuation stack chunk.
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

#ifdef ASSERT
  // Remember the pc before repair_sender_sp so the check below can verify
  // that any mismatch is due to deopt patching only.
  address sender_pc_copy = (address) *(sender_sp-1);
#endif

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier)
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);

  // On Intel the return_address is always the word on the stack
  address sender_pc = (address) *(sender_sp-1);

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != nullptr && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // TODO 8284443 Can't we do that by not passing 'dont_gc_arguments' in case 'StubId::c1_buffer_inline_args_id' in 'Runtime1::generate_code_for'?
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
#ifdef ASSERT
      NativeCall* call = nativeCall_before(pc());
      address dest = call->destination();
      assert(dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_no_receiver_id) ||
             dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_id), "unexpected safepoint in entry point");
#endif
    }
#endif
    if (!_cb->is_nmethod() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

  // A return-barrier sender pc means the real caller frames live in a
  // continuation stack chunk on the heap.
  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
516
// Record in the register map where the caller's EBP/RBP was saved, so that
// register-map-driven walks can locate the live value no matter who spilled it.
template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is
  // so that if EBP/RBP was live on callout from c2 we can find
  // the saved copy no matter what it called.

  // Since the interpreter always saves EBP/RBP if we record where it is then
  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
  // code, on entry will be enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // this is weird "H" ought to be at a higher address however the
  // oopMaps seems to have the "H" regs at the same address and the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}
538 #endif // CPU_X86_FRAME_X86_INLINE_HPP