/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_FRAME_X86_INLINE_HPP
#define CPU_X86_FRAME_X86_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Inline functions for Intel frames:

#if INCLUDE_JFR

// Static helper routines

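// These helpers read interpreter frame slots directly from a raw frame pointer,
// without constructing a frame object. They are only compiled in when JFR is
// included, presumably so JFR stack walking can probe frames cheaply (guarded by
// is_interpreter_frame_setup_at()) from arbitrary sample points.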
inline address frame::interpreter_bcp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}

inline address frame::interpreter_return_address(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}

inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}

inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
  assert(fp != nullptr, "invariant");
  assert(sp != nullptr, "invariant");
  return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}

inline intptr_t* frame::sender_sp(intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return fp + frame::sender_sp_offset;
}

inline intptr_t* frame::link(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}

inline address frame::return_address(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<address>(sp[-1]);
}

inline intptr_t* frame::fp(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(sp[-2]);
}

#endif // INCLUDE_JFR

// Constructors:

inline frame::frame() {
  _pc = nullptr;
  _sp = nullptr;
  _unextended_sp = nullptr;
  _fp = nullptr;
  _cb = nullptr;
  _deopt_state = unknown;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
  setup(pc);
}

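// setup() determines the frame's deoptimization state: if the return pc has been
// patched to the deopt handler, get_deopt_original_pc() recovers the original pc,
// which is stored in _pc, and the frame is marked is_deoptimized.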
inline void frame::setup(address pc) {
  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump).
  // We might freeze a deoptimized frame in slow mode.
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

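// Construct a frame from just sp: the saved fp and the return address are read
// from their standard slots below sp (sp - sender_sp_offset and sp - 1).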
inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = nullptr;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret = sp() == other.sp()
          && unextended_sp() == other.unextended_sp()
          && fp() == other.fp()
          && pc() == other.pc();
  assert(!ret || (ret && cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const {
  assert(this->id() != nullptr && id != nullptr, "null frame id");
  return this->id() > id;
}

inline intptr_t* frame::link() const { return *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

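// Frames that have been copied into a continuation's heap-allocated stack chunk
// ("on-heap" frames) store the unextended sp as an offset rather than as an
// absolute pointer; hence the parallel absolute/offset accessors below.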
inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int frame::offset_unextended_sp() const         { assert_offset(); return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != nullptr) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(!is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? pointer_delta_as_int(sender_sp(), sp())
    : cb()->frame_size();
}

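// Size, in words, of the stack-passed arguments of this compiled method:
// num_stack_arg_slots() counts VMReg stack slots, stack_slot_size converts
// slots to bytes, and the shift by LogBytesPerWord converts bytes to words.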
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_nmethod(), "");
  return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int bci   = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

inline address* frame::sender_pc_addr() const { return (address*) addr_at(return_addr_offset); }
inline address frame::sender_pc() const       { return *sender_pc_addr(); }

inline intptr_t* frame::sender_sp() const     { return addr_at(sender_sp_offset); }

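// Interpreter frame slots that point back into the frame itself are stored
// relativized (as word offsets from fp) rather than as absolute addresses, so
// they remain valid when the frame is copied into a continuation stack chunk.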
inline intptr_t* frame::interpreter_frame_locals() const {
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
  assert(n <= 0, "n: " INTPTR_FORMAT, n);
  return n != 0 ? &fp()[n] : nullptr;
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}

// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == nullptr) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}

// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end - 1;
}

// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}

// Compiled frames

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

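// Distance, in words, from the sender's sp down to the slot that holds the
// return address (on x86 the return address is the word just below the sender's
// sp; cf. frame::return_address()).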
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

//------------------------------------------------------------------------------
// frame::sender

inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}

inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}

inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

#ifdef ASSERT
  address sender_pc_copy = (address) *(sender_sp - 1);
#endif

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier)
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);

  // On Intel the return_address is always the word on the stack
  address sender_pc = (address) *(sender_sp - 1);

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != nullptr && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // TODO 8284443 Can't we do that by not passing 'dont_gc_arguments' in case 'StubId::c1_buffer_inline_args_id' in 'Runtime1::generate_code_for'?
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
#ifdef ASSERT
      NativeCall* call = nativeCall_before(pc());
      address dest = call->destination();
      assert(dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_no_receiver_id) ||
             dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_id), "unexpected safepoint in entry point");
#endif
    }
#endif
    if (!_cb->is_nmethod() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry,
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

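  // If the sender pc is the continuation return barrier, the real caller frames
  // live in a heap stack chunk: either walk into the chunk (walk_cont) or skip
  // directly to the frame below the continuation's entry.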
  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is so that
  // if EBP/RBP was live on callout from c2 we can find the saved copy
  // no matter what it called.

  // Since the interpreter always saves EBP/RBP, if we record where it is then
  // we don't have to save EBP/RBP on every entry and exit to c2 compiled
  // code; recording it on entry is enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // This is weird: "H" ought to be at a higher address, however the
  // oopMaps seem to have the "H" regs at the same address as the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}

#endif // CPU_X86_FRAME_X86_INLINE_HPP