/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_FRAME_X86_INLINE_HPP
#define CPU_X86_FRAME_X86_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"

// Inline functions for Intel frames:

#if INCLUDE_JFR

// Static helper routines

inline address frame::interpreter_bcp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}

inline address frame::interpreter_return_address(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}

inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}

inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
  assert(fp != nullptr, "invariant");
  assert(sp != nullptr, "invariant");
  return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}

inline intptr_t* frame::sender_sp(intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return fp + frame::sender_sp_offset;
}

inline intptr_t* frame::link(const intptr_t* fp) {
  assert(fp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}

inline address frame::return_address(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<address>(sp[-1]);
}

inline intptr_t* frame::fp(const intptr_t* sp) {
  assert(sp != nullptr, "invariant");
  return reinterpret_cast<intptr_t*>(sp[-2]);
}

#endif // INCLUDE_JFR

// Constructors:

inline frame::frame() {
  _pc = nullptr;
  _sp = nullptr;
  _unextended_sp = nullptr;
  _fp = nullptr;
  _cb = nullptr;
  _deopt_state = unknown;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
  setup(pc);
}

inline void frame::setup(address pc) {
  adjust_unextended_sp();

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump).
  // We might also freeze a deoptimized frame in slow mode.
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  //  -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = nullptr;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (ret && cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const {
  assert(this->id() != nullptr && id != nullptr, "null frame id");
  return this->id() > id;
}

inline intptr_t* frame::link() const { return *(intptr_t**)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t**)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int frame::offset_unextended_sp() const         { assert_offset(); return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != nullptr) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(!is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? pointer_delta_as_int(sender_sp(), sp())
    : cb()->frame_size();
}

inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_nmethod(), "");
  return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

inline address* frame::sender_pc_addr() const { return (address*) addr_at(return_addr_offset); }
inline address frame::sender_pc() const       { return *sender_pc_addr(); }

inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); }

inline intptr_t* frame::interpreter_frame_locals() const {
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
  assert(n <= 0, "n: " INTPTR_FORMAT, n);
  return n != 0 ? &fp()[n] : nullptr;
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}

// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == nullptr) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop*)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end - 1;
}

// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}

// Compiled frames

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop*) map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop*) map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

//------------------------------------------------------------------------------
// frame::sender

inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}

inline frame frame::sender_raw(RegisterMap* map) const {
  // The default is that we don't have to follow them. The sender_for_xxx
  // functions will update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}

inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

  // On Intel the return_address is always the word on the stack
  address sender_pc = (address) *(sender_sp - 1);

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier).
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    if (!_cb->is_nmethod()) { // compiled frames do not use callee-saved registers
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry,
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is
  // so that if EBP/RBP was live on callout from c2 we can find
  // the saved copy no matter what it called.

  // Since the interpreter always saves EBP/RBP, if we record where it is then
  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
  // code; saving it on entry will be enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // This is weird: "H" ought to be at a higher address, however the
  // oopMaps seem to have the "H" regs at the same address as the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}

#endif // CPU_X86_FRAME_X86_INLINE_HPP