1 /* 2 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_X86_FRAME_X86_INLINE_HPP 26 #define CPU_X86_FRAME_X86_INLINE_HPP 27 28 #include "code/codeBlob.inline.hpp" 29 #include "code/codeCache.inline.hpp" 30 #include "code/vmreg.inline.hpp" 31 #include "compiler/oopMap.inline.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "interpreter/oopMapCache.hpp" 34 #include "runtime/sharedRuntime.hpp" 35 #include "runtime/registerMap.hpp" 36 #ifdef COMPILER1 37 #include "c1/c1_Runtime1.hpp" 38 #endif 39 40 // Inline functions for Intel frames: 41 42 // Constructors: 43 44 inline frame::frame() { 45 _pc = NULL; 46 _sp = NULL; 47 _unextended_sp = NULL; 48 _fp = NULL; 49 _cb = NULL; 50 _deopt_state = unknown; 51 _oop_map = NULL; 52 _on_heap = false; 53 DEBUG_ONLY(_frame_index = -1;) 54 } 55 56 inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) { 57 _sp = sp; 58 _unextended_sp = sp; 59 _fp = fp; 60 _pc = pc; 61 _oop_map = NULL; 62 _on_heap = false; 63 DEBUG_ONLY(_frame_index = -1;) 64 65 assert(pc != NULL, "no pc?"); 66 _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames 67 setup(pc); 68 } 69 70 inline void frame::setup(address pc) { 71 adjust_unextended_sp(); 72 73 address original_pc = CompiledMethod::get_deopt_original_pc(this); 74 if (original_pc != NULL) { 75 _pc = original_pc; 76 _deopt_state = is_deoptimized; 77 assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc), 78 "original PC must be in the main code section of the compiled method (or must be immediately following it)"); 79 } else { 80 if (_cb == SharedRuntime::deopt_blob()) { 81 _deopt_state = is_deoptimized; 82 } else { 83 _deopt_state = not_deoptimized; 84 } 85 } 86 } 87 88 inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) { 89 init(sp, fp, pc); 90 } 91 92 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) { 93 _sp = sp; 94 _unextended_sp = unextended_sp; 95 _fp = fp; 96 _pc = pc; 97 assert(pc != NULL, "no pc?"); 98 _cb = cb; 99 _oop_map = NULL; 100 assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc)); 101 _on_heap = false; 102 DEBUG_ONLY(_frame_index = -1;) 103 104 setup(pc); 105 } 106 107 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, 108 const ImmutableOopMap* oop_map, bool on_heap) { 109 _sp = sp; 110 _unextended_sp = unextended_sp; 111 _fp = fp; 112 _pc = pc; 113 _cb = cb; 114 _oop_map = oop_map; 
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map. I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != NULL) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes trip for
  // Continuation.run, which is not *in* a continuation and therefore does not clear
  // the _cont_fastpath flag, but this is benign even in fast mode (see Freeze::setup_jump).
  // We might also freeze a deoptimized frame in slow mode.
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = NULL;
  assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT,
         p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp)
  : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk:
  //   AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler -> pd_last_frame
  // pd_last_frame should use a specialized version which could call a specialized
  // frame constructor instead of this one; then we could use the assert below.
  // However, this assert is of somewhat dubious value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = NULL;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret = sp() == other.sp()
          && unextended_sp() == other.unextended_sp()
          && fp() == other.fp()
          && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return a unique id for this frame. The id must have a value such that we can
// distinguish identity and younger/older relationship. NULL represents an
// invalid (incomparable) frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (a less recent activation) than the frame represented by id.
inline bool frame::is_older(intptr_t* id) const {
  assert(this->id() != NULL && id != NULL, "NULL frame id");
  return this->id() > id;
}
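
// Since ids are unextended stack pointers and x86 stacks grow toward lower
// addresses, "older" means numerically greater. A minimal sketch, with assumed
// addresses:
//
//   frame callee = ...;  // unextended_sp == 0x7f0000001000
//   frame caller = ...;  // unextended_sp == 0x7f0000001080
//   assert(caller.is_older(callee.id()), "the caller is the older activation");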

inline intptr_t* frame::link() const { return *(intptr_t**)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t**)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : NULL;
}

inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int frame::offset_unextended_sp() const         { assert_offset(); return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap(); _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(!is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? sender_sp() - sp()
    : cb()->frame_size();
}

inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_compiled(), "");
  return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != NULL, "");
  Method* m = interpreter_frame_method();
  int bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

inline address* frame::sender_pc_addr() const { return (address*)addr_at(return_addr_offset); }
inline address  frame::sender_pc()      const { return *sender_pc_addr(); }

inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  return (intptr_t*)at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}

// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}
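
// The accessors above all index fixed slots relative to fp. Roughly, from
// higher to lower addresses (a sketch; frame_x86.hpp holds the authoritative
// *_offset constants):
//
//   [ return address     ]
//   [ saved rbp          ]  <- fp
//   [ sender sp          ]
//   [ last sp            ]
//   [ Method*            ]
//   [ mirror             ]
//   [ mdp                ]
//   [ ConstantPoolCache* ]
//   [ locals             ]
//   [ bcp                ]
//   [ expression stack   ]  <- grows down toward sp()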

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For the top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop*)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}

// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end - 1;
}

// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}

// Compiled frames

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop*)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop*)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");
  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

inline const ImmutableOopMap* frame::get_oop_map() const {
  if (_cb == NULL) return NULL;
  if (_cb->oop_maps() != NULL) {
    NativePostCallNop* nop = nativePostCallNop_at(_pc);
    if (nop != NULL && nop->displacement() != 0) {
      int slot = ((nop->displacement() >> 24) & 0xff);
      return _cb->oop_map_for_slot(slot, _pc);
    }
    const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
    return oop_map;
  }
  return NULL;
}

//------------------------------------------------------------------------------
// frame::sender

inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}
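
// sender() is the building block for stack walking. A typical walk, sketched
// below (RegisterMap construction elided, since its constructor signature has
// varied across JDK versions):
//
//   RegisterMap map(...);
//   for (frame f = thread->last_frame(); !f.is_first_frame(); f = f.sender(&map)) {
//     // visit f
//   }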

inline frame frame::sender_raw(RegisterMap* map) const {
  // By default we don't have to follow argument oops; the sender_for_xxx
  // functions will update the map accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != NULL) return sender_for_compiled_frame(map);

  // Must be a native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}

inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  // frame owned by an optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

#ifdef ASSERT
  address sender_pc_copy = (address) *(sender_sp - 1);
#endif

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier).
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);

  // On Intel the return address is always the word on the stack
  address sender_pc = (address) *(sender_sp - 1);

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is no longer consistent.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(),
           "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != NULL && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
#ifdef ASSERT
      NativeCall* call = nativeCall_before(pc());
      address dest = call->destination();
      assert(dest == Runtime1::entry_for(Runtime1::buffer_inline_args_no_receiver_id) ||
             dest == Runtime1::entry_for(Runtime1::buffer_inline_args_id),
             "unexpected safepoint in entry point");
#endif
    }
#endif
    if (!_cb->is_compiled() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != NULL) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value),
             "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry,
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
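
// Stack layout at the compiled-frame boundary, as used above (on x86,
// sender_sp_offset is 2):
//
//   sender_sp      ->  [ bottom word of the caller's frame ]
//   sender_sp - 1  ->  [ return address (sender_pc)        ]
//   sender_sp - 2  ->  [ saved rbp (*saved_fp_addr)        ]  == sender_sp - sender_sp_offset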

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is so that
  // if EBP/RBP was live on callout from c2 we can find the saved copy
  // no matter what it called.

  // Since the interpreter always saves EBP/RBP, if we record where it is then
  // we don't have to save EBP/RBP on every entry and exit to c2-compiled
  // code; recording on entry is enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // This is weird: "H" ought to be at a higher address, however the
  // oopMaps seem to have the "H" regs at the same address as the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}

#endif // CPU_X86_FRAME_X86_INLINE_HPP