/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_riscv.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif

#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
#endif
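
// Note on the link layout assumed throughout this file (frame_riscv.hpp has
// the authoritative offsets): a frame's return address and saved fp live just
// below its sender's SP, at sender_sp[-1] and sender_sp[-2]; for frames that
// maintain a frame pointer, fp points at the sender's SP, so the same two
// slots are fp[return_addr_offset] and fp[link_offset].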

// Profiling/safepoint support

bool frame::safe_for_sender(JavaThread *thread) {
  address addr_sp = (address)_sp;
  address addr_fp = (address)_fp;
  address unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  // sp must be within the usable part of the stack (not in guards)
  if (!thread->is_in_usable_stack(addr_sp)) {
    return false;
  }

  // When we are running interpreted code the machine stack pointer, SP, is
  // set low enough so that the Java expression stack can grow and shrink
  // without ever exceeding the machine stack bounds. So, ESP >= SP.

  // When we call out of an interpreted method, SP is incremented so that
  // the space between SP and ESP is removed. The SP saved in the callee's
  // frame is the SP *before* this increment. So, when we walk a stack of
  // interpreter frames the sender's SP saved in a frame might be less than
  // the SP at the point of call.

  // So the unextended sp must be within the stack, but we need not check
  // that unextended sp >= sp.

  if (!thread->is_in_full_stack_checked(unextended_sp)) {
    return false;
  }

  // an fp must be within the stack and above (but not equal to) sp
  // the second check, on fp plus the return address offset, handles the
  // situation where fp is -1
  bool fp_safe = thread->is_in_stack_range_excl(addr_fp, addr_sp) &&
                 thread->is_in_full_stack_checked(addr_fp + (return_addr_offset * sizeof(void*)));

  // We know sp/unextended_sp are safe; only fp is questionable here.

  // If the current frame is known to the code cache then we can attempt to
  // construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get into frame construction code.

  if (_cb != nullptr) {

    // First check if the frame is complete and the tester is reliable.
    // Unfortunately we can only check frame completeness for runtime stubs
    // and nmethods; other generic buffer blobs are more problematic so we
    // just assume they are ok. Adapter blobs never have a complete frame
    // and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.
      return fp_safe && is_entry_frame_valid(thread);
    }

    intptr_t* sender_sp = nullptr;
    intptr_t* sender_unextended_sp = nullptr;
    address sender_pc = nullptr;
    intptr_t* saved_fp = nullptr;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      sender_pc = (address)this->fp()[return_addr_offset];
      // for interpreted frames, the value below is the sender "raw" sp,
      // which can be different from the sender unextended sp (the sp seen
      // by the sender) because of current frame local variables
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
      sender_unextended_sp = (intptr_t*) this->fp()[interpreter_frame_sender_sp_offset];
      saved_fp = (intptr_t*) this->fp()[link_offset];
    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be checked for C1?)

      // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
      if (_cb->frame_size() <= 0) {
        return false;
      }

      sender_sp = _unextended_sp + _cb->frame_size();
      // Is sender_sp safe?
      if (!thread->is_in_full_stack_checked((address)sender_sp)) {
        return false;
      }

      sender_unextended_sp = sender_sp;
      sender_pc = (address) *(sender_sp - 1);
      saved_fp = (intptr_t*) *(sender_sp - 2);
    }
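
    // (A return barrier is a stub address that the continuation support
    // patches into the bottom frame of a mounted continuation so that
    // unwinding past the continuation boundary can be intercepted; it never
    // identifies the real caller, hence the fix-up below.)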
    if (Continuation::is_return_barrier_entry(sender_pc)) {
      // If our sender_pc is the return barrier, then our "real" sender is the continuation entry
      frame s = Continuation::continuation_bottom_sender(thread, *this, sender_sp);
      sender_sp = s.sp();
      sender_pc = s.pc();
    }

    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // fp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (C1 too?) are we certain that the saved fp
      // is really a frame pointer.
      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender
      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);
    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
    if (sender_pc == nullptr || sender_blob == nullptr) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from the code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      if (!thread->is_in_stack_range_excl((address)saved_fp, (address)sender_sp)) {
        return false;
      }

      // construct the potential sender
      frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      return thread->is_in_stack_range_excl(jcw, (address)sender.fp());
    }

    nmethod* nm = sender_blob->as_nmethod_or_null();
    if (nm != nullptr) {
      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
          nm->method()->is_method_handle_intrinsic()) {
        return false;
      }
    }

    // If the frame size is 0 (or less) something is bad: every nmethod has a
    // non-zero frame size, because the return address counts against the
    // callee's frame.
    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.
    if (!sender_blob->is_nmethod()) {
      return false;
    }

    // We could put some more validation on the potential non-interpreted sender
    // frame we'd create by calling sender, if we could think of any. Wait for the
    // next crash in forte...

    // One idea is to check whether the sender_pc we have is one that we'd expect
    // to call into the current cb.

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be a native-compiled frame. Since sender will try to use fp to find
  // linkages, it must be safe.
  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (it is zero only at the oldest frame)?
  if ((address)this->fp()[return_addr_offset] == nullptr) { return false; }

  return true;
}

void frame::patch_pc(Thread* thread, address pc) {
  assert(_cb == CodeCache::find_blob(pc), "unexpected pc");
  address* pc_addr = &(((address*) sp())[-1]);
  address pc_old = *pc_addr;

  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  p2i(pc_addr), p2i(pc_old), p2i(pc));
  }

  assert(!Continuation::is_return_barrier_entry(pc_old), "return barrier");

  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == pc_old || pc == pc_old || pc_old == nullptr, "must be");
  DEBUG_ONLY(address old_pc = _pc;)
  *pc_addr = pc;
  _pc = pc; // must be set before call to get_deopt_original_pc
  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    assert(original_pc == old_pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    _pc = original_pc;
  } else {
    _deopt_state = not_deoptimized;
  }
}

intptr_t* frame::entry_frame_argument_at(int offset) const {
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  // Entry frame's arguments are always in relation to unextended_sp()
  return &unextended_sp()[index];
}

// locals

void frame::interpreter_frame_set_locals(intptr_t* locs) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // set relativized locals
  ptr_at_put(interpreter_frame_locals_offset, (intptr_t) (locs - fp()));
}

// sender_sp

intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}

void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}


// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) at_relative(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
  return result;
}

void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // set relativized monitor_block_top
  ptr_at_put(interpreter_frame_monitor_block_top_offset, (intptr_t*)value - fp());
  assert(at_absolute(interpreter_frame_monitor_block_top_offset) <= interpreter_frame_monitor_block_top_offset, "");
}
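
// The interpreter-frame slots written below (last_sp and extended_sp, like
// locals and monitor_block_top above) are stored relative to fp rather than
// as absolute addresses, presumably so that a frame stays self-consistent
// when it is copied, e.g. when continuation frames move between the thread
// stack and a heap stack chunk.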

// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_last_sp(intptr_t* last_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // set relativized last_sp
  ptr_at_put(interpreter_frame_last_sp_offset, last_sp != nullptr ? (last_sp - fp()) : 0);
}

void frame::interpreter_frame_set_extended_sp(intptr_t* sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // set relativized extended_sp
  ptr_at_put(interpreter_frame_extended_sp_offset, (sp - fp()));
}

frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non-zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  jfa->make_walkable();
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  return fr;
}

UpcallStub::FrameData* UpcallStub::frame_data_for_frame(const frame& frame) const {
  assert(frame.is_upcall_stub_frame(), "wrong frame");
  // need unextended_sp here, since normal sp is wrong for interpreter callees
  return reinterpret_cast<UpcallStub::FrameData*>(
    reinterpret_cast<address>(frame.unextended_sp()) + in_bytes(_frame_data_offset));
}

bool frame::upcall_stub_frame_is_first() const {
  assert(is_upcall_stub_frame(), "must be optimized entry frame");
  UpcallStub* blob = _cb->as_upcall_stub();
  JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
  return jfa->last_Java_sp() == nullptr;
}

frame frame::sender_for_upcall_stub_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");
  UpcallStub* blob = _cb->as_upcall_stub();
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = blob->jfa_for_frame(*this);
  assert(!upcall_stub_frame_is_first(), "must have a frame anchor to go back to");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  jfa->make_walkable();
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

  return fr;
}
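
// Background for the ASSERT-only helpers below: when an nmethod frame is
// deoptimized, its saved return address is patched to the deopt blob and the
// original PC is stashed in a slot of the nmethod's frame at a fixed offset
// from the unextended SP, which is why a frame with only _unextended_sp
// filled in is enough for get_original_pc().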

//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
  frame fr;

  // This is ugly but it's better than changing {get,set}_original_pc
  // to take an SP value as an argument. And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  assert_cond(nm != nullptr);
  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains_inclusive(original_pc),
         "original PC must be in the main code section of the compiled method (or must be immediately following it)");
}
#endif

//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
#ifdef ASSERT
void frame::adjust_unextended_sp() {
  // On riscv, sites calling method handle intrinsics and lambda forms are treated
  // as any other call site. Therefore, no special action is needed when we are
  // returning to any of these call sites.

  if (_cb != nullptr) {
    nmethod* sender_nm = _cb->as_nmethod_or_null();
    if (sender_nm != nullptr) {
      // If the sender PC is a deoptimization point, get the original PC.
      if (sender_nm->is_deopt_entry(_pc) ||
          sender_nm->is_deopt_mh_entry(_pc)) {
        verify_deopt_original_pc(sender_nm, _unextended_sp);
      }
    }
  }
}
#endif


//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#ifdef COMPILER2
  assert(map != nullptr, "map must be set");
  if (map->update_map()) {
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2

  if (Continuation::is_return_barrier_entry(sender_pc())) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
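
// Sanity checks on a frame that the stack walker merely suspects is
// interpreted (see safe_for_sender() above). These heuristics may reject an
// unusual but valid frame; what they must never do is crash on garbage
// values.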

bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems.
  if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method
  Method* m = safe_interpreter_frame_method();
  // validate the method we'd find in this potential sender
  if (!Method::is_valid_method(m)) {
    return false;
  }

  // stack frames shouldn't be much larger than max_stack elements
  // this test requires the use of unextended_sp, which is the sp as seen by
  // the current frame, and not sp, which is the "raw" sp that could point
  // further because of local variables of the callee method inserted after
  // the method arguments
  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx
  address bcp = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate constantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (!MetaspaceObj::is_valid(cp)) {
    return false;
  }

  // validate locals
  if (m->max_locals() > 0) {
    address locals = (address)interpreter_frame_locals();
    if (!thread->is_in_stack_range_incl(locals, (address)fp())) {
      return false;
    }
  }

  // We'd have to be pretty unlucky to be misled at this point
  return true;
}
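
// Read a method's result off the interpreter frame. For native methods the
// result has already been spilled by the native entry: an oop result is kept
// in the oop_temp slot (presumably so it stays visible to the GC across the
// epilogue), while float/double results sit above the slots written by the
// push(dtos)/push(ltos) sequence noted below.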

BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr = nullptr;
  if (method->is_native()) {
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // This is because we do a push(ltos) after push(dtos) in generate_native_entry.
      tos_addr += 2 * Interpreter::stackElementWords;
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == nullptr) ? (oop)nullptr : *obj_p;
      }
      assert(Universe::is_in_heap_or_null(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
      value_result->f = *(jfloat*)tos_addr;
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}


intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &interpreter_frame_tos_address()[index];
}

#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
    DESCRIBE_FP_OFFSET(interpreter_frame_extended_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }

  if (is_java_frame() || Continuation::is_continuation_enterSpecial(*this)) {
    intptr_t* ret_pc_loc;
    intptr_t* fp_loc;
    if (is_interpreted_frame()) {
      ret_pc_loc = fp() + return_addr_offset;
      fp_loc = fp();
    } else {
      ret_pc_loc = real_fp() - 1;
      fp_loc = real_fp() - 2;
    }
    address ret_pc = *(address*)ret_pc_loc;
    values.describe(frame_no, ret_pc_loc,
      Continuation::is_return_barrier_entry(ret_pc) ? "return address (return barrier)" : "return address");
    values.describe(-1, fp_loc, "saved fp", 0); // "unowned" as value belongs to sender
  }
}
#endif

intptr_t* frame::initial_deoptimization_info() {
  // Not used on riscv, but we must return something.
  return nullptr;
}

#undef DESCRIBE_FP_OFFSET

#ifndef PRODUCT
// This is a generic constructor which is only used by pns() in debug.cpp.
frame::frame(void* ptr_sp, void* ptr_fp, void* pc) : _on_heap(false) {
  init((intptr_t*)ptr_sp, (intptr_t*)ptr_fp, (address)pc);
}

#endif

void JavaFrameAnchor::make_walkable() {
  // last frame set?
  if (last_Java_sp() == nullptr) { return; }
  // already walkable?
  if (walkable()) { return; }
  vmassert(last_Java_sp() != nullptr, "not called from Java code?");
  vmassert(last_Java_pc() == nullptr, "already walkable");
  _last_Java_pc = (address)_last_Java_sp[-1];
  vmassert(walkable(), "something went wrong");
}