1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_FRAME_X86_INLINE_HPP
  26 #define CPU_X86_FRAME_X86_INLINE_HPP
  27 
  28 #include "code/codeCache.hpp"
  29 #include "code/codeCache.inline.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.inline.hpp"
  32 #include "runtime/sharedRuntime.hpp"
  33 
  34 // Inline functions for Intel frames:
  35 
  36 class ContinuationCodeBlobLookup {
  37 public:
  38   enum { has_oopmap_lookup = true };
  39 
  40   static CodeBlob* find_blob(address pc) {
  41     CodeBlob* cb = CodeCache::find_blob_fast(pc);
  42     /*Prefetch::read(cb, PrefetchScanIntervalInBytes);
  43     Prefetch::read((void*)cb->is_compiled_addr(), PrefetchScanIntervalInBytes);
  44     Prefetch::read((void*) ((CompiledMethod*) cb)->deopt_handler_begin_addr(), PrefetchScanIntervalInBytes);*/
  45     return cb;
  46   }
  47 
  48   static CodeBlob* find_blob_and_oopmap(address pc, int& slot) {
  49     return CodeCache::find_blob_and_oopmap(pc, slot);
  50   }
  51 };
  52 
  53 // Constructors:
  54 
  55 inline frame::frame() {
  56   _pc = NULL;
  57   _sp = NULL;
  58   _unextended_sp = NULL;
  59   _fp = NULL;
  60   _cb = NULL;
  61   _deopt_state = unknown;
  62   _oop_map = NULL;
  63 }
  64 
  65 inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  66   _sp = sp;
  67   _unextended_sp = sp;
  68   _fp = fp;
  69   _pc = pc;
  70   assert(pc != NULL, "no pc?");
  71   _cb = CodeCache::find_blob_fast(pc);
  72   adjust_unextended_sp();
  73 
  74   address original_pc = CompiledMethod::get_deopt_original_pc(this);
  75   if (original_pc != NULL) {
  76     _pc = original_pc;
  77     _deopt_state = is_deoptimized;
  78   } else {
  79     _deopt_state = not_deoptimized;
  80   }
  81 
  82   _oop_map = NULL;
  83 }
  84 
// Construct a frame from sp/fp/pc; init() resolves the blob and deopt state.
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}
  88 
  89 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  90   _sp = sp;
  91   _unextended_sp = unextended_sp;
  92   _fp = fp;
  93   _pc = pc;
  94   assert(pc != NULL, "no pc?");
  95   _cb = cb;
  96   _oop_map = NULL;
  97   setup(pc);
  98 }
  99 
 100 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map) {
 101   _sp = sp;
 102   _unextended_sp = unextended_sp;
 103   _fp = fp;
 104   _pc = pc;
 105   assert(pc != NULL, "no pc?");
 106   _cb = cb;
 107   _oop_map = oop_map;
 108   setup(pc);
 109 }
 110 
// Construct a fully-specified frame without calling setup(): all fields
// (including the oop map) are supplied by the caller and the deopt state is
// assumed not_deoptimized. The trailing 'dummy' parameter only disambiguates
// this overload from the one above.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool dummy) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
#ifdef ASSERT
  // The following assertion has been disabled because it would sometime trap for Continuation.run, which is not *in* a continuation
  // and therefore does not clear the _cont_fastpath flag, but this is benign even in fast mode (see Freeze::setup_jump)
  // if (cb != NULL) {
  //   setup(pc);
  //   assert(_pc == pc && _deopt_state == not_deoptimized, "");
  // }
#endif
}
 129 
 130 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
 131   _sp = sp;
 132   _unextended_sp = unextended_sp;
 133   _fp = fp;
 134   _pc = pc;
 135   assert(pc != NULL, "no pc?");
 136   _cb = CodeCache::find_blob(pc); // TODO R find_blob_fast
 137   _oop_map = NULL;
 138   setup(pc);
 139 }
 140 
 141 inline void frame::setup(address pc) {
 142   adjust_unextended_sp();
 143 
 144   assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc));
 145   address original_pc = CompiledMethod::get_deopt_original_pc(this);
 146   if (original_pc != NULL) {
 147     _pc = original_pc;
 148     assert(_cb->as_compiled_method()->insts_contains_inclusive(_pc),
 149            "original PC must be in the main code section of the the compiled method (or must be immediately following it)");
 150     _deopt_state = is_deoptimized;
 151   } else {
 152     if (_cb == SharedRuntime::deopt_blob()) {
 153       _deopt_state = is_deoptimized;
 154     } else {
 155       _deopt_state = not_deoptimized;
 156     }
 157   }
 158 }
 159 
 160 inline frame::frame(intptr_t* sp, intptr_t* fp) {
 161   _sp = sp;
 162   _unextended_sp = sp;
 163   _fp = fp;
 164   _pc = (address)(sp[-1]);
 165 
 166   // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
 167   // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
 168   // unlucky the junk value could be to a zombied method and we'll die on the
 169   // find_blob call. This is also why we can have no asserts on the validity
 170   // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
 171   // -> pd_last_frame should use a specialized version of pd_last_frame which could
 172   // call a specialized frame constructor instead of this one.
 173   // Then we could use the assert below. However this assert is of somewhat dubious
 174   // value.
 175   // UPDATE: this constructor is only used by trace_method_handle_stub() now.
 176   // assert(_pc != NULL, "no pc?");
 177 
 178   _cb = CodeCache::find_blob(_pc);
 179   adjust_unextended_sp();
 180 
 181   address original_pc = CompiledMethod::get_deopt_original_pc(this);
 182   if (original_pc != NULL) {
 183     _pc = original_pc;
 184     _deopt_state = is_deoptimized;
 185   } else {
 186     _deopt_state = not_deoptimized;
 187   }
 188   _oop_map = NULL;
 189 }
 190 
 191 inline frame::frame(int sp, int ref_sp, intptr_t fp, address pc, CodeBlob* cb, bool deopt) {
 192   _cont_sp._sp = sp;
 193   _cont_sp._ref_sp = ref_sp;
 194   _unextended_sp = NULL;
 195   _fp = (intptr_t*)fp;
 196   _pc = pc;
 197   assert(pc != NULL, "no pc?");
 198   _cb = cb;
 199   _deopt_state = deopt ? is_deoptimized : not_deoptimized;
 200   _oop_map = NULL;
 201 }
 202 
 203 // Accessors
 204 
 205 inline bool frame::equal(frame other) const {
 206   bool ret =  sp() == other.sp()
 207               && unextended_sp() == other.unextended_sp()
 208               && fp() == other.fp()
 209               && pc() == other.pc();
 210   assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
 211   return ret;
 212 }
 213 
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame. The unextended sp serves as the id.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id.
// On x86 the stack grows down, so an older frame has a numerically larger id (sp).
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }
 222 
 223 
 224 
// The saved RBP of the caller, i.e. the dynamic link stored in this frame.
inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }

// The stack pointer before any extension/shrinkage by an adapter
// (see adjust_unextended_sp() and interpreter_frame_tos_address()).
inline intptr_t* frame::unextended_sp() const     { return _unextended_sp; }
 228 
 229 inline intptr_t* frame::real_fp() const {
 230   if (_cb != NULL) {
 231     // use the frame size if valid
 232     int size = _cb->frame_size();
 233     if (size > 0) {
 234       return unextended_sp() + size;
 235     }
 236   }
 237   // else rely on fp()
 238   assert(! is_compiled_frame(), "unknown compiled frame size");
 239   return fp();
 240 }
 241 
 242 // helper to update a map with callee-saved RBP
 243 
 244 template <typename RegisterMapT>
 245 void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
 246   // The interpreter and compiler(s) always save EBP/RBP in a known
 247   // location on entry. We must record where that location is
 248   // so this if EBP/RBP was live on callout from c2 we can find
 249   // the saved copy no matter what it called.
 250 
 251   // Since the interpreter always saves EBP/RBP if we record where it is then
 252   // we don't have to always save EBP/RBP on entry and exit to c2 compiled
 253   // code, on entry will be enough.
 254   map->set_location(rbp->as_VMReg(), (address) link_addr);
 255 #ifdef AMD64
 256   // this is weird "H" ought to be at a higher address however the
 257   // oopMaps seems to have the "H" regs at the same address and the
 258   // vanilla register.
 259   // XXXX make this go away
 260   if (true) {
 261     map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
 262   }
 263 #endif // AMD64
 264 }
 265 
// Return the location recorded in 'map' for the callee-saved RBP
// (the slot registered by update_map_with_saved_link()).
template <typename RegisterMapT>
intptr_t** frame::saved_link_address(const RegisterMapT* map) {
  return (intptr_t**)map->location(rbp->as_VMReg());
}
 270 
// Return address:

// Address of the stack slot holding the return address into the sender.
inline address* frame::sender_pc_addr()      const { return (address*) addr_at( return_addr_offset); }
// The pc at which the sender resumes.
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

// The sender's sp: this frame's address plus the fixed sender_sp_offset.
inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }

// Address of the slot holding an interpreter frame's locals pointer.
inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

// An interpreter frame's saved 'last sp'; NULL means the expression-stack
// top is simply sp() (see interpreter_frame_tos_address()).
inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}

// Address of the slot holding an interpreter frame's bytecode pointer (bcp).
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}


// Address of the slot holding an interpreter frame's method data pointer (mdp).
inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}
 294 
 295 
 296 
// Constant pool cache

// Address of the slot holding an interpreter frame's ConstantPoolCache*.
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

// Address of the slot holding an interpreter frame's Method*.
inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

// Address of the slot holding an interpreter frame's class mirror oop.
inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}
 314 
 315 // top of expression stack
 316 inline intptr_t* frame::interpreter_frame_tos_address() const {
 317   intptr_t* last_sp = interpreter_frame_last_sp();
 318   if (last_sp == NULL) {
 319     return sp();
 320   } else {
 321     // sp() may have been extended or shrunk by an adapter.  At least
 322     // check that we don't fall behind the legal region.
 323     // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
 324     assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
 325     return last_sp;
 326   }
 327 }
 328 
// Address of the interpreter frame's temporary oop slot (fp-relative).
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

// Size of one monitor entry (a BasicObjectLock) in an interpreter frame.
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}
 336 
 337 
 338 // expression stack
 339 // (the max_stack arguments are used by the GC; see class FrameClosure)
 340 
 341 inline intptr_t* frame::interpreter_frame_expression_stack() const {
 342   intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
 343   return monitor_end-1;
 344 }
 345 
// Entry frames

// Address of the JavaCallWrapper* slot stored in an entry frame.
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
 return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
 351 
 352 // Compiled frames
 353 
 354 inline oop frame::saved_oop_result(RegisterMap* map) const {
 355   oop* result_adr = (oop *)map->location(rax->as_VMReg());
 356   guarantee(result_adr != NULL, "bad register save location");
 357 
 358   return (*result_adr);
 359 }
 360 
 361 inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
 362   oop* result_adr = (oop *)map->location(rax->as_VMReg());
 363   guarantee(result_adr != NULL, "bad register save location");
 364 
 365   *result_adr = obj;
 366 }
 367 
// A frame is interpreted iff its pc lies within the interpreter's code.
inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}
 371 
// Compute this frame's sender (caller) frame, dispatching on the frame kind.
// LOOKUP supplies the code-blob lookup strategy (see ContinuationCodeBlobLookup).
template <typename LOOKUP>
frame frame::frame_sender(RegisterMap* map) const {
  // By default assume we don't have to follow argument oops; the
  // sender_for_xxx routines will update the map accordingly.
  map->set_include_argument_oops(false);

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");

  if (_cb != NULL) {
    // The 'stub' template argument is true for blobs that are not compiled
    // method frames (see sender_for_compiled_frame).
    return is_compiled_frame() ? sender_for_compiled_frame<LOOKUP, false>(map) : sender_for_compiled_frame<LOOKUP, true>(map);
  }
  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
 390 
 391 //------------------------------------------------------------------------------
 392 // frame::sender_for_compiled_frame
 393 template <typename LOOKUP, bool stub>
 394 frame frame::sender_for_compiled_frame(RegisterMap* map) const {
 395   assert(map != NULL, "map must be set");
 396 
 397   if (map->in_cont()) { // already in an h-stack
 398     return Continuation::sender_for_compiled_frame(*this, map);
 399   }
 400 
 401   // frame owned by optimizing compiler
 402   assert(_cb->frame_size() >= 0, "must have non-zero frame size");
 403   intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
 404 
 405   assert (sender_sp == real_fp(), "sender_sp: " INTPTR_FORMAT " real_fp: " INTPTR_FORMAT, p2i(sender_sp), p2i(real_fp()));
 406 
 407   // On Intel the return_address is always the word on the stack
 408   address sender_pc = (address) *(sender_sp-1);
 409 
 410   // This is the saved value of EBP which may or may not really be an FP.
 411   // It is only an FP if the sender is an interpreter frame (or C1?).
 412   intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);
 413   intptr_t* sender_fp = *saved_fp_addr;
 414   if (map->update_map()) {
 415     // Tell GC to use argument oopmaps for some runtime stubs that need it.
 416     // For C1, the runtime stub might not have oop maps, so set this flag
 417     // outside of update_register_map.
 418     if (stub) { // compiled frames do not use callee-saved registers
 419       map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
 420       if (oop_map() != NULL) { 
 421         _oop_map->update_register_map(this, map);
 422       }
 423     } else {
 424       assert (!_cb->caller_must_gc_arguments(map->thread()), "");
 425       assert (!map->include_argument_oops(), "");
 426       assert (oop_map() == NULL || OopMapStream(oop_map(), OopMapValue::callee_saved_value).is_done(), "callee-saved value in compiled frame");
 427     }
 428 
 429     // Since the prolog does the save and restore of EBP there is no oopmap
 430     // for it so we must fill in its location as if there was an oopmap entry
 431     // since if our caller was compiled code there could be live jvm state in it.
 432     update_map_with_saved_link(map, saved_fp_addr);
 433   }
 434 
 435   assert(sender_sp != sp(), "must have changed");
 436 
 437   if (Continuation::is_return_barrier_entry(sender_pc)) {       
 438     if (map->walk_cont()) { // about to walk into an h-stack         
 439       return Continuation::top_frame(*this, map);       
 440     } else {
 441       Continuation::fix_continuation_bottom_sender(map, *this, &sender_pc, &sender_sp, &sender_fp); 
 442     }
 443   }
 444 
 445   intptr_t* unextended_sp = sender_sp;
 446   CodeBlob* sender_cb = LOOKUP::find_blob(sender_pc);
 447   if (sender_cb != NULL) {
 448     return frame(sender_sp, unextended_sp, sender_fp, sender_pc, sender_cb);
 449   }
 450   // tty->print_cr(">>>> NO CB:"); print_on(tty);
 451   return frame(sender_sp, unextended_sp, sender_fp, sender_pc);
 452 }
 453 
// Lazily resolve the ImmutableOopMap for this frame's pc.
// Fast path: a post-call nop following the call site may encode the oop-map
// slot index in its displacement, letting us skip the OopMapSet search.
inline const ImmutableOopMap* frame::get_oop_map() const {
  if (_cb == NULL) return NULL;
  if (_cb->oop_maps() != NULL) {
    NativePostCallNop* nop = nativePostCallNop_at(_pc);
    if (nop != NULL &&
#ifdef CONT_DOUBLE_NOP
      !nop->is_mode2() &&
#endif
      nop->displacement() != 0
    ) {
      // The slot index lives in the top byte of the nop's displacement.
      int slot = ((nop->displacement() >> 24) & 0xff);
      return _cb->oop_map_for_slot(slot, _pc);
    }
    // Slow path: look the map up by pc in the blob's oop-map set.
    const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
    return oop_map;
  }
  return NULL;
}
 472 
 473 #endif // CPU_X86_FRAME_X86_INLINE_HPP