1 /*
  2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_FRAME_X86_INLINE_HPP
 26 #define CPU_X86_FRAME_X86_INLINE_HPP
 27 
 28 #include "code/codeBlob.inline.hpp"
 29 #include "code/codeCache.inline.hpp"
 30 #include "code/vmreg.inline.hpp"
 31 #include "compiler/oopMap.inline.hpp"
 32 #include "interpreter/interpreter.hpp"
 33 #include "runtime/sharedRuntime.hpp"
 34 #include "runtime/registerMap.hpp"
 35 
 36 // Inline functions for Intel frames:
 37 
 38 // Constructors:
 39 
 40 inline frame::frame() {
 41   _pc = nullptr;
 42   _sp = nullptr;
 43   _unextended_sp = nullptr;
 44   _fp = nullptr;
 45   _cb = nullptr;
 46   _deopt_state = unknown;
 47   _oop_map = nullptr;
 48   _on_heap = false;
 49   DEBUG_ONLY(_frame_index = -1;)
 50 }
 51 
// Shared initialization for the (sp, fp, pc) constructor: record the raw
// register state, look up the code blob containing pc, then finish with
// setup() (sp adjustment and deopt-state detection). _cb must be set
// before setup() runs, since setup() consults it.
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp; // no extension known yet; setup() may adjust it
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
  setup(pc);
}
 65 
 66 inline void frame::setup(address pc) {
 67   adjust_unextended_sp();
 68 
 69   address original_pc = CompiledMethod::get_deopt_original_pc(this);
 70   if (original_pc != nullptr) {
 71     _pc = original_pc;
 72     _deopt_state = is_deoptimized;
 73     assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
 74            "original PC must be in the main code section of the compiled method (or must be immediately following it)");
 75   } else {
 76     if (_cb == SharedRuntime::deopt_blob()) {
 77       _deopt_state = is_deoptimized;
 78     } else {
 79       _deopt_state = not_deoptimized;
 80     }
 81   }
 82 }
 83 
// Construct a frame from raw register values; delegates to init(), which
// also looks up the code blob for pc and runs setup().
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}
 87 
// Constructor for callers that already know the CodeBlob for pc, avoiding
// a code-cache lookup. unextended_sp is supplied explicitly and may differ
// from sp. The blob must be non-null (see assert).
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}
102 
// Constructor used (mostly) for heap frames of continuation stack chunks:
// the oop map and on-heap flag are passed in by the caller. _deopt_state
// is preset to not_deoptimized; setup() may change it when a code blob
// is supplied.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometime trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump)
  // We might freeze deoptimized frame in slow mode
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}
128 
// Construct from sp/unextended_sp/fp/pc, using the fast code-cache lookup
// for the blob. pc must be inside the code cache (see assert).
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}
143 
// Construct from sp alone: the saved fp and the return address are read
// from their conventional stack slots relative to sp.
inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}
145 
// Constructor taking only sp and fp; the pc is loaded from the word below
// sp. Performs the deopt detection inline rather than via setup() — note
// the comment below about possibly-junk pc values.
inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  // If the code containing _pc was deoptimized, report the original pc.
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = nullptr;
}
176 
177 // Accessors
178 
179 inline bool frame::equal(frame other) const {
180   bool ret =  sp() == other.sp()
181               && unextended_sp() == other.unextended_sp()
182               && fp() == other.fp()
183               && pc() == other.pc();
184   assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
185   return ret;
186 }
187 
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != nullptr && id != nullptr, "null frame id");
                                                    return this->id() > id ; }

// The saved frame pointer of the sender, read from this frame's link slot.
inline intptr_t* frame::link() const              { return *(intptr_t **)addr_at(link_offset); }

// As link(), but returns null instead of faulting when the slot is unreadable.
inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : nullptr;
}

// unextended_sp accessors: absolute-pointer form for stack frames, and
// relativized-offset form for on-heap frames (note the asserts).
inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }
208 
209 inline intptr_t* frame::real_fp() const {
210   if (_cb != nullptr) {
211     // use the frame size if valid
212     int size = _cb->frame_size();
213     if (size > 0) {
214       return unextended_sp() + size;
215     }
216   }
217   // else rely on fp()
218   assert(! is_compiled_frame(), "unknown compiled frame size");
219   return fp();
220 }
221 
222 inline int frame::frame_size() const {
223   return is_interpreted_frame()
224     ? pointer_delta_as_int(sender_sp(), sp())
225     : cb()->frame_size();
226 }
227 
228 inline int frame::compiled_frame_stack_argsize() const {
229   assert(cb()->is_compiled(), "");
230   return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
231 }
232 
// Fill in the interpreter oop map for this frame's method at its current
// bytecode index.
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}
239 
// Return address:

// Location of the return address pushed by this frame's caller.
inline address* frame::sender_pc_addr()      const { return (address*) addr_at(return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

// The sender's sp, computed via sender_sp_offset.
inline intptr_t* frame::sender_sp()          const { return            addr_at(sender_sp_offset); }

// Address of the locals area: the slot holds an fp-relative offset.
inline intptr_t* frame::interpreter_frame_locals() const {
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}
251 
252 inline intptr_t* frame::interpreter_frame_last_sp() const {
253   intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
254   assert(n <= 0, "n: " INTPTR_FORMAT, n);
255   return n != 0 ? &fp()[n] : nullptr;
256 }
257 
// Address of the bcp (bytecode pointer) slot in an interpreter frame.
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

// Address of the mdp (method data pointer) slot in an interpreter frame.
inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}
265 
266 
267 
// Constant pool cache

// Address of the ConstantPoolCache* slot in an interpreter frame.
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

// Address of the Method* slot in an interpreter frame.
inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

// Address of the mirror oop slot in an interpreter frame.
inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}
285 
286 // top of expression stack
287 inline intptr_t* frame::interpreter_frame_tos_address() const {
288   intptr_t* last_sp = interpreter_frame_last_sp();
289   if (last_sp == nullptr) {
290     return sp();
291   } else {
292     // sp() may have been extended or shrunk by an adapter.  At least
293     // check that we don't fall behind the legal region.
294     // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
295     assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
296     return last_sp;
297   }
298 }
299 
// Address of the temporary oop slot (fp + interpreter_frame_oop_temp_offset).
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

// Size of one monitor element (BasicObjectLock).
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}
307 
308 
// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

// The expression stack begins one word below the monitor area.
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}
316 
// Entry frames

// Address of the JavaCallWrapper* slot in an entry frame.
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
 return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
322 
// Compiled frames

// Read the oop result saved for rax, locating its save slot via the
// register map.
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}

// Overwrite the oop result saved for rax in its register-map save slot.
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}
337 
// A frame is interpreted iff its pc lies within the interpreter's code.
inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

// Distance between the sender-sp slot and the return-address slot.
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}
345 
346 inline const ImmutableOopMap* frame::get_oop_map() const {
347   if (_cb == nullptr) return nullptr;
348   if (_cb->oop_maps() != nullptr) {
349     NativePostCallNop* nop = nativePostCallNop_at(_pc);
350     if (nop != nullptr && nop->displacement() != 0) {
351       int slot = ((nop->displacement() >> 24) & 0xff);
352       return _cb->oop_map_for_slot(slot, _pc);
353     }
354     const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
355     return oop_map;
356   }
357   return nullptr;
358 }
359 
360 //------------------------------------------------------------------------------
361 // frame::sender
362 
363 inline frame frame::sender(RegisterMap* map) const {
364   frame result = sender_raw(map);
365 
366   if (map->process_frames() && !map->in_cont()) {
367     StackWatermarkSet::on_iteration(map->thread(), result);
368   }
369 
370   return result;
371 }
372 
// Compute the sender frame, dispatching on the frame kind, without the
// stack-watermark notification done by sender().
inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
393 
// Sender of a frame owned by compiled code: the sender's sp follows from
// this frame's fixed frame size; the return pc and the saved rbp are read
// from their conventional slots just below that sp. The register map is
// updated with the saved-rbp location (and argument-oop info for stubs).
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

  // On Intel the return_address is always the word on the stack
  address sender_pc = (address) *(sender_sp-1);

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier)
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

  // Returning through a continuation return barrier: delegate to the
  // continuation code to find the real sender.
  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
444 
// Record in the given register map the stack location holding the caller's
// saved EBP/RBP (link_addr), as if it had an oop-map entry.
template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is
  // so this if EBP/RBP was live on callout from c2 we can find
  // the saved copy no matter what it called.

  // Since the interpreter always saves EBP/RBP if we record where it is then
  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
  // code, on entry will be enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // this is weird "H" ought to be at a higher address however the
  // oopMaps seems to have the "H" regs at the same address and the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}
466 #endif // CPU_X86_FRAME_X86_INLINE_HPP