1 /*
  2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_FRAME_X86_INLINE_HPP
 26 #define CPU_X86_FRAME_X86_INLINE_HPP
 27 
 28 #include "code/codeBlob.inline.hpp"
 29 #include "code/codeCache.inline.hpp"
 30 #include "code/vmreg.inline.hpp"
 31 #include "compiler/oopMap.inline.hpp"
 32 #include "interpreter/interpreter.hpp"
 33 #include "runtime/sharedRuntime.hpp"
 34 #include "runtime/registerMap.hpp"
 35 #ifdef COMPILER1
 36 #include "c1/c1_Runtime1.hpp"
 37 #endif
 38 
 39 // Inline functions for Intel frames:
 40 
 41 // Constructors:
 42 
 43 inline frame::frame() {
 44   _pc = nullptr;
 45   _sp = nullptr;
 46   _unextended_sp = nullptr;
 47   _fp = nullptr;
 48   _cb = nullptr;
 49   _deopt_state = unknown;
 50   _oop_map = nullptr;
 51   _on_heap = false;
 52   DEBUG_ONLY(_frame_index = -1;)
 53 }
 54 
// Shared initialization for the sp/fp/pc constructors: record the raw register
// values, look up the owning CodeBlob, and let setup() classify the frame.
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp; // no extension known yet; setup() may adjust it
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
  setup(pc); // must run after _cb is assigned; setup() inspects the blob
}
 68 
 69 inline void frame::setup(address pc) {
 70   adjust_unextended_sp();
 71 
 72   address original_pc = get_deopt_original_pc();
 73   if (original_pc != nullptr) {
 74     _pc = original_pc;
 75     _deopt_state = is_deoptimized;
 76     assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
 77            "original PC must be in the main code section of the compiled method (or must be immediately following it)");
 78   } else {
 79     if (_cb == SharedRuntime::deopt_blob()) {
 80       _deopt_state = is_deoptimized;
 81     } else {
 82       _deopt_state = not_deoptimized;
 83     }
 84   }
 85 }
 86 
// Construct a frame from raw sp/fp/pc; delegates to init(), which also
// resolves the owning CodeBlob.
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}
 90 
// Construct a frame whose CodeBlob has already been looked up by the caller
// (avoids a code-cache lookup); cb must be non-null.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc); // derives _deopt_state and may adjust _pc/_unextended_sp
}
105 
// Construct a (possibly heap-resident) frame with a pre-resolved oop map;
// used on continuation freeze/thaw paths, where cb may be null for heap frames.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized; // may be overwritten by setup() below
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometime trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump)
  // We might freeze deoptimized frame in slow mode
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}
131 
// Construct from raw values using the fast CodeBlob lookup; the pc must
// therefore belong to a blob already known to the code cache (asserted below).
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}
146 
147 inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}
148 
// Construct from sp and fp only; the pc is read from the stack at sp[-1]
// and may therefore be junk (see the note below).
inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  // Same deopt classification as setup(), but without setup()'s nmethod
  // assert, since the pc here is not guaranteed to be valid.
  address original_pc = get_deopt_original_pc();
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = nullptr;
}
179 
180 // Accessors
181 
182 inline bool frame::equal(frame other) const {
183   bool ret =  sp() == other.sp()
184               && unextended_sp() == other.unextended_sp()
185               && fp() == other.fp()
186               && pc() == other.pc();
187   assert(!ret || (ret && cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
188   return ret;
189 }
190 
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame. Ids are compared by address in is_older() below.
inline intptr_t* frame::id(void) const { return unextended_sp(); }
195 
196 // Return true if the frame is older (less recent activation) than the frame represented by id
197 inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != nullptr && id != nullptr, "null frame id");
198                                                     return this->id() > id ; }
199 
200 inline intptr_t* frame::link() const              { return *(intptr_t **)addr_at(link_offset); }
201 
202 inline intptr_t* frame::link_or_null() const {
203   intptr_t** ptr = (intptr_t **)addr_at(link_offset);
204   return os::is_readable_pointer(ptr) ? *ptr : nullptr;
205 }
206 
// The unextended sp has two representations: an absolute pointer
// (_unextended_sp) or an int offset (_offset_unextended_sp) for frames
// stored on the heap; the assert_* calls check which one is active.
inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }
211 
212 inline intptr_t* frame::real_fp() const {
213   if (_cb != nullptr) {
214     // use the frame size if valid
215     int size = _cb->frame_size();
216     if (size > 0) {
217       return unextended_sp() + size;
218     }
219   }
220   // else rely on fp()
221   assert(! is_compiled_frame(), "unknown compiled frame size");
222   return fp();
223 }
224 
225 inline int frame::frame_size() const {
226   return is_interpreted_frame()
227     ? pointer_delta_as_int(sender_sp(), sp())
228     : cb()->frame_size();
229 }
230 
// Size in words of the stack-passed arguments of this compiled frame:
// slots * slot-size gives bytes, then shift down to words.
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_nmethod(), "");
  return (cb()->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}
235 
// Fill in mask with the interpreter oop map (which locals/expression-stack
// slots hold oops) for this frame's method at its current bci.
inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != nullptr, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}
242 
243 // Return address:
244 
// Location and value of the return address pushed by this frame's caller.
inline address* frame::sender_pc_addr()      const { return (address*) addr_at(return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

// Stack pointer of the sender frame (fp-relative; see sender_sp_offset).
inline intptr_t* frame::sender_sp()          const { return            addr_at(sender_sp_offset); }
249 
inline intptr_t* frame::interpreter_frame_locals() const {
  // The locals slot stores an fp-relative word offset, not an absolute address.
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  // Also stored as an fp-relative (non-positive) offset; zero means unset.
  intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
  assert(n <= 0, "n: " INTPTR_FORMAT, n);
  return n != 0 ? &fp()[n] : nullptr;
}
260 
// Address of the saved bytecode pointer slot in this interpreter frame.
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

// Address of the saved method data pointer slot in this interpreter frame.
inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}
268 
269 
270 
// Constant pool cache

// Address of the ConstantPoolCache slot in this interpreter frame.
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

// Address of the Method* slot in this interpreter frame.
inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

// Address of the method-holder mirror oop slot in this interpreter frame.
inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}
288 
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == nullptr) {
    // No recorded last_sp: sp itself is the top of the expression stack.
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}
302 
// Address of the interpreter frame's oop temp slot (fp-relative).
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}
306 
// Size of one monitor (BasicObjectLock) as stored in an interpreter frame.
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}
310 
311 
// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

// Base of the expression stack: the word immediately below the monitor block.
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}
319 
// Entry frames

// Address of the JavaCallWrapper* slot in a call-stub (entry) frame.
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
 return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
325 
// Compiled frames

// Read the oop result saved in this frame's rax slot, located via the map.
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");
  return *result_adr;
}
333 
// Store obj into this frame's saved rax slot, located via the map
// (counterpart of saved_oop_result above).
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != nullptr, "bad register save location");

  *result_adr = obj;
}
340 
// A frame is interpreted iff its pc lies within the interpreter's code.
inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}
344 
// Offset (in words) from the sender sp down to the return address slot.
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}
348 
349 //------------------------------------------------------------------------------
350 // frame::sender
351 
352 inline frame frame::sender(RegisterMap* map) const {
353   frame result = sender_raw(map);
354 
355   if (map->process_frames() && !map->in_cont()) {
356     StackWatermarkSet::on_iteration(map->thread(), result);
357   }
358 
359   return result;
360 }
361 
// Compute the sender frame without stack-watermark processing, dispatching
// on the kind of this frame.
inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
382 
// Compute the sender of a compiled frame. The sender sp is derived from the
// blob's fixed frame size; the return pc and saved fp are then read from the
// stack just below it. Also updates the register map for GC when requested.
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

#ifdef ASSERT
  // Snapshot the return pc before repair_sender_sp so we can detect below
  // whether the two locations disagree.
  address sender_pc_copy = (address) *(sender_sp-1);
#endif

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier)
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);

  // On Intel the return_address is always the word on the stack
  address sender_pc = (address) *(sender_sp-1);

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != nullptr && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
#ifdef ASSERT
      NativeCall* call = nativeCall_before(pc());
      address dest = call->destination();
      assert(dest == Runtime1::entry_for(Runtime1::buffer_inline_args_no_receiver_id) ||
             dest == Runtime1::entry_for(Runtime1::buffer_inline_args_id), "unexpected safepoint in entry point");
#endif
    }
#endif
    if (!_cb->is_nmethod() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

  // A return-barrier pc means the caller physically lives on a continuation
  // stack chunk rather than on this thread stack.
  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
468 
// Record in the register map where the caller's EBP/RBP value is saved,
// so that a stack walker can recover RBP-held JVM state of compiled callers.
template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is
  // so that if EBP/RBP was live on callout from c2 we can find
  // the saved copy no matter what it called.

  // Since the interpreter always saves EBP/RBP if we record where it is then
  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
  // code, on entry will be enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // this is weird "H" ought to be at a higher address however the
  // oopMaps seems to have the "H" regs at the same address and the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}
490 #endif // CPU_X86_FRAME_X86_INLINE_HPP