1 /*
  2  * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_FRAME_X86_INLINE_HPP
 26 #define CPU_X86_FRAME_X86_INLINE_HPP
 27 
 28 #include "code/codeBlob.inline.hpp"
 29 #include "code/codeCache.inline.hpp"
 30 #include "code/vmreg.inline.hpp"
 31 #include "compiler/oopMap.inline.hpp"
 32 #include "interpreter/interpreter.hpp"
 33 #include "runtime/sharedRuntime.hpp"
 34 #include "runtime/registerMap.hpp"
 35 #ifdef COMPILER1
 36 #include "c1/c1_Runtime1.hpp"
 37 #endif
 38 
 39 // Inline functions for Intel frames:
 40 
 41 // Constructors:
 42 
// Default constructor: produces an invalid frame with all fields cleared
// (_deopt_state == unknown), so callers can detect the uninitialized state.
inline frame::frame() {
  _pc = nullptr;
  _sp = nullptr;
  _unextended_sp = nullptr;
  _fp = nullptr;
  _cb = nullptr;
  _deopt_state = unknown;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}
 54 
// Common initialization for the (sp, fp, pc) constructor: stores the raw
// register values, resolves the code blob for pc (slow lookup, since this
// may be a native frame), and completes deopt-state setup via setup().
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = nullptr;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
  setup(pc);
}
 68 
// Finishes frame construction: adjusts _unextended_sp and determines the
// deopt state. If the pc is a deopt trampoline pc, the original pc saved
// at deoptimization time is restored into _pc.
inline void frame::setup(address pc) {
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != nullptr) {
    // Frame was deoptimized; use the pc the method had before patching.
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == nullptr || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    // A frame whose pc is inside the deopt blob is also considered deoptimized.
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
}
 86 
// Construct from raw sp/fp/pc; the code blob is looked up from pc (see init).
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}
 90 
// Construct with a caller-supplied code blob, avoiding the CodeCache lookup.
// The blob must be non-null and correspond to pc.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = cb;
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}
105 
// Construct with an explicit oop map; used for frames in (or being thawed
// from) a continuation's heap-allocated stack chunk.
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != nullptr) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometime trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump)
  // We might freeze deoptimized frame in slow mode
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}
131 
// Construct from sp/unextended_sp/fp/pc using the fast CodeCache lookup;
// pc must belong to a code blob (asserted below).
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != nullptr, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = nullptr;
  assert(_cb != nullptr, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}
146 
// Construct from sp alone: the saved fp and return pc are read from the
// stack words just below sp (standard x86 frame linkage).
inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}
148 
// Construct from sp/fp, reading the pc from the stack word below sp.
// Tolerates a junk pc (no assert) for historical AsyncGetCallTrace reasons;
// see the comment below.
inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != nullptr, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  // Inline deopt handling (same logic as setup(), minus the deopt-blob check).
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != nullptr) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = nullptr;
}
179 
180 // Accessors
181 
182 inline bool frame::equal(frame other) const {
183   bool ret =  sp() == other.sp()
184               && unextended_sp() == other.unextended_sp()
185               && fp() == other.fp()
186               && pc() == other.pc();
187   assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
188   return ret;
189 }
190 
// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. null represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if the frame is older (less recent activation) than the frame represented by id.
// On x86 the stack grows down, so an older activation has a numerically greater sp.
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != nullptr && id != nullptr, "null frame id");
                                                    return this->id() > id ; }
199 
// Saved frame pointer of the sender (stored at link_offset in this frame).
inline intptr_t* frame::link() const              { return *(intptr_t **)addr_at(link_offset); }
201 
202 inline intptr_t* frame::link_or_null() const {
203   intptr_t** ptr = (intptr_t **)addr_at(link_offset);
204   return os::is_readable_pointer(ptr) ? *ptr : nullptr;
205 }
206 
// _unextended_sp may differ from _sp when an adapter extended the frame.
// Absolute pointer accessors are valid only for frames on a thread stack;
// the offset form is used for relativized frames on the heap (stack chunks).
inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }
211 
212 inline intptr_t* frame::real_fp() const {
213   if (_cb != nullptr) {
214     // use the frame size if valid
215     int size = _cb->frame_size();
216     if (size > 0) {
217       return unextended_sp() + size;
218     }
219   }
220   // else rely on fp()
221   assert(! is_compiled_frame(), "unknown compiled frame size");
222   return fp();
223 }
224 
225 inline int frame::frame_size() const {
226   return is_interpreted_frame()
227     ? pointer_delta_as_int(sender_sp(), sp())
228     : cb()->frame_size();
229 }
230 
231 inline int frame::compiled_frame_stack_argsize() const {
232   assert(cb()->is_compiled(), "");
233   return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
234 }
235 
236 inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
237   assert(mask != nullptr, "");
238   Method* m = interpreter_frame_method();
239   int   bci = interpreter_frame_bci();
240   m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
241 }
242 
// Return address:

// Location of the return address pushed by the call into this frame.
inline address* frame::sender_pc_addr()      const { return (address*) addr_at(return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

// The sender's sp, at a fixed offset from this frame's fp.
inline intptr_t* frame::sender_sp()          const { return            addr_at(sender_sp_offset); }
249 
inline intptr_t* frame::interpreter_frame_locals() const {
  // The locals slot stores an fp-relative offset; convert it back to an
  // absolute address.
  intptr_t n = *addr_at(interpreter_frame_locals_offset);
  return &fp()[n]; // return relativized locals
}
254 
255 inline intptr_t* frame::interpreter_frame_last_sp() const {
256   intptr_t n = *addr_at(interpreter_frame_last_sp_offset);
257   assert(n <= 0, "n: " INTPTR_FORMAT, n);
258   return n != 0 ? &fp()[n] : nullptr;
259 }
260 
// Address of the frame slot holding the saved bytecode pointer (bcp).
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

// Address of the frame slot holding the saved method data pointer (mdp).
inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}
268 
269 
270 
// Constant pool cache

// Address of the frame slot holding the current method's ConstantPoolCache.
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

// Address of the frame slot holding the current Method*.
inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

// Address of the frame slot holding the method holder's class mirror oop.
inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}
288 
289 // top of expression stack
290 inline intptr_t* frame::interpreter_frame_tos_address() const {
291   intptr_t* last_sp = interpreter_frame_last_sp();
292   if (last_sp == nullptr) {
293     return sp();
294   } else {
295     // sp() may have been extended or shrunk by an adapter.  At least
296     // check that we don't fall behind the legal region.
297     // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
298     assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
299     return last_sp;
300   }
301 }
302 
// Scratch slot the interpreter uses to hold an oop temporarily.
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

// Size of one monitor element (BasicObjectLock) in the frame's monitor area.
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}
310 
311 
312 // expression stack
313 // (the max_stack arguments are used by the GC; see class FrameClosure)
314 
315 inline intptr_t* frame::interpreter_frame_expression_stack() const {
316   intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
317   return monitor_end-1;
318 }
319 
// Entry frames

// Address of the frame slot holding the JavaCallWrapper for this entry frame.
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
 return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
325 
326 // Compiled frames
327 
328 inline oop frame::saved_oop_result(RegisterMap* map) const {
329   oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
330   guarantee(result_adr != nullptr, "bad register save location");
331   return *result_adr;
332 }
333 
334 inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
335   oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
336   guarantee(result_adr != nullptr, "bad register save location");
337 
338   *result_adr = obj;
339 }
340 
// A frame is interpreted iff its pc lies within the interpreter's code.
inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

// Offset from the sender sp back to the return address slot.
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}
348 
// Find the oop map covering this frame's pc, or null when the blob has none.
inline const ImmutableOopMap* frame::get_oop_map() const {
  if (_cb == nullptr) return nullptr;
  if (_cb->oop_maps() != nullptr) {
    // Fast path: a post-call nop can cache the oop-map slot index in the
    // upper byte of its displacement, avoiding a search by pc.
    NativePostCallNop* nop = nativePostCallNop_at(_pc);
    if (nop != nullptr && nop->displacement() != 0) {
      int slot = ((nop->displacement() >> 24) & 0xff);
      return _cb->oop_map_for_slot(slot, _pc);
    }
    // Slow path: search the blob's oop map set by pc.
    const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
    return oop_map;
  }
  return nullptr;
}
362 
//------------------------------------------------------------------------------
// frame::sender

// Public sender walk: delegates to sender_raw() and additionally notifies
// the stack watermark machinery when iterating thread-stack frames (not
// needed while walking a continuation's heap stack).
inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}
375 
// Compute the sender frame, dispatching on the kind of this frame.
inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow argument oops. The sender_for_xxx
  // routines will update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != nullptr) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
396 
// Sender computation for compiled frames: the sender sp is derived from the
// blob's fixed frame size, the return pc is read from the stack, and the
// register map is updated for GC if requested.
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != nullptr, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

#ifdef ASSERT
  address sender_pc_copy = (address) *(sender_sp-1);
#endif

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier)
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  // Repair the sender sp if the frame has been extended
  sender_sp = repair_sender_sp(sender_sp, saved_fp_addr);

  // On Intel the return_address is always the word on the stack
  address sender_pc = (address) *(sender_sp-1);

#ifdef ASSERT
  if (sender_pc != sender_pc_copy) {
    // When extending the stack in the callee method entry to make room for unpacking of value
    // type args, we keep a copy of the sender pc at the expected location in the callee frame.
    // If the sender pc is patched due to deoptimization, the copy is not consistent anymore.
    nmethod* nm = CodeCache::find_blob(sender_pc)->as_nmethod();
    assert(sender_pc == nm->deopt_mh_handler_begin() || sender_pc == nm->deopt_handler_begin(), "unexpected sender pc");
  }
#endif

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    bool c1_buffering = false;
#ifdef COMPILER1
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != nullptr && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
        pc() < nm->verified_inline_entry_point()) {
      // The VEP and VIEP(RO) of C1-compiled methods call buffer_inline_args_xxx
      // before doing any argument shuffling, so we need to scan the oops
      // as the caller passes them.
      c1_buffering = true;
#ifdef ASSERT
      NativeCall* call = nativeCall_before(pc());
      address dest = call->destination();
      assert(dest == Runtime1::entry_for(Runtime1::buffer_inline_args_no_receiver_id) ||
             dest == Runtime1::entry_for(Runtime1::buffer_inline_args_id), "unexpected safepoint in entry point");
#endif
    }
#endif
    if (!_cb->is_compiled() || c1_buffering) { // compiled frames do not use callee-saved registers
      bool caller_args = _cb->caller_must_gc_arguments(map->thread()) || c1_buffering;
      map->set_include_argument_oops(caller_args);
      if (oop_map() != nullptr) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == nullptr || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

  // A return-barrier pc means the actual sender frames live on a
  // continuation's heap stack.
  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
482 
483 template <typename RegisterMapT>
484 void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
485   // The interpreter and compiler(s) always save EBP/RBP in a known
486   // location on entry. We must record where that location is
487   // so this if EBP/RBP was live on callout from c2 we can find
488   // the saved copy no matter what it called.
489 
490   // Since the interpreter always saves EBP/RBP if we record where it is then
491   // we don't have to always save EBP/RBP on entry and exit to c2 compiled
492   // code, on entry will be enough.
493   map->set_location(rbp->as_VMReg(), (address) link_addr);
494 #ifdef AMD64
495   // this is weird "H" ought to be at a higher address however the
496   // oopMaps seems to have the "H" regs at the same address and the
497   // vanilla register.
498   // XXXX make this go away
499   if (true) {
500     map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
501   }
502 #endif // AMD64
503 }
504 #endif // CPU_X86_FRAME_X86_INLINE_HPP