/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_FRAME_X86_INLINE_HPP
#define CPU_X86_FRAME_X86_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "code/codeCache.inline.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/registerMap.hpp"

// Inline functions for Intel frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
  _oop_map = NULL;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)
}

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  _oop_map = NULL;
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc); // not fast because this constructor can be used on native frames
  setup(pc);
}

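// Determine the frame's deoptimization state: if the on-stack pc has been
// patched to the compiled method's deopt handler, recover the original pc
// from the CompiledMethod and mark the frame as deoptimized; frames currently
// executing in the deopt blob itself are likewise treated as deoptimized.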
inline void frame::setup(address pc) {
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
    assert(_cb == NULL || _cb->as_compiled_method()->insts_contains_inclusive(_pc),
           "original PC must be in the main code section of the compiled method (or must be immediately following it)");
  } else {
    if (_cb == SharedRuntime::deopt_blob()) {
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = cb;
  _oop_map = NULL;
  assert(_cb != NULL, "pc: " INTPTR_FORMAT, p2i(pc));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb,
                    const ImmutableOopMap* oop_map, bool on_heap) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  _cb = cb;
  _oop_map = oop_map;
  _deopt_state = not_deoptimized;
  _on_heap = on_heap;
  DEBUG_ONLY(_frame_index = -1;)

  // In thaw, non-heap frames use this constructor to pass oop_map.  I don't know why.
  assert(_on_heap || _cb != nullptr, "these frames are always heap frames");
  if (cb != NULL) {
    setup(pc);
  }
#ifdef ASSERT
  // The following assertion has been disabled because it would sometimes trap for Continuation.run,
  // which is not *in* a continuation and therefore does not clear the _cont_fastpath flag, but this
  // is benign even in fast mode (see Freeze::setup_jump)
  // We might freeze deoptimized frame in slow mode
  // assert(_pc == pc && _deopt_state == not_deoptimized, "");
#endif
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob_fast(pc);
  _oop_map = NULL;
  assert(_cb != NULL, "pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT " fp: " INTPTR_FORMAT, p2i(pc), p2i(sp), p2i(unextended_sp), p2i(fp));
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  setup(pc);
}

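// Reconstructs a frame from its sp alone, assuming the standard x86 layout:
// the word just below sp holds the frame's pc (the return address pushed by
// its own call), and the word at sp - sender_sp_offset holds its rbp as saved
// by the callee's prologue.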
inline frame::frame(intptr_t* sp) : frame(sp, sp, *(intptr_t**)(sp - frame::sender_sp_offset), *(address*)(sp - 1)) {}

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _on_heap = false;
  DEBUG_ONLY(_frame_index = -1;)

  // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
  // when last_Java_sp is non-null but the pc fetched is junk.
  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
  // -> pd_last_frame should use a specialized version of pd_last_frame which could
  // call a specialized frame constructor instead of this one.
  // Then we could use the assert below. However this assert is of somewhat dubious
  // value.
  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
  // assert(_pc != NULL, "no pc?");

  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
  _oop_map = NULL;
}

// Accessors

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return a unique id for this frame. The id must let us distinguish both frame
// identity and the younger/older relationship between frames. NULL represents
// an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Return true if this frame is older (a less recent activation) than the frame
// represented by id. The stack grows toward lower addresses, so an older frame
// has a numerically larger id.
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }

inline intptr_t* frame::link() const              { return *(intptr_t **)addr_at(link_offset); }

inline intptr_t* frame::link_or_null() const {
  intptr_t** ptr = (intptr_t **)addr_at(link_offset);
  return os::is_readable_pointer(ptr) ? *ptr : NULL;
}

inline intptr_t* frame::unextended_sp() const          { assert_absolute(); return _unextended_sp; }
inline void frame::set_unextended_sp(intptr_t* value)  { _unextended_sp = value; }
inline int  frame::offset_unextended_sp() const        { assert_offset();   return _offset_unextended_sp; }
inline void frame::set_offset_unextended_sp(int value) { assert_on_heap();  _offset_unextended_sp = value; }

inline intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

inline int frame::frame_size() const {
  return is_interpreted_frame()
    ? sender_sp() - sp()
    : cb()->frame_size();
}

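// num_stack_arg_slots() counts 32-bit VMReg stack slots, so multiplying by
// VMRegImpl::stack_slot_size yields bytes, and the shift by LogBytesPerWord
// converts that byte count to machine words.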
inline int frame::compiled_frame_stack_argsize() const {
  assert(cb()->is_compiled(), "");
  return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

inline void frame::interpreted_frame_oop_map(InterpreterOopMap* mask) const {
  assert(mask != NULL, "");
  Method* m = interpreter_frame_method();
  int   bci = interpreter_frame_bci();
  m->mask_for(bci, mask); // OopMapCache::compute_one_oop_map(m, bci, mask);
}

// Return address:

inline address* frame::sender_pc_addr()      const { return (address*) addr_at(return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }

inline intptr_t* frame::sender_sp()          const { return            addr_at(sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

inline intptr_t* frame::interpreter_frame_last_sp() const {
  return (intptr_t*)at(interpreter_frame_last_sp_offset);
}

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}



// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

// Mirror

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter.  At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

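// The expression stack lies just below the monitor area and grows toward lower
// addresses, so its first slot is the word immediately below monitor_end.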
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}

// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}

// Compiled frames

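// rax is the oop-result register on x86. These helpers read and update the
// saved copy of rax at the location the RegisterMap recorded while walking the
// stack (e.g. so a moved oop result can be fixed up across a safepoint).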
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");
  return *result_adr;
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
  guarantee(result_adr != NULL, "bad register save location");

  *result_adr = obj;
}

inline bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

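// Distance, in words, from the return-address slot up to the sender's sp
// (sender_sp_offset - return_addr_offset): on x86 the return address is the
// word immediately below the sender's sp.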
inline int frame::sender_sp_ret_address_offset() {
  return frame::sender_sp_offset - frame::return_addr_offset;
}

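// Fast path: a NativePostCallNop at the return pc (if present and populated)
// caches the oop-map slot index in the high byte of its displacement, letting
// us index the blob's oop maps directly; otherwise fall back to the pc-based
// search in OopMapSet::find_map().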
inline const ImmutableOopMap* frame::get_oop_map() const {
  if (_cb == NULL) return NULL;
  if (_cb->oop_maps() != NULL) {
    NativePostCallNop* nop = nativePostCallNop_at(_pc);
    if (nop != NULL && nop->displacement() != 0) {
      int slot = ((nop->displacement() >> 24) & 0xff);
      return _cb->oop_map_for_slot(slot, _pc);
    }
    const ImmutableOopMap* oop_map = OopMapSet::find_map(this);
    return oop_map;
  }
  return NULL;
}

//------------------------------------------------------------------------------
// frame::sender

inline frame frame::sender(RegisterMap* map) const {
  frame result = sender_raw(map);

  if (map->process_frames() && !map->in_cont()) {
    StackWatermarkSet::on_iteration(map->thread(), result);
  }

  return result;
}

inline frame frame::sender_raw(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (map->in_cont()) { // already in an h-stack
    return map->stack_chunk()->sender(*this, map);
  }

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_upcall_stub_frame()) return sender_for_upcall_stub_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != NULL) return sender_for_compiled_frame(map);

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}

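// Stack layout at a compiled frame's upper boundary (the stack grows down),
// where sender_sp == unextended_sp() + _cb->frame_size():
//   sender_sp      : first word belonging to the sender's frame
//   sender_sp - 1  : return address into the sender (sender_pc)
//   sender_sp - 2  : rbp saved by this frame's prologue (a real frame pointer
//                    only if the sender is interpreted, or possibly C1)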
inline frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() > 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  assert(sender_sp == real_fp(), "");

  // On Intel the return address is always the word on the stack just below the sender's sp.
  address sender_pc = (address) *(sender_sp-1);

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  // saved_fp_addr should be correct even for a bottom thawed frame (with a return barrier)
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    if (!_cb->is_compiled()) { // compiled frames do not use callee-saved registers
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (oop_map() != NULL) {
        _oop_map->update_register_map(this, map);
      }
    } else {
      assert(!_cb->caller_must_gc_arguments(map->thread()), "");
      assert(!map->include_argument_oops(), "");
      assert(oop_map() == NULL || !oop_map()->has_any(OopMapValue::callee_saved_value), "callee-saved value in compiled frame");
    }

    // Since the prolog does the save and restore of EBP, there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry,
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");

  if (Continuation::is_return_barrier_entry(sender_pc)) {
    if (map->walk_cont()) { // about to walk into an h-stack
      return Continuation::top_frame(*this, map);
    } else {
      return Continuation::continuation_bottom_sender(map->thread(), *this, sender_sp);
    }
  }

  intptr_t* unextended_sp = sender_sp;
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

template <typename RegisterMapT>
void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is so that if
  // EBP/RBP was live on callout from c2 we can find the saved copy no
  // matter what it called.

  // Since the interpreter always saves EBP/RBP, recording where it is means
  // we don't have to save EBP/RBP on every entry to and exit from c2 compiled
  // code; saving it on entry is enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // This is odd: the "H" half ought to live at a higher address, but the
  // oopMaps seem to record the "H" regs at the same address as the vanilla
  // register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}

#endif // CPU_X86_FRAME_X86_INLINE_HPP