1 /*
  2  * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_INSTANCESTACKCHUNKKLASS_X86_INLINE_HPP
 26 #define CPU_X86_INSTANCESTACKCHUNKKLASS_X86_INLINE_HPP
 27 
 28 #include "interpreter/oopMapCache.hpp"
 29 #include "runtime/frame.inline.hpp"
 30 #include "runtime/registerMap.hpp"
 31 
 32 int InstanceStackChunkKlass::metadata_words() { return frame::sender_sp_offset; }
 33 int InstanceStackChunkKlass::align_wiggle()   { return 1; }
 34 
 35 #ifdef ASSERT
 36 template <bool mixed>
 37 inline bool StackChunkFrameStream<mixed>::is_in_frame(void* p0) const {
 38   assert (!is_done(), "");
 39   intptr_t* p = (intptr_t*)p0;
 40   int argsize = is_compiled() ? (_cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord : 0;
 41   int frame_size = _cb->frame_size() + argsize;
 42   return p == sp() - frame::sender_sp_offset || ((p - unextended_sp()) >= 0 && (p - unextended_sp()) < frame_size);
 43 }
 44 #endif
 45 
 46 template <bool mixed>
 47 inline frame StackChunkFrameStream<mixed>::to_frame() const {
 48   if (is_done()) return frame(_sp, _sp, nullptr, nullptr, nullptr, nullptr, true);
 49   return mixed && !is_interpreted() ? frame(sp(), unextended_sp(), fp(), pc(), cb(), _oopmap) // we might freeze deoptimized frame in slow mode
 50                                     : frame(sp(), unextended_sp(), fp(), pc(), cb(), _oopmap, true);
 51 }
 52 
 53 template <bool mixed>
 54 inline address StackChunkFrameStream<mixed>::get_pc() const {
 55   assert (!is_done(), "");
 56   return *(address*)(_sp - 1);
 57 }
 58 
 59 template <bool mixed>
 60 inline intptr_t* StackChunkFrameStream<mixed>::fp() const {
 61   intptr_t* fp_addr = _sp - frame::sender_sp_offset;
 62   return (mixed && is_interpreted()) ? fp_addr + *fp_addr // derelativize
 63                                      : *(intptr_t**)fp_addr;
 64 }
 65 
 66 template <bool mixed>
 67 inline intptr_t* StackChunkFrameStream<mixed>::derelativize(int offset) const {
 68   intptr_t* fp = this->fp();
 69   assert (fp != nullptr, "");
 70   return fp + fp[offset];
 71 }
 72 
 73 template <bool mixed>
 74 inline intptr_t* StackChunkFrameStream<mixed>::unextended_sp_for_interpreter_frame() const {
 75   assert (mixed && is_interpreted(), "");
 76   return derelativize(frame::interpreter_frame_last_sp_offset);
 77 }
 78 
 79 // template <bool mixed>
 80 // inline intptr_t* StackChunkFrameStream<mixed>::unextended_sp_for_interpreter_frame_caller() const {
 81 //   assert (mixed, "");
 82 //   intptr_t* callee_fp = sp() - frame::sender_sp_offset;
 83 //   intptr_t* unextended_sp = callee_fp + callee_fp[frame::interpreter_frame_sender_sp_offset];
 84 //   assert (unextended_sp > callee_fp && unextended_sp >= sp(), "callee_fp: %p (%d) offset: %ld", callee_fp, _chunk->to_offset(callee_fp), callee_fp[frame::interpreter_frame_sender_sp_offset]);
 85 //   return unextended_sp;
 86 // }
 87 
 88 template <bool mixed>
 89 intptr_t* StackChunkFrameStream<mixed>::next_sp_for_interpreter_frame() const {
 90   assert (mixed && is_interpreted(), "");
 91   return (derelativize(frame::interpreter_frame_locals_offset) + 1 >= _end) ? _end : fp() + frame::sender_sp_offset;
 92 }
 93 
 94 template <bool mixed>
 95 inline void StackChunkFrameStream<mixed>::next_for_interpreter_frame() {
 96   assert (mixed && is_interpreted(), "");
 97   if (derelativize(frame::interpreter_frame_locals_offset) + 1 >= _end) {
 98     _unextended_sp = _end;
 99     _sp = _end;
100   } else {
101     intptr_t* fp = this->fp();
102     _unextended_sp = fp + fp[frame::interpreter_frame_sender_sp_offset];
103     _sp = fp + frame::sender_sp_offset;
104   }
105 }
106 
107 template <bool mixed>
108 inline int StackChunkFrameStream<mixed>::interpreter_frame_size() const {
109   assert (mixed && is_interpreted(), "");
110   // InterpreterOopMap mask;
111   // to_frame().interpreted_frame_oop_map(&mask);
112   // intptr_t* top = derelativize(frame::interpreter_frame_initial_sp_offset) - mask.expression_stack_size();
113   
114   intptr_t* top = unextended_sp(); // later subtract argsize if callee is interpreted
115   intptr_t* bottom = derelativize(frame::interpreter_frame_locals_offset) + 1; // the sender's unextended sp: derelativize(frame::interpreter_frame_sender_sp_offset); 
116 
117   // tty->print_cr(">>>> StackChunkFrameStream<mixed>::interpreter_frame_size bottom: %d top: %d size: %d", _chunk->to_offset(bottom - 1) + 1, _chunk->to_offset(top), (int)(bottom - top));
118   return (int)(bottom - top);
119 }
120 
121 template <bool mixed>
122 inline int StackChunkFrameStream<mixed>::interpreter_frame_stack_argsize() const {
123   assert (mixed && is_interpreted(), "");
124   int diff = (int)(derelativize(frame::interpreter_frame_locals_offset) - derelativize(frame::interpreter_frame_sender_sp_offset) + 1);
125   // tty->print_cr(">>>> Interpreted::stack_argsize: %ld -- %ld relative: %d", f.at(frame::interpreter_frame_locals_offset), f.at(frame::interpreter_frame_sender_sp_offset), relative);
126   return diff;
127 }
128 
129 template <bool mixed>
130 inline int StackChunkFrameStream<mixed>::interpreter_frame_num_oops() const {
131   assert (mixed && is_interpreted(), "");
132   InterpreterOopMap mask;
133   frame f = to_frame();
134   f.interpreted_frame_oop_map(&mask);
135   return  mask.num_oops()
136         + 1 // for the mirror oop
137         + ((intptr_t*)f.interpreter_frame_monitor_begin() - (intptr_t*)f.interpreter_frame_monitor_end<true>())/BasicObjectLock::size();
138 }
139 
140 inline void stackChunkOopDesc::relativize_frame_pd(frame& fr) const {
141   if (fr.is_interpreted_frame()) fr.set_offset_fp(relativize_address(fr.fp()));
142 }
143 
144 inline void stackChunkOopDesc::derelativize_frame_pd(frame& fr) const {
145   if (fr.is_interpreted_frame()) fr.set_fp(derelativize_address(fr.offset_fp()));
146 }
147 
148 template<>
149 template<>
150 inline void StackChunkFrameStream<true>::update_reg_map_pd(RegisterMap* map) {
151   if (map->update_map()) {
152     frame::update_map_with_saved_link(map, map->in_cont() ? (intptr_t**)(intptr_t)frame::sender_sp_offset : (intptr_t**)(_sp - frame::sender_sp_offset));
153   }
154 }
155 
156 template<>
157 template<>
158 inline void StackChunkFrameStream<false>::update_reg_map_pd(RegisterMap* map) {
159   if (map->update_map()) {
160     frame::update_map_with_saved_link(map, map->in_cont() ? (intptr_t**)(intptr_t)frame::sender_sp_offset : (intptr_t**)(_sp - frame::sender_sp_offset));
161   }
162 }
163 
// Generic map types (e.g. SmallRegisterMap) need no update: only the
// RegisterMap specializations above record the saved rbp link.
template <bool mixed>
template <typename RegisterMapT>
inline void StackChunkFrameStream<mixed>::update_reg_map_pd(RegisterMapT* map) {}
167 
// Java frames don't have callee saved registers (except for rbp), so we can use a smaller RegisterMap
// when walking the frames of a stack chunk. It answers the minimal RegisterMap-like
// protocol the stack walker needs, with everything hard-wired to the x86 frame layout.
class SmallRegisterMap {
public:
  // Stateless "null object" singleton; callers pass this where a map pointer is expected.
  static constexpr SmallRegisterMap* instance = nullptr;
private:
  // Debug-only check that the only register ever queried/set is rbp (or its second half).
  // NOT_DEBUG_RETURN supplies an empty body in product; DEBUG_ONLY supplies the asserting body in debug.
  static void assert_is_rbp(VMReg r) NOT_DEBUG_RETURN
                                     DEBUG_ONLY({ assert (r == rbp->as_VMReg() || r == rbp->as_VMReg()->next(), "Reg: %s", r->name()); })
public:
  // as_RegisterMap is used when we didn't want to templatize and abstract over RegisterMap type to support SmallRegisterMap
  // Consider enhancing SmallRegisterMap to support those cases
  const RegisterMap* as_RegisterMap() const { return nullptr; }
  RegisterMap* as_RegisterMap() { return nullptr; }

  // Populate a full RegisterMap with the single fact this map encodes:
  // rbp is saved at the fixed link slot below the given sp.
  RegisterMap* copy_to_RegisterMap(RegisterMap* map, intptr_t* sp) const {
    map->clear();
    map->set_include_argument_oops(this->include_argument_oops());
    frame::update_map_with_saved_link(map, (intptr_t**)sp - frame::sender_sp_offset);
    return map;
  }
  
  SmallRegisterMap() {}

  // Debug-only: verify a RegisterMap we stand in for holds no register but rbp.
  SmallRegisterMap(const RegisterMap* map) {
  #ifdef ASSERT
    for(int i = 0; i < RegisterMap::reg_count; i++) {
      VMReg r = VMRegImpl::as_VMReg(i);
      if (map->location(r, (intptr_t*)nullptr) != nullptr) assert_is_rbp(r);
    }
  #endif
  }

  // rbp's saved location is implied by the frame layout (fixed slot below sp),
  // so no per-register table is needed.
  inline address location(VMReg reg, intptr_t* sp) const {
    assert_is_rbp(reg);
    return (address)(sp - frame::sender_sp_offset);
  }

  // Nothing to record: the location is implied by the layout. Debug-checks the register only.
  inline void set_location(VMReg reg, address loc) { assert_is_rbp(reg); }

  // Never meaningful here; product builds fail hard if anyone asks.
  JavaThread* thread() const {
  #ifndef ASSERT
    guarantee (false, ""); 
  #endif
    return nullptr;
  }

  // Fixed answers to the RegisterMap query protocol.
  bool update_map()    const { return false; }
  bool walk_cont()     const { return false; }
  bool include_argument_oops() const { return false; }
  void set_include_argument_oops(bool f)  {}
  bool in_cont()       const { return false; }
  stackChunkHandle stack_chunk() const { return stackChunkHandle(); }

#ifdef ASSERT
  bool should_skip_missing() const  { return false; }
  // Only rbp can ever be spilled, so that is always the answer.
  VMReg find_register_spilled_here(void* p, intptr_t* sp) { return rbp->as_VMReg(); }
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const { st->print_cr("Small register map"); }
#endif
};
227 
228 #endif // CPU_X86_INSTANCESTACKCHUNKKLASS_X86_INLINE_HPP