< prev index next >

src/hotspot/share/runtime/vframe.inline.hpp

Print this page

 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_RUNTIME_VFRAME_INLINE_HPP
 26 #define SHARE_RUNTIME_VFRAME_INLINE_HPP
 27 
 28 #include "runtime/vframe.hpp"
 29 


 30 #include "runtime/frame.inline.hpp"
 31 #include "runtime/thread.inline.hpp"
 32 
 33 inline vframeStreamCommon::vframeStreamCommon(JavaThread* thread, bool process_frames) : _reg_map(thread, false, process_frames) {
 34   _thread = thread;






 35 }
 36 
 37 inline intptr_t* vframeStreamCommon::frame_id() const        { return _frame.id(); }
 38 
 39 inline int vframeStreamCommon::vframe_id() const {
 40   assert(_mode == compiled_mode, "unexpected mode: %d", _mode);
 41   return _vframe_id;
 42 }
 43 
 44 inline int vframeStreamCommon::decode_offset() const {
 45   assert(_mode == compiled_mode, "unexpected mode: %d", _mode);
 46   return _decode_offset;
 47 }
 48 
 49 inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
 50 
 51 inline bool vframeStreamCommon::is_entry_frame() const       { return _frame.is_entry_frame(); }
 52 


 53 inline void vframeStreamCommon::next() {
 54   // handle frames with inlining
 55   if (_mode == compiled_mode    && fill_in_compiled_inlined_sender()) return;
 56 
 57   // handle general case
 58   do {
 59     _prev_frame = _frame;




















 60     _frame = _frame.sender(&_reg_map);




 61   } while (!fill_from_frame());
 62 }
 63 
 64 inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub, bool process_frame)
 65   : vframeStreamCommon(thread, process_frame /* process_frames */) {
 66   _stop_at_java_call_stub = stop_at_java_call_stub;
 67 
 68   if (!thread->has_last_Java_frame()) {
 69     _mode = at_end_mode;
 70     return;
 71   }
 72 
 73   _frame = _thread->last_frame();

 74   while (!fill_from_frame()) {
 75     _prev_frame = _frame;


 76     _frame = _frame.sender(&_reg_map);
 77   }

 78 }
 79 
 80 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
 81   if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
 82     return false;
 83   }
 84   fill_from_compiled_frame(_sender_decode_offset);
 85   ++_vframe_id;
 86   return true;
 87 }
 88 
 89 
// Position the stream on the scope found at decode_offset within the
// current nmethod's debug info: sets compiled_mode, records the offset, and
// decodes sender-offset/method/bci from the scope's DebugInfoReadStream.
// NOTE(review): original lines 98-123 (the body handling an out-of-range
// decode_offset) are elided in this diff view, so the fallback path cannot
// be reviewed here.
 90 inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
 91   _mode = compiled_mode;
 92   _decode_offset = decode_offset;
 93 
 94   // Range check to detect ridiculous offsets.
 95   if (decode_offset == DebugInformationRecorder::serialized_null ||
 96       decode_offset < 0 ||
 97       decode_offset >= nm()->scopes_data_size()) {

// Normal path: decode the scope entry (sender offset, method, bci).
124   DebugInfoReadStream buffer(nm(), decode_offset);
125   _sender_decode_offset = buffer.read_int();
126   _method               = buffer.read_method();
127   _bci                  = buffer.read_bci();
128 
129   assert(_method->is_method(), "checking type of decoded method");
130 }
131 
132 // The native frames are handled specially. We do not rely on ScopeDesc info
133 // since the pc might not be exact due to the _last_native_pc trick.
134 inline void vframeStreamCommon::fill_from_compiled_native_frame() {
135   _mode = compiled_mode;
136   _sender_decode_offset = DebugInformationRecorder::serialized_null;
137   _decode_offset = DebugInformationRecorder::serialized_null;
138   _vframe_id = 0;
139   _method = nm()->method();
140   _bci = 0;
141 }
142 
143 inline bool vframeStreamCommon::fill_from_frame() {





144   // Interpreted frame
145   if (_frame.is_interpreted_frame()) {
146     fill_from_interpreter_frame();
147     return true;
148   }
149 
150   // Compiled frame
151 
152   if (cb() != NULL && cb()->is_compiled()) {

153     if (nm()->is_native_method()) {
154       // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
155       fill_from_compiled_native_frame();
156     } else {
157       PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
158       int decode_offset;
159       if (pc_desc == NULL) {
160         // Should not happen, but let fill_from_compiled_frame handle it.
161 
162         // If we are trying to walk the stack of a thread that is not
163         // at a safepoint (like AsyncGetCallTrace would do) then this is an
164         // acceptable result. [ This is assuming that safe_for_sender
165         // is so bullet proof that we can trust the frames it produced. ]
166         //
167         // So if we see that the thread is not safepoint safe
168         // then simply produce the method and a bci of zero
169         // and skip the possibility of decoding any inlining that
170         // may be present. That is far better than simply stopping (or
171         // asserting. If however the thread is safepoint safe this
172         // is the sign of a compiler bug  and we'll let
173         // fill_from_compiled_frame handle it.
174 
175 
176         JavaThreadState state = _thread->thread_state();
177 
178         // in_Java should be good enough to test safepoint safety
179         // if state were say in_Java_trans then we'd expect that
180         // the pc would have already been slightly adjusted to
181         // one that would produce a pcDesc since the trans state
182         // would be one that might in fact anticipate a safepoint
183 
184         if (state == _thread_in_Java ) {
185           // This will get a method a zero bci and no inlining.
186           // Might be nice to have a unique bci to signify this
187           // particular case but for now zero will do.
188 
189           fill_from_compiled_native_frame();
190 
191           // There is something to be said for setting the mode to
192           // at_end_mode to prevent trying to walk further up the
193           // stack. There is evidence that if we walk any further
194           // that we could produce a bad stack chain. However until
195           // we see evidence that allowing this causes us to find
196           // frames bad enough to cause segv's or assertion failures
197           // we don't do it as while we may get a bad call chain the
198           // probability is much higher (several magnitudes) that we
199           // get good data.
200 
201           return true;
202         }
203         decode_offset = DebugInformationRecorder::serialized_null;
204       } else {
205         decode_offset = pc_desc->scope_decode_offset();
206       }
207       fill_from_compiled_frame(decode_offset);

208       _vframe_id = 0;
209     }
210     return true;
211   }
212 
213   // End of stack?
214   if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
215     _mode = at_end_mode;
216     return true;
217   }
218 
219   return false;
220 }
221 
222 
223 inline void vframeStreamCommon::fill_from_interpreter_frame() {
224   Method* method = _frame.interpreter_frame_method();
225   address   bcp    = _frame.interpreter_frame_bcp();
226   int       bci    = method->validate_bci_from_bcp(bcp);







227   // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
228   // AsyncGetCallTrace interrupts the VM asynchronously. As a result
229   // it is possible to access an interpreter frame for which
230   // no Java-level information is yet available (e.g., becasue
231   // the frame was being created when the VM interrupted it).
232   // In this scenario, pretend that the interpreter is at the point
233   // of entering the method.
234   if (bci < 0) {
235     DEBUG_ONLY(found_bad_method_frame();)
236     bci = 0;
237   }
238   _mode   = interpreted_mode;
239   _method = method;
240   _bci    = bci;
241 }
242 
243 #endif // SHARE_RUNTIME_VFRAME_INLINE_HPP

 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_RUNTIME_VFRAME_INLINE_HPP
 26 #define SHARE_RUNTIME_VFRAME_INLINE_HPP
 27 
 28 #include "runtime/vframe.hpp"
 29 
 30 #include "oops/instanceStackChunkKlass.inline.hpp"
 31 #include "runtime/handles.inline.hpp"
 32 #include "runtime/frame.inline.hpp"
 33 #include "runtime/thread.inline.hpp"
 34 
 35 inline vframeStreamCommon::vframeStreamCommon(RegisterMap reg_map) : _reg_map(reg_map), _cont(NULL) {
 36   _thread = _reg_map.thread();
 37 }
 38 
 39 inline oop vframeStreamCommon::continuation() const { 
 40   if (_reg_map.cont() != NULL) return _reg_map.cont();
 41   if (_cont != NULL)           return _cont->continuation();
 42   return NULL;
 43 }
 44 
 45 inline intptr_t* vframeStreamCommon::frame_id() const        { return _frame.id(); }
 46 
 47 inline int vframeStreamCommon::vframe_id() const {
 48   assert(_mode == compiled_mode, "unexpected mode: %d", _mode);
 49   return _vframe_id;
 50 }
 51 
 52 inline int vframeStreamCommon::decode_offset() const {
 53   assert(_mode == compiled_mode, "unexpected mode: %d", _mode);
 54   return _decode_offset;
 55 }
 56 
 57 inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
 58 
 59 inline bool vframeStreamCommon::is_entry_frame() const       { return _frame.is_entry_frame(); }
 60 
 61 extern "C" void pfl();
 62 
// Advance to the caller of the current vframe, continuation-aware.
//
// Inlined sender scopes inside the current compiled frame are consumed
// first. When unwinding a physical frame, two continuation boundaries are
// checked before calling sender():
//  - an enterSpecial frame (leaving a continuation on the platform stack),
//    in which case _cont is popped to its parent AFTER unwinding;
//  - a continuation entry frame reached while walking inside a stack chunk.
// In both cases the walk terminates (at_end_mode) when the continuation's
// scope equals the requested _continuation_scope or the vthread scope.
// NOTE(review): the scope-termination semantics above are inferred from the
// visible checks; confirm against the Continuation implementation.
 63 inline void vframeStreamCommon::next() {
 64   // handle frames with inlining
 65   if (_mode == compiled_mode    && fill_in_compiled_inlined_sender()) return;
 66 
 67   // handle general case
 68   do {
 69     bool cont_entry = false;
 70     if (Continuation::is_continuation_enterSpecial(_frame)) {
       // About to unwind out of the current continuation into its caller.
 71       assert (!_reg_map.in_cont(), "");
 72       assert (_cont != NULL, "");
 73       assert (_cont->cont_oop() != NULL, "_cont: " INTPTR_FORMAT, p2i(_cont));
 74       cont_entry = true;
 75       
 76       oop scope = jdk_internal_vm_Continuation::scope(_cont->cont_oop());
       // Stop at the requested continuation scope, or at the vthread scope.
 77       if ((_continuation_scope.not_null() && scope == _continuation_scope()) || scope == java_lang_VirtualThread::vthread_scope()) {
 78         _mode = at_end_mode;
 79         break;
 80       }
 81     } else if (_reg_map.in_cont() && Continuation::is_continuation_entry_frame(_frame, &_reg_map)) {
       // Same termination check when crossing a continuation entry while
       // walking frames stored in a stack chunk.
 82       assert (_reg_map.cont() != NULL, "");
 83       oop scope = jdk_internal_vm_Continuation::scope(_reg_map.cont());
 84       if ((_continuation_scope.not_null() && scope == _continuation_scope()) || scope == java_lang_VirtualThread::vthread_scope()) {
 85         _mode = at_end_mode;
 86         break;
 87       }      
 88     }
 89 
 90     _frame = _frame.sender(&_reg_map);
 91     
    // _cont moves to the parent only once the enterSpecial frame itself has
    // been unwound past.
 92     if (cont_entry) {
 93       _cont = _cont->parent();
 94     }
 95   } while (!fill_from_frame());
 96 }
 97 
 98 inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub, bool process_frame, bool vthread_carrier)
 99   : vframeStreamCommon(RegisterMap(thread, true, process_frame, true)) {
100   _stop_at_java_call_stub = stop_at_java_call_stub;
101 
102   if (!thread->has_last_Java_frame()) {
103     _mode = at_end_mode;
104     return;
105   }
106 
107   _frame = vthread_carrier ? _thread->vthread_carrier_last_frame(&_reg_map) : _thread->last_frame();
108   _cont = _thread->last_continuation();
109   while (!fill_from_frame()) {
110     if (_cont != NULL && Continuation::is_continuation_enterSpecial(_frame)) {
111       _cont = _cont->parent();
112     }
113     _frame = _frame.sender(&_reg_map);
114   }
115   // assert (_reg_map.stack_chunk()() == (stackChunkOop)NULL, "map.chunk: " INTPTR_FORMAT, p2i((stackChunkOopDesc*)_reg_map.stack_chunk()()));
116 }
117 
118 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
119   if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
120     return false;
121   }
122   fill_from_compiled_frame(_sender_decode_offset);
123   ++_vframe_id;
124   return true;
125 }
126 
127 
// Position the stream on the scope found at decode_offset within the
// current nmethod's debug info: sets compiled_mode, records the offset, and
// decodes sender-offset/method/bci from the scope's DebugInfoReadStream.
// NOTE(review): original lines 136-161 (the body handling an out-of-range
// decode_offset) are elided in this diff view, so the fallback path cannot
// be reviewed here.
128 inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
129   _mode = compiled_mode;
130   _decode_offset = decode_offset;
131 
132   // Range check to detect ridiculous offsets.
133   if (decode_offset == DebugInformationRecorder::serialized_null ||
134       decode_offset < 0 ||
135       decode_offset >= nm()->scopes_data_size()) {

// Normal path: decode the scope entry (sender offset, method, bci).
162   DebugInfoReadStream buffer(nm(), decode_offset);
163   _sender_decode_offset = buffer.read_int();
164   _method               = buffer.read_method();
165   _bci                  = buffer.read_bci();
166 
167   assert(_method->is_method(), "checking type of decoded method");
168 }
169 
170 // The native frames are handled specially. We do not rely on ScopeDesc info
171 // since the pc might not be exact due to the _last_native_pc trick.
172 inline void vframeStreamCommon::fill_from_compiled_native_frame() {
173   _mode = compiled_mode;
174   _sender_decode_offset = DebugInformationRecorder::serialized_null;
175   _decode_offset = DebugInformationRecorder::serialized_null;
176   _vframe_id = 0;
177   _method = nm()->method();
178   _bci = 0;
179 }
180 
181 inline bool vframeStreamCommon::fill_from_frame() {
182   if (_frame.is_empty()) {
183     _mode = at_end_mode;
184     return true;
185   }
186 
187   // Interpreted frame
188   if (_frame.is_interpreted_frame()) {
189     fill_from_interpreter_frame();
190     return true;
191   }
192 
193   // Compiled frame
194 
195   if (cb() != NULL && cb()->is_compiled()) {
196     assert (nm()->method() != NULL, "must be");
197     if (nm()->is_native_method()) {
198       // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
199       fill_from_compiled_native_frame();
200     } else {
201       PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
202       int decode_offset;
203       if (pc_desc == NULL) {
204         // Should not happen, but let fill_from_compiled_frame handle it.
205 
206         // If we are trying to walk the stack of a thread that is not
207         // at a safepoint (like AsyncGetCallTrace would do) then this is an
208         // acceptable result. [ This is assuming that safe_for_sender
209         // is so bullet proof that we can trust the frames it produced. ]
210         //
211         // So if we see that the thread is not safepoint safe
212         // then simply produce the method and a bci of zero
213         // and skip the possibility of decoding any inlining that
214         // may be present. That is far better than simply stopping (or
215         // asserting. If however the thread is safepoint safe this
216         // is the sign of a compiler bug  and we'll let
217         // fill_from_compiled_frame handle it.
218 
219 
220         JavaThreadState state = _thread != NULL ? _thread->thread_state() : _thread_in_Java;
221 
222         // in_Java should be good enough to test safepoint safety
223         // if state were say in_Java_trans then we'd expect that
224         // the pc would have already been slightly adjusted to
225         // one that would produce a pcDesc since the trans state
226         // would be one that might in fact anticipate a safepoint
227 
228         if (state == _thread_in_Java ) {
229           // This will get a method a zero bci and no inlining.
230           // Might be nice to have a unique bci to signify this
231           // particular case but for now zero will do.
232 
233           fill_from_compiled_native_frame();
234 
235           // There is something to be said for setting the mode to
236           // at_end_mode to prevent trying to walk further up the
237           // stack. There is evidence that if we walk any further
238           // that we could produce a bad stack chain. However until
239           // we see evidence that allowing this causes us to find
240           // frames bad enough to cause segv's or assertion failures
241           // we don't do it as while we may get a bad call chain the
242           // probability is much higher (several magnitudes) that we
243           // get good data.
244 
245           return true;
246         }
247         decode_offset = DebugInformationRecorder::serialized_null;
248       } else {
249         decode_offset = pc_desc->scope_decode_offset();
250       }
251       fill_from_compiled_frame(decode_offset);
252 
253       _vframe_id = 0;
254     }
255     return true;
256   }
257 
258   // End of stack?
259   if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
260     _mode = at_end_mode;
261     return true;
262   }
263 
264   return false;
265 }
266 
267 
268 inline void vframeStreamCommon::fill_from_interpreter_frame() {
269   Method* method;
270   address bcp;
271   if (!_reg_map.in_cont()) {
272     method = _frame.interpreter_frame_method();
273     bcp    = _frame.interpreter_frame_bcp();
274   } else {
275     method = _reg_map.stack_chunk()->interpreter_frame_method(_frame);
276     bcp    = _reg_map.stack_chunk()->interpreter_frame_bcp(_frame);
277   }
278   int bci  = method->validate_bci_from_bcp(bcp);
279   // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
280   // AsyncGetCallTrace interrupts the VM asynchronously. As a result
281   // it is possible to access an interpreter frame for which
282   // no Java-level information is yet available (e.g., becasue
283   // the frame was being created when the VM interrupted it).
284   // In this scenario, pretend that the interpreter is at the point
285   // of entering the method.
286   if (bci < 0) {
287     DEBUG_ONLY(found_bad_method_frame();)
288     bci = 0;
289   }
290   _mode   = interpreted_mode;
291   _method = method;
292   _bci    = bci;
293 }
294 
295 #endif // SHARE_RUNTIME_VFRAME_INLINE_HPP
< prev index next >