/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_VFRAME_INLINE_HPP
#define SHARE_RUNTIME_VFRAME_INLINE_HPP

#include "runtime/vframe.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.inline.hpp"
inline vframeStreamCommon::vframeStreamCommon(RegisterMap reg_map) : _reg_map(reg_map), _cont_entry(nullptr) {
  _thread = _reg_map.thread();
}

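// Returns the continuation being walked: the one tracked by the register map
// when walking frames inside a continuation, otherwise the oop of the current
// continuation entry on the thread's stack, or nullptr if there is neither.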
inline oop vframeStreamCommon::continuation() const {
  if (_reg_map.cont() != nullptr) {
    return _reg_map.cont();
  } else if (_cont_entry != nullptr) {
    return _cont_entry->cont_oop(_reg_map.thread());
  } else {
    return nullptr;
  }
}

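// Returns an identifier for the current frame. Frames that live in a stack
// chunk (heap frames) have no stable stack address, so a synthetic id is
// composed from the chunk index and the frame's unextended-sp offset.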
inline intptr_t* vframeStreamCommon::frame_id() const {
  if (_frame.is_heap_frame()) {
    // Make something sufficiently unique
    intptr_t id = _reg_map.stack_chunk_index() << 16;
    id += _frame.offset_unextended_sp();
    return reinterpret_cast<intptr_t*>(id);
  }
  return _frame.id();
}

inline int vframeStreamCommon::vframe_id() const {
  assert(_mode == compiled_mode, "unexpected mode: %d", _mode);
  return _vframe_id;
}

inline int vframeStreamCommon::decode_offset() const {
  assert(_mode == compiled_mode, "unexpected mode: %d", _mode);
  return _decode_offset;
}

inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }

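// Advances the stream to the sender vframe. For compiled frames with
// inlining this walks up one inlined scope at a time; otherwise it steps to
// the physical sender frame, ending the walk when it reaches the boundary of
// the current virtual thread or of the requested continuation scope.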
inline void vframeStreamCommon::next() {
  // handle frames with inlining
  if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;

  // handle general case
  do {
    bool is_enterSpecial_frame = false;
    if (Continuation::is_continuation_enterSpecial(_frame)) {
      assert(!_reg_map.in_cont(), "");
      assert(_cont_entry != nullptr, "");
      // Reading oops is only safe if process_frames() is true, and we fix the oops.
      assert(!_reg_map.process_frames() || _cont_entry->cont_oop(_reg_map.thread()) != nullptr, "_cont: " INTPTR_FORMAT, p2i(_cont_entry));
      is_enterSpecial_frame = true;

      // TODO: handle ShowCarrierFrames
      if (_cont_entry->is_virtual_thread() ||
          (_continuation_scope.not_null() && _cont_entry->scope(_reg_map.thread()) == _continuation_scope())) {
        _mode = at_end_mode;
        break;
      }
    } else if (_reg_map.in_cont() && Continuation::is_continuation_entry_frame(_frame, &_reg_map)) {
      assert(_reg_map.cont() != nullptr, "");
      oop scope = jdk_internal_vm_Continuation::scope(_reg_map.cont());
      if (scope == java_lang_VirtualThread::vthread_scope() ||
          (_continuation_scope.not_null() && scope == _continuation_scope())) {
        _mode = at_end_mode;
        break;
      }
    }

    _frame = _frame.sender(&_reg_map);

    if (is_enterSpecial_frame) {
      _cont_entry = _cont_entry->parent();
    }
  } while (!fill_from_frame());
}

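// Positions the stream at the thread's top-most Java frame. When a virtual
// thread is mounted, the walk starts either from the carrier's last frame or
// from the virtual thread's last frame, depending on vthread_carrier.
// Typical use:
//   vframeStream vfst(thread);
//   while (!vfst.at_end()) {
//     // inspect vfst.method() / vfst.bci()
//     vfst.next();
//   }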
inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub, bool process_frame, bool vthread_carrier)
  : vframeStreamCommon(RegisterMap(thread,
                                   RegisterMap::UpdateMap::include,
                                   process_frame ? RegisterMap::ProcessFrames::include : RegisterMap::ProcessFrames::skip,
                                   RegisterMap::WalkContinuation::include)) {
  _stop_at_java_call_stub = stop_at_java_call_stub;

  if (!thread->has_last_Java_frame()) {
    _mode = at_end_mode;
    return;
  }

  if (thread->is_vthread_mounted()) {
    _frame = vthread_carrier ? _thread->carrier_last_frame(&_reg_map) : _thread->vthread_last_frame();
  } else {
    _frame = _thread->last_frame();
  }

  _cont_entry = _thread->last_continuation();
  while (!fill_from_frame()) {
    _frame = _frame.sender(&_reg_map);
  }
}

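// If the current compiled scope has an inlined sender, refill the stream from
// that sender's scope and bump the vframe id; returns false when the caller
// must instead advance to the physical sender frame.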
inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
  if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
    return false;
  }
  fill_from_compiled_frame(_sender_decode_offset);
  ++_vframe_id;
  return true;
}

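// Fills the stream from the scope at decode_offset in the nmethod's debug
// info: reads the sender scope's decode offset, the method, and the bci.
// Invalid offsets fall back to fill_from_compiled_native_frame().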
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;
  _decode_offset = decode_offset;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we read nmethod::scopes_data at serialized_null (== 0)
    // or at some other invalid offset, invalid values will be decoded.
    // Based on these values, invalid heap locations could be referenced,
    // which could lead to crashes in product mode.
    // Therefore, do not use the decode offset if it is invalid, but fill the
    // frame as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
    if (WizardMode) {
      // Keep tty output consistent. To avoid ttyLocker, we buffer in a stream and print all at once.
      stringStream ss;
      ss.print_cr("Error in fill_from_frame: pc_desc for "
                  INTPTR_FORMAT " not found or invalid at %d",
                  p2i(_frame.pc()), decode_offset);
      nm()->print_on(&ss);
      nm()->method()->print_codes_on(&ss);
      nm()->print_code_on(&ss);
      nm()->print_pcs_on(&ss);
      tty->print("%s", ss.as_string()); // print all at once
    }
    found_bad_method_frame();
#endif
    // Provide a cheap fallback in product mode.  (See comment above.)
    fill_from_compiled_native_frame();
    return;
  }

  // Decode the first part of the scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}

// The native frames are handled specially. We do not rely on ScopeDesc info
// since the pc might not be exact due to the _last_native_pc trick.
inline void vframeStreamCommon::fill_from_compiled_native_frame() {
  _mode = compiled_mode;
  _sender_decode_offset = DebugInformationRecorder::serialized_null;
  _decode_offset = DebugInformationRecorder::serialized_null;
  _vframe_id = 0;
  _method = nm()->method();
  _bci = 0;
}

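// Fills the stream state from the current physical frame. Returns true when
// the frame yields a Java-level vframe (interpreted or compiled) or when the
// end of the walk has been reached; returns false for frames that carry no
// Java-level information, in which case the caller advances to the sender.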
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != nullptr && cb()->is_nmethod()) {
    assert(nm()->method() != nullptr, "must be");
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == nullptr) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (as AsyncGetCallTrace would do), then this is an
        // acceptable result. [ This assumes that safe_for_sender
        // is so bulletproof that we can trust the frames it produces. ]
        //
        // So if we see that the thread is not safepoint safe,
        // simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting). If, however, the thread is safepoint safe, this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.

        JavaThreadState state = _thread != nullptr ? _thread->thread_state() : _thread_in_Java;

        // in_Java should be good enough to test safepoint safety.
        // If the state were, say, in_Java_trans, then we'd expect the pc
        // to have already been adjusted slightly to one that produces a
        // pcDesc, since the trans state is one that might in fact
        // anticipate a safepoint.

        if (state == _thread_in_Java) {
          // This will give us a method, a zero bci, and no inlining.
          // It might be nice to have a unique bci to signify this
          // particular case, but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the stack,
          // since there is evidence that walking any further could
          // produce a bad stack chain. However, until we see evidence
          // that allowing this produces frames bad enough to cause
          // segvs or assertion failures, we don't do it: while we may
          // get a bad call chain, the probability is much higher (by
          // several orders of magnitude) that we get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);

      _vframe_id = 0;
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  assert(!Continuation::is_continuation_enterSpecial(_frame), "");
  return false;
}

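// Fills the stream from an interpreted frame, reading the method and bcp
// either from the frame on the thread's stack or from the stack chunk when
// walking a continuation, and converting the bcp to a bci.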
inline void vframeStreamCommon::fill_from_interpreter_frame() {
  Method* method;
  address bcp;
  if (!_reg_map.in_cont()) {
    method = _frame.interpreter_frame_method();
    bcp    = _frame.interpreter_frame_bcp();
  } else {
    method = _reg_map.stack_chunk()->interpreter_frame_method(_frame);
    bcp    = _reg_map.stack_chunk()->interpreter_frame_bcp(_frame);
  }
  int bci = method->validate_bci_from_bcp(bcp);
  // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
  // AsyncGetCallTrace interrupts the VM asynchronously. As a result
  // it is possible to access an interpreter frame for which
  // no Java-level information is yet available (e.g., because
  // the frame was being created when the VM interrupted it).
  // In this scenario, pretend that the interpreter is at the point
  // of entering the method.
  if (bci < 0) {
    DEBUG_ONLY(found_bad_method_frame();)
    bci = 0;
  }
  _mode   = interpreted_mode;
  _method = method;
  _bci    = bci;
}

#endif // SHARE_RUNTIME_VFRAME_INLINE_HPP