1 /*
   2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_RUNTIME_VFRAME_INLINE_HPP
  26 #define SHARE_RUNTIME_VFRAME_INLINE_HPP
  27 
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "runtime/handles.inline.hpp"
  30 #include "runtime/frame.inline.hpp"
  31 #include "runtime/thread.inline.hpp"
  32 #include "runtime/handles.inline.hpp"
  33 #include "runtime/vframe.hpp"
  34 
  35 inline vframeStreamCommon::vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
  36   _thread = thread;
  37 }
  38 
  39 inline vframeStreamCommon::vframeStreamCommon(RegisterMap reg_map) : _reg_map(reg_map) {
  40   _thread = _reg_map.thread();
  41 }
  42 
  43 inline intptr_t* vframeStreamCommon::frame_id() const        { return _frame.id(); }
  44 
  45 inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  46 
  47 inline bool vframeStreamCommon::is_entry_frame() const       { return _frame.is_entry_frame(); }
  48 
// Advance the stream to the next (sender) vframe.
inline void vframeStreamCommon::next() {
  // Inlined scopes share one physical compiled frame, so first try to step
  // to an inlined sender without leaving the current frame.
  if (_mode == compiled_mode    && fill_in_compiled_inlined_sender()) return;

  // General case: walk physical frames until one yields a Java vframe.
  do {
    // A requested stop scope only makes sense while walking a continuation.
    assert (_continuation_scope.is_null() || _cont.not_null(), "must be");
    bool cont_entry = false;  // true if _frame is the entry frame of _cont
    oop cont = (oop)NULL;     // the continuation whose entry we are crossing
    if (_cont.not_null() && Continuation::is_continuation_entry_frame(_frame, &_reg_map)) {
      cont_entry = true;
      cont = _cont();
      oop scope = java_lang_Continuation::scope(cont);

      // *(_cont.raw_value()) = java_lang_Continuation::parent(_cont());

      // If this continuation's scope matches the requested stop scope,
      // end the walk here instead of continuing into the parent.
      if (_continuation_scope.not_null() && oopDesc::equals(scope, _continuation_scope())) {
        assert (Continuation::is_frame_in_continuation(_frame, cont), "");
        _mode = at_end_mode;
        break;
      }
    }

    _prev_frame = _frame;
    _frame = _frame.sender(&_reg_map);
    
    if (cont_entry) {
      // We just stepped past a continuation entry: subsequent frames belong
      // to the parent continuation (NULL once the chain is exhausted).
      *(_cont.raw_value()) = java_lang_Continuation::parent(cont);
      assert (_reg_map.cont() == (oop)NULL || oopDesc::equals(_cont(), _reg_map.cont()), 
        "map.cont: " INTPTR_FORMAT " vframeStream: " INTPTR_FORMAT, 
        p2i((oopDesc*)_reg_map.cont()), p2i((oopDesc*)_cont()));
    }
  } while (!fill_from_frame());
}
  83 
// Position the stream at the top-most Java vframe of the given thread.
// If stop_at_java_call_stub is true, the walk later ends at entry frames.
inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub)
  : vframeStreamCommon(RegisterMap(thread, false, true)) {
  _stop_at_java_call_stub = stop_at_java_call_stub;

  if (!thread->has_last_Java_frame()) {
    // No Java frames at all: the stream starts out exhausted.
    _mode = at_end_mode;
    return;
  }

  _frame = _thread->last_frame();
  // Track which continuation (if any) the current frame belongs to while
  // skipping any non-Java frames on top of the stack.
  oop cont = _thread->last_continuation();
  while (!fill_from_frame()) {
    if (cont != (oop)NULL && Continuation::is_continuation_entry_frame(_frame, &_reg_map)) {
      // Crossing a continuation entry: the sender belongs to the parent.
      cont = java_lang_Continuation::parent(cont);
    }
    _prev_frame = _frame;
    _frame = _frame.sender(&_reg_map);
  }
  _cont = cont != (oop)NULL ? Handle(Thread::current(), cont) : Handle();
}
 104 
 105 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
 106   if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
 107     return false;
 108   }
 109   fill_from_compiled_frame(_sender_decode_offset);
 110   ++_vframe_id;
 111   return true;
 112 }
 113 
 114 
// Fill the stream state (_method, _bci, _sender_decode_offset) from the
// scope description at decode_offset in the current nmethod's debug info.
// Invalid offsets fall back to a native-frame style fill (see 6379830).
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;
  _decode_offset = decode_offset;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we read nmethod::scopes_data at serialized_null (== 0)
    // or if read some at other invalid offset, invalid values will be decoded.
    // Based on these values, invalid heap locations could be referenced
    // that could lead to crashes in product mode.
    // Therefore, do not use the decode offset if invalid, but fill the frame
    // as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
    if (WizardMode) {
      ttyLocker ttyl;
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
    found_bad_method_frame();
#endif
    // Provide a cheap fallback in product mode.  (See comment above.)
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}
 156 
 157 // The native frames are handled specially. We do not rely on ScopeDesc info
 158 // since the pc might not be exact due to the _last_native_pc trick.
 159 inline void vframeStreamCommon::fill_from_compiled_native_frame() {
 160   _mode = compiled_mode;
 161   _sender_decode_offset = DebugInformationRecorder::serialized_null;
 162   _decode_offset = DebugInformationRecorder::serialized_null;
 163   _vframe_id = 0;
 164   _method = nm()->method();
 165   _bci = 0;
 166 }
 167 
// Fill the stream's vframe state from the current physical _frame.
// Returns true if the frame yielded a Java vframe or the walk is finished
// (_mode == at_end_mode); returns false if the caller should advance to
// the sender frame and retry.
inline bool vframeStreamCommon::fill_from_frame() {
  if (_frame.is_empty()) {
    _mode = at_end_mode;
    return true;
  }

  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_compiled()) {
    assert (nm()->method() != NULL, "must be");
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting. If however the thread is safepoint safe this
        // is the sign of a compiler bug  and we'll let
        // fill_from_compiled_frame handle it.


        // NULL _thread can occur for stream variants constructed without a
        // thread; treat that conservatively as the not-at-safepoint case.
        JavaThreadState state = _thread != NULL ? _thread->thread_state() : _thread_in_Java;

        // in_Java should be good enough to test safepoint safety
        // if state were say in_Java_trans then we'd expect that
        // the pc would have already been slightly adjusted to
        // one that would produce a pcDesc since the trans state
        // would be one that might in fact anticipate a safepoint

        if (state == _thread_in_Java ) {
          // This will get a method a zero bci and no inlining.
          // Might be nice to have a unique bci to signify this
          // particular case but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // that we could produce a bad stack chain. However until
          // we see evidence that allowing this causes us to find
          // frames bad enough to cause segv's or assertion failures
          // we don't do it as while we may get a bad call chain the
          // probability is much higher (several magnitudes) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);

      // A fresh physical frame starts at the innermost (deepest) vframe.
      _vframe_id = 0;
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  // Not a Java vframe (e.g. stub/runtime frame): caller must step to sender.
  return false;
}
 253 
 254 
// Fill the stream state (_method, _bci) from the current interpreted frame,
// reading through the continuation accessors when the register map says the
// frame lives inside a (possibly heap-resident) continuation.
inline void vframeStreamCommon::fill_from_interpreter_frame() {
  Method* method;
  address bcp;
  if (!_reg_map.in_cont()) {
    method = _frame.interpreter_frame_method();
    bcp    = _frame.interpreter_frame_bcp();
  } else {
    method = Continuation::interpreter_frame_method(_frame, &_reg_map);
    bcp    = Continuation::interpreter_frame_bcp(_frame, &_reg_map);
  }
  int       bci    = method->validate_bci_from_bcp(bcp);
  // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
  // AsyncGetCallTrace interrupts the VM asynchronously. As a result
  // it is possible to access an interpreter frame for which
  // no Java-level information is yet available (e.g., because
  // the frame was being created when the VM interrupted it).
  // In this scenario, pretend that the interpreter is at the point
  // of entering the method.
  if (bci < 0) {
    DEBUG_ONLY(found_bad_method_frame();)
    bci = 0;
  }
  _mode   = interpreted_mode;
  _method = method;
  _bci    = bci;
}
 281 
 282 #endif // SHARE_RUNTIME_VFRAME_INLINE_HPP