< prev index next >

src/hotspot/share/runtime/frame.cpp

Print this page
*** 23,45 ***
   */
  
  #include "precompiled.hpp"
  #include "classfile/moduleEntry.hpp"
  #include "code/codeCache.hpp"
  #include "code/vmreg.inline.hpp"
  #include "compiler/abstractCompiler.hpp"
  #include "compiler/disassembler.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/collectedHeap.inline.hpp"
  #include "interpreter/interpreter.hpp"
  #include "interpreter/oopMapCache.hpp"
  #include "memory/resourceArea.hpp"
  #include "memory/universe.hpp"
  #include "oops/markWord.hpp"
  #include "oops/method.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
  #include "oops/verifyOopClosure.hpp"
  #include "prims/methodHandles.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/javaCalls.hpp"
  #include "runtime/monitorChunk.hpp"
  #include "runtime/os.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/signature.hpp"
  #include "runtime/stubCodeGenerator.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.inline.hpp"
  #include "utilities/debug.hpp"
  #include "utilities/decoder.hpp"
  #include "utilities/formatBuffer.hpp"
  
! RegisterMap::RegisterMap(JavaThread *thread, bool update_map, bool process_frames) {
    _thread         = thread;
    _update_map     = update_map;
    _process_frames = process_frames;
    clear();
    debug_only(_update_for_id = NULL;)
  #ifndef PRODUCT
    for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
  #endif /* PRODUCT */
  }
  
--- 23,73 ---
   */
  
  #include "precompiled.hpp"
  #include "classfile/moduleEntry.hpp"
  #include "code/codeCache.hpp"
+ #include "code/scopeDesc.hpp"
  #include "code/vmreg.inline.hpp"
  #include "compiler/abstractCompiler.hpp"
  #include "compiler/disassembler.hpp"
  #include "compiler/oopMap.hpp"
  #include "gc/shared/collectedHeap.inline.hpp"
  #include "interpreter/interpreter.hpp"
  #include "interpreter/oopMapCache.hpp"
+ #include "logging/log.hpp"
  #include "memory/resourceArea.hpp"
  #include "memory/universe.hpp"
+ #include "oops/instanceStackChunkKlass.inline.hpp"
  #include "oops/markWord.hpp"
  #include "oops/method.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
  #include "oops/verifyOopClosure.hpp"
  #include "prims/methodHandles.hpp"
+ #include "runtime/continuation.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/javaCalls.hpp"
  #include "runtime/monitorChunk.hpp"
  #include "runtime/os.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/signature.hpp"
+ #include "runtime/stackValue.hpp"
  #include "runtime/stubCodeGenerator.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.inline.hpp"
  #include "utilities/debug.hpp"
  #include "utilities/decoder.hpp"
  #include "utilities/formatBuffer.hpp"
  
! RegisterMap::RegisterMap(JavaThread *thread, bool update_map, bool process_frames, bool walk_cont) {
    _thread         = thread;
    _update_map     = update_map;
+   _walk_cont      = walk_cont;
+   DEBUG_ONLY(_skip_missing = false;)
    _process_frames = process_frames;
    clear();
    debug_only(_update_for_id = NULL;)
+ 
+   if (walk_cont && thread != NULL && thread->last_continuation() != NULL) {
+     _chunk = stackChunkHandle(Thread::current(), NULL, true);
+   }
+ 
+ #ifndef PRODUCT
+   for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
+ #endif /* PRODUCT */
+ }
+ 
+ RegisterMap::RegisterMap(oop continuation, bool update_map) {
+   _thread         = NULL;
+   _update_map     = update_map;
+   _walk_cont      = true;
+   DEBUG_ONLY(_skip_missing = false;)
+   _process_frames = false;
+   clear();
+   debug_only(_update_for_id = NULL;)
+ 
+   _chunk = stackChunkHandle(Thread::current(), NULL, true);
+ 
  #ifndef PRODUCT
    for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
  #endif /* PRODUCT */
  }
  

*** 71,14 ***
    _thread                = map->thread();
    _update_map            = map->update_map();
    _process_frames        = map->process_frames();
    _include_argument_oops = map->include_argument_oops();
    debug_only(_update_for_id = map->_update_for_id;)
    pd_initialize_from(map);
    if (update_map()) {
      for(int i = 0; i < location_valid_size; i++) {
!       LocationValidType bits = !update_map() ? 0 : map->_location_valid[i];
        _location_valid[i] = bits;
        // for whichever bits are set, pull in the corresponding map->_location
        int j = i*location_valid_type_size;
        while (bits != 0) {
          if ((bits & 1) != 0) {
--- 99,20 ---
    _thread                = map->thread();
    _update_map            = map->update_map();
    _process_frames        = map->process_frames();
    _include_argument_oops = map->include_argument_oops();
    debug_only(_update_for_id = map->_update_for_id;)
+   _walk_cont     = map->_walk_cont;
+   DEBUG_ONLY(_skip_missing = map->_skip_missing;)
+ 
+   // NOTE: only the original RegisterMap's stack-chunk handle lives long enough for
+   // StackWalker, so we share it here; this is bound to cause trouble with nested continuations.
+   _chunk = map->_chunk; // stackChunkHandle(Thread::current(), map->_chunk(), map->_chunk.not_null()); // 
+ 
    pd_initialize_from(map);
    if (update_map()) {
      for(int i = 0; i < location_valid_size; i++) {
!       LocationValidType bits = map->_location_valid[i];
        _location_valid[i] = bits;
        // for whichever bits are set, pull in the corresponding map->_location
        int j = i*location_valid_type_size;
        while (bits != 0) {
          if ((bits & 1) != 0) {

*** 90,13 ***
        }
      }
    }
  }
  
  void RegisterMap::clear() {
    set_include_argument_oops(true);
!   if (_update_map) {
      for(int i = 0; i < location_valid_size; i++) {
        _location_valid[i] = 0;
      }
      pd_clear();
    } else {
--- 124,26 ---
        }
      }
    }
  }
  
+ oop RegisterMap::cont() const {
+   return _chunk() != NULL ? _chunk()->cont() : (oop)NULL;
+ }
+ 
+ void RegisterMap::set_stack_chunk(stackChunkOop chunk) {
+   assert (chunk == NULL || _walk_cont, "");
+   assert (chunk == NULL || chunk->is_stackChunk(), "");
+   assert (chunk == NULL || _chunk.not_null(), "");
+   if (_chunk.is_null()) return;
+   log_trace(jvmcont)("set_stack_chunk: " INTPTR_FORMAT " this: " INTPTR_FORMAT, p2i((oopDesc*)chunk), p2i(this));
+   *(_chunk.raw_value()) = chunk; // reuse handle. see comment above in the constructor
+ }
+ 
  void RegisterMap::clear() {
    set_include_argument_oops(true);
!   if (update_map()) {
      for(int i = 0; i < location_valid_size; i++) {
        _location_valid[i] = 0;
      }
      pd_clear();
    } else {

*** 104,16 ***
    }
  }
  
  #ifndef PRODUCT
  
  void RegisterMap::print_on(outputStream* st) const {
    st->print_cr("Register map");
    for(int i = 0; i < reg_count; i++) {
  
      VMReg r = VMRegImpl::as_VMReg(i);
!     intptr_t* src = (intptr_t*) location(r);
      if (src != NULL) {
  
        r->print_on(st);
        st->print(" [" INTPTR_FORMAT "] = ", p2i(src));
        if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
--- 151,24 ---
    }
  }
  
  #ifndef PRODUCT
  
+ VMReg RegisterMap::find_register_spilled_here(void* p, intptr_t* sp) {
+   for(int i = 0; i < RegisterMap::reg_count; i++) {
+     VMReg r = VMRegImpl::as_VMReg(i);
+     if (p == location(r, sp)) return r;
+   }
+   return NULL;
+ }
+ 
  void RegisterMap::print_on(outputStream* st) const {
    st->print_cr("Register map");
    for(int i = 0; i < reg_count; i++) {
  
      VMReg r = VMRegImpl::as_VMReg(i);
!     intptr_t* src = (intptr_t*) location(r, (intptr_t*)NULL);
      if (src != NULL) {
  
        r->print_on(st);
        st->print(" [" INTPTR_FORMAT "] = ", p2i(src));
        if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {

*** 163,18 ***
    _pc = newpc;
    _cb = CodeCache::find_blob_unsafe(_pc);
  
  }
  
  // type testers
  bool frame::is_ignored_frame() const {
    return false;  // FIXME: some LambdaForm frames should be ignored
  }
- bool frame::is_deoptimized_frame() const {
-   assert(_deopt_state != unknown, "not answerable");
-   return _deopt_state == is_deoptimized;
- }
  
  bool frame::is_native_frame() const {
    return (_cb != NULL &&
            _cb->is_nmethod() &&
            ((nmethod*)_cb)->is_native_method());
--- 218,29 ---
    _pc = newpc;
    _cb = CodeCache::find_blob_unsafe(_pc);
  
  }
  
+ void frame::set_pc_preserve_deopt(address newpc) {
+   set_pc_preserve_deopt(newpc, CodeCache::find_blob_unsafe(newpc));
+ }
+ 
+ void frame::set_pc_preserve_deopt(address newpc, CodeBlob* cb) {
+ #ifdef ASSERT
+   if (_cb != NULL && _cb->is_nmethod()) {
+     assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
+   }
+ #endif // ASSERT
+ 
+   _pc = newpc;
+   _cb = cb;
+ }
+ 
  // type testers
  bool frame::is_ignored_frame() const {
    return false;  // FIXME: some LambdaForm frames should be ignored
  }
  
  bool frame::is_native_frame() const {
    return (_cb != NULL &&
            _cb->is_nmethod() &&
            ((nmethod*)_cb)->is_native_method());

*** 184,21 ***
    if (is_interpreted_frame()) return true;
    if (is_compiled_frame())    return true;
    return false;
  }
  
- 
- bool frame::is_compiled_frame() const {
-   if (_cb != NULL &&
-       _cb->is_compiled() &&
-       ((CompiledMethod*)_cb)->is_java_method()) {
-     return true;
-   }
-   return false;
- }
- 
- 
  bool frame::is_runtime_frame() const {
    return (_cb != NULL && _cb->is_runtime_stub());
  }
  
  bool frame::is_safepoint_blob_frame() const {
--- 250,10 ---

*** 264,41 ***
  
  bool frame::can_be_deoptimized() const {
    if (!is_compiled_frame()) return false;
    CompiledMethod* nm = (CompiledMethod*)_cb;
  
!   if( !nm->can_be_deoptimized() )
      return false;
  
    return !nm->is_at_poll_return(pc());
  }
  
  void frame::deoptimize(JavaThread* thread) {
!   assert(thread->frame_anchor()->has_last_Java_frame() &&
!          thread->frame_anchor()->walkable(), "must be");
    // Schedule deoptimization of an nmethod activation with this frame.
    assert(_cb != NULL && _cb->is_compiled(), "must be");
  
    // If the call site is a MethodHandle call site use the MH deopt
    // handler.
    CompiledMethod* cm = (CompiledMethod*) _cb;
    address deopt = cm->is_method_handle_return(pc()) ?
                          cm->deopt_mh_handler_begin() :
                          cm->deopt_handler_begin();
  
    // Save the original pc before we patch in the new one
    cm->set_original_pc(this, pc());
    patch_pc(thread, deopt);
  
  #ifdef ASSERT
!   {
-     RegisterMap map(thread, false);
      frame check = thread->last_frame();
!     while (id() != check.id()) {
!       check = check.sender(&map);
      }
-     assert(check.is_deoptimized_frame(), "missed deopt");
    }
  #endif // ASSERT
  }
  
  frame frame::java_sender() const {
--- 319,52 ---
  
  bool frame::can_be_deoptimized() const {
    if (!is_compiled_frame()) return false;
    CompiledMethod* nm = (CompiledMethod*)_cb;
  
!   if(!nm->can_be_deoptimized())
      return false;
  
    return !nm->is_at_poll_return(pc());
  }
  
  void frame::deoptimize(JavaThread* thread) {
!   // tty->print_cr(">>> frame::deoptimize");
!   // print_on(tty);
+   assert(thread == NULL
+          || (thread->frame_anchor()->has_last_Java_frame() &&
+              thread->frame_anchor()->walkable()), "must be");
    // Schedule deoptimization of an nmethod activation with this frame.
    assert(_cb != NULL && _cb->is_compiled(), "must be");
  
+   // log_develop_trace(jvmcont)(">>>> frame::deoptimize %ld", os::current_thread_id());
+   // tty->print_cr(">>>> frame::deoptimize: %ld", os::current_thread_id()); print_on(tty);
+ 
    // If the call site is a MethodHandle call site use the MH deopt
    // handler.
    CompiledMethod* cm = (CompiledMethod*) _cb;
    address deopt = cm->is_method_handle_return(pc()) ?
                          cm->deopt_mh_handler_begin() :
                          cm->deopt_handler_begin();
  
+   NativePostCallNop* inst = nativePostCallNop_at(pc());
+ 
    // Save the original pc before we patch in the new one
    cm->set_original_pc(this, pc());
    patch_pc(thread, deopt);
+   assert(is_deoptimized_frame(), "must be");
  
  #ifdef ASSERT
!   if (thread != NULL) {
      frame check = thread->last_frame();
!     if (is_older(check.id())) {
!       RegisterMap map(thread, false);
+       while (id() != check.id()) {
+         check = check.sender(&map);
+       }
+       assert(check.is_deoptimized_frame(), "missed deopt");
      }
    }
  #endif // ASSERT
  }
  
  frame frame::java_sender() const {

*** 370,14 ***
    assert(is_interpreted_frame(), "interpreted frame expected");
    assert(ProfileInterpreter, "must be profiling interpreter");
    *interpreter_frame_mdp_addr() = (intptr_t)mdp;
  }
  
  BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
    assert(is_interpreted_frame(), "Not an interpreted frame");
  #ifdef ASSERT
!   interpreter_frame_verify_monitor(current);
  #endif
    BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
    return next;
  }
  
--- 436,18 ---
    assert(is_interpreted_frame(), "interpreted frame expected");
    assert(ProfileInterpreter, "must be profiling interpreter");
    *interpreter_frame_mdp_addr() = (intptr_t)mdp;
  }
  
+ template BasicObjectLock* frame::next_monitor_in_interpreter_frame<true>(BasicObjectLock* current) const;
+ template BasicObjectLock* frame::next_monitor_in_interpreter_frame<false>(BasicObjectLock* current) const;
+ 
+ template <bool relative>
  BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
    assert(is_interpreted_frame(), "Not an interpreted frame");
  #ifdef ASSERT
!   interpreter_frame_verify_monitor<relative>(current);
  #endif
    BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
    return next;
  }
  

*** 391,32 ***
    return previous;
  }
  
  // Interpreter locals and expression stack locations.
  
  intptr_t* frame::interpreter_frame_local_at(int index) const {
    const int n = Interpreter::local_offset_in_bytes(index)/wordSize;
!   return &((*interpreter_frame_locals_addr())[n]);
  }
  
  intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
    const int i = offset * interpreter_frame_expression_stack_direction();
    const int n = i * Interpreter::stackElementWords;
!   return &(interpreter_frame_expression_stack()[n]);
  }
  
  jint frame::interpreter_frame_expression_stack_size() const {
    // Number of elements on the interpreter expression stack
    // Callers should span by stackElementWords
    int element_size = Interpreter::stackElementWords;
    size_t stack_size = 0;
    if (frame::interpreter_frame_expression_stack_direction() < 0) {
!     stack_size = (interpreter_frame_expression_stack() -
!                   interpreter_frame_tos_address() + 1)/element_size;
    } else {
!     stack_size = (interpreter_frame_tos_address() -
!                   interpreter_frame_expression_stack() + 1)/element_size;
    }
    assert( stack_size <= (size_t)max_jint, "stack size too big");
    return ((jint)stack_size);
  }
  
--- 461,46 ---
    return previous;
  }
  
  // Interpreter locals and expression stack locations.
  
+ template intptr_t* frame::interpreter_frame_local_at<true>(int index) const;
+ template intptr_t* frame::interpreter_frame_local_at<false>(int index) const;
+ 
+ template <bool relative>
  intptr_t* frame::interpreter_frame_local_at(int index) const {
    const int n = Interpreter::local_offset_in_bytes(index)/wordSize;
!   intptr_t* first = relative ? fp() + (intptr_t)*interpreter_frame_locals_addr()
+                              : *interpreter_frame_locals_addr();
+   return &(first[n]);
  }
  
+ template intptr_t* frame::interpreter_frame_expression_stack_at<true>(jint index) const;
+ template intptr_t* frame::interpreter_frame_expression_stack_at<false>(jint index) const;
+ 
+ template <bool relative>
  intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
    const int i = offset * interpreter_frame_expression_stack_direction();
    const int n = i * Interpreter::stackElementWords;
!   return &(interpreter_frame_expression_stack<relative>()[n]);
  }
  
+ template jint frame::interpreter_frame_expression_stack_size<true>() const;
+ template jint frame::interpreter_frame_expression_stack_size<false>() const;
+ 
+ template <bool relative>
  jint frame::interpreter_frame_expression_stack_size() const {
    // Number of elements on the interpreter expression stack
    // Callers should span by stackElementWords
    int element_size = Interpreter::stackElementWords;
    size_t stack_size = 0;
    if (frame::interpreter_frame_expression_stack_direction() < 0) {
!     stack_size = (interpreter_frame_expression_stack<relative>() -
!                   interpreter_frame_tos_address<relative>() + 1)/element_size;
    } else {
!     stack_size = (interpreter_frame_tos_address<relative>() -
!                   interpreter_frame_expression_stack<relative>() + 1)/element_size;
    }
    assert( stack_size <= (size_t)max_jint, "stack size too big");
    return ((jint)stack_size);
  }
  

*** 476,38 ***
    }
    NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
  }
  
  
  void frame::print_on(outputStream* st) const {
    print_value_on(st,NULL);
    if (is_interpreted_frame()) {
!     interpreter_frame_print_on(st);
    }
  }
  
! 
  void frame::interpreter_frame_print_on(outputStream* st) const {
  #ifndef PRODUCT
    assert(is_interpreted_frame(), "Not an interpreted frame");
    jint i;
    for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
!     intptr_t x = *interpreter_frame_local_at(i);
      st->print(" - local  [" INTPTR_FORMAT "]", x);
      st->fill_to(23);
      st->print_cr("; #%d", i);
    }
!   for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
!     intptr_t x = *interpreter_frame_expression_stack_at(i);
      st->print(" - stack  [" INTPTR_FORMAT "]", x);
      st->fill_to(23);
      st->print_cr("; #%d", i);
    }
    // locks for synchronization
!   for (BasicObjectLock* current = interpreter_frame_monitor_end();
         current < interpreter_frame_monitor_begin();
!        current = next_monitor_in_interpreter_frame(current)) {
      st->print(" - obj    [");
      current->obj()->print_value_on(st);
      st->print_cr("]");
      st->print(" - lock   [");
      current->lock()->print_on(st, current->obj());
--- 560,42 ---
    }
    NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
  }
  
  
+ template void frame::print_on<false>(outputStream* st) const;
+ template void frame::print_on<true >(outputStream* st) const;
+ 
+ template <bool relative>
  void frame::print_on(outputStream* st) const {
    print_value_on(st,NULL);
    if (is_interpreted_frame()) {
!     interpreter_frame_print_on<relative>(st);
    }
  }
  
! template <bool relative>
  void frame::interpreter_frame_print_on(outputStream* st) const {
  #ifndef PRODUCT
    assert(is_interpreted_frame(), "Not an interpreted frame");
    jint i;
    for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
!     intptr_t x = *interpreter_frame_local_at<relative>(i);
      st->print(" - local  [" INTPTR_FORMAT "]", x);
      st->fill_to(23);
      st->print_cr("; #%d", i);
    }
!   for (i = interpreter_frame_expression_stack_size<relative>() - 1; i >= 0; --i ) {
!     intptr_t x = *interpreter_frame_expression_stack_at<relative>(i);
      st->print(" - stack  [" INTPTR_FORMAT "]", x);
      st->fill_to(23);
      st->print_cr("; #%d", i);
    }
    // locks for synchronization
!   for (BasicObjectLock* current = interpreter_frame_monitor_end<relative>();
         current < interpreter_frame_monitor_begin();
!        current = next_monitor_in_interpreter_frame<relative>(current)) {
      st->print(" - obj    [");
      current->obj()->print_value_on(st);
      st->print_cr("]");
      st->print(" - lock   [");
      current->lock()->print_on(st, current->obj());

*** 518,11 ***
    // bcp
    st->print(" - bcp    [" INTPTR_FORMAT "]", p2i(interpreter_frame_bcp()));
    st->fill_to(23);
    st->print_cr("; @%d", interpreter_frame_bci());
    // locals
!   st->print_cr(" - locals [" INTPTR_FORMAT "]", p2i(interpreter_frame_local_at(0)));
    // method
    st->print(" - method [" INTPTR_FORMAT "]", p2i(interpreter_frame_method()));
    st->fill_to(23);
    st->print("; ");
    interpreter_frame_method()->print_name(st);
--- 606,11 ---
    // bcp
    st->print(" - bcp    [" INTPTR_FORMAT "]", p2i(interpreter_frame_bcp()));
    st->fill_to(23);
    st->print_cr("; @%d", interpreter_frame_bci());
    // locals
!   st->print_cr(" - locals [" INTPTR_FORMAT "]", p2i(interpreter_frame_local_at<relative>(0)));
    // method
    st->print(" - method [" INTPTR_FORMAT "]", p2i(interpreter_frame_method()));
    st->fill_to(23);
    st->print("; ");
    interpreter_frame_method()->print_name(st);

*** 597,16 ***
          st->print("j  " PTR_FORMAT, p2i(pc()));
        }
      } else if (StubRoutines::contains(pc())) {
        StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
        if (desc != NULL) {
!         st->print("v  ~StubRoutines::%s", desc->name());
        } else {
          st->print("v  ~StubRoutines::" PTR_FORMAT, p2i(pc()));
        }
      } else if (_cb->is_buffer_blob()) {
!       st->print("v  ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
      } else if (_cb->is_compiled()) {
        CompiledMethod* cm = (CompiledMethod*)_cb;
        Method* m = cm->method();
        if (m != NULL) {
          if (cm->is_nmethod()) {
--- 685,16 ---
          st->print("j  " PTR_FORMAT, p2i(pc()));
        }
      } else if (StubRoutines::contains(pc())) {
        StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
        if (desc != NULL) {
!         st->print("v  ~StubRoutines::%s " PTR_FORMAT, desc->name(), p2i(pc()));
        } else {
          st->print("v  ~StubRoutines::" PTR_FORMAT, p2i(pc()));
        }
      } else if (_cb->is_buffer_blob()) {
!       st->print("v  ~BufferBlob::%s " PTR_FORMAT, ((BufferBlob *)_cb)->name(), p2i(pc()));
      } else if (_cb->is_compiled()) {
        CompiledMethod* cm = (CompiledMethod*)_cb;
        Method* m = cm->method();
        if (m != NULL) {
          if (cm->is_nmethod()) {

*** 638,25 ***
  #endif
        } else {
          st->print("J  " PTR_FORMAT, p2i(pc()));
        }
      } else if (_cb->is_runtime_stub()) {
!       st->print("v  ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
      } else if (_cb->is_deoptimization_stub()) {
!       st->print("v  ~DeoptimizationBlob");
      } else if (_cb->is_exception_stub()) {
!       st->print("v  ~ExceptionBlob");
      } else if (_cb->is_safepoint_stub()) {
!       st->print("v  ~SafepointBlob");
      } else if (_cb->is_adapter_blob()) {
!       st->print("v  ~AdapterBlob");
      } else if (_cb->is_vtable_blob()) {
!       st->print("v  ~VtableBlob");
      } else if (_cb->is_method_handles_adapter_blob()) {
!       st->print("v  ~MethodHandlesAdapterBlob");
      } else if (_cb->is_uncommon_trap_stub()) {
!       st->print("v  ~UncommonTrapBlob");
      } else {
        st->print("v  blob " PTR_FORMAT, p2i(pc()));
      }
    } else {
      print_C_frame(st, buf, buflen, pc());
--- 726,25 ---
  #endif
        } else {
          st->print("J  " PTR_FORMAT, p2i(pc()));
        }
      } else if (_cb->is_runtime_stub()) {
!       st->print("v  ~RuntimeStub::%s " PTR_FORMAT, ((RuntimeStub *)_cb)->name(), p2i(pc()));
      } else if (_cb->is_deoptimization_stub()) {
!       st->print("v  ~DeoptimizationBlob " PTR_FORMAT, p2i(pc()));
      } else if (_cb->is_exception_stub()) {
!       st->print("v  ~ExceptionBlob " PTR_FORMAT, p2i(pc()));
      } else if (_cb->is_safepoint_stub()) {
!       st->print("v  ~SafepointBlob " PTR_FORMAT, p2i(pc()));
      } else if (_cb->is_adapter_blob()) {
!       st->print("v  ~AdapterBlob " PTR_FORMAT, p2i(pc()));
      } else if (_cb->is_vtable_blob()) {
!       st->print("v  ~VtableBlob " PTR_FORMAT, p2i(pc()));
      } else if (_cb->is_method_handles_adapter_blob()) {
!       st->print("v  ~MethodHandlesAdapterBlob " PTR_FORMAT, p2i(pc()));
      } else if (_cb->is_uncommon_trap_stub()) {
!       st->print("v  ~UncommonTrapBlob " PTR_FORMAT, p2i(pc()));
      } else {
        st->print("v  blob " PTR_FORMAT, p2i(pc()));
      }
    } else {
      print_C_frame(st, buf, buflen, pc());

*** 670,10 ***
--- 758,11 ---
    It uses the Method* in order to get the max_stack value but during GC this
    Method* value saved on the frame is changed by reverse_and_push and hence cannot
    be used. So we save the max_stack value in the FrameClosure object and pass it
    down to the interpreter_frame_expression_stack_at method
  */
+ template <bool relative>
  class InterpreterFrameClosure : public OffsetClosure {
   private:
    const frame* _fr;
    OopClosure*  _f;
    int          _max_locals;

*** 689,24 ***
    }
  
    void offset_do(int offset) {
      oop* addr;
      if (offset < _max_locals) {
!       addr = (oop*) _fr->interpreter_frame_local_at(offset);
        assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
        _f->do_oop(addr);
      } else {
!       addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
        // In case of exceptions, the expression stack is invalid and the esp will be reset to express
        // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
        bool in_stack;
        if (frame::interpreter_frame_expression_stack_direction() > 0) {
!         in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
        } else {
!         in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
        }
        if (in_stack) {
          _f->do_oop(addr);
        }
      }
    }
  
--- 778,26 ---
    }
  
    void offset_do(int offset) {
      oop* addr;
      if (offset < _max_locals) {
!       addr = (oop*) _fr->interpreter_frame_local_at<relative>(offset);
        assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
+       DEBUG_ONLY(if (log_develop_is_enabled(Trace, jvmcont) && relative) log_develop_trace(jvmcont)("InterpreterFrameClosure::offset_do local p: " INTPTR_FORMAT, p2i(addr));)
        _f->do_oop(addr);
      } else {
!       addr = (oop*) _fr->interpreter_frame_expression_stack_at<relative>((offset - _max_locals));
        // In case of exceptions, the expression stack is invalid and the esp will be reset to express
        // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
        bool in_stack;
        if (frame::interpreter_frame_expression_stack_direction() > 0) {
!         in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address<relative>();
        } else {
!         in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address<relative>();
        }
        if (in_stack) {
+         DEBUG_ONLY(if (log_develop_is_enabled(Trace, jvmcont) && relative) log_develop_trace(jvmcont)("InterpreterFrameClosure::offset_do stack p: " INTPTR_FORMAT, p2i(addr));)
          _f->do_oop(addr);
        }
      }
    }
  

*** 809,33 ***
    ArgumentSizeComputer asc(signature);
    int size = asc.size();
    return (oop *)interpreter_frame_tos_at(size);
  }
  
  
  void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) const {
-   assert(is_interpreted_frame(), "Not an interpreted frame");
-   assert(map != NULL, "map must be set");
    Thread *thread = Thread::current();
    methodHandle m (thread, interpreter_frame_method());
!   jint      bci = interpreter_frame_bci();
  
    assert(!Universe::heap()->is_in(m()),
            "must be valid oop");
    assert(m->is_method(), "checking frame value");
    assert((m->is_native() && bci == 0)  ||
           (!m->is_native() && bci >= 0 && bci < m->code_size()),
           "invalid bci value");
  
    // Handle the monitor elements in the activation
    for (
!     BasicObjectLock* current = interpreter_frame_monitor_end();
      current < interpreter_frame_monitor_begin();
!     current = next_monitor_in_interpreter_frame(current)
    ) {
  #ifdef ASSERT
!     interpreter_frame_verify_monitor(current);
  #endif
      current->oops_do(f);
    }
  
    if (m->is_native()) {
--- 900,64 ---
    ArgumentSizeComputer asc(signature);
    int size = asc.size();
    return (oop *)interpreter_frame_tos_at(size);
  }
  
+ oop frame::interpreter_callee_receiver(Symbol* signature) {
+   // TODO: Erik: remove after integration with concurrent stack scanning
+   oop r = *interpreter_callee_receiver_addr(signature);
+   r = NativeAccess<>::oop_load(&r);
+   return r;
+ }
  
+ template <bool relative>
  void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) const {
    Thread *thread = Thread::current();
    methodHandle m (thread, interpreter_frame_method());
!   jint bci = interpreter_frame_bci();
+ 
+   InterpreterOopMap mask;
+   if (query_oop_map_cache) {
+     m->mask_for(bci, &mask);
+   } else {
+     OopMapCache::compute_one_oop_map(m, bci, &mask);
+   }
+ 
+   oops_interpreted_do0<relative>(f, map, m, bci, mask);
+ }
+ 
+ // Explicitly instantiate the template so these specializations can be linked
+ // from other translation units that only see the declaration.
+ // TODO: Rectify as Loom stabilizes...
+ template void frame::oops_interpreted_do<true> (OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) const;
+ template void frame::oops_interpreted_do<false>(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) const;
+ 
+ template <bool relative>
+ void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, const InterpreterOopMap& mask) const {
+   Thread *thread = Thread::current();
+   methodHandle m (thread, interpreter_frame_method());
+   jint bci = interpreter_frame_bci();
+   oops_interpreted_do0<relative>(f, map, m, bci, mask);
+ }
  
+ template <bool relative>
+ void frame::oops_interpreted_do0(OopClosure* f, const RegisterMap* map, methodHandle m, jint bci, const InterpreterOopMap& mask) const {
+   assert(is_interpreted_frame(), "Not an interpreted frame");
    assert(!Universe::heap()->is_in(m()),
            "must be valid oop");
    assert(m->is_method(), "checking frame value");
    assert((m->is_native() && bci == 0)  ||
           (!m->is_native() && bci >= 0 && bci < m->code_size()),
           "invalid bci value");
  
    // Handle the monitor elements in the activation
    for (
!     BasicObjectLock* current = interpreter_frame_monitor_end<relative>();
      current < interpreter_frame_monitor_begin();
!     current = next_monitor_in_interpreter_frame<relative>(current)
    ) {
  #ifdef ASSERT
!     interpreter_frame_verify_monitor<relative>(current);
  #endif
      current->oops_do(f);
    }
  
    if (m->is_native()) {

*** 860,13 ***
    if (!m->is_native()) {
      Bytecode_invoke call = Bytecode_invoke_check(m, bci);
      if (call.is_valid()) {
        signature = call.signature();
        has_receiver = call.has_receiver();
!       if (map->include_argument_oops() &&
!           interpreter_frame_expression_stack_size() > 0) {
!         ResourceMark rm(thread);  // is this right ???
          // we are at a call site & the expression stack is not empty
          // => process callee's arguments
          //
          // Note: The expression stack can be empty if an exception
          //       occurred during method resolution/execution. In all
--- 982,13 ---
    if (!m->is_native()) {
      Bytecode_invoke call = Bytecode_invoke_check(m, bci);
      if (call.is_valid()) {
        signature = call.signature();
        has_receiver = call.has_receiver();
!       if (map != NULL && map->include_argument_oops() &&
!           interpreter_frame_expression_stack_size<relative>() > 0) {
!         // ResourceMark rm(thread);  // is this right ???
          // we are at a call site & the expression stack is not empty
          // => process callee's arguments
          //
          // Note: The expression stack can be empty if an exception
          //       occurred during method resolution/execution. In all

*** 878,33 ***
          oops_interpreted_arguments_do(signature, has_receiver, f);
        }
      }
    }
  
!   InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
  
    // process locals & expression stack
!   InterpreterOopMap mask;
-   if (query_oop_map_cache) {
-     m->mask_for(bci, &mask);
-   } else {
-     OopMapCache::compute_one_oop_map(m, bci, &mask);
-   }
    mask.iterate_oop(&blk);
  }
  
  
  void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
    InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
    finder.oops_do();
  }
  
! void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map,
-                               DerivedPointerIterationMode derived_mode) const {
    assert(_cb != NULL, "sanity check");
!   if (_cb->oop_maps() != NULL) {
!     OopMapSet::oops_do(this, reg_map, f, derived_mode);
  
      // Preserve potential arguments for a callee. We handle this by dispatching
      // on the codeblob. For c2i, we do
      if (reg_map->include_argument_oops()) {
        _cb->preserve_callee_argument_oops(*this, reg_map, f);
--- 1000,32 ---
          oops_interpreted_arguments_do(signature, has_receiver, f);
        }
      }
    }
  
!   InterpreterFrameClosure<relative> blk(this, max_locals, m->max_stack(), f);
  
    // process locals & expression stack
!   // mask.print();
    mask.iterate_oop(&blk);
  }
  
  
  void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
    InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
    finder.oops_do();
  }
  
! void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
    assert(_cb != NULL, "sanity check");
!   assert((oop_map() == NULL) == (_cb->oop_maps() == NULL), "frame and _cb must agree that oopmap is set or not");
!   if (oop_map() != NULL) {
+     if (df != NULL) {
+       _oop_map->oops_do(this, reg_map, f, df);
+     } else {
+       _oop_map->oops_do(this, reg_map, f, derived_mode);
+     }
  
      // Preserve potential arguments for a callee. We handle this by dispatching
      // on the codeblob. For c2i, we do
      if (reg_map->include_argument_oops()) {
        _cb->preserve_callee_argument_oops(*this, reg_map, f);

*** 939,11 ***
    virtual void handle_oop_offset() {
      // Extract low order register number from register array.
      // In LP64-land, the high-order bits are valid but unhelpful.
      VMReg reg = _regs[_offset].first();
      oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
!     assert(loc != NULL, "missing register map entry");
      _f->do_oop(loc);
    }
  
   public:
    CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
--- 1060,19 ---
    virtual void handle_oop_offset() {
      // Extract low order register number from register array.
      // In LP64-land, the high-order bits are valid but unhelpful.
      VMReg reg = _regs[_offset].first();
      oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
!   #ifdef ASSERT
+     if (loc == NULL) {
+       if (_reg_map->should_skip_missing())
+         return;
+       tty->print_cr("Error walking frame oops:");
+       _fr.print_on(tty);
+       assert(loc != NULL, "missing register map entry reg: " INTPTR_FORMAT " %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
+     }
+   #endif
      _f->do_oop(loc);
    }
  
   public:
    CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)

*** 976,16 ***
    }
  };
  
  void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
                                         const RegisterMap* reg_map, OopClosure* f) const {
!   ResourceMark rm;
    CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
    finder.oops_do();
  }
  
- 
  // Get receiver out of callers frame, i.e. find parameter 0 in callers
  // frame.  Consult ADLC for where parameter 0 is to be found.  Then
  // check local reg_map for it being a callee-save register or argument
  // register, both of which are saved in the local frame.  If not found
  // there, it must be an in-stack argument of the caller.
--- 1105,15 ---
    }
  };
  
  void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
                                         const RegisterMap* reg_map, OopClosure* f) const {
!   // ResourceMark rm;
    CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
    finder.oops_do();
  }
  
  // Get receiver out of callers frame, i.e. find parameter 0 in callers
  // frame.  Consult ADLC for where parameter 0 is to be found.  Then
  // check local reg_map for it being a callee-save register or argument
  // register, both of which are saved in the local frame.  If not found
  // there, it must be an in-stack argument of the caller.

*** 999,10 ***
--- 1127,12 ---
    if (oop_adr == NULL) {
      guarantee(oop_adr != NULL, "bad register save location");
      return NULL;
    }
    oop r = *oop_adr;
+   // TODO: Erik: remove after integration with concurrent stack scanning
+   r = NativeAccess<>::oop_load(&r);
    assert(Universe::heap()->is_in_or_null(r), "bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", p2i(r), p2i(r));
    return r;
  }
  
  

*** 1020,10 ***
--- 1150,12 ---
    assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
           "Should not call this unless it's a native nmethod");
    int byte_offset = in_bytes(nm->native_receiver_sp_offset());
    assert(byte_offset >= 0, "should not see invalid offset");
    oop owner = ((oop*) sp())[byte_offset / wordSize];
+   // TODO: Erik: remove after integration with concurrent stack scanning
+   owner = NativeAccess<>::oop_load(&owner);
    assert( Universe::heap()->is_in(owner), "bad receiver" );
    return owner;
  }
  
  void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) const {

*** 1037,42 ***
    }
    // Traverse the Handle Block saved in the entry frame
    entry_frame_call_wrapper()->oops_do(f);
  }
  
! void frame::oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
!                     DerivedPointerIterationMode derived_mode) const {
!   oops_do_internal(f, cf, map, true, derived_mode);
! }
  
! void frame::oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map) const {
! #if COMPILER2_OR_JVMCI
!   oops_do_internal(f, cf, map, true, DerivedPointerTable::is_active() ?
!                                      DerivedPointerIterationMode::_with_table :
!                                      DerivedPointerIterationMode::_ignore);
! #else
!   oops_do_internal(f, cf, map, true, DerivedPointerIterationMode::_ignore);
! #endif
  }
  
! void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
-                              bool use_interpreter_oop_map_cache, DerivedPointerIterationMode derived_mode) const {
  #ifndef PRODUCT
    // simulate GC crash here to dump java thread in error report
    if (CrashGCForDumpingJavaThread) {
      char *t = NULL;
      *t = 'c';
    }
  #endif
    if (is_interpreted_frame()) {
!     oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
    } else if (is_entry_frame()) {
      oops_entry_do(f, map);
    } else if (is_optimized_entry_frame()) {
      _cb->as_optimized_entry_blob()->oops_do(f, *this);
    } else if (CodeCache::contains(pc())) {
!     oops_code_blob_do(f, cf, map, derived_mode);
    } else {
      ShouldNotReachHere();
    }
  }
  
--- 1169,43 ---
    }
    // Traverse the Handle Block saved in the entry frame
    entry_frame_call_wrapper()->oops_do(f);
  }
  
! bool frame::is_deoptimized_frame() const {
!   assert(_deopt_state != unknown, "not answerable");
!   if (_deopt_state == is_deoptimized) {
!     return true;
+   }
  
!   /* This method only checks whether the frame is deoptimized,
!    * i.e. whether its return address has been patched.
!    * It does not check whether the PC we would return to
!    * points at a deopt instruction. */
!   /*if (_cb != NULL && _cb->is_nmethod()) {
!     return NativeDeoptInstruction::is_deopt_at(_pc);
!   }*/
!   return false;
  }
  
! void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* map, bool use_interpreter_oop_map_cache) const {
  #ifndef PRODUCT
    // simulate GC crash here to dump java thread in error report
    if (CrashGCForDumpingJavaThread) {
      char *t = NULL;
      *t = 'c';
    }
  #endif
    if (is_interpreted_frame()) {
!     map->thread() != NULL ? oops_interpreted_do<false>(f, map, use_interpreter_oop_map_cache)
+                           : oops_interpreted_do<true >(f, map, use_interpreter_oop_map_cache);
    } else if (is_entry_frame()) {
      oops_entry_do(f, map);
    } else if (is_optimized_entry_frame()) {
      _cb->as_optimized_entry_blob()->oops_do(f, *this);
    } else if (CodeCache::contains(pc())) {
!     oops_code_blob_do(f, cf, df, derived_mode, map);
    } else {
      ShouldNotReachHere();
    }
  }
  

*** 1092,10 ***
--- 1225,17 ---
      f->do_metadata(m);
    }
  }
  
  void frame::verify(const RegisterMap* map) const {
+ #ifndef PRODUCT
+   if (TraceCodeBlobStacks) {
+     tty->print_cr("*** verify");
+     print_on(tty);
+   }
+ #endif
+ 
    // for now make sure receiver type is correct
    if (is_interpreted_frame()) {
      Method* method = interpreter_frame_method();
      guarantee(method->is_method(), "method is wrong in frame::verify");
      if (!method->is_static()) {

*** 1105,12 ***
      }
    }
  #if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_empty(), "must be empty before verify");
  #endif
    if (map->update_map()) { // The map has to be up-to-date for the current frame
!     oops_do_internal(&VerifyOopClosure::verify_oop, NULL, map, false, DerivedPointerIterationMode::_ignore);
    }
  }
  
  
  #ifdef ASSERT
--- 1245,13 ---
      }
    }
  #if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_empty(), "must be empty before verify");
  #endif
+ 
    if (map->update_map()) { // The map has to be up-to-date for the current frame
!     oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, DerivedPointerIterationMode::_ignore, map, false);
    }
  }
  
  
  #ifdef ASSERT

*** 1127,14 ***
    return false;
  }
  #endif
  
  #ifdef ASSERT
  void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
    assert(is_interpreted_frame(), "Not an interpreted frame");
    // verify that the value is in the right part of the frame
!   address low_mark  = (address) interpreter_frame_monitor_end();
    address high_mark = (address) interpreter_frame_monitor_begin();
    address current   = (address) value;
  
    const int monitor_size = frame::interpreter_frame_monitor_size();
    guarantee((high_mark - current) % monitor_size  ==  0         , "Misaligned top of BasicObjectLock*");
--- 1268,15 ---
    return false;
  }
  #endif
  
  #ifdef ASSERT
+ template <bool relative>
  void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
    assert(is_interpreted_frame(), "Not an interpreted frame");
    // verify that the value is in the right part of the frame
!   address low_mark  = (address) interpreter_frame_monitor_end<relative>();
    address high_mark = (address) interpreter_frame_monitor_begin();
    address current   = (address) value;
  
    const int monitor_size = frame::interpreter_frame_monitor_size();
    guarantee((high_mark - current) % monitor_size  ==  0         , "Misaligned top of BasicObjectLock*");

*** 1144,15 ***
    guarantee( current >= low_mark                               , "Current BasicObjectLock* below than low_mark");
  }
  #endif
  
  #ifndef PRODUCT
  // callers need a ResourceMark because of name_and_sig_as_C_string() usage,
  // RA allocated string is returned to the caller
! void frame::describe(FrameValues& values, int frame_no) {
    // boundaries: sp and the 'real' frame pointer
!   values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
    intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
  
    // print frame info at the highest boundary
    intptr_t* info_address = MAX2(sp(), frame_pointer);
  
--- 1286,67 ---
    guarantee( current >= low_mark                               , "Current BasicObjectLock* below than low_mark");
  }
  #endif
  
  #ifndef PRODUCT
+ 
+ // Returns true iff the address p is readable and *(intptr_t*)p != errvalue
+ extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue);
+ 
+ class FrameValuesOopClosure: public OopClosure, public DerivedOopClosure {
+ private:
+   FrameValues& _values;
+   int _frame_no;
+ public:
+   FrameValuesOopClosure(FrameValues& values, int frame_no) : _values(values), _frame_no(frame_no) {}
+   virtual void do_oop(oop* p) {
+     bool good = *p == nullptr || (dbg_is_safe(*p, -1) && dbg_is_safe((*p)->klass(), -1) && oopDesc::is_oop_or_null(*p));
+     _values.describe(_frame_no, (intptr_t*)p, err_msg("oop%s for #%d", good ? "" : " (BAD)", _frame_no)); 
+   }
+   virtual void do_oop(narrowOop* p) { _values.describe(_frame_no, (intptr_t*)p, err_msg("narrow oop for #%d", _frame_no)); }
+   virtual void do_derived_oop(oop* base, derived_pointer* derived) { 
+     _values.describe(_frame_no, (intptr_t*)derived, err_msg("derived pointer (base: " INTPTR_FORMAT ") for #%d", p2i(base), _frame_no));
+   }
+ };
+ 
+ class FrameValuesOopMapClosure: public OopMapClosure {
+ private:
+   const frame* _fr;
+   const RegisterMap* _reg_map;
+   FrameValues& _values;
+   int _frame_no;
+ public:
+   FrameValuesOopMapClosure(const frame* fr, const RegisterMap* reg_map, FrameValues& values, int frame_no)
+    : _fr(fr), _reg_map(reg_map), _values(values), _frame_no(frame_no) {}
+ 
+   virtual void do_value(VMReg reg, OopMapValue::oop_types type) {
+     intptr_t* p = (intptr_t*)_fr->oopmapreg_to_location(reg, _reg_map);
+     if (p != NULL && (((intptr_t)p & WordAlignmentMask) == 0)) {
+       const char* type_name = NULL;
+       switch(type) {
+         case OopMapValue::oop_value:          type_name = "oop";          break;
+         case OopMapValue::narrowoop_value:    type_name = "narrow oop";   break;
+         case OopMapValue::callee_saved_value: type_name = "callee-saved"; break;
+         case OopMapValue::derived_oop_value:  type_name = "derived";      break;
+         // case OopMapValue::live_value:         type_name = "live";         break;
+         default: break;
+       }
+       if (type_name != NULL)
+         _values.describe(_frame_no, p, err_msg("%s for #%d", type_name, _frame_no));
+     }
+   }
+ };
+ 
+ template void frame::describe<false>(FrameValues& values, int frame_no, const RegisterMap* reg_map);
+ template void frame::describe<true >(FrameValues& values, int frame_no, const RegisterMap* reg_map);
+ 
  // callers need a ResourceMark because of name_and_sig_as_C_string() usage,
  // RA allocated string is returned to the caller
! template <bool relative>
+ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_map) {
    // boundaries: sp and the 'real' frame pointer
!   values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 0);
    intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
  
    // print frame info at the highest boundary
    intptr_t* info_address = MAX2(sp(), frame_pointer);
  

*** 1161,64 ***
      values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
    }
  
    if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
      // Label values common to most frames
!     values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
    }
  
    if (is_interpreted_frame()) {
      Method* m = interpreter_frame_method();
      int bci = interpreter_frame_bci();
  
      // Label the method and current bci
      values.describe(-1, info_address,
!                     FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
      values.describe(-1, info_address,
!                     err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
      if (m->max_locals() > 0) {
!       intptr_t* l0 = interpreter_frame_local_at(0);
!       intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
!       values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
        // Report each local and mark as owned by this frame
        for (int l = 0; l < m->max_locals(); l++) {
!         intptr_t* l0 = interpreter_frame_local_at(l);
!         values.describe(frame_no, l0, err_msg("local %d", l));
        }
      }
  
      // Compute the actual expression stack size
      InterpreterOopMap mask;
      OopMapCache::compute_one_oop_map(methodHandle(Thread::current(), m), bci, &mask);
      intptr_t* tos = NULL;
      // Report each stack element and mark as owned by this frame
      for (int e = 0; e < mask.expression_stack_size(); e++) {
!       tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
!       values.describe(frame_no, interpreter_frame_expression_stack_at(e),
!                       err_msg("stack %d", e));
      }
      if (tos != NULL) {
!       values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
      }
!     if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
!       values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
!       values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
      }
    } else if (is_entry_frame()) {
      // For now just label the frame
      values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
!   } else if (is_compiled_frame()) {
      // For now just label the frame
!     CompiledMethod* cm = (CompiledMethod*)cb();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
                                         p2i(cm),
                                         cm->method()->name_and_sig_as_C_string(),
                                         (_deopt_state == is_deoptimized) ?
                                         " (deoptimized)" :
                                         ((_deopt_state == unknown) ? " (state unknown)" : "")),
!                     2);
    } else if (is_native_frame()) {
      // For now just label the frame
      nmethod* nm = cb()->as_nmethod_or_null();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
--- 1355,169 ---
      values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
    }
  
    if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
      // Label values common to most frames
!     values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no), 0);
    }
  
    if (is_interpreted_frame()) {
      Method* m = interpreter_frame_method();
      int bci = interpreter_frame_bci();
+     InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
  
      // Label the method and current bci
      values.describe(-1, info_address,
!                     FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 3);
+     if (desc != NULL) {
+       values.describe(-1, info_address, err_msg("- %s codelet: %s", 
+         desc->bytecode()    >= 0    ? Bytecodes::name(desc->bytecode()) : "",
+         desc->description() != NULL ? desc->description()               : "?"), 2);
+     }
      values.describe(-1, info_address,
!                     err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 2);
+     values.describe(frame_no, (intptr_t*)sender_pc_addr(), Continuation::is_return_barrier_entry(*sender_pc_addr()) ? "return address (return barrier)" : "return address");
+ 
      if (m->max_locals() > 0) {
!       intptr_t* l0 = interpreter_frame_local_at<relative>(0);
!       intptr_t* ln = interpreter_frame_local_at<relative>(m->max_locals() - 1);
!       values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 2);
        // Report each local and mark as owned by this frame
        for (int l = 0; l < m->max_locals(); l++) {
!         intptr_t* l0 = interpreter_frame_local_at<relative>(l);
!         values.describe(frame_no, l0, err_msg("local %d", l), 1);
        }
      }
  
+     if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end<relative>()) {
+       values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
+       values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end<relative>(), "monitors end");
+     }
+ 
      // Compute the actual expression stack size
      InterpreterOopMap mask;
      OopMapCache::compute_one_oop_map(methodHandle(Thread::current(), m), bci, &mask);
      intptr_t* tos = NULL;
      // Report each stack element and mark as owned by this frame
      for (int e = 0; e < mask.expression_stack_size(); e++) {
!       tos = MAX2(tos, interpreter_frame_expression_stack_at<relative>(e));
!       values.describe(frame_no, interpreter_frame_expression_stack_at<relative>(e),
!                       err_msg("stack %d", e), 1);
      }
      if (tos != NULL) {
!       values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
      }
! 
!     if (reg_map != NULL) {
!       FrameValuesOopClosure oopsFn(values, frame_no);
+       oops_do(&oopsFn, NULL, &oopsFn, reg_map);
      }
    } else if (is_entry_frame()) {
      // For now just label the frame
      values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
!   } else if (cb()->is_compiled()) {
      // For now just label the frame
!     CompiledMethod* cm = cb()->as_compiled_method();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
                                         p2i(cm),
                                         cm->method()->name_and_sig_as_C_string(),
                                         (_deopt_state == is_deoptimized) ?
                                         " (deoptimized)" :
                                         ((_deopt_state == unknown) ? " (state unknown)" : "")),
!                     3);
+ 
+     { // mark arguments (see nmethod::print_nmethod_labels)
+       Method* m = cm->method();
+ 
+       int stack_slot_offset = cm->frame_size() * wordSize; // offset, in bytes, to caller sp
+       int sizeargs = m->size_of_parameters();
+ 
+       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
+       VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
+       {
+         int sig_index = 0;
+         if (!m->is_static()) sig_bt[sig_index++] = T_OBJECT; // 'this'
+         for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
+           BasicType t = ss.type();
+           assert(type2size[t] == 1 || type2size[t] == 2, "size is 1 or 2");
+           sig_bt[sig_index++] = t;
+           if (type2size[t] == 2) sig_bt[sig_index++] = T_VOID;
+         }
+         assert(sig_index == sizeargs, "");
+       }
+       int out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
+       assert (out_preserve ==  m->num_stack_arg_slots(), "");
+       int sig_index = 0;
+       int arg_index = (m->is_static() ? 0 : -1);
+       for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
+         bool at_this = (arg_index == -1);
+         bool at_old_sp = false;
+         BasicType t = (at_this ? T_OBJECT : ss.type());
+         assert(t == sig_bt[sig_index], "sigs in sync");
+         VMReg fst = regs[sig_index].first();
+         if (fst->is_stack()) {
+           assert (((int)fst->reg2stack()) >= 0, "reg2stack: " INTPTR_FORMAT, fst->reg2stack());
+           int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
+           intptr_t* stack_address = (intptr_t*)((address)sp() + offset);
+           if (at_this)
+             values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
+           else
+             values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
+         }
+         sig_index += type2size[t];
+         arg_index += 1;
+         if (!at_this) ss.next();
+       }
+     }
+ 
+     if (reg_map != NULL && is_java_frame()) {
+       int scope_no = 0;
+       for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != NULL; scope = scope->sender(), scope_no++) {
+         Method* m = scope->method();
+         int  bci = scope->bci();
+         values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
+ 
+         { // mark locals
+           GrowableArray<ScopeValue*>* scvs = scope->locals();
+           int scvs_length = scvs != NULL ? scvs->length() : 0;
+           for (int i = 0; i < scvs_length; i++) {
+             intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
+             if (stack_address != NULL)
+               values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
+           }
+         }
+         { // mark expression stack
+           GrowableArray<ScopeValue*>* scvs = scope->expressions();
+           int scvs_length = scvs != NULL ? scvs->length() : 0;
+           for (int i = 0; i < scvs_length; i++) {
+             intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
+             if (stack_address != NULL)
+               values.describe(frame_no, stack_address, err_msg("stack %d for #%d (scope %d)", i, frame_no, scope_no), 1);
+           }
+         }
+       }
+ 
+       FrameValuesOopClosure oopsFn(values, frame_no);
+       oops_do(&oopsFn, NULL, &oopsFn, reg_map);
+ 
+       if (oop_map() != NULL) {
+         FrameValuesOopMapClosure valuesFn(this, reg_map, values, frame_no);
+         // also OopMapValue::live_value ??
+         oop_map()->all_type_do(this, OopMapValue::callee_saved_value, &valuesFn);
+       }
+     }
+ 
+     if (cm->method()->is_continuation_enter_intrinsic()) {
+       address usp = (address)unextended_sp();
+       values.describe(frame_no, (intptr_t*)(usp + in_bytes(ContinuationEntry::parent_offset())), "parent");
+       values.describe(frame_no, (intptr_t*)(usp + in_bytes(ContinuationEntry::cont_offset())),   "continuation");
+       values.describe(frame_no, (intptr_t*)(usp + in_bytes(ContinuationEntry::chunk_offset())),   "chunk");
+       values.describe(frame_no, (intptr_t*)(usp + in_bytes(ContinuationEntry::argsize_offset())), "argsize");
+       // values.describe(frame_no, (intptr_t*)(usp + in_bytes(ContinuationEntry::parent_cont_fastpath_offset())),      "parent fastpath");
+       // values.describe(frame_no, (intptr_t*)(usp + in_bytes(ContinuationEntry::parent_held_monitor_count_offset())), "parent held monitor count");
+     }
    } else if (is_native_frame()) {
      // For now just label the frame
      nmethod* nm = cb()->as_nmethod_or_null();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,

*** 1235,10 ***
--- 1534,13 ---
  
    // platform dependent additional data
    describe_pd(values, frame_no);
  }
  
+ void frame::describe_top(FrameValues& values) {
+   describe_top_pd(values);
+ }
  #endif
  
  #ifndef PRODUCT
  
  void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {

*** 1274,10 ***
--- 1576,11 ---
        }
      } else {
        prev = fv;
      }
    }
+   // if (error) { tty->cr(); print_on((JavaThread*)nullptr, tty); }
    assert(!error, "invalid layout");
  }
  #endif // ASSERT
  
  void FrameValues::print_on(JavaThread* thread, outputStream* st) {

*** 1289,25 ***
    int min_index = 0;
    int max_index = _values.length() - 1;
    intptr_t* v0 = _values.at(min_index).location;
    intptr_t* v1 = _values.at(max_index).location;
  
!   if (thread == Thread::current()) {
!     while (!thread->is_in_live_stack((address)v0)) {
!       v0 = _values.at(++min_index).location;
!     }
!     while (!thread->is_in_live_stack((address)v1)) {
!       v1 = _values.at(--max_index).location;
!     }
-   } else {
-     while (!thread->is_in_full_stack((address)v0)) {
-       v0 = _values.at(++min_index).location;
-     }
-     while (!thread->is_in_full_stack((address)v1)) {
-       v1 = _values.at(--max_index).location;
      }
    }
    intptr_t* min = MIN2(v0, v1);
    intptr_t* max = MAX2(v0, v1);
    intptr_t* cur = max;
    intptr_t* last = NULL;
    for (int i = max_index; i >= min_index; i--) {
--- 1592,42 ---
    int min_index = 0;
    int max_index = _values.length() - 1;
    intptr_t* v0 = _values.at(min_index).location;
    intptr_t* v1 = _values.at(max_index).location;
  
!   if (thread != NULL) {
!     if (thread == Thread::current()) {
!       while (!thread->is_in_live_stack((address)v0)) v0 = _values.at(++min_index).location;
!       while (!thread->is_in_live_stack((address)v1)) v1 = _values.at(--max_index).location;
!     } else {
!       while (!thread->is_in_full_stack((address)v0)) v0 = _values.at(++min_index).location;
!       while (!thread->is_in_full_stack((address)v1)) v1 = _values.at(--max_index).location;
      }
    }
+   
+   print_on(st, min_index, max_index, v0, v1);
+ }
+ 
+ void FrameValues::print_on(stackChunkOop chunk, outputStream* st) {
+   assert (chunk->is_stackChunk(), "");
+ 
+   _values.sort(compare);
+ 
+   intptr_t* start = chunk->start_address();
+   intptr_t* end = chunk->end_address() + 1;
+ 
+   int min_index = 0;
+   int max_index = _values.length() - 1;
+   intptr_t* v0 = _values.at(min_index).location;
+   intptr_t* v1 = _values.at(max_index).location;
+   while (!(start <= v0 && v0 <= end)) v0 = _values.at(++min_index).location;
+   while (!(start <= v1 && v1 <= end)) v1 = _values.at(--max_index).location;
+ 
+   print_on(st, min_index, max_index, v0, v1, true);
+ }
+ 
+ void FrameValues::print_on(outputStream* st, int min_index, int max_index, intptr_t* v0, intptr_t* v1, bool relative) {
    intptr_t* min = MIN2(v0, v1);
    intptr_t* max = MAX2(v0, v1);
    intptr_t* cur = max;
    intptr_t* last = NULL;
    for (int i = max_index; i >= min_index; i--) {

*** 1318,11 ***
      }
      if (last == fv.location) {
        const char* spacer = "          " LP64_ONLY("        ");
        st->print_cr(" %s  %s %s", spacer, spacer, fv.description);
      } else {
!       st->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", p2i(fv.location), *fv.location, fv.description);
        last = fv.location;
        cur--;
      }
    }
  }
--- 1638,17 ---
      }
      if (last == fv.location) {
        const char* spacer = "          " LP64_ONLY("        ");
        st->print_cr(" %s  %s %s", spacer, spacer, fv.description);
      } else {
!       if (relative
+           && *fv.location != 0 && *fv.location > -100 && *fv.location < 100 
+           && (strncmp(fv.description, "interpreter_frame_", 18) == 0 || strstr(fv.description, " method "))) {
+         st->print_cr(" " INTPTR_FORMAT ": %18d %s", p2i(fv.location), (int)*fv.location, fv.description);
+       } else {
+         st->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", p2i(fv.location), *fv.location, fv.description);
+       }
        last = fv.location;
        cur--;
      }
    }
  }
< prev index next >