
src/hotspot/share/runtime/frame.cpp


*** 23,65 ****
   */
  
  #include "precompiled.hpp"
  #include "classfile/moduleEntry.hpp"
  #include "code/codeCache.hpp"
  #include "code/vmreg.inline.hpp"
  #include "compiler/abstractCompiler.hpp"
  #include "compiler/disassembler.hpp"
  #include "gc/shared/collectedHeap.inline.hpp"
  #include "interpreter/interpreter.hpp"
  #include "interpreter/oopMapCache.hpp"
  #include "memory/resourceArea.hpp"
  #include "memory/universe.hpp"
  #include "oops/markOop.hpp"
  #include "oops/method.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
  #include "oops/verifyOopClosure.hpp"
  #include "prims/methodHandles.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/javaCalls.hpp"
  #include "runtime/monitorChunk.hpp"
  #include "runtime/os.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/signature.hpp"
  #include "runtime/stubCodeGenerator.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.inline.hpp"
  #include "utilities/debug.hpp"
  #include "utilities/decoder.hpp"
  #include "utilities/formatBuffer.hpp"
  
! RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
    _thread = thread;
    _update_map = update_map;
    clear();
    debug_only(_update_for_id = NULL;)
  #ifndef PRODUCT
    for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
  #endif /* PRODUCT */
  }
--- 23,86 ----
   */
  
  #include "precompiled.hpp"
  #include "classfile/moduleEntry.hpp"
  #include "code/codeCache.hpp"
+ #include "code/scopeDesc.hpp"
  #include "code/vmreg.inline.hpp"
  #include "compiler/abstractCompiler.hpp"
  #include "compiler/disassembler.hpp"
  #include "gc/shared/collectedHeap.inline.hpp"
  #include "interpreter/interpreter.hpp"
  #include "interpreter/oopMapCache.hpp"
+ #include "logging/log.hpp"
  #include "memory/resourceArea.hpp"
  #include "memory/universe.hpp"
  #include "oops/markOop.hpp"
  #include "oops/method.hpp"
  #include "oops/methodData.hpp"
  #include "oops/oop.inline.hpp"
  #include "oops/verifyOopClosure.hpp"
  #include "prims/methodHandles.hpp"
+ #include "runtime/continuation.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/handles.inline.hpp"
  #include "runtime/javaCalls.hpp"
  #include "runtime/monitorChunk.hpp"
  #include "runtime/os.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/signature.hpp"
+ #include "runtime/stackValue.hpp"
  #include "runtime/stubCodeGenerator.hpp"
  #include "runtime/stubRoutines.hpp"
  #include "runtime/thread.inline.hpp"
  #include "utilities/debug.hpp"
  #include "utilities/decoder.hpp"
  #include "utilities/formatBuffer.hpp"
  
! RegisterMap::RegisterMap(JavaThread *thread, bool update_map, bool walk_cont, bool validate_oops)
!   : _cont(Handle()) {
    _thread = thread;
    _update_map = update_map;
+   _validate_oops = validate_oops;
+   _walk_cont = walk_cont;
+   DEBUG_ONLY(_skip_missing = false;)
    clear();
    debug_only(_update_for_id = NULL;)
+ 
+   _on_hstack = false;
+   _last_vstack_fp = NULL;
+   if (walk_cont) {
+     // we allocate the handle now (rather than in set_cont) because sometimes (StackWalker) the handle must live across HandleMarks
+     if (thread != NULL && thread->last_continuation() != NULL) {
+       _cont = Handle(Thread::current(), thread->last_continuation());
+       *(_cont.raw_value()) = NULL; // TODO UGLY : we just need to allocate a NULL handle
+     } else {
+       _cont = Handle();
+     }
+   }
+ 
  #ifndef PRODUCT
    for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
  #endif /* PRODUCT */
  }
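A minimal usage sketch for the extended RegisterMap constructor above, assuming the new walk_cont/validate_oops parameters added in this change; the helper name and the particular argument choices are illustrative only:

    // Illustrative walker (not part of this change). walk_cont = true lets the map
    // follow frames of a mounted continuation; validate_oops = false relaxes checking.
    static void walk_frames_sketch(JavaThread* thread) {
      if (!thread->has_last_Java_frame()) return;  // nothing to walk
      RegisterMap map(thread, false /* update_map */, true /* walk_cont */, false /* validate_oops */);
      for (frame fr = thread->last_frame(); !fr.is_first_frame(); fr = fr.sender(&map)) {
        fr.print_value();  // any per-frame inspection goes here
      }
    }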
*** 68,81 ****
    assert(map != NULL, "RegisterMap must be present");
    _thread                = map->thread();
    _update_map            = map->update_map();
    _include_argument_oops = map->include_argument_oops();
    debug_only(_update_for_id = map->_update_for_id;)
    pd_initialize_from(map);
    if (update_map()) {
      for(int i = 0; i < location_valid_size; i++) {
!       LocationValidType bits = !update_map() ? 0 : map->_location_valid[i];
        _location_valid[i] = bits;
        // for whichever bits are set, pull in the corresponding map->_location
        int j = i*location_valid_type_size;
        while (bits != 0) {
          if ((bits & 1) != 0) {
--- 89,110 ----
    assert(map != NULL, "RegisterMap must be present");
    _thread                = map->thread();
    _update_map            = map->update_map();
    _include_argument_oops = map->include_argument_oops();
    debug_only(_update_for_id = map->_update_for_id;)
+   _validate_oops = map->_validate_oops;
+   _walk_cont = map->_walk_cont;
+   DEBUG_ONLY(_skip_missing = map->_skip_missing;)
+ 
+   _cont = map->_cont;
+   _on_hstack = map->_on_hstack;
+   _last_vstack_fp = map->_last_vstack_fp;
+ 
    pd_initialize_from(map);
    if (update_map()) {
      for(int i = 0; i < location_valid_size; i++) {
!       LocationValidType bits = map->_location_valid[i];
        _location_valid[i] = bits;
        // for whichever bits are set, pull in the corresponding map->_location
        int j = i*location_valid_type_size;
        while (bits != 0) {
          if ((bits & 1) != 0) {
*** 87,99 ****
          }
        }
      }
  }
  
  void RegisterMap::clear() {
    set_include_argument_oops(true);
!   if (_update_map) {
      for(int i = 0; i < location_valid_size; i++) {
        _location_valid[i] = 0;
      }
      pd_clear();
    } else {
--- 116,150 ----
          }
        }
      }
  }
  
+ void RegisterMap::set_in_cont(bool on_hstack) {
+   assert (_walk_cont, "");
+   _on_hstack = on_hstack;
+   if (!on_hstack)
+     _last_vstack_fp = NULL;
+ }
+ 
+ void RegisterMap::set_cont(Handle cont) {
+   assert (_walk_cont, "");
+   _cont = cont;
+ }
+ 
+ void RegisterMap::set_cont(oop cont) {
+   assert (_walk_cont, "");
+   if (cont != NULL) {
+     assert (_cont.not_null(), "");
+     *(_cont.raw_value()) = cont; // reuse handle. see comment above in the constructor
+   } else {
+     _cont = Handle();
+   }
+ }
+ 
  void RegisterMap::clear() {
    set_include_argument_oops(true);
!   if (update_map()) {
      for(int i = 0; i < location_valid_size; i++) {
        _location_valid[i] = 0;
      }
      pd_clear();
    } else {
*** 101,110 ****
--- 152,169 ----
    }
  }
  
  #ifndef PRODUCT
  
+ VMReg RegisterMap::find_register_spilled_here(void* p) {
+   for(int i = 0; i < RegisterMap::reg_count; i++) {
+     VMReg r = VMRegImpl::as_VMReg(i);
+     if (p == location(r)) return r;
+   }
+   return NULL;
+ }
+ 
  void RegisterMap::print_on(outputStream* st) const {
    st->print_cr("Register map");
    for(int i = 0; i < reg_count; i++) {
  
      VMReg r = VMRegImpl::as_VMReg(i);
*** 132,141 ****
--- 191,203 ----
  // that happens for deoptimized frames. In addition it makes the value the
  // hardware would want to see in the native frame. The only user (at this point)
  // is deoptimization. It likely no one else should ever use it.
  
  address frame::raw_pc() const {
+   // if (Continuation::is_continuation_entry_frame(*this)) {
+   //   return StubRoutines::cont_returnBarrier();
+   // }
    if (is_deoptimized_frame()) {
      CompiledMethod* cm = cb()->as_compiled_method_or_null();
      if (cm->is_method_handle_return(pc()))
        return cm->deopt_mh_handler_begin() - pc_return_offset;
      else
*** 160,177 ****
    _pc = newpc;
    _cb = CodeCache::find_blob_unsafe(_pc);
  
  }
  
  // type testers
  bool frame::is_ignored_frame() const {
    return false;  // FIXME: some LambdaForm frames should be ignored
  }
- bool frame::is_deoptimized_frame() const {
-   assert(_deopt_state != unknown, "not answerable");
-   return _deopt_state == is_deoptimized;
- }
  
  bool frame::is_native_frame() const {
    return (_cb != NULL &&
            _cb->is_nmethod() &&
            ((nmethod*)_cb)->is_native_method());
--- 222,250 ----
    _pc = newpc;
    _cb = CodeCache::find_blob_unsafe(_pc);
  
  }
  
+ void frame::set_pc_preserve_deopt(address newpc) {
+   set_pc_preserve_deopt(newpc, CodeCache::find_blob_unsafe(newpc));
+ }
+ 
+ void frame::set_pc_preserve_deopt(address newpc, CodeBlob* cb) {
+ #ifdef ASSERT
+   if (_cb != NULL && _cb->is_nmethod()) {
+     assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
+   }
+ #endif // ASSERT
+ 
+   _pc = newpc;
+   _cb = cb;
+ }
+ 
  // type testers
  bool frame::is_ignored_frame() const {
    return false;  // FIXME: some LambdaForm frames should be ignored
  }
  
  bool frame::is_native_frame() const {
    return (_cb != NULL &&
            _cb->is_nmethod() &&
            ((nmethod*)_cb)->is_native_method());
*** 181,201 ****
    if (is_interpreted_frame()) return true;
    if (is_compiled_frame())    return true;
    return false;
  }
  
- 
- bool frame::is_compiled_frame() const {
-   if (_cb != NULL &&
-       _cb->is_compiled() &&
-       ((CompiledMethod*)_cb)->is_java_method()) {
-     return true;
-   }
-   return false;
- }
- 
- 
  bool frame::is_runtime_frame() const {
    return (_cb != NULL && _cb->is_runtime_stub());
  }
  
  bool frame::is_safepoint_blob_frame() const {
--- 254,263 ----
*** 265,302 ****
    CompiledMethod* nm = (CompiledMethod*)_cb;
    if( !nm->can_be_deoptimized() )
      return false;
  
    return !nm->is_at_poll_return(pc());
  }
  
  void frame::deoptimize(JavaThread* thread) {
    assert(thread->frame_anchor()->has_last_Java_frame() &&
           thread->frame_anchor()->walkable(), "must be");
  
    // Schedule deoptimization of an nmethod activation with this frame.
    assert(_cb != NULL && _cb->is_compiled(), "must be");
  
    // If the call site is a MethodHandle call site use the MH deopt
    // handler.
    CompiledMethod* cm = (CompiledMethod*) _cb;
    address deopt = cm->is_method_handle_return(pc()) ?
                          cm->deopt_mh_handler_begin() :
                          cm->deopt_handler_begin();
  
    // Save the original pc before we patch in the new one
    cm->set_original_pc(this, pc());
    patch_pc(thread, deopt);
  
  #ifdef ASSERT
    {
-     RegisterMap map(thread, false);
      frame check = thread->last_frame();
!     while (id() != check.id()) {
!       check = check.sender(&map);
      }
-     assert(check.is_deoptimized_frame(), "missed deopt");
    }
  #endif // ASSERT
  }
  
  frame frame::java_sender() const {
--- 327,381 ----
    CompiledMethod* nm = (CompiledMethod*)_cb;
    if( !nm->can_be_deoptimized() )
      return false;
  
+   // address* pc_addr = &(((address*) sp())[-1]); // TODO: PLATFORM
+   // if (Continuation::is_return_barrier_entry(*pc_addr)) {
+   //   log_trace(jvmcont)("Can't deopt entry:");
+   //   if (log_is_enabled(Trace, jvmcont)) {
+   //     print_value_on(tty, NULL);
+   //   }
+   //   return false;
+   // }
+ 
    return !nm->is_at_poll_return(pc());
  }
  
  void frame::deoptimize(JavaThread* thread) {
+   // tty->print_cr(">>> frame::deoptimize");
+   // print_on(tty);
    assert(thread->frame_anchor()->has_last_Java_frame() &&
           thread->frame_anchor()->walkable(), "must be");
  
    // Schedule deoptimization of an nmethod activation with this frame.
    assert(_cb != NULL && _cb->is_compiled(), "must be");
+   // log_develop_trace(jvmcont)(">>>> frame::deoptimize %ld", os::current_thread_id());
+   // tty->print_cr(">>>> frame::deoptimize: %ld", os::current_thread_id()); print_on(tty);
+ 
  
    // If the call site is a MethodHandle call site use the MH deopt
    // handler.
    CompiledMethod* cm = (CompiledMethod*) _cb;
    address deopt = cm->is_method_handle_return(pc()) ?
                          cm->deopt_mh_handler_begin() :
                          cm->deopt_handler_begin();
  
    // Save the original pc before we patch in the new one
    cm->set_original_pc(this, pc());
    patch_pc(thread, deopt);
+   assert(is_deoptimized_frame(), "must be");
  
  #ifdef ASSERT
    {
      frame check = thread->last_frame();
!     if (is_older(check.id())) {
!       RegisterMap map(thread, false);
!       while (id() != check.id()) {
!         check = check.sender(&map);
!       }
!       assert(check.is_deoptimized_frame(), "missed deopt");
      }
    }
  #endif // ASSERT
  }
  
  frame frame::java_sender() const {
*** 806,822 ****
    ArgumentSizeComputer asc(signature);
    int size = asc.size();
    return (oop *)interpreter_frame_tos_at(size);
  }
  
- 
  void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) {
-   assert(is_interpreted_frame(), "Not an interpreted frame");
-   assert(map != NULL, "map must be set");
    Thread *thread = Thread::current();
    methodHandle m (thread, interpreter_frame_method());
!   jint      bci = interpreter_frame_bci();
  
    assert(!Universe::heap()->is_in(m()), "must be valid oop");
    assert(m->is_method(), "checking frame value");
    assert((m->is_native() && bci == 0) ||
--- 885,919 ----
    ArgumentSizeComputer asc(signature);
    int size = asc.size();
    return (oop *)interpreter_frame_tos_at(size);
  }
  
  void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) {
    Thread *thread = Thread::current();
    methodHandle m (thread, interpreter_frame_method());
!   jint bci = interpreter_frame_bci();
! 
!   InterpreterOopMap mask;
!   if (query_oop_map_cache) {
!     m->mask_for(bci, &mask);
!   } else {
!     OopMapCache::compute_one_oop_map(m, bci, &mask);
!   }
! 
!   oops_interpreted_do0(f, map, m, bci, mask);
! }
! 
! void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, const InterpreterOopMap& mask) {
!   Thread *thread = Thread::current();
!   methodHandle m (thread, interpreter_frame_method());
!   jint bci = interpreter_frame_bci();
!   oops_interpreted_do0(f, map, m, bci, mask);
! }
! 
! void frame::oops_interpreted_do0(OopClosure* f, const RegisterMap* map, methodHandle m, jint bci, const InterpreterOopMap& mask) {
!   assert(is_interpreted_frame(), "Not an interpreted frame");
!   // assert(map != NULL, "map must be set");
    assert(!Universe::heap()->is_in(m()), "must be valid oop");
    assert(m->is_method(), "checking frame value");
    assert((m->is_native() && bci == 0) ||
*** 857,869 ****
    if (!m->is_native()) {
      Bytecode_invoke call = Bytecode_invoke_check(m, bci);
      if (call.is_valid()) {
        signature = call.signature();
        has_receiver = call.has_receiver();
!       if (map->include_argument_oops() &&
            interpreter_frame_expression_stack_size() > 0) {
!         ResourceMark rm(thread);  // is this right ???
          // we are at a call site & the expression stack is not empty
          // => process callee's arguments
          //
          // Note: The expression stack can be empty if an exception
          //       occurred during method resolution/execution. In all
--- 954,966 ----
    if (!m->is_native()) {
      Bytecode_invoke call = Bytecode_invoke_check(m, bci);
      if (call.is_valid()) {
        signature = call.signature();
        has_receiver = call.has_receiver();
!       if (map != NULL && map->include_argument_oops() &&
            interpreter_frame_expression_stack_size() > 0) {
!         // ResourceMark rm(thread);  // is this right ???
          // we are at a call site & the expression stack is not empty
          // => process callee's arguments
          //
          // Note: The expression stack can be empty if an exception
          //       occurred during method resolution/execution. In all
*** 878,906 ****
    }
  
    InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
  
    // process locals & expression stack
!   InterpreterOopMap mask;
!   if (query_oop_map_cache) {
!     m->mask_for(bci, &mask);
!   } else {
!     OopMapCache::compute_one_oop_map(m, bci, &mask);
!   }
    mask.iterate_oop(&blk);
  }
  
  
  void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) {
    InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
    finder.oops_do();
  }
  
! void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
    assert(_cb != NULL, "sanity check");
!   if (_cb->oop_maps() != NULL) {
!     OopMapSet::oops_do(this, reg_map, f);
  
      // Preserve potential arguments for a callee. We handle this by dispatching
      // on the codeblob. For c2i, we do
      if (reg_map->include_argument_oops()) {
        _cb->preserve_callee_argument_oops(*this, reg_map, f);
--- 975,999 ----
    }
  
    InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
  
    // process locals & expression stack
!   // mask.print();
    mask.iterate_oop(&blk);
  }
  
  
  void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) {
    InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
    finder.oops_do();
  }
  
! void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, const RegisterMap* reg_map) {
    assert(_cb != NULL, "sanity check");
!   assert((oop_map() == NULL) == (_cb->oop_maps() == NULL), "frame and _cb must agree that oopmap is set or not");
!   if (oop_map() != NULL) {
!     _oop_map->oops_do(this, reg_map, f, df);
  
      // Preserve potential arguments for a callee. We handle this by dispatching
      // on the codeblob. For c2i, we do
      if (reg_map->include_argument_oops()) {
        _cb->preserve_callee_argument_oops(*this, reg_map, f);
*** 934,943 ****
--- 1027,1045 ----
    virtual void handle_oop_offset() {
      // Extract low order register number from register array.
      // In LP64-land, the high-order bits are valid but unhelpful.
      VMReg reg = _regs[_offset].first();
      oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
+ #ifdef ASSERT
+     if (loc == NULL) {
+       if (_reg_map->should_skip_missing())
+         return;
+       tty->print_cr("Error walking frame oops:");
+       _fr.print_on(tty);
+       assert(loc != NULL, "reg: " INTPTR_FORMAT " %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
+     }
+ #endif
      _f->do_oop(loc);
    }
  
   public:
    CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
*** 970,980 ****
    }
  };
  
  
  void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
!   ResourceMark rm;
    CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
    finder.oops_do();
  }
  
  
--- 1072,1082 ----
    }
  };
  
  
  void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
!   // ResourceMark rm;
    CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
    finder.oops_do();
  }
  
  
*** 1032,1042 ****
    // Traverse the Handle Block saved in the entry frame
    entry_frame_call_wrapper()->oops_do(f);
  }
  
  
! void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
  #ifndef PRODUCT
  #if defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140
  #pragma error_messages(off, SEC_NULL_PTR_DEREF)
  #endif
    // simulate GC crash here to dump java thread in error report
--- 1134,1144 ----
    // Traverse the Handle Block saved in the entry frame
    entry_frame_call_wrapper()->oops_do(f);
  }
  
  
! void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, const RegisterMap* map, bool use_interpreter_oop_map_cache) {
  #ifndef PRODUCT
  #if defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140
  #pragma error_messages(off, SEC_NULL_PTR_DEREF)
  #endif
    // simulate GC crash here to dump java thread in error report
*** 1048,1058 ****
  
    if (is_interpreted_frame()) {
      oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
    } else if (is_entry_frame()) {
      oops_entry_do(f, map);
    } else if (CodeCache::contains(pc())) {
!     oops_code_blob_do(f, cf, map);
    } else {
      ShouldNotReachHere();
    }
  }
--- 1150,1160 ----
  
    if (is_interpreted_frame()) {
      oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
    } else if (is_entry_frame()) {
      oops_entry_do(f, map);
    } else if (CodeCache::contains(pc())) {
!     oops_code_blob_do(f, cf, df, map);
    } else {
      ShouldNotReachHere();
    }
  }
*** 1072,1081 ****
--- 1174,1190 ----
      f->do_metadata(m);
    }
  }
  
  void frame::verify(const RegisterMap* map) {
+ #ifndef PRODUCT
+   if (TraceCodeBlobStacks) {
+     tty->print_cr("*** verify");
+     print_on(tty);
+   }
+ #endif
+ 
    // for now make sure receiver type is correct
    if (is_interpreted_frame()) {
      Method* method = interpreter_frame_method();
      guarantee(method->is_method(), "method is wrong in frame::verify");
      if (!method->is_static()) {
*** 1085,1095 ****
      }
    }
  #if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_empty(), "must be empty before verify");
  #endif
!   oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false);
  }
  
  #ifdef ASSERT
  bool frame::verify_return_pc(address x) {
--- 1194,1204 ----
      }
    }
  #if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_empty(), "must be empty before verify");
  #endif
!   oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
  }
  
  #ifdef ASSERT
  bool frame::verify_return_pc(address x) {
*** 1122,1134 ****
    guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark");
  }
  #endif
  
  
  #ifndef PRODUCT
! void frame::describe(FrameValues& values, int frame_no) {
    // boundaries: sp and the 'real' frame pointer
!   values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
    intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
  
    // print frame info at the highest boundary
    intptr_t* info_address = MAX2(sp(), frame_pointer);
--- 1231,1285 ----
    guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark");
  }
  #endif
  
  
  #ifndef PRODUCT
! 
! class FrameValuesOopClosure: public OopClosure, public DerivedOopClosure {
! private:
!   FrameValues& _values;
!   int _frame_no;
! public:
!   FrameValuesOopClosure(FrameValues& values, int frame_no) : _values(values), _frame_no(frame_no) {}
!   virtual void do_oop(oop* p) { _values.describe(_frame_no, (intptr_t*)p, err_msg("oop for #%d", _frame_no)); }
!   virtual void do_oop(narrowOop* p) { _values.describe(_frame_no, (intptr_t*)p, err_msg("narrow oop for #%d", _frame_no)); }
!   virtual void do_derived_oop(oop *base, oop *derived) {
!     _values.describe(_frame_no, (intptr_t*)derived, err_msg("derived pointer (base: " INTPTR_FORMAT ") for #%d", p2i(base), _frame_no));
!   }
! };
! 
! class FrameValuesOopMapClosure: public OopMapClosure {
! private:
!   const frame* _fr;
!   const RegisterMap* _reg_map;
!   FrameValues& _values;
!   int _frame_no;
! public:
!   FrameValuesOopMapClosure(const frame* fr, const RegisterMap* reg_map, FrameValues& values, int frame_no)
!    : _fr(fr), _reg_map(reg_map), _values(values), _frame_no(frame_no) {}
! 
!   virtual void do_value(VMReg reg, OopMapValue::oop_types type) {
!     intptr_t* p = (intptr_t*)_fr->oopmapreg_to_location(reg, _reg_map);
!     if (p != NULL && (((intptr_t)p & WordAlignmentMask) == 0)) {
!       const char* type_name = NULL;
!       switch(type) {
!         case OopMapValue::oop_value:          type_name = "oop";          break;
!         case OopMapValue::narrowoop_value:    type_name = "narrow oop";   break;
!         case OopMapValue::callee_saved_value: type_name = "callee-saved"; break;
!         case OopMapValue::derived_oop_value:  type_name = "derived";      break;
!         // case OopMapValue::live_value:         type_name = "live";         break;
!         default: break;
!       }
!       if (type_name != NULL)
!         _values.describe(_frame_no, p, err_msg("%s for #%d", type_name, _frame_no));
!     }
!   }
! };
! 
! void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_map) {
    // boundaries: sp and the 'real' frame pointer
!   values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 0);
    intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
  
    // print frame info at the highest boundary
    intptr_t* info_address = MAX2(sp(), frame_pointer);
*** 1137,1166 ****
      values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
    }
  
    if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
      // Label values common to most frames
!     values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
    }
  
    if (is_interpreted_frame()) {
      Method* m = interpreter_frame_method();
      int bci = interpreter_frame_bci();
  
      // Label the method and current bci
      values.describe(-1, info_address,
!                     FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
      values.describe(-1, info_address,
!                     err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
      if (m->max_locals() > 0) {
        intptr_t* l0 = interpreter_frame_local_at(0);
        intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
!       values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
  
        // Report each local and mark as owned by this frame
        for (int l = 0; l < m->max_locals(); l++) {
          intptr_t* l0 = interpreter_frame_local_at(l);
!         values.describe(frame_no, l0, err_msg("local %d", l));
        }
      }
  
      // Compute the actual expression stack size
      InterpreterOopMap mask;
--- 1288,1319 ----
      values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
    }
  
    if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
      // Label values common to most frames
!     values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no), 0);
    }
  
    if (is_interpreted_frame()) {
      Method* m = interpreter_frame_method();
      int bci = interpreter_frame_bci();
  
      // Label the method and current bci
      values.describe(-1, info_address,
!                     FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 3);
      values.describe(-1, info_address,
!                     err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 2);
!     values.describe(frame_no, (intptr_t*)sender_pc_addr(), "return address");
! 
      if (m->max_locals() > 0) {
        intptr_t* l0 = interpreter_frame_local_at(0);
        intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
!       values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 2);
  
        // Report each local and mark as owned by this frame
        for (int l = 0; l < m->max_locals(); l++) {
          intptr_t* l0 = interpreter_frame_local_at(l);
!         values.describe(frame_no, l0, err_msg("local %d", l), 1);
        }
      }
  
      // Compute the actual expression stack size
      InterpreterOopMap mask;
*** 1168,1201 ****
      intptr_t* tos = NULL;
  
      // Report each stack element and mark as owned by this frame
      for (int e = 0; e < mask.expression_stack_size(); e++) {
        tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
        values.describe(frame_no, interpreter_frame_expression_stack_at(e),
!                       err_msg("stack %d", e));
      }
  
      if (tos != NULL) {
!       values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
      }
      if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
        values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
        values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
      }
    } else if (is_entry_frame()) {
      // For now just label the frame
      values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
    } else if (is_compiled_frame()) {
      // For now just label the frame
!     CompiledMethod* cm = (CompiledMethod*)cb();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s%s", frame_no,
                                         p2i(cm),
                                         (cm->is_aot() ? "A ": "J "),
                                         cm->method()->name_and_sig_as_C_string(),
                                         (_deopt_state == is_deoptimized) ?
                                         " (deoptimized)" :
                                         ((_deopt_state == unknown) ? " (state unknown)" : "")),
!                     2);
    } else if (is_native_frame()) {
      // For now just label the frame
      nmethod* nm = cb()->as_nmethod_or_null();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
--- 1321,1439 ----
      intptr_t* tos = NULL;
  
      // Report each stack element and mark as owned by this frame
      for (int e = 0; e < mask.expression_stack_size(); e++) {
        tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
        values.describe(frame_no, interpreter_frame_expression_stack_at(e),
!                       err_msg("stack %d", e), 1);
      }
  
      if (tos != NULL) {
!       values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
      }
      if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
        values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
        values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
      }
+ 
+     if (reg_map != NULL) {
+       FrameValuesOopClosure oopsFn(values, frame_no);
+       oops_do(&oopsFn, NULL, &oopsFn, reg_map);
+     }
    } else if (is_entry_frame()) {
      // For now just label the frame
      values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
    } else if (is_compiled_frame()) {
      // For now just label the frame
!     CompiledMethod* cm = cb()->as_compiled_method();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s%s", frame_no,
                                         p2i(cm),
                                         (cm->is_aot() ? "A ": "J "),
                                         cm->method()->name_and_sig_as_C_string(),
                                         (_deopt_state == is_deoptimized) ?
                                         " (deoptimized)" :
                                         ((_deopt_state == unknown) ? " (state unknown)" : "")),
!                     3);
! 
!     { // mark arguments (see nmethod::print_nmethod_labels)
!       Method* m = cm->method();
! 
!       int stack_slot_offset = cm->frame_size() * wordSize; // offset, in bytes, to caller sp
!       int sizeargs = m->size_of_parameters();
! 
!       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
!       VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
!       {
!         int sig_index = 0;
!         if (!m->is_static()) sig_bt[sig_index++] = T_OBJECT; // 'this'
!         for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
!           BasicType t = ss.type();
!           assert(type2size[t] == 1 || type2size[t] == 2, "size is 1 or 2");
!           sig_bt[sig_index++] = t;
!           if (type2size[t] == 2) sig_bt[sig_index++] = T_VOID;
!         }
!         assert(sig_index == sizeargs, "");
!       }
!       int out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
!       assert (out_preserve ==  m->num_stack_arg_slots(), "");
!       int sig_index = 0;
!       int arg_index = (m->is_static() ? 0 : -1);
!       for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
!         bool at_this = (arg_index == -1);
!         bool at_old_sp = false;
!         BasicType t = (at_this ? T_OBJECT : ss.type());
!         assert(t == sig_bt[sig_index], "sigs in sync");
!         VMReg fst = regs[sig_index].first();
!         if (fst->is_stack()) {
!           int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
!           intptr_t* stack_address = (intptr_t*)((address)sp() + offset);
!           if (at_this)
!             values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
!           else
!             values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
!         }
!         sig_index += type2size[t];
!         arg_index += 1;
!         if (!at_this) ss.next();
!       }
!     }
! 
!     if (reg_map != NULL) {
!       int scope_no = 0;
!       for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != NULL; scope = scope->sender(), scope_no++) {
!         Method* m = scope->method();
!         int bci = scope->bci();
!         values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
! 
!         { // mark locals
!           GrowableArray<ScopeValue*>* scvs = scope->locals();
!           int scvs_length = scvs != NULL ? scvs->length() : 0;
!           for (int i = 0; i < scvs_length; i++) {
!             intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
!             if (stack_address != NULL)
!               values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
!           }
!         }
!         { // mark expression stack
!           GrowableArray<ScopeValue*>* scvs = scope->expressions();
!           int scvs_length = scvs != NULL ? scvs->length() : 0;
!           for (int i = 0; i < scvs_length; i++) {
!             intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
!             if (stack_address != NULL)
!               values.describe(frame_no, stack_address, err_msg("stack %d for #%d (scope %d)", i, frame_no, scope_no), 1);
!           }
!         }
!       }
! 
!       FrameValuesOopClosure oopsFn(values, frame_no);
!       oops_do(&oopsFn, NULL, &oopsFn, reg_map);
! 
!       if (oop_map() != NULL) {
!         FrameValuesOopMapClosure valuesFn(this, reg_map, values, frame_no);
!         int mask = OopMapValue::callee_saved_value; // | OopMapValue::live_value;
!         oop_map()->all_do(this, mask, &valuesFn);
!       }
!     }
    } else if (is_native_frame()) {
      // For now just label the frame
      nmethod* nm = cb()->as_nmethod_or_null();
      values.describe(-1, info_address,
                      FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
*** 1218,1231 ****
  
  //-----------------------------------------------------------------------------------
  // StackFrameStream implementation
  
! StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
    assert(thread->has_last_Java_frame(), "sanity check");
    _fr = thread->last_frame();
    _is_done = false;
  }
  
  
  #ifndef PRODUCT
--- 1456,1474 ----
  
  //-----------------------------------------------------------------------------------
  // StackFrameStream implementation
  
! StackFrameStream::StackFrameStream(JavaThread *thread, bool update, bool allow_missing_reg) : _reg_map(thread, update) {
    assert(thread->has_last_Java_frame(), "sanity check");
    _fr = thread->last_frame();
    _is_done = false;
+ #ifndef PRODUCT
+   if (allow_missing_reg) {
+     _reg_map.set_skip_missing(true);
+   }
+ #endif
  }
  
  
  #ifndef PRODUCT
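A short sketch of how the new allow_missing_reg argument might be used with StackFrameStream, assuming the constructor signature added above; purely illustrative (in product builds the flag is a no-op, as the #ifndef PRODUCT guard shows):

    // Illustrative only: iterate the thread's frames, tolerating registers the map
    // cannot resolve (allow_missing_reg forwards to RegisterMap::set_skip_missing).
    for (StackFrameStream fst(thread, true /* update */, true /* allow_missing_reg */);
         !fst.is_done(); fst.next()) {
      fst.current()->print_value();
    }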