src/hotspot/share/runtime/frame.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/moduleEntry.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/scopeDesc.hpp"
  28 #include "code/vmreg.inline.hpp"
  29 #include "compiler/abstractCompiler.hpp"
  30 #include "compiler/disassembler.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "interpreter/oopMapCache.hpp"
  35 #include "logging/log.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.hpp"

  38 #include "oops/markWord.hpp"
  39 #include "oops/method.inline.hpp"
  40 #include "oops/methodData.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "oops/stackChunkOop.inline.hpp"
  43 #include "oops/verifyOopClosure.hpp"
  44 #include "prims/methodHandles.hpp"
  45 #include "runtime/continuation.hpp"
  46 #include "runtime/continuationEntry.inline.hpp"
  47 #include "runtime/frame.inline.hpp"
  48 #include "runtime/handles.inline.hpp"
  49 #include "runtime/javaCalls.hpp"
  50 #include "runtime/javaThread.hpp"
  51 #include "runtime/monitorChunk.hpp"
  52 #include "runtime/os.hpp"
  53 #include "runtime/safefetch.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/signature.hpp"
  56 #include "runtime/stackValue.hpp"
  57 #include "runtime/stubCodeGenerator.hpp"
  58 #include "runtime/stubRoutines.hpp"
  59 #include "utilities/debug.hpp"
  60 #include "utilities/decoder.hpp"
  61 #include "utilities/formatBuffer.hpp"
  62 
  63 RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
  64   _thread         = thread;
  65   _update_map     = update_map == UpdateMap::include;
  66   _process_frames = process_frames == ProcessFrames::include;
  67   _walk_cont      = walk_cont == WalkContinuation::include;
  68   clear();
  69   DEBUG_ONLY (_update_for_id = nullptr;)
  70   NOT_PRODUCT(_skip_missing = false;)
  71   NOT_PRODUCT(_async = false;)
  72 
  73   if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
  74     _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
  75   }
  76   _chunk_index = -1;
  77 
  78 #ifndef PRODUCT
  79   for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
  80 #endif /* PRODUCT */
  81 }

 344     return false;
 345 
 346   return !nm->is_at_poll_return(pc());
 347 }
 348 
 349 void frame::deoptimize(JavaThread* thread) {
 350   assert(thread == nullptr
 351          || (thread->frame_anchor()->has_last_Java_frame() &&
 352              thread->frame_anchor()->walkable()), "must be");
 353   // Schedule deoptimization of an nmethod activation with this frame.
 354   assert(_cb != nullptr && _cb->is_nmethod(), "must be");
 355 
 356   // If the call site is a MethodHandle call site use the MH deopt handler.
 357   nmethod* nm = _cb->as_nmethod();
 358   address deopt = nm->deopt_handler_entry();
 359 
 360   NativePostCallNop* inst = nativePostCallNop_at(pc());
 361 
 362   // Save the original pc before we patch in the new one
 363   nm->set_original_pc(this, pc());
 364   patch_pc(thread, deopt);
 365   assert(is_deoptimized_frame(), "must be");
 366 
 367 #ifdef ASSERT
 368   if (thread != nullptr) {
 369     frame check = thread->last_frame();
 370     if (is_older(check.id())) {
 371       RegisterMap map(thread,
 372                       RegisterMap::UpdateMap::skip,
 373                       RegisterMap::ProcessFrames::include,
 374                       RegisterMap::WalkContinuation::skip);
 375       while (id() != check.id()) {
 376         check = check.sender(&map);
 377       }
 378       assert(check.is_deoptimized_frame(), "missed deopt");
 379     }
 380   }
 381 #endif // ASSERT
 382 }
 383 

 754  private:
 755   const frame* _fr;
 756   OopClosure*  _f;
 757   int          _max_locals;
 758   int          _max_stack;
 759 
 760  public:
 761   InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
 762                           OopClosure* f) {
 763     _fr         = fr;
 764     _max_locals = max_locals;
 765     _max_stack  = max_stack;
 766     _f          = f;
 767   }
 768 
 769   void offset_do(int offset) {
 770     oop* addr;
 771     if (offset < _max_locals) {
 772       addr = (oop*) _fr->interpreter_frame_local_at(offset);
 773       assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
 774       _f->do_oop(addr);
 775     } else {
 776       addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
 777       // In case of exceptions, the expression stack is invalid and the esp will be reset to express
 778       // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
 779       bool in_stack;
 780       if (frame::interpreter_frame_expression_stack_direction() > 0) {
 781         in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
 782       } else {
 783         in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
 784       }
 785       if (in_stack) {
 786         _f->do_oop(addr);
 787       }
 788     }
 789   }
 790 };
 791 
 792 
 793 class InterpretedArgumentOopFinder: public SignatureIterator {
 794  private:
 795   OopClosure*  _f;             // Closure to invoke
 796   int          _offset;        // TOS-relative offset, decremented with each argument
 797   bool         _has_receiver;  // true if the callee has a receiver
 798   const frame* _fr;
 799 
 800   friend class SignatureIterator;  // so do_parameters_on can call do_type
 801   void do_type(BasicType type) {
 802     _offset -= parameter_type_word_count(type);
 803     if (is_reference_type(type)) oop_offset_do();
 804    }
 805 
 806   void oop_offset_do() {

 995 class CompiledArgumentOopFinder: public SignatureIterator {
 996  protected:
 997   OopClosure*     _f;
 998   int             _offset;        // the current offset, incremented with each argument
 999   bool            _has_receiver;  // true if the callee has a receiver
1000   bool            _has_appendix;  // true if the call has an appendix
1001   frame           _fr;
1002   RegisterMap*    _reg_map;
1003   int             _arg_size;
1004   VMRegPair*      _regs;        // VMReg list of arguments
1005 
1006   friend class SignatureIterator;  // so do_parameters_on can call do_type
1007   void do_type(BasicType type) {
1008     if (is_reference_type(type))  handle_oop_offset();
1009     _offset += parameter_type_word_count(type);
1010   }
1011 
1012   virtual void handle_oop_offset() {
1013     // Extract low order register number from register array.
1014     // In LP64-land, the high-order bits are valid but unhelpful.

1015     VMReg reg = _regs[_offset].first();
1016     oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
1017   #ifdef ASSERT
1018     if (loc == nullptr) {
1019       if (_reg_map->should_skip_missing()) {
1020         return;
1021       }
1022       tty->print_cr("Error walking frame oops:");
1023       _fr.print_on(tty);
1024       assert(loc != nullptr, "missing register map entry reg: %d %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
1025     }
1026   #endif
1027     _f->do_oop(loc);
1028   }
1029 
1030  public:
1031   CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
1032     : SignatureIterator(signature) {
1033 
1034     // initialize CompiledArgumentOopFinder
1035     _f         = f;
1036     _offset    = 0;
1037     _has_receiver = has_receiver;
1038     _has_appendix = has_appendix;
1039     _fr        = fr;
1040     _reg_map   = (RegisterMap*)reg_map;
1041     _arg_size  = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);
1042 
1043     int arg_size;
1044     _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
1045     assert(arg_size == _arg_size, "wrong arg size");
1046   }
1047 
1048   void oops_do() {
1049     if (_has_receiver) {
1050       handle_oop_offset();
1051       _offset++;
1052     }
1053     do_parameters_on(this);
1054     if (_has_appendix) {
1055       handle_oop_offset();
1056       _offset++;
1057     }
1058   }
1059 };
1060 
1061 void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
1062                                        const RegisterMap* reg_map, OopClosure* f) const {
1063   // ResourceMark rm;
1064   CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
1065   finder.oops_do();

1405       tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
1406       values.describe(frame_no, interpreter_frame_expression_stack_at(e),
1407                       err_msg("stack %d", e), 1);
1408     }
1409     if (tos != nullptr) {
1410       values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
1411     }
1412 
1413     if (reg_map != nullptr) {
1414       FrameValuesOopClosure oopsFn;
1415       oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
1416       oopsFn.describe(values, frame_no);
1417     }
1418   } else if (is_entry_frame()) {
1419     // For now just label the frame
1420     values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
1421   } else if (is_compiled_frame()) {
1422     // For now just label the frame
1423     nmethod* nm = cb()->as_nmethod();
1424     values.describe(-1, info_address,
1425                     FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
1426                                        p2i(nm),
1427                                        nm->method()->name_and_sig_as_C_string(),
1428                                        (_deopt_state == is_deoptimized) ?
1429                                        " (deoptimized)" :
1430                                        ((_deopt_state == unknown) ? " (state unknown)" : "")),
1431                     3);
1432 
1433     { // mark arguments (see nmethod::print_nmethod_labels)
1434       Method* m = nm->method();
1435 
1436       int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
1437       int sizeargs = m->size_of_parameters();
1438 
1439       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
1440       VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
1441       {
1442         int sig_index = 0;
1443         if (!m->is_static()) {
1444           sig_bt[sig_index++] = T_OBJECT; // 'this'
1445         }
1446         for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
1447           BasicType t = ss.type();
1448           assert(type2size[t] == 1 || type2size[t] == 2, "size is 1 or 2");
1449           sig_bt[sig_index++] = t;
1450           if (type2size[t] == 2) {
1451             sig_bt[sig_index++] = T_VOID;
1452           }
1453         }
1454         assert(sig_index == sizeargs, "");
1455       }
1456       int stack_arg_slots = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
1457       assert(stack_arg_slots ==  nm->as_nmethod()->num_stack_arg_slots(false /* rounded */) || nm->is_osr_method(), "");
1458       int out_preserve = SharedRuntime::out_preserve_stack_slots();
1459       int sig_index = 0;
1460       int arg_index = (m->is_static() ? 0 : -1);
1461       for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
1462         bool at_this = (arg_index == -1);
1463         bool at_old_sp = false;
1464         BasicType t = (at_this ? T_OBJECT : ss.type());
1465         assert(t == sig_bt[sig_index], "sigs in sync");
1466         VMReg fst = regs[sig_index].first();
1467         if (fst->is_stack()) {
1468           assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
1469           int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
1470           intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
1471           if (at_this) {
1472             values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
1473           } else {
1474             values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
1475           }
1476         }
1477         sig_index += type2size[t];
1478         arg_index += 1;
1479         if (!at_this) {
1480           ss.next();
1481         }
1482       }
1483     }
1484 
1485     if (reg_map != nullptr && is_java_frame()) {
1486       int scope_no = 0;
1487       for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
1488         Method* m = scope->method();
1489         int  bci = scope->bci();
1490         values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
1491 
1492         { // mark locals
1493           GrowableArray<ScopeValue*>* scvs = scope->locals();
1494           int scvs_length = scvs != nullptr ? scvs->length() : 0;
1495           for (int i = 0; i < scvs_length; i++) {
1496             intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
1497             if (stack_address != nullptr) {
1498               values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
1499             }
1500           }
1501         }

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/moduleEntry.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/scopeDesc.hpp"
  28 #include "code/vmreg.inline.hpp"
  29 #include "compiler/abstractCompiler.hpp"
  30 #include "compiler/disassembler.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "interpreter/oopMapCache.hpp"
  35 #include "logging/log.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.hpp"
  38 #include "oops/inlineKlass.hpp"
  39 #include "oops/markWord.hpp"
  40 #include "oops/method.inline.hpp"
  41 #include "oops/methodData.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "oops/stackChunkOop.inline.hpp"
  44 #include "oops/verifyOopClosure.hpp"
  45 #include "prims/methodHandles.hpp"
  46 #include "runtime/continuation.hpp"
  47 #include "runtime/continuationEntry.inline.hpp"
  48 #include "runtime/frame.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 #include "runtime/javaCalls.hpp"
  51 #include "runtime/javaThread.hpp"
  52 #include "runtime/monitorChunk.hpp"
  53 #include "runtime/os.hpp"
  54 #include "runtime/safefetch.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "runtime/signature.hpp"
  57 #include "runtime/stackValue.hpp"
  58 #include "runtime/stubCodeGenerator.hpp"
  59 #include "runtime/stubRoutines.hpp"
  60 #include "utilities/debug.hpp"
  61 #include "utilities/decoder.hpp"
  62 #include "utilities/formatBuffer.hpp"
  63 #ifdef COMPILER1
  64 #include "c1/c1_Runtime1.hpp"
  65 #endif
  66 
  67 RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
  68   _thread         = thread;
  69   _update_map     = update_map == UpdateMap::include;
  70   _process_frames = process_frames == ProcessFrames::include;
  71   _walk_cont      = walk_cont == WalkContinuation::include;
  72   clear();
  73   DEBUG_ONLY (_update_for_id = nullptr;)
  74   NOT_PRODUCT(_skip_missing = false;)
  75   NOT_PRODUCT(_async = false;)
  76 
  77   if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
  78     _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
  79   }
  80   _chunk_index = -1;
  81 
  82 #ifndef PRODUCT
  83   for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
  84 #endif /* PRODUCT */
  85 }
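For orientation, a minimal sketch of how a caller might drive a stack walk with this constructor; the enum arguments mirror those used in the ASSERT block of frame::deoptimize() further down, while the loop and its stop condition are illustrative assumptions rather than code from this change.

// Illustrative only: walk the current thread's Java frames without
// recording callee-saved register locations and without descending
// into continuation stack chunks.
void walk_frames_sketch(JavaThread* thread) {
  RegisterMap map(thread,
                  RegisterMap::UpdateMap::skip,          // no register-location updates
                  RegisterMap::ProcessFrames::include,   // process frames while walking
                  RegisterMap::WalkContinuation::skip);  // stay on the platform stack
  for (frame fr = thread->last_frame(); true; fr = fr.sender(&map)) {
    // ... inspect fr here ...
    if (fr.is_entry_frame()) break;  // assumed stop condition for this sketch
  }
}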

 348     return false;
 349 
 350   return !nm->is_at_poll_return(pc());
 351 }
 352 
 353 void frame::deoptimize(JavaThread* thread) {
 354   assert(thread == nullptr
 355          || (thread->frame_anchor()->has_last_Java_frame() &&
 356              thread->frame_anchor()->walkable()), "must be");
 357   // Schedule deoptimization of an nmethod activation with this frame.
 358   assert(_cb != nullptr && _cb->is_nmethod(), "must be");
 359 
 360   // If the call site is a MethodHandle call site use the MH deopt handler.
 361   nmethod* nm = _cb->as_nmethod();
 362   address deopt = nm->deopt_handler_entry();
 363 
 364   NativePostCallNop* inst = nativePostCallNop_at(pc());
 365 
 366   // Save the original pc before we patch in the new one
 367   nm->set_original_pc(this, pc());
 368 
 369 #ifdef COMPILER1
 370   if (nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
 371       pc() < nm->verified_inline_entry_point()) {
 372     // The VEP and VIEP(RO) of C1-compiled methods call into the runtime to buffer scalarized value
 373     // type args. We can't deoptimize at that point because the buffers have not yet been initialized.
 374     // Also, if the method is synchronized, we first need to acquire the lock.
 375     // Don't patch the return pc to delay deoptimization until we enter the method body (the check
 376     // added in LIRGenerator::do_Base will detect the pending deoptimization by checking the original_pc).
 377 #if defined ASSERT && !defined AARCH64   // Stub call site does not look like NativeCall on AArch64
 378     NativeCall* call = nativeCall_before(this->pc());
 379     address dest = call->destination();
 380     assert(dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_no_receiver_id) ||
 381            dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_id), "unexpected safepoint in entry point");
 382 #endif
 383     return;
 384   }
 385 #endif
 386 
 387   patch_pc(thread, deopt);
 388   assert(is_deoptimized_frame(), "must be");
 389 
 390 #ifdef ASSERT
 391   if (thread != nullptr) {
 392     frame check = thread->last_frame();
 393     if (is_older(check.id())) {
 394       RegisterMap map(thread,
 395                       RegisterMap::UpdateMap::skip,
 396                       RegisterMap::ProcessFrames::include,
 397                       RegisterMap::WalkContinuation::skip);
 398       while (id() != check.id()) {
 399         check = check.sender(&map);
 400       }
 401       assert(check.is_deoptimized_frame(), "missed deopt");
 402     }
 403   }
 404 #endif // ASSERT
 405 }
 406 

 777  private:
 778   const frame* _fr;
 779   OopClosure*  _f;
 780   int          _max_locals;
 781   int          _max_stack;
 782 
 783  public:
 784   InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
 785                           OopClosure* f) {
 786     _fr         = fr;
 787     _max_locals = max_locals;
 788     _max_stack  = max_stack;
 789     _f          = f;
 790   }
 791 
 792   void offset_do(int offset) {
 793     oop* addr;
 794     if (offset < _max_locals) {
 795       addr = (oop*) _fr->interpreter_frame_local_at(offset);
 796       assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
 797       if (_f != nullptr) {
 798         _f->do_oop(addr);
 799       }
 800     } else {
 801       addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
 802       // In case of exceptions, the expression stack is invalid and the esp will be reset to express
 803       // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
 804       bool in_stack;
 805       if (frame::interpreter_frame_expression_stack_direction() > 0) {
 806         in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
 807       } else {
 808         in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
 809       }
 810       if (in_stack) {
 811         if (_f != nullptr) {
 812           _f->do_oop(addr);
 813         }
 814       }
 815     }
 816   }
 817 };
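To make the role of offset_do() concrete, here is a hypothetical driver (an assumption, not HotSpot code): the real caller visits only the offsets that the interpreter oop map reports as holding oops, so iterating every slot as below is purely illustrative.

// Hypothetical driver: push every local and expression-stack slot of an
// interpreted frame through the closure. Offsets below max_locals select
// locals; higher offsets select expression-stack slots, as decoded by
// offset_do() above.
static void visit_all_slots_sketch(const frame* fr, OopClosure* f,
                                   int max_locals, int max_stack) {
  InterpreterFrameClosure blk(fr, max_locals, max_stack, f);
  for (int off = 0; off < max_locals + max_stack; off++) {
    blk.offset_do(off);
  }
}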
 818 
 819 
 820 class InterpretedArgumentOopFinder: public SignatureIterator {
 821  private:
 822   OopClosure*  _f;             // Closure to invoke
 823   int          _offset;        // TOS-relative offset, decremented with each argument
 824   bool         _has_receiver;  // true if the callee has a receiver
 825   const frame* _fr;
 826 
 827   friend class SignatureIterator;  // so do_parameters_on can call do_type
 828   void do_type(BasicType type) {
 829     _offset -= parameter_type_word_count(type);
 830     if (is_reference_type(type)) oop_offset_do();
 831    }
 832 
 833   void oop_offset_do() {

1022 class CompiledArgumentOopFinder: public SignatureIterator {
1023  protected:
1024   OopClosure*     _f;
1025   int             _offset;        // the current offset, incremented with each argument
1026   bool            _has_receiver;  // true if the callee has a receiver
1027   bool            _has_appendix;  // true if the call has an appendix
1028   frame           _fr;
1029   RegisterMap*    _reg_map;
1030   int             _arg_size;
1031   VMRegPair*      _regs;        // VMReg list of arguments
1032 
1033   friend class SignatureIterator;  // so do_parameters_on can call do_type
1034   void do_type(BasicType type) {
1035     if (is_reference_type(type))  handle_oop_offset();
1036     _offset += parameter_type_word_count(type);
1037   }
1038 
1039   virtual void handle_oop_offset() {
1040     // Extract low order register number from register array.
1041     // In LP64-land, the high-order bits are valid but unhelpful.
1042     assert(_offset < _arg_size, "out of bounds");
1043     VMReg reg = _regs[_offset].first();
1044     oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
1045   #ifdef ASSERT
1046     if (loc == nullptr) {
1047       if (_reg_map->should_skip_missing()) {
1048         return;
1049       }
1050       tty->print_cr("Error walking frame oops:");
1051       _fr.print_on(tty);
1052       assert(loc != nullptr, "missing register map entry reg: %d %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
1053     }
1054   #endif
1055     _f->do_oop(loc);
1056   }
1057 
1058  public:
1059   CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
1060     : SignatureIterator(signature) {
1061 
1062     // initialize CompiledArgumentOopFinder
1063     _f         = f;
1064     _offset    = 0;
1065     _has_receiver = has_receiver;
1066     _has_appendix = has_appendix;
1067     _fr        = fr;
1068     _reg_map   = (RegisterMap*)reg_map;
1069     _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &_arg_size);
1070   }
1071 
1072   void oops_do() {
1073     if (_has_receiver) {
1074       handle_oop_offset();
1075       _offset++;
1076     }
1077     do_parameters_on(this);
1078     if (_has_appendix) {
1079       handle_oop_offset();
1080       _offset++;
1081     }
1082   }
1083 };
1084 
1085 void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
1086                                        const RegisterMap* reg_map, OopClosure* f) const {
1087   // ResourceMark rm;
1088   CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
1089   finder.oops_do();

1429       tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
1430       values.describe(frame_no, interpreter_frame_expression_stack_at(e),
1431                       err_msg("stack %d", e), 1);
1432     }
1433     if (tos != nullptr) {
1434       values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
1435     }
1436 
1437     if (reg_map != nullptr) {
1438       FrameValuesOopClosure oopsFn;
1439       oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
1440       oopsFn.describe(values, frame_no);
1441     }
1442   } else if (is_entry_frame()) {
1443     // For now just label the frame
1444     values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
1445   } else if (is_compiled_frame()) {
1446     // For now just label the frame
1447     nmethod* nm = cb()->as_nmethod();
1448     values.describe(-1, info_address,
1449                     FormatBuffer<1024>("#%d nmethod (%s %d) " INTPTR_FORMAT " for method J %s%s", frame_no,
1450                                        nm->is_compiled_by_c1() ? "c1" : "c2", nm->frame_size(), p2i(nm),
1451                                        nm->method()->name_and_sig_as_C_string(),
1452                                        (_deopt_state == is_deoptimized) ?
1453                                        " (deoptimized)" :
1454                                        ((_deopt_state == unknown) ? " (state unknown)" : "")),
1455                     3);
1456 
1457     { // mark arguments (see nmethod::print_nmethod_labels)
1458       Method* m = nm->method();
1459 
1460       CompiledEntrySignature ces(m);
1461       ces.compute_calling_conventions(false);
1462       const GrowableArray<SigEntry>* sig_cc = nm->is_compiled_by_c2() ? ces.sig_cc() : ces.sig();
1463       const VMRegPair* regs = nm->is_compiled_by_c2() ? ces.regs_cc() : ces.regs();
1464 
1465       int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
1466       int out_preserve = SharedRuntime::out_preserve_stack_slots();
1467       int sig_index = 0;
1468       int arg_index = (m->is_static() ? 0 : -1);
1469       for (ExtendedSignature sig = ExtendedSignature(sig_cc, SigEntryFilter()); !sig.at_end(); ++sig) {
1470         bool at_this = (arg_index == -1);
1471         BasicType t = (*sig)._bt;
1472         VMReg fst = regs[sig_index].first();
1473         if (fst->is_stack()) {
1474           assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
1475           int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
1476           intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
1477           if (at_this) {
1478             values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
1479           } else {
1480             values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
1481           }
1482         }
1483         sig_index += type2size[t];
1484         arg_index += 1;
1485       }
1486     }
1487 
1488     if (reg_map != nullptr && is_java_frame()) {
1489       int scope_no = 0;
1490       for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
1491         Method* m = scope->method();
1492         int  bci = scope->bci();
1493         values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
1494 
1495         { // mark locals
1496           GrowableArray<ScopeValue*>* scvs = scope->locals();
1497           int scvs_length = scvs != nullptr ? scvs->length() : 0;
1498           for (int i = 0; i < scvs_length; i++) {
1499             intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
1500             if (stack_address != nullptr) {
1501               values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
1502             }
1503           }
1504         }