22 *
23 */
24
25 #include "classfile/moduleEntry.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/scopeDesc.hpp"
28 #include "code/vmreg.inline.hpp"
29 #include "compiler/abstractCompiler.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "gc/shared/collectedHeap.inline.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "interpreter/oopMapCache.hpp"
35 #include "logging/log.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "memory/universe.hpp"
38 #include "oops/markWord.hpp"
39 #include "oops/method.inline.hpp"
40 #include "oops/methodData.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "oops/stackChunkOop.inline.hpp"
43 #include "oops/verifyOopClosure.hpp"
44 #include "prims/methodHandles.hpp"
45 #include "runtime/continuation.hpp"
46 #include "runtime/continuationEntry.inline.hpp"
47 #include "runtime/frame.inline.hpp"
48 #include "runtime/handles.inline.hpp"
49 #include "runtime/javaCalls.hpp"
50 #include "runtime/javaThread.hpp"
51 #include "runtime/monitorChunk.hpp"
52 #include "runtime/os.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "runtime/safefetch.hpp"
55 #include "runtime/signature.hpp"
56 #include "runtime/stackValue.hpp"
57 #include "runtime/stubCodeGenerator.hpp"
58 #include "runtime/stubRoutines.hpp"
59 #include "utilities/debug.hpp"
60 #include "utilities/decoder.hpp"
61 #include "utilities/formatBuffer.hpp"
62
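// Construct a RegisterMap for walking the stack of the given thread. UpdateMap
// controls whether callee-saved register locations are recorded during the walk,
// ProcessFrames controls whether frames are processed (e.g. for stack watermark
// processing), and WalkContinuation controls whether the walk descends into
// mounted continuation stack chunks. A typical walk looks roughly like:
//
//   RegisterMap map(thread,
//                   RegisterMap::UpdateMap::include,
//                   RegisterMap::ProcessFrames::include,
//                   RegisterMap::WalkContinuation::skip);
//   for (frame fr = thread->last_frame(); !fr.is_first_frame(); fr = fr.sender(&map)) {
//     ...
//   }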
63 RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
64 _thread = thread;
65 _update_map = update_map == UpdateMap::include;
66 _process_frames = process_frames == ProcessFrames::include;
67 _walk_cont = walk_cont == WalkContinuation::include;
68 clear();
69 DEBUG_ONLY (_update_for_id = nullptr;)
70 NOT_PRODUCT(_skip_missing = false;)
71 NOT_PRODUCT(_async = false;)
72
73 if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
74 _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
75 }
76 _chunk_index = -1;
77
78 #ifndef PRODUCT
79 for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
80 #endif /* PRODUCT */
81 }
349 return !nm->is_at_poll_return(pc());
350 }
351
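// Mark this compiled (nmethod) frame for deoptimization by patching its pc to
// the nmethod's deopt handler. The original pc is saved first so that the
// deoptimization blob can later recover the scope information for this frame.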
352 void frame::deoptimize(JavaThread* thread) {
353 assert(thread == nullptr
354 || (thread->frame_anchor()->has_last_Java_frame() &&
355 thread->frame_anchor()->walkable()), "must be");
356 // Schedule deoptimization of an nmethod activation with this frame.
357 assert(_cb != nullptr && _cb->is_nmethod(), "must be");
358
359 // If the call site is a MethodHandle call site use the MH deopt handler.
360 nmethod* nm = _cb->as_nmethod();
361 address deopt = nm->is_method_handle_return(pc()) ?
362 nm->deopt_mh_handler_begin() :
363 nm->deopt_handler_begin();
364
365 NativePostCallNop* inst = nativePostCallNop_at(pc());
366
367 // Save the original pc before we patch in the new one
368 nm->set_original_pc(this, pc());
369 patch_pc(thread, deopt);
370 assert(is_deoptimized_frame(), "must be");
371
372 #ifdef ASSERT
373 if (thread != nullptr) {
374 frame check = thread->last_frame();
375 if (is_older(check.id())) {
376 RegisterMap map(thread,
377 RegisterMap::UpdateMap::skip,
378 RegisterMap::ProcessFrames::include,
379 RegisterMap::WalkContinuation::skip);
380 while (id() != check.id()) {
381 check = check.sender(&map);
382 }
383 assert(check.is_deoptimized_frame(), "missed deopt");
384 }
385 }
386 #endif // ASSERT
387 }
388
747 }
748
749
750 /*
751 The interpreter_frame_expression_stack_at method on SPARC needed the max_stack
752 value of the method in order to compute the expression stack address. It used
753 the Method* to obtain the max_stack value, but during GC this Method* value
754 saved on the frame is changed by reverse_and_push and hence cannot be used. So
755 we save the max_stack value in the FrameClosure object and pass it down to the
756 interpreter_frame_expression_stack_at method.
757 */
758 class InterpreterFrameClosure : public OffsetClosure {
759 private:
760 const frame* _fr;
761 OopClosure* _f;
762 int _max_locals;
763 int _max_stack;
764
765 public:
766 InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
767 OopClosure* f) {
768 _fr = fr;
769 _max_locals = max_locals;
770 _max_stack = max_stack;
771 _f = f;
772 }
773
774 void offset_do(int offset) {
775 oop* addr;
776 if (offset < _max_locals) {
777 addr = (oop*) _fr->interpreter_frame_local_at(offset);
778 assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
779 _f->do_oop(addr);
780 } else {
781 addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
782 // In case of exceptions, the expression stack is invalid and the esp will be reset to express
783 // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
784 bool in_stack;
785 if (frame::interpreter_frame_expression_stack_direction() > 0) {
786 in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
787 } else {
788 in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
789 }
790 if (in_stack) {
791 _f->do_oop(addr);
792 }
793 }
794 }
795 };
796
797
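// Visits the oops among the outgoing arguments at an interpreted call site. The
// arguments live on the caller's expression stack; their TOS-relative offsets
// are computed by walking the callee's signature.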
798 class InterpretedArgumentOopFinder: public SignatureIterator {
799 private:
800 OopClosure* _f; // Closure to invoke
801 int _offset; // TOS-relative offset, decremented with each argument
802 bool _has_receiver; // true if the callee has a receiver
803 const frame* _fr;
804
805 friend class SignatureIterator; // so do_parameters_on can call do_type
806 void do_type(BasicType type) {
807 _offset -= parameter_type_word_count(type);
808 if (is_reference_type(type)) oop_offset_do();
809 }
810
811 void oop_offset_do() {
946 signature = call.signature();
947 has_receiver = call.has_receiver();
948 if (map->include_argument_oops() &&
949 interpreter_frame_expression_stack_size() > 0) {
950 ResourceMark rm(thread); // is this right ???
951 // we are at a call site & the expression stack is not empty
952 // => process callee's arguments
953 //
954 // Note: The expression stack can be empty if an exception
955 // occurred during method resolution/execution. In all
956 // cases we empty the expression stack completely be-
957 // fore handling the exception (the exception handling
958 // code in the interpreter calls a blocking runtime
959 // routine which can cause this code to be executed).
960 // (was bug gri 7/27/98)
961 oops_interpreted_arguments_do(signature, has_receiver, f);
962 }
963 }
964 }
965
966 InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
967
968 // process locals & expression stack
969 InterpreterOopMap mask;
970 if (query_oop_map_cache) {
971 m->mask_for(m, bci, &mask);
972 } else {
973 OopMapCache::compute_one_oop_map(m, bci, &mask);
974 }
975 mask.iterate_oop(&blk);
976 }
977
978
979 void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
980 InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
981 finder.oops_do();
982 }
983
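// Visit the oops of a compiled frame via the nmethod's oop map. Derived pointers
// are passed to 'df' when one is supplied, otherwise they are handled according
// to 'derived_mode'.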
984 void frame::oops_nmethod_do(OopClosure* f, NMethodClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
985 assert(_cb != nullptr, "sanity check");
986 assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not");
987 if (oop_map() != nullptr) {
988 if (df != nullptr) {
989 _oop_map->oops_do(this, reg_map, f, df);
990 } else {
991 _oop_map->oops_do(this, reg_map, f, derived_mode);
992 }
993
994 // Preserve potential arguments for a callee. We handle this by dispatching
995 // on the codeblob.
996 if (reg_map->include_argument_oops() && _cb->is_nmethod()) {
997 // Only nmethod preserves outgoing arguments at call.
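// Visits the oops among the outgoing arguments at a compiled call site. The
// callee's calling convention (one VMRegPair per argument) is used to locate
// each argument, either in a register or in a stack slot of the caller's frame.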
1010 class CompiledArgumentOopFinder: public SignatureIterator {
1011 protected:
1012 OopClosure* _f;
1013 int _offset; // the current offset, incremented with each argument
1014 bool _has_receiver; // true if the callee has a receiver
1015 bool _has_appendix; // true if the call has an appendix
1016 frame _fr;
1017 RegisterMap* _reg_map;
1018 int _arg_size;
1019 VMRegPair* _regs; // VMReg list of arguments
1020
1021 friend class SignatureIterator; // so do_parameters_on can call do_type
1022 void do_type(BasicType type) {
1023 if (is_reference_type(type)) handle_oop_offset();
1024 _offset += parameter_type_word_count(type);
1025 }
1026
1027 virtual void handle_oop_offset() {
1028 // Extract low order register number from register array.
1029 // In LP64-land, the high-order bits are valid but unhelpful.
1030 VMReg reg = _regs[_offset].first();
1031 oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
1032 #ifdef ASSERT
1033 if (loc == nullptr) {
1034 if (_reg_map->should_skip_missing()) {
1035 return;
1036 }
1037 tty->print_cr("Error walking frame oops:");
1038 _fr.print_on(tty);
1039 assert(loc != nullptr, "missing register map entry reg: %d %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
1040 }
1041 #endif
1042 _f->do_oop(loc);
1043 }
1044
1045 public:
1046 CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
1047 : SignatureIterator(signature) {
1048
1049 // initialize CompiledArgumentOopFinder
1050 _f = f;
1051 _offset = 0;
1052 _has_receiver = has_receiver;
1053 _has_appendix = has_appendix;
1054 _fr = fr;
1055 _reg_map = (RegisterMap*)reg_map;
1056 _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);
1057
1058 int arg_size;
1059 _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
1060 assert(arg_size == _arg_size, "wrong arg size");
1061 }
1062
1063 void oops_do() {
1064 if (_has_receiver) {
1065 handle_oop_offset();
1066 _offset++;
1067 }
1068 do_parameters_on(this);
1069 if (_has_appendix) {
1070 handle_oop_offset();
1071 _offset++;
1072 }
1073 }
1074 };
1075
1076 void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
1077 const RegisterMap* reg_map, OopClosure* f) const {
1078 // ResourceMark rm;
1079 CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
1080 finder.oops_do();
1420 tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
1421 values.describe(frame_no, interpreter_frame_expression_stack_at(e),
1422 err_msg("stack %d", e), 1);
1423 }
1424 if (tos != nullptr) {
1425 values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
1426 }
1427
1428 if (reg_map != nullptr) {
1429 FrameValuesOopClosure oopsFn;
1430 oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
1431 oopsFn.describe(values, frame_no);
1432 }
1433 } else if (is_entry_frame()) {
1434 // For now just label the frame
1435 values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
1436 } else if (is_compiled_frame()) {
1437 // For now just label the frame
1438 nmethod* nm = cb()->as_nmethod();
1439 values.describe(-1, info_address,
1440 FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
1441 p2i(nm),
1442 nm->method()->name_and_sig_as_C_string(),
1443 (_deopt_state == is_deoptimized) ?
1444 " (deoptimized)" :
1445 ((_deopt_state == unknown) ? " (state unknown)" : "")),
1446 3);
1447
1448 { // mark arguments (see nmethod::print_nmethod_labels)
1449 Method* m = nm->method();
1450
1451 int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
1452 int sizeargs = m->size_of_parameters();
1453
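// Reconstruct the Java calling convention for this method: build the BasicType
// array for the signature (an implicit T_OBJECT receiver for non-static methods,
// plus a T_VOID filler for the second half of longs and doubles), then ask the
// shared runtime where each argument lives.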
1454 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
1455 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
1456 {
1457 int sig_index = 0;
1458 if (!m->is_static()) {
1459 sig_bt[sig_index++] = T_OBJECT; // 'this'
1460 }
1461 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
1462 BasicType t = ss.type();
1463 assert(type2size[t] == 1 || type2size[t] == 2, "size is 1 or 2");
1464 sig_bt[sig_index++] = t;
1465 if (type2size[t] == 2) {
1466 sig_bt[sig_index++] = T_VOID;
1467 }
1468 }
1469 assert(sig_index == sizeargs, "");
1470 }
1471 int stack_arg_slots = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
1472 assert(stack_arg_slots == nm->num_stack_arg_slots(false /* rounded */) || nm->is_osr_method(), "");
1473 int out_preserve = SharedRuntime::out_preserve_stack_slots();
1474 int sig_index = 0;
1475 int arg_index = (m->is_static() ? 0 : -1);
1476 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
1477 bool at_this = (arg_index == -1);
1478 bool at_old_sp = false;
1479 BasicType t = (at_this ? T_OBJECT : ss.type());
1480 assert(t == sig_bt[sig_index], "sigs in sync");
1481 VMReg fst = regs[sig_index].first();
1482 if (fst->is_stack()) {
1483 assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
1484 int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
1485 intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
1486 if (at_this) {
1487 values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
1488 } else {
1489 values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
1490 }
1491 }
1492 sig_index += type2size[t];
1493 arg_index += 1;
1494 if (!at_this) {
1495 ss.next();
1496 }
1497 }
1498 }
1499
1500 if (reg_map != nullptr && is_java_frame()) {
1501 int scope_no = 0;
1502 for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
1503 Method* m = scope->method();
1504 int bci = scope->bci();
1505 values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
1506
1507 { // mark locals
1508 GrowableArray<ScopeValue*>* scvs = scope->locals();
1509 int scvs_length = scvs != nullptr ? scvs->length() : 0;
1510 for (int i = 0; i < scvs_length; i++) {
1511 intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
1512 if (stack_address != nullptr) {
1513 values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
1514 }
1515 }
1516 }
22 *
23 */
24
25 #include "classfile/moduleEntry.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/scopeDesc.hpp"
28 #include "code/vmreg.inline.hpp"
29 #include "compiler/abstractCompiler.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "gc/shared/collectedHeap.inline.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "interpreter/oopMapCache.hpp"
35 #include "logging/log.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "memory/universe.hpp"
38 #include "oops/markWord.hpp"
39 #include "oops/method.inline.hpp"
40 #include "oops/methodData.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "oops/inlineKlass.hpp"
43 #include "oops/stackChunkOop.inline.hpp"
44 #include "oops/verifyOopClosure.hpp"
45 #include "prims/methodHandles.hpp"
46 #include "runtime/continuation.hpp"
47 #include "runtime/continuationEntry.inline.hpp"
48 #include "runtime/frame.inline.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/javaCalls.hpp"
51 #include "runtime/javaThread.hpp"
52 #include "runtime/monitorChunk.hpp"
53 #include "runtime/os.hpp"
54 #include "runtime/sharedRuntime.hpp"
55 #include "runtime/safefetch.hpp"
56 #include "runtime/signature.hpp"
57 #include "runtime/stackValue.hpp"
58 #include "runtime/stubCodeGenerator.hpp"
59 #include "runtime/stubRoutines.hpp"
60 #include "utilities/debug.hpp"
61 #include "utilities/decoder.hpp"
62 #include "utilities/formatBuffer.hpp"
63 #ifdef COMPILER1
64 #include "c1/c1_Runtime1.hpp"
65 #endif
66
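// Construct a RegisterMap for walking the stack of the given thread. UpdateMap
// controls whether callee-saved register locations are recorded during the walk,
// ProcessFrames controls whether frames are processed (e.g. for stack watermark
// processing), and WalkContinuation controls whether the walk descends into
// mounted continuation stack chunks.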
67 RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
68 _thread = thread;
69 _update_map = update_map == UpdateMap::include;
70 _process_frames = process_frames == ProcessFrames::include;
71 _walk_cont = walk_cont == WalkContinuation::include;
72 clear();
73 DEBUG_ONLY (_update_for_id = nullptr;)
74 NOT_PRODUCT(_skip_missing = false;)
75 NOT_PRODUCT(_async = false;)
76
77 if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
78 _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
79 }
80 _chunk_index = -1;
81
82 #ifndef PRODUCT
83 for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
84 #endif /* PRODUCT */
85 }
353 return !nm->is_at_poll_return(pc());
354 }
355
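// Mark this compiled (nmethod) frame for deoptimization by patching its pc to
// the nmethod's deopt handler. The original pc is saved first so that the
// deoptimization blob can later recover the scope information for this frame.
// For C1-compiled methods with scalarized inline-type arguments the patching is
// deferred while execution is still in the entry point (see below).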
356 void frame::deoptimize(JavaThread* thread) {
357 assert(thread == nullptr
358 || (thread->frame_anchor()->has_last_Java_frame() &&
359 thread->frame_anchor()->walkable()), "must be");
360 // Schedule deoptimization of an nmethod activation with this frame.
361 assert(_cb != nullptr && _cb->is_nmethod(), "must be");
362
363 // If the call site is a MethodHandle call site use the MH deopt handler.
364 nmethod* nm = _cb->as_nmethod();
365 address deopt = nm->is_method_handle_return(pc()) ?
366 nm->deopt_mh_handler_begin() :
367 nm->deopt_handler_begin();
368
369 NativePostCallNop* inst = nativePostCallNop_at(pc());
370
371 // Save the original pc before we patch in the new one
372 nm->set_original_pc(this, pc());
373
374 #ifdef COMPILER1
375 if (nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
376 pc() < nm->verified_inline_entry_point()) {
377 // The VEP and VIEP(RO) of C1-compiled methods call into the runtime to buffer scalarized value
378 // type args. We can't deoptimize at that point because the buffers have not yet been initialized.
379 // Also, if the method is synchronized, we first need to acquire the lock.
380 // Don't patch the return pc to delay deoptimization until we enter the method body (the check
381 // added in LIRGenerator::do_Base will detect the pending deoptimization by checking the original_pc).
382 #if defined ASSERT && !defined AARCH64 // Stub call site does not look like NativeCall on AArch64
383 NativeCall* call = nativeCall_before(this->pc());
384 address dest = call->destination();
385 assert(dest == Runtime1::entry_for(C1StubId::buffer_inline_args_no_receiver_id) ||
386 dest == Runtime1::entry_for(C1StubId::buffer_inline_args_id), "unexpected safepoint in entry point");
387 #endif
388 return;
389 }
390 #endif
391
392 patch_pc(thread, deopt);
393 assert(is_deoptimized_frame(), "must be");
394
395 #ifdef ASSERT
396 if (thread != nullptr) {
397 frame check = thread->last_frame();
398 if (is_older(check.id())) {
399 RegisterMap map(thread,
400 RegisterMap::UpdateMap::skip,
401 RegisterMap::ProcessFrames::include,
402 RegisterMap::WalkContinuation::skip);
403 while (id() != check.id()) {
404 check = check.sender(&map);
405 }
406 assert(check.is_deoptimized_frame(), "missed deopt");
407 }
408 }
409 #endif // ASSERT
410 }
411
770 }
771
772
773 /*
774 The interpreter_frame_expression_stack_at method on SPARC needed the max_stack
775 value of the method in order to compute the expression stack address. It used
776 the Method* to obtain the max_stack value, but during GC this Method* value
777 saved on the frame is changed by reverse_and_push and hence cannot be used. So
778 we save the max_stack value in the FrameClosure object and pass it down to the
779 interpreter_frame_expression_stack_at method.
780 */
781 class InterpreterFrameClosure : public OffsetClosure {
782 private:
783 const frame* _fr;
784 OopClosure* _f;
BufferedValueClosure* _bvt_f;
785 int _max_locals;
786 int _max_stack;
787
788 public:
789 InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
790 OopClosure* f, BufferedValueClosure* bvt_f) {
791 _fr = fr;
792 _max_locals = max_locals;
793 _max_stack = max_stack;
794 _f = f;
_bvt_f = bvt_f;
795 }
796
797 void offset_do(int offset) {
798 oop* addr;
799 if (offset < _max_locals) {
800 addr = (oop*) _fr->interpreter_frame_local_at(offset);
801 assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
802 if (_f != nullptr) {
803 _f->do_oop(addr);
804 }
805 } else {
806 addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
807 // In case of exceptions, the expression stack is invalid and the esp will be reset to express
808 // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
809 bool in_stack;
810 if (frame::interpreter_frame_expression_stack_direction() > 0) {
811 in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
812 } else {
813 in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
814 }
815 if (in_stack) {
816 if (_f != nullptr) {
817 _f->do_oop(addr);
818 }
819 }
820 }
821 }
822 };
823
824
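// Visits the oops among the outgoing arguments at an interpreted call site. The
// arguments live on the caller's expression stack; their TOS-relative offsets
// are computed by walking the callee's signature.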
825 class InterpretedArgumentOopFinder: public SignatureIterator {
826 private:
827 OopClosure* _f; // Closure to invoke
828 int _offset; // TOS-relative offset, decremented with each argument
829 bool _has_receiver; // true if the callee has a receiver
830 const frame* _fr;
831
832 friend class SignatureIterator; // so do_parameters_on can call do_type
833 void do_type(BasicType type) {
834 _offset -= parameter_type_word_count(type);
835 if (is_reference_type(type)) oop_offset_do();
836 }
837
838 void oop_offset_do() {
973 signature = call.signature();
974 has_receiver = call.has_receiver();
975 if (map->include_argument_oops() &&
976 interpreter_frame_expression_stack_size() > 0) {
977 ResourceMark rm(thread); // is this right ???
978 // we are at a call site & the expression stack is not empty
979 // => process callee's arguments
980 //
981 // Note: The expression stack can be empty if an exception
982 // occurred during method resolution/execution. In all
983 // cases we empty the expression stack completely be-
984 // fore handling the exception (the exception handling
985 // code in the interpreter calls a blocking runtime
986 // routine which can cause this code to be executed).
987 // (was bug gri 7/27/98)
988 oops_interpreted_arguments_do(signature, has_receiver, f);
989 }
990 }
991 }
992
993 InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f, nullptr);
994
995 // process locals & expression stack
996 InterpreterOopMap mask;
997 if (query_oop_map_cache) {
998 m->mask_for(m, bci, &mask);
999 } else {
1000 OopMapCache::compute_one_oop_map(m, bci, &mask);
1001 }
1002 mask.iterate_oop(&blk);
1003 }
1004
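// Walk the locals and expression stack of this interpreted frame with a
// BufferedValueClosure; used by the inline-type (Valhalla) implementation to
// visit buffered inline-type values referenced from the frame.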
1005 void frame::buffered_values_interpreted_do(BufferedValueClosure* f) {
1006 assert(is_interpreted_frame(), "Not an interpreted frame");
1007 Thread *thread = Thread::current();
1008 methodHandle m (thread, interpreter_frame_method());
1009 jint bci = interpreter_frame_bci();
1010
1011 assert(m->is_method(), "checking frame value");
1012 assert(!m->is_native() && bci >= 0 && bci < m->code_size(),
1013 "invalid bci value");
1014
1015 InterpreterFrameClosure blk(this, m->max_locals(), m->max_stack(), nullptr, f);
1016
1017 // process locals & expression stack
1018 InterpreterOopMap mask;
1019 m->mask_for(bci, &mask);
1020 mask.iterate_oop(&blk);
1021 }
1022
1023 void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
1024 InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
1025 finder.oops_do();
1026 }
1027
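// Visit the oops of a compiled frame via the nmethod's oop map. Derived pointers
// are passed to 'df' when one is supplied, otherwise they are handled according
// to 'derived_mode'.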
1028 void frame::oops_nmethod_do(OopClosure* f, NMethodClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
1029 assert(_cb != nullptr, "sanity check");
1030 assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not");
1031 if (oop_map() != nullptr) {
1032 if (df != nullptr) {
1033 _oop_map->oops_do(this, reg_map, f, df);
1034 } else {
1035 _oop_map->oops_do(this, reg_map, f, derived_mode);
1036 }
1037
1038 // Preserve potential arguments for a callee. We handle this by dispatching
1039 // on the codeblob.
1040 if (reg_map->include_argument_oops() && _cb->is_nmethod()) {
1041 // Only nmethod preserves outgoing arguments at call.
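// Visits the oops among the outgoing arguments at a compiled call site. The
// callee's calling convention (one VMRegPair per argument) is used to locate
// each argument, either in a register or in a stack slot of the caller's frame.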
1054 class CompiledArgumentOopFinder: public SignatureIterator {
1055 protected:
1056 OopClosure* _f;
1057 int _offset; // the current offset, incremented with each argument
1058 bool _has_receiver; // true if the callee has a receiver
1059 bool _has_appendix; // true if the call has an appendix
1060 frame _fr;
1061 RegisterMap* _reg_map;
1062 int _arg_size;
1063 VMRegPair* _regs; // VMReg list of arguments
1064
1065 friend class SignatureIterator; // so do_parameters_on can call do_type
1066 void do_type(BasicType type) {
1067 if (is_reference_type(type)) handle_oop_offset();
1068 _offset += parameter_type_word_count(type);
1069 }
1070
1071 virtual void handle_oop_offset() {
1072 // Extract low order register number from register array.
1073 // In LP64-land, the high-order bits are valid but unhelpful.
1074 assert(_offset < _arg_size, "out of bounds");
1075 VMReg reg = _regs[_offset].first();
1076 oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
1077 #ifdef ASSERT
1078 if (loc == nullptr) {
1079 if (_reg_map->should_skip_missing()) {
1080 return;
1081 }
1082 tty->print_cr("Error walking frame oops:");
1083 _fr.print_on(tty);
1084 assert(loc != nullptr, "missing register map entry reg: %d %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
1085 }
1086 #endif
1087 _f->do_oop(loc);
1088 }
1089
1090 public:
1091 CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
1092 : SignatureIterator(signature) {
1093
1094 // initialize CompiledArgumentOopFinder
1095 _f = f;
1096 _offset = 0;
1097 _has_receiver = has_receiver;
1098 _has_appendix = has_appendix;
1099 _fr = fr;
1100 _reg_map = (RegisterMap*)reg_map;
1101 _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &_arg_size);
1102 }
1103
1104 void oops_do() {
1105 if (_has_receiver) {
1106 handle_oop_offset();
1107 _offset++;
1108 }
1109 do_parameters_on(this);
1110 if (_has_appendix) {
1111 handle_oop_offset();
1112 _offset++;
1113 }
1114 }
1115 };
1116
1117 void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
1118 const RegisterMap* reg_map, OopClosure* f) const {
1119 // ResourceMark rm;
1120 CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
1121 finder.oops_do();
1461 tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
1462 values.describe(frame_no, interpreter_frame_expression_stack_at(e),
1463 err_msg("stack %d", e), 1);
1464 }
1465 if (tos != nullptr) {
1466 values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
1467 }
1468
1469 if (reg_map != nullptr) {
1470 FrameValuesOopClosure oopsFn;
1471 oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
1472 oopsFn.describe(values, frame_no);
1473 }
1474 } else if (is_entry_frame()) {
1475 // For now just label the frame
1476 values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
1477 } else if (is_compiled_frame()) {
1478 // For now just label the frame
1479 nmethod* nm = cb()->as_nmethod();
1480 values.describe(-1, info_address,
1481 FormatBuffer<1024>("#%d nmethod (%s %d) " INTPTR_FORMAT " for method J %s%s", frame_no,
1482 nm->is_compiled_by_c1() ? "c1" : "c2", nm->frame_size(), p2i(nm),
1483 nm->method()->name_and_sig_as_C_string(),
1484 (_deopt_state == is_deoptimized) ?
1485 " (deoptimized)" :
1486 ((_deopt_state == unknown) ? " (state unknown)" : "")),
1487 3);
1488
1489 { // mark arguments (see nmethod::print_nmethod_labels)
1490 Method* m = nm->method();
1491
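// Determine the calling convention actually used by this nmethod: C2 code uses
// the scalarized calling convention for inline-type arguments (sig_cc/regs_cc),
// while C1 code uses the unscalarized signature.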
1492 CompiledEntrySignature ces(m);
1493 ces.compute_calling_conventions(false);
1494 const GrowableArray<SigEntry>* sig_cc = nm->is_compiled_by_c2() ? ces.sig_cc() : ces.sig();
1495 const VMRegPair* regs = nm->is_compiled_by_c2() ? ces.regs_cc() : ces.regs();
1496
1497 int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
1498 int out_preserve = SharedRuntime::out_preserve_stack_slots();
1499 int sig_index = 0;
1500 int arg_index = (m->is_static() ? 0 : -1);
1501 for (ExtendedSignature sig = ExtendedSignature(sig_cc, SigEntryFilter()); !sig.at_end(); ++sig) {
1502 bool at_this = (arg_index == -1);
1503 BasicType t = (*sig)._bt;
1504 VMReg fst = regs[sig_index].first();
1505 if (fst->is_stack()) {
1506 assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
1507 int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
1508 intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
1509 if (at_this) {
1510 values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
1511 } else {
1512 values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
1513 }
1514 }
1515 sig_index += type2size[t];
1516 arg_index += 1;
1517 }
1518 }
1519
1520 if (reg_map != nullptr && is_java_frame()) {
1521 int scope_no = 0;
1522 for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
1523 Method* m = scope->method();
1524 int bci = scope->bci();
1525 values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
1526
1527 { // mark locals
1528 GrowableArray<ScopeValue*>* scvs = scope->locals();
1529 int scvs_length = scvs != nullptr ? scvs->length() : 0;
1530 for (int i = 0; i < scvs_length; i++) {
1531 intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
1532 if (stack_address != nullptr) {
1533 values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
1534 }
1535 }
1536 }