 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/moduleEntry.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/markWord.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/os.hpp"
#include "runtime/safefetch.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackValue.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/debug.hpp"
#include "utilities/decoder.hpp"
#include "utilities/formatBuffer.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
  _thread = thread;
  _update_map = update_map == UpdateMap::include;
  _process_frames = process_frames == ProcessFrames::include;
  _walk_cont = walk_cont == WalkContinuation::include;
  clear();
  DEBUG_ONLY (_update_for_id = nullptr;)
  NOT_PRODUCT(_skip_missing = false;)
  NOT_PRODUCT(_async = false;)

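  // Pre-allocate a (null) stackChunkHandle only when this thread has a
  // continuation we may actually walk; the chunk oop itself is installed
  // later, once the walk reaches continuation frames.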
  if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
    _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
  }
  _chunk_index = -1;

#ifndef PRODUCT
  for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
#endif /* PRODUCT */
}

// ... (lines 86-347 omitted) ...

    return false;

  return !nm->is_at_poll_return(pc());
}

void frame::deoptimize(JavaThread* thread) {
  assert(thread == nullptr
         || (thread->frame_anchor()->has_last_Java_frame() &&
             thread->frame_anchor()->walkable()), "must be");
  // Schedule deoptimization of an nmethod activation with this frame.
  assert(_cb != nullptr && _cb->is_nmethod(), "must be");

  // If the call site is a MethodHandle call site use the MH deopt handler.
  nmethod* nm = _cb->as_nmethod();
  address deopt = nm->deopt_handler_begin();

  NativePostCallNop* inst = nativePostCallNop_at(pc());

  // Save the original pc before we patch in the new one
  nm->set_original_pc(this, pc());

#ifdef COMPILER1
  if (nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
      pc() < nm->verified_inline_entry_point()) {
    // The VEP and VIEP(RO) of C1-compiled methods call into the runtime to buffer scalarized value
    // type args. We can't deoptimize at that point because the buffers have not yet been initialized.
    // Also, if the method is synchronized, we first need to acquire the lock.
    // Don't patch the return pc to delay deoptimization until we enter the method body (the check
    // added in LIRGenerator::do_Base will detect the pending deoptimization by checking the original_pc).
#if defined ASSERT && !defined AARCH64 // Stub call site does not look like NativeCall on AArch64
    NativeCall* call = nativeCall_before(this->pc());
    address dest = call->destination();
    assert(dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_no_receiver_id) ||
           dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_id), "unexpected safepoint in entry point");
#endif
    return;
  }
#endif

  patch_pc(thread, deopt);
  assert(is_deoptimized_frame(), "must be");

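  // Debug-only check: walk from the thread's last frame back to this frame
  // and verify that it is now observed as a deoptimized frame.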
#ifdef ASSERT
  if (thread != nullptr) {
    frame check = thread->last_frame();
    if (is_older(check.id())) {
      RegisterMap map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      while (id() != check.id()) {
        check = check.sender(&map);
      }
      assert(check.is_deoptimized_frame(), "missed deopt");
    }
  }
#endif // ASSERT
}

// ... (lines 407-764 omitted) ...

}

/*
  On SPARC, the interpreter_frame_expression_stack_at method needs the method's
  max_stack value in order to compute the expression stack address. It would
  normally obtain it from the Method*, but during GC the Method* saved in the
  frame is changed by reverse_and_push and hence cannot be used. So we save the
  max_stack value in the FrameClosure object and pass it down to
  interpreter_frame_expression_stack_at.
*/
class InterpreterFrameClosure : public OffsetClosure {
 private:
  const frame* _fr;
  OopClosure* _f;
  int _max_locals;
  int _max_stack;

 public:
  InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
                          OopClosure* f, BufferedValueClosure* bvt_f) {
    _fr = fr;
    _max_locals = max_locals;
    _max_stack = max_stack;
    _f = f;
  }

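  // The oop-map iterator reports offsets in a single numbering: offsets below
  // max_locals refer to local slots, larger offsets index the expression stack
  // (biased by max_locals).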
  void offset_do(int offset) {
    oop* addr;
    if (offset < _max_locals) {
      addr = (oop*) _fr->interpreter_frame_local_at(offset);
      assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
      if (_f != nullptr) {
        _f->do_oop(addr);
      }
    } else {
      addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
      // In case of exceptions, the expression stack is invalid and the esp will be reset to express
      // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
      bool in_stack;
      if (frame::interpreter_frame_expression_stack_direction() > 0) {
        in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
      } else {
        in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
      }
      if (in_stack) {
        if (_f != nullptr) {
          _f->do_oop(addr);
        }
      }
    }
  }
};


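// Visits the reference-typed outgoing arguments of an interpreted call site.
// The arguments live on the caller's expression stack; their TOS-relative
// offsets are derived from the callee's signature.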
class InterpretedArgumentOopFinder: public SignatureIterator {
 private:
  OopClosure* _f;      // Closure to invoke
  int _offset;         // TOS-relative offset, decremented with each argument
  bool _has_receiver;  // true if the callee has a receiver
  const frame* _fr;

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    _offset -= parameter_type_word_count(type);
    if (is_reference_type(type)) oop_offset_do();
  }

  void oop_offset_do() {

// ... (lines 834-967 omitted) ...

      signature = call.signature();
      has_receiver = call.has_receiver();
      if (map->include_argument_oops() &&
          interpreter_frame_expression_stack_size() > 0) {
        ResourceMark rm(thread);  // is this right ???
        // we are at a call site & the expression stack is not empty
        // => process callee's arguments
        //
        // Note: The expression stack can be empty if an exception
        //       occurred during method resolution/execution. In all
        //       cases we empty the expression stack completely
        //       before handling the exception (the exception handling
        //       code in the interpreter calls a blocking runtime
        //       routine which can cause this code to be executed).
        //       (was bug gri 7/27/98)
        oops_interpreted_arguments_do(signature, has_receiver, f);
      }
    }
  }

  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f, nullptr);

  // process locals & expression stack
  InterpreterOopMap mask;
  if (query_oop_map_cache) {
    m->mask_for(m, bci, &mask);
  } else {
    OopMapCache::compute_one_oop_map(m, bci, &mask);
  }
  mask.iterate_oop(&blk);
}

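// Valhalla-specific: the same frame walk as oops_interpreted_do() above, but
// driven by a BufferedValueClosure instead of an OopClosure.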
void frame::buffered_values_interpreted_do(BufferedValueClosure* f) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  Thread *thread = Thread::current();
  methodHandle m (thread, interpreter_frame_method());
  jint bci = interpreter_frame_bci();

  assert(m->is_method(), "checking frame value");
  assert(!m->is_native() && bci >= 0 && bci < m->code_size(),
         "invalid bci value");

  InterpreterFrameClosure blk(this, m->max_locals(), m->max_stack(), nullptr, f);

  // process locals & expression stack
  InterpreterOopMap mask;
  m->mask_for(bci, &mask);
  mask.iterate_oop(&blk);
}

void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
  InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
  finder.oops_do();
}

void frame::oops_nmethod_do(OopClosure* f, NMethodClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
  assert(_cb != nullptr, "sanity check");
  assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not");
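  // An nmethod frame's oops are described by the OopMap recorded for this pc.
  // Derived pointers go through df when one is supplied; otherwise they are
  // handled according to derived_mode.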
  if (oop_map() != nullptr) {
    if (df != nullptr) {
      _oop_map->oops_do(this, reg_map, f, df);
    } else {
      _oop_map->oops_do(this, reg_map, f, derived_mode);
    }

    // Preserve potential arguments for a callee. We handle this by dispatching
    // on the codeblob.
    if (reg_map->include_argument_oops() && _cb->is_nmethod()) {
      // Only nmethod preserves outgoing arguments at call.

// ... (lines 1037-1048 omitted) ...

class CompiledArgumentOopFinder: public SignatureIterator {
 protected:
  OopClosure* _f;
  int _offset;         // the current offset, incremented with each argument
  bool _has_receiver;  // true if the callee has a receiver
  bool _has_appendix;  // true if the call has an appendix
  frame _fr;
  RegisterMap* _reg_map;
  int _arg_size;
  VMRegPair* _regs;    // VMReg list of arguments

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type)) handle_oop_offset();
    _offset += parameter_type_word_count(type);
  }

  virtual void handle_oop_offset() {
    // Extract low order register number from register array.
    // In LP64-land, the high-order bits are valid but unhelpful.
    assert(_offset < _arg_size, "out of bounds");
    VMReg reg = _regs[_offset].first();
    oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
#ifdef ASSERT
    if (loc == nullptr) {
      if (_reg_map->should_skip_missing()) {
        return;
      }
      tty->print_cr("Error walking frame oops:");
      _fr.print_on(tty);
      assert(loc != nullptr, "missing register map entry reg: %d %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
    }
#endif
    _f->do_oop(loc);
  }

 public:
  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
    : SignatureIterator(signature) {

    // initialize CompiledArgumentOopFinder
    _f = f;
    _offset = 0;
    _has_receiver = has_receiver;
    _has_appendix = has_appendix;
    _fr = fr;
    _reg_map = (RegisterMap*)reg_map;
    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &_arg_size);
  }

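  // Visit outgoing arguments in calling-convention order: the receiver (if
  // any), then the declared parameters, then the appendix (if any).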
  void oops_do() {
    if (_has_receiver) {
      handle_oop_offset();
      _offset++;
    }
    do_parameters_on(this);
    if (_has_appendix) {
      handle_oop_offset();
      _offset++;
    }
  }
};

void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
                                       const RegisterMap* reg_map, OopClosure* f) const {
  // ResourceMark rm;
  CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
  finder.oops_do();

// ... (lines 1117-1455 omitted) ...

      tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
      values.describe(frame_no, interpreter_frame_expression_stack_at(e),
                      err_msg("stack %d", e), 1);
    }
    if (tos != nullptr) {
      values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
    }

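    // With a register map available, also annotate which slots of this frame
    // hold oops (and derived oops), as reported by oops_do().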
    if (reg_map != nullptr) {
      FrameValuesOopClosure oopsFn;
      oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
      oopsFn.describe(values, frame_no);
    }
  } else if (is_entry_frame()) {
    // For now just label the frame
    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
  } else if (is_compiled_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod (%s %d) " INTPTR_FORMAT " for method J %s%s", frame_no,
                                       nm->is_compiled_by_c1() ? "c1" : "c2", nm->frame_size(), p2i(nm),
                                       nm->method()->name_and_sig_as_C_string(),
                                       (_deopt_state == is_deoptimized) ?
                                         " (deoptimized)" :
                                         ((_deopt_state == unknown) ? " (state unknown)" : "")),
                    3);

    { // mark arguments (see nmethod::print_nmethod_labels)
      Method* m = nm->method();

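      // Recover this method's compiled calling convention: C2 code uses the
      // scalarized signature (sig_cc/regs_cc), while C1 keeps the unscalarized
      // one, so choose accordingly before labelling stack-passed arguments.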
      CompiledEntrySignature ces(m);
      ces.compute_calling_conventions(false);
      const GrowableArray<SigEntry>* sig_cc = nm->is_compiled_by_c2() ? ces.sig_cc() : ces.sig();
      const VMRegPair* regs = nm->is_compiled_by_c2() ? ces.regs_cc() : ces.regs();

      int stack_slot_offset = nm->frame_size() * wordSize;  // offset, in bytes, to caller sp
      int out_preserve = SharedRuntime::out_preserve_stack_slots();
      int sig_index = 0;
      int arg_index = (m->is_static() ? 0 : -1);
      for (ExtendedSignature sig = ExtendedSignature(sig_cc, SigEntryFilter()); !sig.at_end(); ++sig) {
        bool at_this = (arg_index == -1);
        BasicType t = (*sig)._bt;
        VMReg fst = regs[sig_index].first();
        if (fst->is_stack()) {
          assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
          int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
          intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
          if (at_this) {
            values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
          } else {
            values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
          }
        }
        sig_index += type2size[t];
        arg_index += 1;
      }
    }

    if (reg_map != nullptr && is_java_frame()) {
      int scope_no = 0;
      for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
        Method* m = scope->method();
        int bci = scope->bci();
        values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);

        { // mark locals
          GrowableArray<ScopeValue*>* scvs = scope->locals();
          int scvs_length = scvs != nullptr ? scvs->length() : 0;
          for (int i = 0; i < scvs_length; i++) {
            intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
            if (stack_address != nullptr) {
              values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
            }
          }
        }