18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/moduleEntry.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/scopeDesc.hpp"
28 #include "code/vmreg.inline.hpp"
29 #include "compiler/abstractCompiler.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "gc/shared/collectedHeap.inline.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "interpreter/oopMapCache.hpp"
35 #include "logging/log.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "memory/universe.hpp"
38 #include "oops/markWord.hpp"
39 #include "oops/method.inline.hpp"
40 #include "oops/methodData.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "oops/stackChunkOop.inline.hpp"
43 #include "oops/verifyOopClosure.hpp"
44 #include "prims/methodHandles.hpp"
45 #include "runtime/continuation.hpp"
46 #include "runtime/continuationEntry.inline.hpp"
47 #include "runtime/frame.inline.hpp"
48 #include "runtime/handles.inline.hpp"
49 #include "runtime/javaCalls.hpp"
50 #include "runtime/javaThread.hpp"
51 #include "runtime/monitorChunk.hpp"
52 #include "runtime/os.hpp"
53 #include "runtime/safefetch.hpp"
54 #include "runtime/sharedRuntime.hpp"
55 #include "runtime/signature.hpp"
56 #include "runtime/stackValue.hpp"
57 #include "runtime/stubCodeGenerator.hpp"
58 #include "runtime/stubRoutines.hpp"
59 #include "utilities/debug.hpp"
60 #include "utilities/decoder.hpp"
61 #include "utilities/formatBuffer.hpp"
62
// Construct a register map for walking the stack of `thread`.
// The three enum parameters control how much work the stack walk does:
//  - update_map:     presumably whether callee-saved register locations are
//                    recorded while unwinding — TODO confirm at use sites
//  - process_frames: whether safepoint/handshake frame processing is allowed
//  - walk_cont:      whether the walk descends into continuation stack chunks
RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
  _thread = thread;
  // Store the enum selections as booleans for cheap queries during the walk.
  _update_map = update_map == UpdateMap::include;
  _process_frames = process_frames == ProcessFrames::include;
  _walk_cont = walk_cont == WalkContinuation::include;
  clear();
  DEBUG_ONLY (_update_for_id = nullptr;)
  NOT_PRODUCT(_skip_missing = false;)
  NOT_PRODUCT(_async = false;)

  // Only allocate a stack-chunk handle when it can actually be needed:
  // continuation walking was requested and the thread currently has a
  // continuation mounted.
  if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
    _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
  }
  _chunk_index = -1;

#ifndef PRODUCT
  // Debug builds: start from a known-empty location table.
  for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
#endif /* PRODUCT */
}
344 return false;
345
346 return !nm->is_at_poll_return(pc());
347 }
348
349 void frame::deoptimize(JavaThread* thread) {
350 assert(thread == nullptr
351 || (thread->frame_anchor()->has_last_Java_frame() &&
352 thread->frame_anchor()->walkable()), "must be");
353 // Schedule deoptimization of an nmethod activation with this frame.
354 assert(_cb != nullptr && _cb->is_nmethod(), "must be");
355
356 // If the call site is a MethodHandle call site use the MH deopt handler.
357 nmethod* nm = _cb->as_nmethod();
358 address deopt = nm->deopt_handler_entry();
359
360 NativePostCallNop* inst = nativePostCallNop_at(pc());
361
362 // Save the original pc before we patch in the new one
363 nm->set_original_pc(this, pc());
364 patch_pc(thread, deopt);
365 assert(is_deoptimized_frame(), "must be");
366
367 #ifdef ASSERT
368 if (thread != nullptr) {
369 frame check = thread->last_frame();
370 if (is_older(check.id())) {
371 RegisterMap map(thread,
372 RegisterMap::UpdateMap::skip,
373 RegisterMap::ProcessFrames::include,
374 RegisterMap::WalkContinuation::skip);
375 while (id() != check.id()) {
376 check = check.sender(&map);
377 }
378 assert(check.is_deoptimized_frame(), "missed deopt");
379 }
380 }
381 #endif // ASSERT
382 }
383
742 }
743
744
745 /*
746 The interpreter_frame_expression_stack_at method in the case of SPARC needs the
747 max_stack value of the method in order to compute the expression stack address.
748 It uses the Method* in order to get the max_stack value but during GC this
749 Method* value saved on the frame is changed by reverse_and_push and hence cannot
750 be used. So we save the max_stack value in the FrameClosure object and pass it
751 down to the interpreter_frame_expression_stack_at method
752 */
753 class InterpreterFrameClosure : public OffsetClosure {
754 private:
755 const frame* _fr;
756 OopClosure* _f;
757 int _max_locals;
758 int _max_stack;
759
760 public:
761 InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
762 OopClosure* f) {
763 _fr = fr;
764 _max_locals = max_locals;
765 _max_stack = max_stack;
766 _f = f;
767 }
768
769 void offset_do(int offset) {
770 oop* addr;
771 if (offset < _max_locals) {
772 addr = (oop*) _fr->interpreter_frame_local_at(offset);
773 assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
774 _f->do_oop(addr);
775 } else {
776 addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
777 // In case of exceptions, the expression stack is invalid and the esp will be reset to express
778 // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
779 bool in_stack;
780 if (frame::interpreter_frame_expression_stack_direction() > 0) {
781 in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
782 } else {
783 in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
784 }
785 if (in_stack) {
786 _f->do_oop(addr);
787 }
788 }
789 }
790 };
791
792
793 class InterpretedArgumentOopFinder: public SignatureIterator {
794 private:
795 OopClosure* _f; // Closure to invoke
796 int _offset; // TOS-relative offset, decremented with each argument
797 bool _has_receiver; // true if the callee has a receiver
798 const frame* _fr;
799
800 friend class SignatureIterator; // so do_parameters_on can call do_type
801 void do_type(BasicType type) {
802 _offset -= parameter_type_word_count(type);
803 if (is_reference_type(type)) oop_offset_do();
804 }
805
806 void oop_offset_do() {
928 f->do_oop(interpreter_frame_mirror_addr());
929
930 int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
931
932 // Process a callee's arguments if we are at a call site
933 // (i.e., if we are at an invoke bytecode)
934 // This is used sometimes for calling into the VM, not for another
935 // interpreted or compiled frame.
936 if (!m->is_native() && map != nullptr && map->include_argument_oops()) {
937 Bytecode_invoke call = Bytecode_invoke_check(m, bci);
938 if (call.is_valid() && interpreter_frame_expression_stack_size() > 0) {
939 ResourceMark rm(thread); // is this right ???
940 Symbol* signature = call.signature();
941 bool has_receiver = call.has_receiver();
942 // We are at a call site & the expression stack is not empty
943 // so we might have callee arguments we need to process.
944 oops_interpreted_arguments_do(signature, has_receiver, f);
945 }
946 }
947
948 InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
949
950 // process locals & expression stack
951 InterpreterOopMap mask;
952 if (query_oop_map_cache) {
953 m->mask_for(m, bci, &mask);
954 } else {
955 OopMapCache::compute_one_oop_map(m, bci, &mask);
956 }
957 mask.iterate_oop(&blk);
958 }
959
// Explicit instantiations of frame::oops_interpreted_do for each register-map
// flavor used by callers.
template void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) const;
template void frame::oops_interpreted_do(OopClosure* f, const SmallRegisterMapNoArgs* map, bool query_oop_map_cache) const;
template void frame::oops_interpreted_do(OopClosure* f, const SmallRegisterMapWithArgs* map, bool query_oop_map_cache) const;
963
964 void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
965 InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
966 finder.oops_do();
967 }
968
969 void frame::oops_nmethod_do(OopClosure* f, NMethodClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
970 assert(_cb != nullptr, "sanity check");
971 assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not");
972 if (oop_map() != nullptr) {
973 if (df != nullptr) {
974 _oop_map->oops_do(this, reg_map, f, df);
975 } else {
976 _oop_map->oops_do(this, reg_map, f, derived_mode);
977 }
978
979 // Preserve potential arguments for a callee. We handle this by dispatching
995 class CompiledArgumentOopFinder: public SignatureIterator {
996 protected:
997 OopClosure* _f;
998 int _offset; // the current offset, incremented with each argument
999 bool _has_receiver; // true if the callee has a receiver
1000 bool _has_appendix; // true if the call has an appendix
1001 frame _fr;
1002 RegisterMap* _reg_map;
1003 int _arg_size;
1004 VMRegPair* _regs; // VMReg list of arguments
1005
1006 friend class SignatureIterator; // so do_parameters_on can call do_type
1007 void do_type(BasicType type) {
1008 if (is_reference_type(type)) handle_oop_offset();
1009 _offset += parameter_type_word_count(type);
1010 }
1011
1012 virtual void handle_oop_offset() {
1013 // Extract low order register number from register array.
1014 // In LP64-land, the high-order bits are valid but unhelpful.
1015 VMReg reg = _regs[_offset].first();
1016 oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
1017 #ifdef ASSERT
1018 if (loc == nullptr) {
1019 if (_reg_map->should_skip_missing()) {
1020 return;
1021 }
1022 tty->print_cr("Error walking frame oops:");
1023 _fr.print_on(tty);
1024 assert(loc != nullptr, "missing register map entry reg: %d %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
1025 }
1026 #endif
1027 _f->do_oop(loc);
1028 }
1029
1030 public:
1031 CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
1032 : SignatureIterator(signature) {
1033
1034 // initialize CompiledArgumentOopFinder
1035 _f = f;
1036 _offset = 0;
1037 _has_receiver = has_receiver;
1038 _has_appendix = has_appendix;
1039 _fr = fr;
1040 _reg_map = (RegisterMap*)reg_map;
1041 _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);
1042
1043 int arg_size;
1044 _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
1045 assert(arg_size == _arg_size, "wrong arg size");
1046 }
1047
1048 void oops_do() {
1049 if (_has_receiver) {
1050 handle_oop_offset();
1051 _offset++;
1052 }
1053 do_parameters_on(this);
1054 if (_has_appendix) {
1055 handle_oop_offset();
1056 _offset++;
1057 }
1058 }
1059 };
1060
1061 void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
1062 const RegisterMap* reg_map, OopClosure* f) const {
1063 // ResourceMark rm;
1064 CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
1065 finder.oops_do();
1405 tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
1406 values.describe(frame_no, interpreter_frame_expression_stack_at(e),
1407 err_msg("stack %d", e), 1);
1408 }
1409 if (tos != nullptr) {
1410 values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
1411 }
1412
1413 if (reg_map != nullptr) {
1414 FrameValuesOopClosure oopsFn;
1415 oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
1416 oopsFn.describe(values, frame_no);
1417 }
1418 } else if (is_entry_frame()) {
1419 // For now just label the frame
1420 values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
1421 } else if (is_compiled_frame()) {
1422 // For now just label the frame
1423 nmethod* nm = cb()->as_nmethod();
1424 values.describe(-1, info_address,
1425 FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
1426 p2i(nm),
1427 nm->method()->name_and_sig_as_C_string(),
1428 (_deopt_state == is_deoptimized) ?
1429 " (deoptimized)" :
1430 ((_deopt_state == unknown) ? " (state unknown)" : "")),
1431 3);
1432
1433 { // mark arguments (see nmethod::print_nmethod_labels)
1434 Method* m = nm->method();
1435
1436 int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
1437 int sizeargs = m->size_of_parameters();
1438
1439 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
1440 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
1441 {
1442 int sig_index = 0;
1443 if (!m->is_static()) {
1444 sig_bt[sig_index++] = T_OBJECT; // 'this'
1445 }
1446 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
1447 BasicType t = ss.type();
1448 assert(type2size[t] == 1 || type2size[t] == 2, "size is 1 or 2");
1449 sig_bt[sig_index++] = t;
1450 if (type2size[t] == 2) {
1451 sig_bt[sig_index++] = T_VOID;
1452 }
1453 }
1454 assert(sig_index == sizeargs, "");
1455 }
1456 int stack_arg_slots = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
1457 assert(stack_arg_slots == nm->as_nmethod()->num_stack_arg_slots(false /* rounded */) || nm->is_osr_method(), "");
1458 int out_preserve = SharedRuntime::out_preserve_stack_slots();
1459 int sig_index = 0;
1460 int arg_index = (m->is_static() ? 0 : -1);
1461 for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
1462 bool at_this = (arg_index == -1);
1463 bool at_old_sp = false;
1464 BasicType t = (at_this ? T_OBJECT : ss.type());
1465 assert(t == sig_bt[sig_index], "sigs in sync");
1466 VMReg fst = regs[sig_index].first();
1467 if (fst->is_stack()) {
1468 assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
1469 int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
1470 intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
1471 if (at_this) {
1472 values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
1473 } else {
1474 values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
1475 }
1476 }
1477 sig_index += type2size[t];
1478 arg_index += 1;
1479 if (!at_this) {
1480 ss.next();
1481 }
1482 }
1483 }
1484
1485 if (reg_map != nullptr && is_java_frame()) {
1486 int scope_no = 0;
1487 for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
1488 Method* m = scope->method();
1489 int bci = scope->bci();
1490 values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
1491
1492 { // mark locals
1493 GrowableArray<ScopeValue*>* scvs = scope->locals();
1494 int scvs_length = scvs != nullptr ? scvs->length() : 0;
1495 for (int i = 0; i < scvs_length; i++) {
1496 intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
1497 if (stack_address != nullptr) {
1498 values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
1499 }
1500 }
1501 }
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/moduleEntry.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/scopeDesc.hpp"
28 #include "code/vmreg.inline.hpp"
29 #include "compiler/abstractCompiler.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "gc/shared/collectedHeap.inline.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "interpreter/oopMapCache.hpp"
35 #include "logging/log.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "memory/universe.hpp"
38 #include "oops/inlineKlass.hpp"
39 #include "oops/markWord.hpp"
40 #include "oops/method.inline.hpp"
41 #include "oops/methodData.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "oops/stackChunkOop.inline.hpp"
44 #include "oops/verifyOopClosure.hpp"
45 #include "prims/methodHandles.hpp"
46 #include "runtime/continuation.hpp"
47 #include "runtime/continuationEntry.inline.hpp"
48 #include "runtime/frame.inline.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/javaCalls.hpp"
51 #include "runtime/javaThread.hpp"
52 #include "runtime/monitorChunk.hpp"
53 #include "runtime/os.hpp"
54 #include "runtime/safefetch.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/signature.hpp"
57 #include "runtime/stackValue.hpp"
58 #include "runtime/stubCodeGenerator.hpp"
59 #include "runtime/stubRoutines.hpp"
60 #include "utilities/debug.hpp"
61 #include "utilities/decoder.hpp"
62 #include "utilities/formatBuffer.hpp"
63 #ifdef COMPILER1
64 #include "c1/c1_Runtime1.hpp"
65 #endif
66
// Construct a register map for walking the stack of `thread`.
// The three enum parameters control how much work the stack walk does:
//  - update_map:     presumably whether callee-saved register locations are
//                    recorded while unwinding — TODO confirm at use sites
//  - process_frames: whether safepoint/handshake frame processing is allowed
//  - walk_cont:      whether the walk descends into continuation stack chunks
RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
  _thread = thread;
  // Store the enum selections as booleans for cheap queries during the walk.
  _update_map = update_map == UpdateMap::include;
  _process_frames = process_frames == ProcessFrames::include;
  _walk_cont = walk_cont == WalkContinuation::include;
  clear();
  DEBUG_ONLY (_update_for_id = nullptr;)
  NOT_PRODUCT(_skip_missing = false;)
  NOT_PRODUCT(_async = false;)

  // Only allocate a stack-chunk handle when it can actually be needed:
  // continuation walking was requested and the thread currently has a
  // continuation mounted.
  if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
    _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
  }
  _chunk_index = -1;

#ifndef PRODUCT
  // Debug builds: start from a known-empty location table.
  for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
#endif /* PRODUCT */
}
348 return false;
349
350 return !nm->is_at_poll_return(pc());
351 }
352
// Schedule deoptimization of the nmethod activation represented by this
// frame: stash the original pc in the frame and patch the frame's pc to the
// nmethod's deopt handler. For C1-compiled methods with scalarized args the
// patch is deferred while still inside the (un)buffered entry-point prologue.
//
// `thread` is the thread owning this frame, or nullptr when the frame does
// not belong to the current thread; when non-null its anchor must be
// walkable so the debug-only check below can re-find this frame.
void frame::deoptimize(JavaThread* thread) {
  assert(thread == nullptr
         || (thread->frame_anchor()->has_last_Java_frame() &&
             thread->frame_anchor()->walkable()), "must be");
  // Schedule deoptimization of an nmethod activation with this frame.
  assert(_cb != nullptr && _cb->is_nmethod(), "must be");

  // If the call site is a MethodHandle call site use the MH deopt handler.
  nmethod* nm = _cb->as_nmethod();
  address deopt = nm->deopt_handler_entry();

  // NOTE(review): `inst` is never used below — confirm whether this lookup
  // is vestigial and can be removed.
  NativePostCallNop* inst = nativePostCallNop_at(pc());

  // Save the original pc before we patch in the new one
  nm->set_original_pc(this, pc());

#ifdef COMPILER1
  if (nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
      pc() < nm->verified_inline_entry_point()) {
    // The VEP and VIEP(RO) of C1-compiled methods call into the runtime to buffer scalarized value
    // type args. We can't deoptimize at that point because the buffers have not yet been initialized.
    // Also, if the method is synchronized, we first need to acquire the lock.
    // Don't patch the return pc to delay deoptimization until we enter the method body (the check
    // added in LIRGenerator::do_Base will detect the pending deoptimization by checking the original_pc).
#if defined ASSERT && !defined AARCH64 // Stub call site does not look like NativeCall on AArch64
    NativeCall* call = nativeCall_before(this->pc());
    address dest = call->destination();
    assert(dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_no_receiver_id) ||
           dest == Runtime1::entry_for(StubId::c1_buffer_inline_args_id), "unexpected safepoint in entry point");
#endif
    return;
  }
#endif

  patch_pc(thread, deopt);
  assert(is_deoptimized_frame(), "must be");

#ifdef ASSERT
  // Sanity check: walk from the thread's last frame up to this frame and
  // verify the patch is visible through a regular stack walk.
  if (thread != nullptr) {
    frame check = thread->last_frame();
    if (is_older(check.id())) {
      RegisterMap map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      while (id() != check.id()) {
        check = check.sender(&map);
      }
      assert(check.is_deoptimized_frame(), "missed deopt");
    }
  }
#endif // ASSERT
}
406
765 }
766
767
768 /*
769 The interpreter_frame_expression_stack_at method in the case of SPARC needs the
770 max_stack value of the method in order to compute the expression stack address.
771 It uses the Method* in order to get the max_stack value but during GC this
772 Method* value saved on the frame is changed by reverse_and_push and hence cannot
773 be used. So we save the max_stack value in the FrameClosure object and pass it
774 down to the interpreter_frame_expression_stack_at method
775 */
// Closure handed to the interpreter oop-map iterator: for every slot offset
// reported as holding an oop, forward the corresponding local-variable or
// (live) expression-stack slot of an interpreted frame to an OopClosure.
// The oop closure `_f` may be null, in which case offsets are walked but no
// oops are reported.
class InterpreterFrameClosure : public OffsetClosure {
 private:
  const frame* _fr;
  OopClosure* _f;
  int _max_locals;
  int _max_stack;

 public:
  // NOTE(review): `bvt_f` is accepted but never stored or used by this
  // class as shown here — presumably intended for buffered-value iteration
  // (see frame::buffered_values_interpreted_do); confirm it is not dropped
  // by mistake.
  InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
                          OopClosure* f, BufferedValueClosure* bvt_f) {
    _fr = fr;
    _max_locals = max_locals;
    _max_stack = max_stack;
    _f = f;
  }

  void offset_do(int offset) {
    oop* addr;
    if (offset < _max_locals) {
      // Local variable slot: always inside the frame.
      addr = (oop*) _fr->interpreter_frame_local_at(offset);
      assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
      if (_f != nullptr) {
        _f->do_oop(addr);
      }
    } else {
      // Expression stack slot.
      addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
      // In case of exceptions, the expression stack is invalid and the esp will be reset to express
      // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
      bool in_stack;
      if (frame::interpreter_frame_expression_stack_direction() > 0) {
        in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
      } else {
        in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
      }
      if (in_stack) {
        if (_f != nullptr) {
          _f->do_oop(addr);
        }
      }
    }
  }
};
818
819
820 class InterpretedArgumentOopFinder: public SignatureIterator {
821 private:
822 OopClosure* _f; // Closure to invoke
823 int _offset; // TOS-relative offset, decremented with each argument
824 bool _has_receiver; // true if the callee has a receiver
825 const frame* _fr;
826
827 friend class SignatureIterator; // so do_parameters_on can call do_type
828 void do_type(BasicType type) {
829 _offset -= parameter_type_word_count(type);
830 if (is_reference_type(type)) oop_offset_do();
831 }
832
833 void oop_offset_do() {
955 f->do_oop(interpreter_frame_mirror_addr());
956
957 int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
958
959 // Process a callee's arguments if we are at a call site
960 // (i.e., if we are at an invoke bytecode)
961 // This is used sometimes for calling into the VM, not for another
962 // interpreted or compiled frame.
963 if (!m->is_native() && map != nullptr && map->include_argument_oops()) {
964 Bytecode_invoke call = Bytecode_invoke_check(m, bci);
965 if (call.is_valid() && interpreter_frame_expression_stack_size() > 0) {
966 ResourceMark rm(thread); // is this right ???
967 Symbol* signature = call.signature();
968 bool has_receiver = call.has_receiver();
969 // We are at a call site & the expression stack is not empty
970 // so we might have callee arguments we need to process.
971 oops_interpreted_arguments_do(signature, has_receiver, f);
972 }
973 }
974
975 InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f, nullptr);
976
977 // process locals & expression stack
978 InterpreterOopMap mask;
979 if (query_oop_map_cache) {
980 m->mask_for(m, bci, &mask);
981 } else {
982 OopMapCache::compute_one_oop_map(m, bci, &mask);
983 }
984 mask.iterate_oop(&blk);
985 }
986
// Walk the oop-holding slots (locals and live expression stack) of this
// interpreted frame with a BufferedValueClosure, using the interpreter oop
// map for the frame's current bci to decide which slots to visit.
void frame::buffered_values_interpreted_do(BufferedValueClosure* f) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  Thread *thread = Thread::current();
  methodHandle m (thread, interpreter_frame_method());
  jint bci = interpreter_frame_bci();

  assert(m->is_method(), "checking frame value");
  assert(!m->is_native() && bci >= 0 && bci < m->code_size(),
         "invalid bci value");

  // Pass a null OopClosure so only the buffered-value path is exercised.
  // NOTE(review): the InterpreterFrameClosure constructor in this file
  // currently ignores its bvt_f parameter — confirm that `f` is actually
  // applied to any slot.
  InterpreterFrameClosure blk(this, m->max_locals(), m->max_stack(), nullptr, f);

  // process locals & expression stack
  InterpreterOopMap mask;
  m->mask_for(bci, &mask);
  mask.iterate_oop(&blk);
}
1004
// Explicit instantiations of frame::oops_interpreted_do for each register-map
// flavor used by callers.
template void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) const;
template void frame::oops_interpreted_do(OopClosure* f, const SmallRegisterMapNoArgs* map, bool query_oop_map_cache) const;
template void frame::oops_interpreted_do(OopClosure* f, const SmallRegisterMapWithArgs* map, bool query_oop_map_cache) const;
1008
1009 void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
1010 InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
1011 finder.oops_do();
1012 }
1013
1014 void frame::oops_nmethod_do(OopClosure* f, NMethodClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
1015 assert(_cb != nullptr, "sanity check");
1016 assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not");
1017 if (oop_map() != nullptr) {
1018 if (df != nullptr) {
1019 _oop_map->oops_do(this, reg_map, f, df);
1020 } else {
1021 _oop_map->oops_do(this, reg_map, f, derived_mode);
1022 }
1023
1024 // Preserve potential arguments for a callee. We handle this by dispatching
// Visits the outgoing compiled-convention argument oops of a frame stopped
// at a call site. The signature (plus optional receiver and appendix) is
// mapped to VMRegs via SharedRuntime::find_callee_arguments(); the location
// of every reference-typed argument is resolved against the frame and
// passed to the OopClosure.
class CompiledArgumentOopFinder: public SignatureIterator {
 protected:
  OopClosure* _f;
  int _offset;        // the current offset, incremented with each argument
  bool _has_receiver; // true if the callee has a receiver
  bool _has_appendix; // true if the call has an appendix
  frame _fr;
  RegisterMap* _reg_map;
  int _arg_size;      // total argument size in words (incl. receiver/appendix)
  VMRegPair* _regs;   // VMReg list of arguments

  friend class SignatureIterator; // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type)) handle_oop_offset();
    _offset += parameter_type_word_count(type);
  }

  virtual void handle_oop_offset() {
    // Extract low order register number from register array.
    // In LP64-land, the high-order bits are valid but unhelpful.
    assert(_offset < _arg_size, "out of bounds");
    VMReg reg = _regs[_offset].first();
    oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
#ifdef ASSERT
    if (loc == nullptr) {
      if (_reg_map->should_skip_missing()) {
        return;
      }
      tty->print_cr("Error walking frame oops:");
      _fr.print_on(tty);
      assert(loc != nullptr, "missing register map entry reg: %d %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
    }
#endif
    _f->do_oop(loc);
  }

 public:
  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
    : SignatureIterator(signature) {

    // initialize CompiledArgumentOopFinder
    _f = f;
    _offset = 0;
    _has_receiver = has_receiver;
    _has_appendix = has_appendix;
    _fr = fr;
    // Cast away constness: the register map is updated during location
    // resolution.
    _reg_map = (RegisterMap*)reg_map;
    // find_callee_arguments() also reports the total argument size in words.
    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &_arg_size);
  }

  void oops_do() {
    if (_has_receiver) {
      handle_oop_offset();  // receiver precedes the explicit arguments
      _offset++;
    }
    do_parameters_on(this);
    if (_has_appendix) {
      handle_oop_offset();  // appendix follows the explicit arguments
      _offset++;
    }
  }
};
1102
1103 void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix,
1104 const RegisterMap* reg_map, OopClosure* f) const {
1105 // ResourceMark rm;
1106 CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
1107 finder.oops_do();
1447 tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
1448 values.describe(frame_no, interpreter_frame_expression_stack_at(e),
1449 err_msg("stack %d", e), 1);
1450 }
1451 if (tos != nullptr) {
1452 values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
1453 }
1454
1455 if (reg_map != nullptr) {
1456 FrameValuesOopClosure oopsFn;
1457 oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
1458 oopsFn.describe(values, frame_no);
1459 }
1460 } else if (is_entry_frame()) {
1461 // For now just label the frame
1462 values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
1463 } else if (is_compiled_frame()) {
1464 // For now just label the frame
1465 nmethod* nm = cb()->as_nmethod();
1466 values.describe(-1, info_address,
1467 FormatBuffer<1024>("#%d nmethod (%s %d) " INTPTR_FORMAT " for method J %s%s", frame_no,
1468 nm->is_compiled_by_c1() ? "c1" : "c2", nm->frame_size(), p2i(nm),
1469 nm->method()->name_and_sig_as_C_string(),
1470 (_deopt_state == is_deoptimized) ?
1471 " (deoptimized)" :
1472 ((_deopt_state == unknown) ? " (state unknown)" : "")),
1473 3);
1474
1475 { // mark arguments (see nmethod::print_nmethod_labels)
1476 Method* m = nm->method();
1477
1478 CompiledEntrySignature ces(m);
1479 ces.compute_calling_conventions(false);
1480 const GrowableArray<SigEntry>* sig_cc = nm->is_compiled_by_c2() ? ces.sig_cc() : ces.sig();
1481 const VMRegPair* regs = nm->is_compiled_by_c2() ? ces.regs_cc() : ces.regs();
1482
1483 int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
1484 int out_preserve = SharedRuntime::out_preserve_stack_slots();
1485 int sig_index = 0;
1486 int arg_index = (m->is_static() ? 0 : -1);
1487 for (ExtendedSignature sig = ExtendedSignature(sig_cc, SigEntryFilter()); !sig.at_end(); ++sig) {
1488 bool at_this = (arg_index == -1);
1489 BasicType t = (*sig)._bt;
1490 VMReg fst = regs[sig_index].first();
1491 if (fst->is_stack()) {
1492 assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
1493 int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
1494 intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
1495 if (at_this) {
1496 values.describe(frame_no, stack_address, err_msg("this for #%d", frame_no), 1);
1497 } else {
1498 values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
1499 }
1500 }
1501 sig_index += type2size[t];
1502 arg_index += 1;
1503 }
1504 }
1505
1506 if (reg_map != nullptr && is_java_frame()) {
1507 int scope_no = 0;
1508 for (ScopeDesc* scope = nm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
1509 Method* m = scope->method();
1510 int bci = scope->bci();
1511 values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
1512
1513 { // mark locals
1514 GrowableArray<ScopeValue*>* scvs = scope->locals();
1515 int scvs_length = scvs != nullptr ? scvs->length() : 0;
1516 for (int i = 0; i < scvs_length; i++) {
1517 intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
1518 if (stack_address != nullptr) {
1519 values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
1520 }
1521 }
1522 }
|