src/hotspot/share/runtime/frame.cpp
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
+ #include "oops/inlineKlass.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/debug.hpp"
#include "utilities/decoder.hpp"
#include "utilities/formatBuffer.hpp"
+ #ifdef COMPILER1
+ #include "c1/c1_Runtime1.hpp"
+ #endif
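+ // Runtime1::entry_for() is used below to recognize calls into the C1
+ // buffer_inline_args stubs.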
RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames process_frames, WalkContinuation walk_cont) {
_thread = thread;
_update_map = update_map == UpdateMap::include;
_process_frames = process_frames == ProcessFrames::include;
NativePostCallNop* inst = nativePostCallNop_at(pc());
// Save the original pc before we patch in the new one
nm->set_original_pc(this, pc());
+
+ #ifdef COMPILER1
+ if (nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
+ pc() < nm->verified_inline_entry_point()) {
+ // The VEP and VIEP(RO) of C1-compiled methods call into the runtime to buffer scalarized
+ // inline type args. We can't deoptimize at that point because the buffers have not yet been initialized.
+ // Also, if the method is synchronized, we first need to acquire the lock.
+ // Don't patch the return pc; that delays the deoptimization until we enter the method body (the
+ // check added in LIRGenerator::do_Base will detect the pending deoptimization by checking the original_pc).
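+ // set_original_pc() already ran above, so that check will see a non-zero
+ // original_pc and trigger the deoptimization once the entry code completes.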
+ #if defined ASSERT && !defined AARCH64 // Stub call site does not look like NativeCall on AArch64
+ NativeCall* call = nativeCall_before(this->pc());
+ address dest = call->destination();
+ assert(dest == Runtime1::entry_for(C1StubId::buffer_inline_args_no_receiver_id) ||
+ dest == Runtime1::entry_for(C1StubId::buffer_inline_args_id), "unexpected safepoint in entry point");
+ #endif
+ return;
+ }
+ #endif
+
patch_pc(thread, deopt);
assert(is_deoptimized_frame(), "must be");
#ifdef ASSERT
if (thread != nullptr) {
int _max_locals;
int _max_stack;
+ BufferedValueClosure* _bvt_f;
public:
InterpreterFrameClosure(const frame* fr, int max_locals, int max_stack,
- OopClosure* f) {
+ OopClosure* f, BufferedValueClosure* bvt_f) {
_fr = fr;
_max_locals = max_locals;
_max_stack = max_stack;
_f = f;
+ _bvt_f = bvt_f;
}
void offset_do(int offset) {
oop* addr;
if (offset < _max_locals) {
addr = (oop*) _fr->interpreter_frame_local_at(offset);
assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
- _f->do_oop(addr);
+ if (_f != nullptr) {
+ _f->do_oop(addr);
+ }
+ if (_bvt_f != nullptr) {
+ // Also report the local slot to the buffered-value closure (assumed callback; not shown in this diff).
+ _bvt_f->do_buffered_value(addr);
+ }
} else {
addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
// In case of exceptions, the expression stack is invalid and the esp will be reset to express
// this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
bool in_stack;
if (frame::interpreter_frame_expression_stack_direction() > 0) {
in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
} else {
in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
}
if (in_stack) {
- _f->do_oop(addr);
+ if (_f != nullptr) {
+ _f->do_oop(addr);
+ }
}
}
}
};
oops_interpreted_arguments_do(signature, has_receiver, f);
}
}
}
- InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
+ InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f, nullptr);
// process locals & expression stack
InterpreterOopMap mask;
if (query_oop_map_cache) {
m->mask_for(m, bci, &mask);
} else {
OopMapCache::compute_one_oop_map(m, bci, &mask);
}
mask.iterate_oop(&blk);
}
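+ // Visit the oop-map slots of this interpreted frame with only a
+ // BufferedValueClosure (the OopClosure is null), so the closure sees just
+ // the buffered inline-type values.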
+ void frame::buffered_values_interpreted_do(BufferedValueClosure* f) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ Thread *thread = Thread::current();
+ methodHandle m (thread, interpreter_frame_method());
+ jint bci = interpreter_frame_bci();
+
+ assert(m->is_method(), "checking frame value");
+ assert(!m->is_native() && bci >= 0 && bci < m->code_size(),
+ "invalid bci value");
+
+ InterpreterFrameClosure blk(this, m->max_locals(), m->max_stack(), nullptr, f);
+
+ // process locals & expression stack
+ InterpreterOopMap mask;
+ m->mask_for(bci, &mask);
+ mask.iterate_oop(&blk);
+ }
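As a usage illustration only, a minimal counting closure might look like the
sketch below. This assumes BufferedValueClosure declares a single virtual
do_buffered_value(oop*) callback (its declaration is not part of this diff),
and CountBufferedValues is a hypothetical name.

  class CountBufferedValues : public BufferedValueClosure {
   public:
    int _count = 0;
    // Called once per buffered inline-type value found in the frame.
    virtual void do_buffered_value(oop*) { _count++; }
  };
  // CountBufferedValues cl; fr.buffered_values_interpreted_do(&cl);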
void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) const {
InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
finder.oops_do();
}
}
virtual void handle_oop_offset() {
// Extract low order register number from register array.
// In LP64-land, the high-order bits are valid but unhelpful.
+ assert(_offset < _arg_size, "out of bounds");
VMReg reg = _regs[_offset].first();
oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
#ifdef ASSERT
if (loc == nullptr) {
if (_reg_map->should_skip_missing()) {
_offset = 0;
_has_receiver = has_receiver;
_has_appendix = has_appendix;
_fr = fr;
_reg_map = (RegisterMap*)reg_map;
- _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);
-
- int arg_size;
- _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
- assert(arg_size == _arg_size, "wrong arg size");
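+ // find_callee_arguments computes the argument count into _arg_size directly.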
+ _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &_arg_size);
}
void oops_do() {
if (_has_receiver) {
handle_oop_offset();
values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
} else if (is_compiled_frame()) {
// For now just label the frame
nmethod* nm = cb()->as_nmethod();
values.describe(-1, info_address,
- FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
- p2i(nm),
+ FormatBuffer<1024>("#%d nmethod (%s %d) " INTPTR_FORMAT " for method J %s%s", frame_no,
+ nm->is_compiled_by_c1() ? "c1" : "c2", nm->frame_size(), p2i(nm),
nm->method()->name_and_sig_as_C_string(),
(_deopt_state == is_deoptimized) ?
" (deoptimized)" :
((_deopt_state == unknown) ? " (state unknown)" : "")),
3);
{ // mark arguments (see nmethod::print_nmethod_labels)
Method* m = nm->method();
+ CompiledEntrySignature ces(m);
+ ces.compute_calling_conventions(false);
+ const GrowableArray<SigEntry>* sig_cc = nm->is_compiled_by_c2() ? ces.sig_cc() : ces.sig();
+ const VMRegPair* regs = nm->is_compiled_by_c2() ? ces.regs_cc() : ces.regs();
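+ // C2-compiled code is entered through the scalarized calling convention, so label
+ // its arguments with sig_cc/regs_cc; C1 code keeps the unscalarized signature.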
+
int stack_slot_offset = nm->frame_size() * wordSize; // offset, in bytes, to caller sp
- int sizeargs = m->size_of_parameters();
-
- BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
- VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
- {
- int sig_index = 0;
- if (!m->is_static()) {
- sig_bt[sig_index++] = T_OBJECT; // 'this'
- }
- for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
- BasicType t = ss.type();
- assert(type2size[t] == 1 || type2size[t] == 2, "size is 1 or 2");
- sig_bt[sig_index++] = t;
- if (type2size[t] == 2) {
- sig_bt[sig_index++] = T_VOID;
- }
- }
- assert(sig_index == sizeargs, "");
- }
- int stack_arg_slots = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
- assert(stack_arg_slots == nm->as_nmethod()->num_stack_arg_slots(false /* rounded */) || nm->is_osr_method(), "");
int out_preserve = SharedRuntime::out_preserve_stack_slots();
int sig_index = 0;
int arg_index = (m->is_static() ? 0 : -1);
- for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
+ for (ExtendedSignature sig = ExtendedSignature(sig_cc, SigEntryFilter()); !sig.at_end(); ++sig) {
bool at_this = (arg_index == -1);
- bool at_old_sp = false;
- BasicType t = (at_this ? T_OBJECT : ss.type());
- assert(t == sig_bt[sig_index], "sigs in sync");
+ BasicType t = (*sig)._bt;
VMReg fst = regs[sig_index].first();
if (fst->is_stack()) {
assert(((int)fst->reg2stack()) >= 0, "reg2stack: %d", fst->reg2stack());
int offset = (fst->reg2stack() + out_preserve) * VMRegImpl::stack_slot_size + stack_slot_offset;
intptr_t* stack_address = (intptr_t*)((address)unextended_sp() + offset);
values.describe(frame_no, stack_address, err_msg("param %d %s for #%d", arg_index, type2name(t), frame_no), 1);
}
}
sig_index += type2size[t];
arg_index += 1;
- if (!at_this) {
- ss.next();
- }
}
}
if (reg_map != nullptr && is_java_frame()) {
int scope_no = 0;