< prev index next > src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
Print this page
#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif
// Webrev diff hunk: lines prefixed '-' are the old version, '+' the new one;
// unchanged code between hunks is elided by the diff tool (e.g. the receiver
// klass load and vtable-slot lookup that define npe_addr are not shown here).
//
// Generates the out-of-line dispatch stub for a virtual call through the
// given vtable slot.  The change adds a caller_is_c1 flag recording which
// compiled calling convention the caller uses, so the stub can jump through
// the matching Method* entry point (see entry_offset below).
- VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
+ VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(true);
// Placement-new allocates the stub directly in the code cache; caller_is_c1
// is now part of the stub's identity so C1 and C2 callers get distinct stubs.
- VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
+ VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// overflow in subsequently generated stubs.
address start_pc;
int slop_bytes = 0;
int slop_delta = 0;
// Select which compiled-code entry of the target Method* to jump through,
// based on the caller's convention.
// NOTE(review): presumably C1 passes inline-type (value class) arguments by
// reference (from_compiled_inline entry) while C2 scalarizes everything but
// the receiver (the "_ro" entry) — confirm against the Valhalla calling
// convention before relying on this comment.
+ ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
+
ResourceMark rm;
// Assemble directly into the stub's code-cache slot.
CodeBuffer cb(s->entry_point(), stub_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
#if (!defined(PRODUCT) && defined(COMPILER2))
#ifndef PRODUCT
// Debug-only sanity check: a resolved Method* must have a non-null
// compiled entry at the selected offset before we jump through it.
if (DebugVtables) {
Label L;
__ cbz(rmethod, L);
- __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
+ __ ldr(rscratch1, Address(rmethod, entry_offset));
__ cbnz(rscratch1, L);
__ stop("Vtable entry is null");
__ bind(L);
}
#endif // PRODUCT
// r0: receiver klass
// rmethod: Method*
// r2: receiver
// ame_addr marks the load that faults for an abstract method
// (AbstractMethodError), recorded via bookkeeping() below.
address ame_addr = __ pc();
- __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
+ __ ldr(rscratch1, Address(rmethod, entry_offset));
// Tail-jump into the target method's compiled entry.
__ br(rscratch1);
masm->flush();
// npe_addr is defined in context elided from this hunk.
bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
return s;
}
// Webrev diff hunk: '-' lines removed, '+' lines added; the itable-scan code
// (which defines L_no_such_interface and the npe_addr used by the elided
// bookkeeping call) is unchanged context not shown in this view.
//
// Generates the out-of-line dispatch stub for an interface call through the
// given itable index.  Mirrors create_vtable_stub above: caller_is_c1 selects
// which compiled entry of the resolved Method* the stub jumps through.
- VtableStub* VtableStubs::create_itable_stub(int itable_index) {
+ VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) {
// Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
const int stub_code_length = code_size_limit(false);
// Placement-new in the code cache; the flag distinguishes C1- and C2-caller stubs.
- VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
+ VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
// Can be null if there is no free space in the code cache.
if (s == nullptr) {
return nullptr;
}
// overflow in subsequently generated stubs.
address start_pc;
int slop_bytes = 0;
int slop_delta = 0;
// Entry-point selection by caller convention; see the NOTE(review) on the
// identical line in create_vtable_stub — same assumption applies here.
+ ByteSize entry_offset = caller_is_c1 ? Method::from_compiled_inline_offset() : Method::from_compiled_inline_ro_offset();
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), stub_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
#if (!defined(PRODUCT) && defined(COMPILER2))
#ifdef ASSERT
// Debug-only sanity check that the resolved Method* has a non-null
// compiled entry at the selected offset.
if (DebugVtables) {
Label L2;
__ cbz(rmethod, L2);
- __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
+ __ ldr(rscratch1, Address(rmethod, entry_offset));
__ cbnz(rscratch1, L2);
__ stop("compiler entrypoint is null");
__ bind(L2);
}
#endif // ASSERT
// rmethod: Method*
// j_rarg0: receiver
// ame_addr marks the faulting load used to report AbstractMethodError.
address ame_addr = __ pc();
- __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
+ __ ldr(rscratch1, Address(rmethod, entry_offset));
// Tail-jump into the target method's compiled entry.
__ br(rscratch1);
// L_no_such_interface is bound here; it is defined in elided context above.
__ bind(L_no_such_interface);
// Handle IncompatibleClassChangeError in itable stubs.
// More detailed error message.
< prev index next >