src/hotspot/share/code/vtableStubs.cpp
check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
s->set_exception_points(npe_addr, ame_addr);
}
- address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
+ address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
assert(vtable_index >= 0, "must be non-negative");
VtableStub* s;
{
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
- s = lookup(is_vtable_stub, vtable_index);
+ s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
if (s == nullptr) {
if (is_vtable_stub) {
- s = create_vtable_stub(vtable_index);
+ s = create_vtable_stub(vtable_index, caller_is_c1);
} else {
- s = create_itable_stub(vtable_index);
+ s = create_itable_stub(vtable_index, caller_is_c1);
}
// Creation of vtable or itable can fail if there is not enough free space in the code cache.
if (s == nullptr) {
return nullptr;
}
- enter(is_vtable_stub, vtable_index, s);
+ enter(is_vtable_stub, vtable_index, caller_is_c1, s);
if (PrintAdapterHandlers) {
- tty->print_cr("Decoding VtableStub %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (" SIZE_FORMAT " bytes)",
+ tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (" SIZE_FORMAT " bytes)",
+ caller_is_c1 ? "c1" : "full opt",
is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()),
p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
Disassembler::decode(s->code_begin(), s->code_end());
}
// Notify JVMTI about this stub. The event will be recorded by the enclosing
// JvmtiDynamicCodeEventCollector and posted when this thread has released
// all locks. Only post this event if a new state is not required. Creating a new state would
// cause a safepoint and the caller of this code has a NoSafepointVerifier.
if (JvmtiExport::should_post_dynamic_code_generated()) {
- JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
+ JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub", // FIXME: need to pass caller_is_c1??
s->code_begin(), s->code_end());
}
}
}
return s->entry_point();
}
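// For reference, a minimal sketch of the thin header wrappers most callers
// go through (modeled on the declarations in vtableStubs.hpp; the added
// caller_is_c1 parameter follows this change, and the exact signatures here
// are assumptions):
//
//   static address find_vtable_stub(int vtable_index, bool caller_is_c1) {
//     return find_stub(true,  vtable_index, caller_is_c1);
//   }
//   static address find_itable_stub(int itable_index, bool caller_is_c1) {
//     return find_stub(false, itable_index, caller_is_c1);
//   }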
- inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
+ inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
// Assumption: receiver_location < 4 in most cases.
int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
+ if (caller_is_c1) {
+ hash = 7 - hash;
+ }
return (is_vtable_stub ? ~hash : hash) & mask;
}
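// A worked example of the c1 perturbation (a sketch assuming mask == 0xff,
// receiver_location()->value() == 0, and vtable_index == 5):
//
//   int hash = ((5 << 2) ^ 0) + 5;    // 25
//   // c1 flavor:        7 - 25           = -18
//   // vtable, full opt: ~25      & 0xff  = 230
//   // vtable, c1:       ~(-18)   & 0xff  = 17
//
// The two flavors of the same slot land in different buckets, and even on a
// chain collision matches() now also compares caller_is_c1.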
- inline uint VtableStubs::unsafe_hash(address entry_point) {
+ inline uint VtableStubs::unsafe_hash(address entry_point, bool caller_is_c1) {
// The entrypoint may or may not be a VtableStub. Generate a hash as if it was.
address vtable_stub_addr = entry_point - VtableStub::entry_offset();
assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
short vtable_index;
static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
- return hash(is_vtable_stub, vtable_index);
+ return hash(is_vtable_stub, vtable_index, caller_is_c1);
}
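// Note on the raw reads above: since pc "may or may not be" a stub entry
// point, _type and _index are read byte-wise via memcpy rather than through
// a typed access such as
//
//   short idx = ((VtableStub*)vtable_stub_addr)->_index;   // sketch only
//
// which would not be well-defined when no VtableStub actually lives at that
// address.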
-
- VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
+ VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
assert_lock_strong(VtableStubs_lock);
- unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
+ unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
VtableStub* s = Atomic::load(&_table[hash]);
- while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
+ while( s && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
return s;
}
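// For context, a sketch of what matches() presumably checks now that the
// key is a triple (the accessor lives in vtableStubs.hpp; field names other
// than _type and _index are assumptions):
//
//   bool matches(bool is_vtable_stub, int vtable_index, bool caller_is_c1) const {
//     return _index == vtable_index
//         && ((_type == Type::vtable_stub) == is_vtable_stub)
//         && _caller_is_c1 == caller_is_c1;
//   }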
- void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
+ void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
assert_lock_strong(VtableStubs_lock);
- assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
- unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
+ assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
+ unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
// Insert s at the beginning of the corresponding list.
s->set_next(Atomic::load(&_table[h]));
// Make sure that concurrent readers not taking the mutex observe the writing of "next".
Atomic::release_store(&_table[h], s);
}
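// The release_store publishes a fully initialized stub: any reader that
// observes the new head via Atomic::load also observes the _next field
// written above, so lock-free traversal of a bucket chain is safe. A sketch
// of that reader-side loop (illustrative, mirroring entry_point() below):
//
//   for (VtableStub* p = Atomic::load(&_table[h]); p != nullptr; p = p->next()) {
//     if (p->entry_point() == pc) return p;
//   }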
VtableStub* VtableStubs::entry_point(address pc) {
// The pc may or may not be the entry point for a VtableStub. Use unsafe_hash
// to generate the hash that would have been used if it was. The lookup in the
// _table will only succeed if there is a VtableStub with an entry point at
// the pc.
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
- uint hash = VtableStubs::unsafe_hash(pc);
+ VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
+ uint hash = VtableStubs::unsafe_hash(pc, stub->caller_is_c1());
VtableStub* s;
for (s = Atomic::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
}
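// A hedged usage sketch: given a branch target harvested from compiled code,
// classify the dispatch kind (is_vtable_stub() is the existing accessor on
// VtableStub; the helper name here is made up):
//
//   static bool targets_itable_stub(address target) {
//     VtableStub* s = VtableStubs::entry_point(target);
//     return s != nullptr && !s->is_vtable_stub();
//   }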