src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
class NativeNMethodBarrier {
address _instruction_address;
int* _guard_addr;
nmethod* _nm;
+ public:
address instruction_address() const { return _instruction_address; }
int *guard_addr() {
return _guard_addr;
}
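// Note: the guard word is the last 32-bit slot of the entry barrier sequence;
// entry_barrier_offset(nm) is a negative offset, so negating it gives the size
// of the barrier code and subtracting 4 addresses that final slot.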
int local_guard_offset(nmethod* nm) {
// It's the last instruction
return (-entry_barrier_offset(nm)) - 4;
}
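// The constructor gains an optional alt_entry_instruction_address so a caller
// can name the barrier of an additional verified entry point directly (used for
// nmethods with scalarized arguments, see set_value() below) instead of deriving
// it from frame_complete_offset().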
- public:
- NativeNMethodBarrier(nmethod* nm): _nm(nm) {
+ NativeNMethodBarrier(nmethod* nm, address alt_entry_instruction_address = 0): _nm(nm) {
#if INCLUDE_JVMCI
if (nm->is_compiled_by_jvmci()) {
+ assert(alt_entry_instruction_address == 0, "invariant");
address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
RelocIterator iter(nm, pc, pc + 4);
guarantee(iter.next(), "missing relocs");
guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");
_guard_addr = (int*) iter.section_word_reloc()->target();
_instruction_address = pc;
} else
#endif
{
- _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
+ _instruction_address = (alt_entry_instruction_address != 0) ? alt_entry_instruction_address :
+ nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
if (nm->is_compiled_by_c2()) {
// With c2 compiled code, the guard is out-of-line in a stub
// We find it using the RelocIterator.
RelocIterator iter(nm);
while (iter.next()) {

new_frame->fp = frame.fp();
new_frame->lr = frame.pc();
new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}
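// The helper below patches the guard of every entry barrier in the nmethod. It
// relies on the additional (scalarized-argument) entry points carrying their
// barrier at the same offset from their entry point as the primary barrier sits
// from the method body, so a single barrier_offset can be reused for all of them.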
+ static void set_value(nmethod* nm, jint val) {
+ NativeNMethodBarrier cmp1 = NativeNMethodBarrier(nm);
+ cmp1.set_value(val);
+
+ if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
+ // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier
+ assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
+ address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
+ address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();
+
+ int barrier_offset = cmp1.instruction_address() - method_body;
+ NativeNMethodBarrier cmp2 = NativeNMethodBarrier(nm, entry_point2 + barrier_offset);
+ assert(cmp1.instruction_address() != cmp2.instruction_address(), "sanity");
+ debug_only(cmp2.verify());
+ cmp2.set_value(val);
+
+ if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
+ NativeNMethodBarrier cmp3 = NativeNMethodBarrier(nm, nm->verified_inline_ro_entry_point() + barrier_offset);
+ assert(cmp1.instruction_address() != cmp3.instruction_address() && cmp2.instruction_address() != cmp3.instruction_address(), "sanity");
+ debug_only(cmp3.verify());
+ cmp3.set_value(val);
+ }
+ }
+ }
+
void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
if (!supports_entry_barrier(nm)) {
return;
}

if (value == disarmed_guard_value()) {
// further fencing by mutators, before they are allowed to enter.
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->increment_patching_epoch();
}
- NativeNMethodBarrier barrier(nm);
- barrier.set_value(value);
+ set_value(nm, value);
}
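// Since set_value() above patches all barrier copies to the same value, reading
// any single one of them (as guard_value() presumably does below) reflects the
// current arming state of the nmethod.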
int BarrierSetNMethod::guard_value(nmethod* nm) {
if (!supports_entry_barrier(nm)) {
return disarmed_guard_value();