< prev index next > src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
Print this page
}
void ShenandoahBarrierStubC2::enter_if_gc_state(MacroAssembler& masm, const char test_state) {
// Fastpath check: load the per-thread "fast" gc-state byte and branch to
// the barrier slowpath when the bit corresponding to test_state is set.
Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
! int bit_to_check = ShenandoahThreadLocalData::gc_state_to_fast_bit(test_state);
Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_offset()));
__ ldrb(rscratch1, gc_state_fast);
! if (_use_trampoline) {
! // tbnz has a short reach (under 32KB, see aarch64_test_and_branch_reachable),
! // so branch through a nearby trampoline that forwards to the stub entry.
! __ tbnz(rscratch1, bit_to_check, _trampoline_entry);
} else {
! // Stub entry is out of test-and-branch range: invert the test and fall
! // through to an unconditional branch, which has a much larger reach.
! __ tbz(rscratch1, bit_to_check, *continuation());
! __ b(*entry());
}
- // This is where the slowpath stub will return to or the code above will
- // jump to if the checks are false
- __ bind(*continuation());
}
bool needs_acquiring_load_exclusive(const MachNode *n) {
assert(n->is_CAS(true), "expecting a compare and swap");
if (n->is_CAS(false)) {
}
void ShenandoahBarrierStubC2::enter_if_gc_state(MacroAssembler& masm, const char test_state) {
// Patchable fastpath: unconditionally jump to the barrier stub; the runtime
// can later NOP this branch out and restore it (see patch_branch_to_nop and
// patch_nop_to_branch). Note that test_state is not consulted here -- the
// patching machinery decides when the barrier is disabled.
Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
! // Emit the unconditional branch in the first version of the method.
+ // Let the rest of runtime figure out how to manage it.
+ __ relocate(barrier_Relocation::spec());
+ __ b(*entry());
+
+ #ifdef ASSERT
+ // Debug-only sanity check that runs once the branch above is NOP-ed:
+ // a non-zero fast gc-state byte means the barrier was wrongly disabled.
Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_offset()));
__ ldrb(rscratch1, gc_state_fast);
! __ cbz(rscratch1, *continuation());
! __ hlt(0); // Correctness bug: barrier is NOP-ed, but heap is NOT IDLE
+ #endif
+ __ bind(*continuation());
}
+
+ address ShenandoahBarrierSetAssembler::parse_stub_address(address pc) {
+ NativeInstruction* ni = nativeInstruction_at(pc);
+ assert(ni->is_jump(), "Initial code version: GC barrier fastpath must be a jump");
+ NativeJump* jmp = nativeJump_at(pc);
+ return jmp->jump_destination();
+ }
+
+ void insert_nop(address pc) {
+ *(pc + 0) = 0x1F;
+ *(pc + 1) = 0x20;
+ *(pc + 2) = 0x03;
+ *(pc + 3) = 0xD5;
+ ICache::invalidate_range(pc, 4);
+ }
+
+ bool is_nop(address pc) {
+ if (*(pc + 0) != 0x1F) return false;
+ if (*(pc + 1) != 0x20) return false;
+ if (*(pc + 2) != 0x03) return false;
+ if (*(pc + 3) != 0xD5) return false;
+ return true;
+ }
+
+ void check_at(bool cond, address pc, const char* msg) {
+ assert(cond, "%s: at PC " PTR_FORMAT ": %02x%02x%02x%02x%02x",
+ msg, p2i(pc), *(pc + 0), *(pc + 1), *(pc + 2), *(pc + 3), *(pc + 4));
+ }
+
+ void ShenandoahBarrierSetAssembler::patch_branch_to_nop(address pc) {
+ NativeInstruction* ni = nativeInstruction_at(pc);
+ if (ni->is_jump()) {
+ insert_nop(pc);
} else {
! check_at(is_nop(pc), pc, "Should already be nop");
! }
+ }
+
+ void ShenandoahBarrierSetAssembler::patch_nop_to_branch(address pc, address stub_addr) {
+ NativeInstruction* ni = nativeInstruction_at(pc);
+ if (is_nop(pc)) {
+ NativeJump::insert(pc, stub_addr);
+ } else {
+ check_at(ni->is_jump(), pc, "Should already be jump");
+ check_at(nativeJump_at(pc)->jump_destination() == stub_addr, pc, "Jump should be to the same address");
}
}
bool needs_acquiring_load_exclusive(const MachNode *n) {
assert(n->is_CAS(true), "expecting a compare and swap");
if (n->is_CAS(false)) {
}
}
#undef __
#define __ masm.
- // Only handles forward branch jumps, target_offset >= branch_offset.
- // Returns true when an AArch64 test-and-branch (tbz/tbnz) placed at
- // branch_offset can reach target_offset: the forward distance must be
- // under 32KB.
- static bool aarch64_test_and_branch_reachable(int branch_offset, int target_offset) {
- assert(branch_offset >= 0, "branch to stub offsets must be positive");
- assert(target_offset >= 0, "offset in stubs section must be positive");
- assert(target_offset >= branch_offset, "forward branches only, branch_offset -> target_offset");
- return (target_offset - branch_offset) < (int)(32*K);
- }
-
- // Decides whether this stub's fastpath test-and-branch can target a
- // trampoline placed right after the code section; if so, reserves a
- // trampoline slot by bumping the shared trampoline count.
- // NOTE(review): the offset parameter is not used in this body.
- void ShenandoahBarrierStubC2::post_init(int offset) {
- // If we are in scratch emit mode we assume worst case,
- // and use no trampolines.
- PhaseOutput* const output = Compile::current()->output();
- if (output->in_scratch_emit_size()) {
- return;
- }
-
- // Assume that each trampoline is one single instruction and that the stubs
- // will follow immediately after the _code section. We emit trampolines until
- // we can no longer do it.
- const int code_size = output->buffer_sizing_data()->_code;
- const int trampoline_offset = trampoline_stubs_count() * NativeInstruction::instruction_size;
- _use_trampoline = aarch64_test_and_branch_reachable(_fastpath_branch_offset, code_size + trampoline_offset);
- if (_use_trampoline) {
- inc_trampoline_stubs_count();
- }
- }
-
- // Two-pass stub emission: the first pass emits only the (optional)
- // trampoline and re-registers the stub for a second pass; the second pass
- // (with _do_emit_actual set) emits the real stub body via emit_code_actual.
- void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
- if (_do_emit_actual) {
- emit_code_actual(masm);
- return;
- }
-
- if (_use_trampoline) {
- // Emit the trampoline and jump to real entry.
- const int target_offset = __ offset();
- assert(aarch64_test_and_branch_reachable(_fastpath_branch_offset, target_offset), "trampoline should be reachable");
- __ bind(_trampoline_entry);
- __ b(*entry());
- }
-
- // Do it again, this time with actual emits.
- _do_emit_actual = true;
- ShenandoahBarrierStubC2::register_stub(this);
- }
-
void ShenandoahBarrierStubC2::load_and_decode(MacroAssembler& masm, Label& target_if_null) {
// NOTE(review): this is a diff-collapsed view; unchanged lines between the
// statements below may be elided -- verify against the full source file
// before relying on this body.
if (_do_load) {
// Fastpath sets _obj==noreg if it tells the slowpath to do the load
_obj = rscratch2;
__ encode_heap_oop_not_null(_obj);
}
}
}
! void ShenandoahBarrierStubC2::emit_code_actual(MacroAssembler& masm) {
assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");
__ bind(*entry());
load_and_decode(masm, *continuation());
__ encode_heap_oop_not_null(_obj);
}
}
}
! void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");
__ bind(*entry());
load_and_decode(masm, *continuation());
// The node doesn't even need keepalive barrier, just don't check anything else
if (!_needs_keep_alive_barrier) {
return ;
}
! // If another barrier is enabled as well, do a runtime check for a specific barrier.
! if (_needs_load_ref_barrier) {
! Address gcs_addr(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
! __ ldrb(tmp1, gcs_addr);
- __ tbz(tmp1, ShenandoahHeap::MARKING_BITPOS, L_done);
- }
// If buffer is full, call into runtime.
__ ldr(tmp1, index);
__ cbz(tmp1, L_runtime);
// The node doesn't even need keepalive barrier, just don't check anything else
if (!_needs_keep_alive_barrier) {
return ;
}
! // Hotpatched GC checks only care about idle/non-idle state, so we need to check specific state.
! Address gcs_addr(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
! __ ldrb(tmp1, gcs_addr);
! __ tbz(tmp1, ShenandoahHeap::MARKING_BITPOS, L_done);
// If buffer is full, call into runtime.
__ ldr(tmp1, index);
__ cbz(tmp1, L_runtime);
// The node doesn't even need LRB barrier, just don't check anything else
if (!_needs_load_ref_barrier) {
return ;
}
! // If another barrier is enabled as well, do a runtime check for a specific barrier.
! if (_needs_keep_alive_barrier) {
! char state_to_check = ShenandoahHeap::HAS_FORWARDED | (_needs_load_ref_weak_barrier ? ShenandoahHeap::WEAK_ROOTS : 0);
! int bit_to_check = ShenandoahThreadLocalData::gc_state_to_fast_bit(state_to_check);
! Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_offset()));
! __ ldrb(tmp, gc_state_fast);
- __ tbz(tmp, bit_to_check, L_done);
- }
// If weak references are being processed, weak/phantom loads need to go slow,
// regadless of their cset status.
if (_needs_load_ref_weak_barrier) {
Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
// The node doesn't even need LRB barrier, just don't check anything else
if (!_needs_load_ref_barrier) {
return ;
}
! // Hotpatched GC checks only care about idle/non-idle state, so we need to check again.
! char state_to_check = ShenandoahHeap::HAS_FORWARDED | (_needs_load_ref_weak_barrier ? ShenandoahHeap::WEAK_ROOTS : 0);
! int bit_to_check = ShenandoahThreadLocalData::gc_state_to_fast_bit(state_to_check);
! Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_offset()));
! __ ldrb(tmp, gc_state_fast);
! __ tbz(tmp, bit_to_check, L_done);
// If weak references are being processed, weak/phantom loads need to go slow,
// regadless of their cset status.
if (_needs_load_ref_weak_barrier) {
Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
< prev index next >