src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
+ #ifdef COMPILER2
+ #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #include "opto/output.hpp"
+ #endif
#define __ masm->
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register src, Register dst, Register count, RegSet saved_regs) {
}
#undef __
#endif // COMPILER1
+
+ #ifdef COMPILER2
+ #undef __
+ #define __ masm.
+
+ int ShenandoahBarrierStubC2::available_gp_registers() {
+ return Register::number_of_registers;
+ }
+
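+ // Registers excluded from stub register bookkeeping: the platform register
+ // (when r18 is reserved), frame/stack pointers, link register, heap base,
+ // thread register, and the scratch registers.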
+ bool ShenandoahBarrierStubC2::is_special_register(Register r) {
+ return R18_RESERVED_ONLY(r == r18_tls ||)
+ r == rfp || r == sp || r == lr ||
+ r == rheapbase || r == rthread ||
+ r == rscratch1 || r == rscratch2;
+ }
+
+ void ShenandoahBarrierStubC2::enter_if_gc_state(MacroAssembler& masm, const char test_state, Register tmp) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
+
+ Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(test_state)));
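+ // For far stubs, invert the test: skip to the continuation when the state
+ // is clear, and reach the stub with an unconditional branch, since a
+ // conditional branch cannot reach a far target directly.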
+ if (_needs_far_jump) {
+ __ ldrb(rscratch1, gc_state_fast);
+ __ cbz(rscratch1, *continuation());
+ __ b(*entry());
+ } else {
+ __ ldrb(rscratch1, gc_state_fast);
+ __ cbnz(rscratch1, *entry());
+ }
+
+ // This is where the slow-path stub returns to, or where the code above
+ // jumps to if the checks fail.
+ __ bind(*continuation());
+ }
+
+ #undef __
+ #define __ masm->
+
+ void ShenandoahBarrierSetAssembler::compare_and_set_c2(const MachNode* node, MacroAssembler* masm, Register res, Register addr,
+ Register oldval, Register newval, Register tmp, bool exchange, bool narrow, bool weak, bool acquire) {
+ Assembler::operand_size op_size = narrow ? Assembler::word : Assembler::xword;
+
+ // Pre-barrier covers several things:
+ // a. Avoids spurious CAS failures when memory still holds a stale from-space value.
+ // b. Satisfies the need for LRB on the CAE result.
+ // c. Records the old value for the sake of SATB.
+ //
+ // (a) and (b) are covered because the load barrier fixes up the memory location.
+ // (c) is covered by KA on the current memory value.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, narrow, /* do_load: */ true);
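+ // Build the mask of gc-state bits that require entering the slow path.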
+ char check = 0;
+ check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
+ assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for CAS");
+ stub->enter_if_gc_state(*masm, check);
+ }
+
+ // CAS!
+ __ cmpxchg(addr, oldval, newval, op_size, acquire, /* release */ true, weak, exchange ? res : noreg);
+
+ // If we need a boolean result out of CAS, set the flag appropriately and promote the result.
+ if (!exchange) {
+ assert(res != noreg, "need result register");
+ __ cset(res, Assembler::EQ);
+ }
+
+ // Post-barrier deals with card updates.
+ card_barrier_c2(node, masm, Address(addr, 0));
+ }
+
+ void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register preval,
+ Register newval, Register addr, Register tmp, bool is_acquire) {
+ bool is_narrow = node->bottom_type()->isa_narrowoop();
+
+ // Pre-barrier covers several things:
+ // a. Satisfies the need for LRB on the GAS result.
+ // b. Records the old value for the sake of SATB.
+ //
+ // (a) is covered because the load barrier fixes up the memory location.
+ // (b) is covered by KA on the current memory value.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, is_narrow, /* do_load: */ true);
+ char check = 0;
+ check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
+ assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for GAS");
+ stub->enter_if_gc_state(*masm, check);
+ }
+
+ if (is_narrow) {
+ if (is_acquire) {
+ __ atomic_xchgalw(preval, newval, addr);
+ } else {
+ __ atomic_xchgw(preval, newval, addr);
+ }
+ } else {
+ if (is_acquire) {
+ __ atomic_xchgal(preval, newval, addr);
+ } else {
+ __ atomic_xchg(preval, newval, addr);
+ }
+ }
+
+ // Post-barrier deals with card updates.
+ card_barrier_c2(node, masm, Address(addr, 0));
+ }
+
+ void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm, Address dst, bool dst_narrow,
+ Register src, bool src_narrow, Register tmp, bool is_volatile) {
+
+ // Pre-barrier: SATB, keep-alive the current memory value.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier(node), "Should not be required for stores");
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, dst, dst_narrow, /* do_load: */ true);
+ stub->enter_if_gc_state(*masm, ShenandoahHeap::MARKING);
+ }
+
+ // Do the actual store
+ if (dst_narrow) {
+ if (!src_narrow) {
+ // Need to encode into rscratch1, because we cannot clobber src.
+ if (ShenandoahBarrierStubC2::maybe_null(node)) {
+ __ encode_heap_oop(rscratch1, src);
+ } else {
+ __ encode_heap_oop_not_null(rscratch1, src);
+ }
+ src = rscratch1;
+ }
+
+ if (is_volatile) {
+ __ stlrw(src, dst.base());
+ } else {
+ __ strw(src, dst);
+ }
+ } else {
+ if (is_volatile) {
+ __ stlr(src, dst.base());
+ } else {
+ __ str(src, dst);
+ }
+ }
+
+ // Post-barrier: card updates.
+ card_barrier_c2(node, masm, dst);
+ }
+
+ void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src, bool is_narrow, bool is_acquire) {
+ // Do the actual load. This load is the candidate for the implicit null check, and MUST come first.
+ if (is_narrow) {
+ if (is_acquire) {
+ __ ldarw(dst, src.base());
+ } else {
+ __ ldrw(dst, src);
+ }
+ } else {
+ if (is_acquire) {
+ __ ldar(dst, src.base());
+ } else {
+ __ ldr(dst, src);
+ }
+ }
+
+ // Post-barrier: LRB / KA / weak-root processing.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, dst, src, is_narrow, /* do_load: */ false);
+ char check = 0;
+ check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
+ stub->enter_if_gc_state(*masm, check);
+ }
+ }
+
+ void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Address address) {
+ if (!ShenandoahBarrierStubC2::needs_card_barrier(node)) {
+ return;
+ }
+
+ assert(CardTable::dirty_card_val() == 0, "must be");
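+ // Since the dirty value is zero, cards can be dirtied by storing zr.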
+ Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+
+ // rscratch1 = card table base (holder)
+ Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
+ __ ldr(rscratch1, curr_ct_holder_addr);
+
+ // rscratch2 = effective address
+ __ lea(rscratch2, address);
+
+ // rscratch2 = &card_table[ addr >> CardTable::card_shift() ] ; card index
+ __ add(rscratch2, rscratch1, rscratch2, Assembler::LSR, CardTable::card_shift());
+
+ if (UseCondCardMark) {
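+ // Check the card first, and skip the store if it is already dirty.
+ // This avoids write traffic and false sharing on the card table.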
+ Label L_already_dirty;
+ __ ldrb(rscratch1, Address(rscratch2));
+ __ cbz(rscratch1, L_already_dirty);
+ __ strb(zr, Address(rscratch2));
+ __ bind(L_already_dirty);
+ } else {
+ __ strb(zr, Address(rscratch2));
+ }
+ }
+ #undef __
+ #define __ masm.
+
+ void ShenandoahBarrierStubC2::post_init() {
+ // If we are in scratch emit mode, assume the worst case and force
+ // the use of far jumps.
+ PhaseOutput* const output = Compile::current()->output();
+ if (output->in_scratch_emit_size()) {
+ _needs_far_jump = true;
+ return;
+ }
+
+ // TODO: How accurate is this estimate? Factor this out into a method.
+ const int code_size = output->buffer_sizing_data()->_code +
+ output->buffer_sizing_data()->_stub +
+ output->buffer_sizing_data()->_reloc;
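+ // Conditional branches reach only +-1MB; the 1M cutoff conservatively stays within that.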
+ _needs_far_jump = code_size >= (int)(1*M);
+ }
+
+ void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
+ assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");
+
+ __ bind(*entry());
+
+ // If we need to load ourselves, do it here.
+ if (_do_load) {
+ if (_narrow) {
+ __ ldrw(_obj, _addr);
+ } else {
+ __ ldr(_obj, _addr);
+ }
+ }
+
+ // If the object is null, there is no point in applying barriers.
+ maybe_far_jump_if_zero(masm, _obj, continuation());
+
+ // We need to make sure that loads done by callers survive across slow-path calls.
+ // For self-loads, we only need to care about the case where both KA and LRB are enabled (rare).
+ if (!_do_load || (_needs_keep_alive_barrier && _needs_load_ref_barrier)) {
+ preserve(_obj);
+ }
+
+ // Emit the barriers. A barrier can return straight to the continuation,
+ // as long as no other barrier needs to run after it and the continuation is reachable.
+ if (_needs_keep_alive_barrier && _needs_load_ref_barrier) {
+ keepalive(masm, nullptr);
+ lrb(masm, continuation());
+ } else if (_needs_keep_alive_barrier) {
+ keepalive(masm, continuation());
+ } else if (_needs_load_ref_barrier) {
+ lrb(masm, continuation());
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+
+ void ShenandoahBarrierStubC2::maybe_far_jump_if_zero(MacroAssembler& masm, Register reg, Label* L_done) {
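+ // cbz reaches only +-1MB; for far targets, branch over an unconditional b (+-128MB).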
+ if (_needs_far_jump) {
+ Label L_short_jump;
+ __ cbnz(reg, L_short_jump);
+ __ b(*L_done);
+ __ bind(L_short_jump);
+ } else {
+ __ cbz(reg, *L_done);
+ }
+ }
+
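+ // SATB keep-alive barrier: enqueue the previous value into the thread-local
+ // SATB buffer, calling into the runtime when the buffer is full.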
+ void ShenandoahBarrierStubC2::keepalive(MacroAssembler& masm, Label* L_done) {
+ Address index(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
+ Label L_through, L_slowpath;
+
+ Register tmp1 = rscratch1;
+ Register tmp2 = rscratch2;
+ assert_different_registers(tmp1, tmp2, _obj, _addr.base(), _addr.index());
+
+ // If another barrier is enabled as well, do a runtime check for a specific barrier.
+ if (_needs_load_ref_barrier) {
+ Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(ShenandoahHeap::MARKING)));
+ __ ldrb(tmp1, gc_state_fast);
+ if (L_done != nullptr) {
+ maybe_far_jump_if_zero(masm, tmp1, L_done);
+ } else {
+ __ cbz(tmp1, L_through);
+ }
+ }
+
+ // Fast-path: put object into buffer.
+ // If buffer is already full, go slow.
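+ // The index is a byte offset into the buffer and counts down towards zero,
+ // where zero means the buffer is full.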
+ __ ldr(tmp1, index);
+ __ cbz(tmp1, L_slowpath);
+ __ sub(tmp1, tmp1, wordSize);
+ __ str(tmp1, index);
+ __ ldr(tmp2, buffer);
+
+ // If the object is narrow, we need to unpack it before inserting into the buffer,
+ // and pack it back afterwards. We can skip the re-pack if we know the object is not preserved.
+ if (_narrow) {
+ __ decode_heap_oop_not_null(_obj);
+ }
+ __ str(_obj, Address(tmp2, tmp1));
+ if (_narrow && is_preserved(_obj)) {
+ __ encode_heap_oop_not_null(_obj);
+ }
+
+ // Fast-path exits here.
+ if (L_done != nullptr) {
+ __ b(*L_done);
+ } else {
+ __ b(L_through);
+ }
+
+ // Slow-path: call runtime to handle.
+ __ bind(L_slowpath);
+
+ // The Load match rule in the .ad file may have legitimized the load address
+ // using TEMP registers, and in that case we need to explicitly preserve them
+ // here, because the RA does not consider TEMPs as live-in, of course.
+ if (_needs_load_ref_barrier) {
+ preserve(_addr.base());
+ preserve(_addr.index());
+ }
+
+ {
+ SaveLiveRegisters slr(&masm, this);
+
+ // Go to runtime and handle the rest there.
+ __ mov(c_rarg0, _obj);
+ __ mov(lr, keepalive_runtime_entry_addr());
+ __ blr(lr);
+ }
+
+ if (L_done != nullptr) {
+ __ b(*L_done);
+ } else {
+ __ bind(L_through);
+ }
+ }
+
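+ // Load-reference barrier: if the object is in the collection set (or weak
+ // roots are in play for weak/phantom loads), call into the runtime to
+ // obtain the forwarded copy.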
+ void ShenandoahBarrierStubC2::lrb(MacroAssembler& masm, Label* L_done) {
+ assert(L_done != nullptr, "Must be set");
+
+ Label L_slow;
+
+ Register tmp1 = rscratch1;
+ Register tmp2 = rscratch2;
+ assert_different_registers(tmp1, tmp2, _obj, _addr.base(), _addr.index());
+
+ // If another barrier is enabled as well, do a runtime check for a specific barrier.
+ if (_needs_keep_alive_barrier) {
+ char state_to_check = ShenandoahHeap::HAS_FORWARDED | (_needs_load_ref_weak_barrier ? ShenandoahHeap::WEAK_ROOTS : 0);
+ Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(state_to_check)));
+ __ ldrb(tmp1, gc_state_fast);
+ maybe_far_jump_if_zero(masm, tmp1, L_done);
+ }
+
+ // If weak references are being processed, weak/phantom loads need to go slow,
+ // regardless of their cset status.
+ if (_needs_load_ref_weak_barrier) {
+ Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(ShenandoahHeap::WEAK_ROOTS)));
+ __ ldrb(tmp1, gc_state_fast);
+ __ cbnz(tmp1, L_slow);
+ }
+
+ // Cset-check. Fall-through to slow if in collection set.
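+ // The cset map holds one byte per region, indexed by (obj >> region size shift).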
+ if (_narrow) {
+ __ decode_heap_oop_not_null(tmp2, _obj);
+ } else {
+ tmp2 = _obj;
+ }
+ __ mov(tmp1, ShenandoahHeap::in_cset_fast_test_addr());
+ __ add(tmp1, tmp1, tmp2, Assembler::LSR, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+ __ ldrb(tmp1, Address(tmp1, 0));
+ maybe_far_jump_if_zero(masm, tmp1, L_done);
+
+ // Slow path
+ __ bind(L_slow);
+
+ // Obj is the result, need to temporarily stop preserving it.
+ bool is_obj_preserved = is_preserved(_obj);
+ if (is_obj_preserved) {
+ dont_preserve(_obj);
+ }
+ {
+ SaveLiveRegisters slr(&masm, this);
+
+ // Shuffle in the arguments. The end result should be:
+ // c_rarg0 <-- obj
+ // c_rarg1 <-- lea(addr)
+ if (c_rarg0 == _obj) {
+ __ lea(c_rarg1, _addr);
+ } else if (c_rarg1 == _obj) {
+ // Set up arguments in reverse, and then flip them
+ __ lea(c_rarg0, _addr);
+ __ mov(tmp1, c_rarg0);
+ __ mov(c_rarg0, c_rarg1);
+ __ mov(c_rarg1, tmp1);
+ } else {
+ assert_different_registers(c_rarg1, _obj);
+ __ lea(c_rarg1, _addr);
+ __ mov(c_rarg0, _obj);
+ }
+
+ // Go to runtime and handle the rest there.
+ __ mov(lr, lrb_runtime_entry_addr());
+ __ blr(lr);
+
+ // Save the result where needed.
+ if (_obj != r0) {
+ __ mov(_obj, r0);
+ }
+ }
+ if (is_obj_preserved) {
+ preserve(_obj);
+ }
+
+ __ b(*L_done);
+ }
+
+ #undef __
+ #define __ masm->
+
+ #endif // COMPILER2