
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp

@@ -40,10 +40,14 @@
  #ifdef COMPILER1
  #include "c1/c1_LIRAssembler.hpp"
  #include "c1/c1_MacroAssembler.hpp"
  #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  #endif
+ #ifdef COMPILER2
+ #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #include "opto/output.hpp"
+ #endif
  
  #define __ masm->
  
  void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                         Register src, Register dst, Register count, RegSet saved_regs) {

@@ -625,10 +629,501 @@
    } else {
      __ cset(result, Assembler::EQ);
    }
  }
  
+ #ifdef COMPILER2
+ void ShenandoahBarrierStubC2::gc_state_check_c2(MacroAssembler* masm, Register gcstate, const unsigned char test_state, ShenandoahBarrierStubC2* slow_stub) {
+   if (ShenandoahGCStateCheckRemove) {
+     // Unrealistic: remove all barrier fastpath checks.
+   } else if (ShenandoahGCStateCheckHotpatch) {
+     // In the ideal world, we would hot-patch the branch to the slow stub into a
+     // single (unconditional) jump or a nop, based on the current GC state.
+     __ nop();
+   } else {
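+     // Load the per-thread fast gc-state byte and test the bit corresponding to
+     // the requested state: if it is set, take the slow path through the stub.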
+     int bit_to_check = ShenandoahThreadLocalData::gc_state_to_fast_bit(test_state);
+     Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_offset()));
+     __ ldrb(gcstate, gc_state_fast);
+     if (slow_stub->_test_and_branch_reachable) {
+       __ tbnz(gcstate, bit_to_check, *slow_stub->entry());
+     } else {
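+       // The stub entry is out of test-and-branch range, so test the inverted
+       // condition and reach the stub with an unconditional branch instead.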
+       __ tbz(gcstate, bit_to_check, *slow_stub->continuation());
+       __ b(*slow_stub->entry());
+     }
+ 
+     // This is where the slowpath stub will return to, or where the code above
+     // will jump to if the checks are false.
+     __ bind(*slow_stub->continuation());
+   }
+ }
+ 
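+ // Same check as needs_acquiring_load_exclusive() in aarch64.ad, adapted to ask
+ // the MachNode directly: use the acquiring form of the exclusive load when the
+ // CAS is published with a trailing membar.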
+ bool needs_acquiring_load_exclusive(const MachNode *n) {
+   assert(n->is_CAS(true), "expecting a compare and swap");
+   if (n->is_CAS(false)) {
+     assert(n->has_trailing_membar(), "expected trailing membar");
+   } else {
+     return n->has_trailing_membar();
+   }
+ 
+   // so we can just return true here
+   return true;
+ }
+ 
+ void ShenandoahBarrierSetAssembler::compare_and_set_c2(const MachNode* node, MacroAssembler* masm, Register res, Register addr,
+     Register oldval, Register newval, Register tmp, bool exchange, bool maybe_null, bool narrow, bool weak) {
+   bool acquire = needs_acquiring_load_exclusive(node);
+   Assembler::operand_size op_size = narrow ? Assembler::word : Assembler::xword;
+ 
+   // The pre-barrier covers several things:
+   //  a. Avoids spurious CAS failures (false negatives) caused by stale
+   //     from-space values still sitting in memory.
+   //  b. Satisfies the need for LRB on the CAE result.
+   //  c. Records the old value for the sake of SATB.
+   //
+   // (a) and (b) are covered because the load barrier fixes up the memory location.
+   // (c) is covered by keep-alive (KA) on the current memory value.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, narrow, /* do_load: */ true, __ offset());
+     char check = 0;
+     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)   ? ShenandoahHeap::HAS_FORWARDED : 0;
+     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for CAS");
+     ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, check, stub);
+   }
+ 
+   // CAS!
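+   // Passing res as the result register makes cmpxchg capture the old memory
+   // value (compare-and-exchange); with noreg only the flags are of interest.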
+   __ cmpxchg(addr, oldval, newval, op_size, acquire, /* release */ true, weak, exchange ? res : noreg);
+ 
+   // If we need a boolean result out of CAS, set the flag appropriately and promote the result.
+   if (!exchange) {
+     assert(res != noreg, "need result register");
+     __ cset(res, Assembler::EQ);
+   }
+ 
+   // Post-barrier deals with card updates.
+   card_barrier_c2(node, masm, Address(addr, 0));
+ }
+ 
+ void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register preval,
+     Register newval, Register addr, Register tmp) {
+   bool acquire = needs_acquiring_load_exclusive(node);
+   bool narrow = node->bottom_type()->isa_narrowoop();
+ 
+   // The pre-barrier covers several things:
+   //  a. Satisfies the need for LRB on the GAS result.
+   //  b. Records the old value for the sake of SATB.
+   //
+   // (a) is covered because the load barrier fixes up the memory location.
+   // (b) is covered by keep-alive (KA) on the current memory value.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, narrow, /* do_load: */ true, __ offset());
+     char check = 0;
+     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)   ? ShenandoahHeap::HAS_FORWARDED : 0;
+     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for GAS");
+     ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, check, stub);
+   }
+ 
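+   // Perform the exchange with the operand width and memory ordering the node requires.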
+   if (narrow) {
+     if (acquire) {
+       __ atomic_xchgalw(preval, newval, addr);
+     } else {
+       __ atomic_xchgw(preval, newval, addr);
+     }
+   } else {
+     if (acquire) {
+       __ atomic_xchgal(preval, newval, addr);
+     } else {
+       __ atomic_xchg(preval, newval, addr);
+     }
+   }
+ 
+   // Post-barrier deals with card updates.
+   card_barrier_c2(node, masm, Address(addr, 0));
+ }
+ 
+ void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm, Address dst, bool dst_narrow,
+     Register src, bool src_narrow, Register tmp) {
+ 
+   // Pre-barrier: SATB, keep-alive the current memory value.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier(node), "Should not be required for stores");
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, dst, dst_narrow, /* do_load: */ true, __ offset());
+     ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, ShenandoahHeap::MARKING, stub);
+   }
+ 
+   // Do the actual store
+   bool is_volatile = node->has_trailing_membar();
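+   // Volatile stores use a store-release to the plain base address; other stores
+   // can use the full addressing mode.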
+   if (dst_narrow) {
+     if (!src_narrow) {
+       // Need to encode into rscratch1, because we cannot clobber src.
+       // TODO: Maybe there is a matcher way to test that src is unused after this?
+       __ mov(rscratch1, src);
+       if (ShenandoahBarrierStubC2::src_not_null(node)) {
+         __ encode_heap_oop_not_null(rscratch1);
+       } else {
+         __ encode_heap_oop(rscratch1);
+       }
+       src = rscratch1;
+     }
+ 
+     if (is_volatile) {
+       __ stlrw(src, dst.base());
+     } else {
+       __ strw(src, dst);
+     }
+   } else {
+     if (is_volatile) {
+       __ stlr(src, dst.base());
+     } else {
+       __ str(src, dst);
+     }
+   }
+ 
+   // Post-barrier: card updates.
+   card_barrier_c2(node, masm, dst);
+ }
+ 
+ void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src) {
+   bool acquire = node->memory_order() == MemNode::MemOrd::acquire;
+   bool narrow = node->bottom_type()->isa_narrowoop();
+ 
+   // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
+   if (narrow) {
+     if (acquire) {
+       __ ldarw(dst, src.base());
+     } else {
+       __ ldrw(dst, src);
+     }
+   } else {
+     if (acquire) {
+       __ ldar(dst, src.base());
+     } else {
+       __ ldr(dst, src);
+     }
+   }
+ 
+   // Post-barrier: LRB / KA / weak-root processing.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, dst, src, narrow, /* do_load: */ false, __ offset());
+     char check = 0;
+     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node)    ? ShenandoahHeap::MARKING : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)      ? ShenandoahHeap::HAS_FORWARDED : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
+     ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, check, stub);
+   }
+ }
+ 
+ void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Address address) {
+   if (ShenandoahSkipBarriers || (node->barrier_data() & ShenandoahBitCardMark) == 0) {
+     return;
+   }
+ 
+   assert(CardTable::dirty_card_val() == 0, "must be");
+   Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+ 
+   // rscratch2 = addr >> CardTable::card_shift()
+   __ lea(rscratch2, address);
+   __ lsr(rscratch2, rscratch2, CardTable::card_shift());
+ 
+   // rscratch1 = card table base (holder)
+   Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
+   __ ldr(rscratch1, curr_ct_holder_addr);
+ 
+   // rscratch2 = &card_table[card_index]
+   __ add(rscratch2, rscratch1, rscratch2);
+ 
+   if (UseCondCardMark) {
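+     // Load the card first and only dirty it if it is not already dirty.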
+     Label L_already_dirty;
+     __ ldrb(rscratch1, Address(rscratch2));
+     __ cbz(rscratch1, L_already_dirty);
+     __ strb(zr, Address(rscratch2));
+     __ bind(L_already_dirty);
+   } else {
+     __ strb(zr, Address(rscratch2));
+   }
+ }
+ #undef __
+ #define __ masm.
+ 
+ // Only handles forward branches, i.e. target_offset >= branch_offset.
+ // FIXME: copied verbatim from ZGC, duplicated code.
+ static bool aarch64_test_and_branch_reachable(int branch_offset, int target_offset) {
+   assert(branch_offset >= 0, "branch to stub offsets must be positive");
+   assert(target_offset >= 0, "offset in stubs section must be positive");
+   assert(target_offset >= branch_offset, "forward branches only, branch_offset -> target_offset");
+ 
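+   // tb(n)z encodes a 14-bit signed word offset, which gives a +/-32K byte range.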
+   const int test_and_branch_delta_limit = 32 * K;
+ 
+   const int test_and_branch_to_trampoline_delta = target_offset - branch_offset;
+ 
+   return test_and_branch_to_trampoline_delta < test_and_branch_delta_limit;
+ }
+ 
+ ShenandoahBarrierStubC2::ShenandoahBarrierStubC2(const MachNode* node, Register obj, Address addr, bool narrow, bool do_load, int offset) :
+   BarrierStubC2(node),
+   _obj(obj),
+   _addr(addr),
+   _do_load(do_load),
+   _narrow(narrow),
+   _maybe_null(!src_not_null(node)),
+   _needs_load_ref_barrier(needs_load_ref_barrier(node)),
+   _needs_load_ref_weak_barrier(needs_load_ref_barrier_weak(node)),
+   _needs_keep_alive_barrier(needs_keep_alive_barrier(node)),
+   _fastpath_branch_offset(offset),
+   _test_and_branch_reachable(),
+   _skip_trampoline(),
+   _test_and_branch_reachable_entry() {
+ 
+   assert(!ShenandoahSkipBarriers, "Do not touch stubs when disabled");
+   assert(!_narrow || is_heap_access(node), "Only heap accesses can be narrow");
+ 
+   // If we are in scratch emit mode, we assume the worst case by leaving
+   // _test_and_branch_reachable false.
+   PhaseOutput* const output = Compile::current()->output();
+   if (output->in_scratch_emit_size()) {
+     return;
+   }
+ 
+   // Assume that each trampoline is one single instruction and that the stubs
+   // will follow immediately after the _code section. Therefore, we are
+   // checking if the distance between the fastpath branch and the
+   // trampoline/entry of the current stub is less than 32K.
+   const int code_size = output->buffer_sizing_data()->_code;
+   const int trampoline_offset = trampoline_stubs_count() * NativeInstruction::instruction_size;
+   _test_and_branch_reachable = aarch64_test_and_branch_reachable(_fastpath_branch_offset, code_size + trampoline_offset);
+   if (_test_and_branch_reachable) {
+     inc_trampoline_stubs_count();
+   }
+ }
+ 
+ void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
+   // If we reach here with _skip_trampoline set it means that earlier we
+   // emitted a trampoline to this stub and now we need to emit the actual stub.
+   if (_skip_trampoline) {
+     emit_code_actual(masm);
+     return;
+   }
+   _skip_trampoline = true;
+ 
+   // The fastpath already executes two branch instructions to reach this stub,
+   // so do not add a third one via a trampoline; emit the full stub instead.
+   if (!_test_and_branch_reachable) {
+     // By registering the stub again, after setting _skip_trampoline, we
+     // effectively cause the actual stub to be emitted the next time
+     // ::emit_code is called.
+     ShenandoahBarrierStubC2::register_stub(this);
+     return;
+   }
+ 
+   // This is the entry point when coming from the fastpath, iff the fastpath can
+   // reach here with a single test-and-branch instruction; otherwise the entry
+   // is the one returned by ShenandoahBarrierStubC2::entry().
+   const int target_offset = __ offset();
+   __ bind(_test_and_branch_reachable_entry);
+ 
+ #ifdef ASSERT
+   // Current assumption is that the barrier stubs are the first stubs emitted
+   // after the actual code
+   PhaseOutput* const output = Compile::current()->output();
+   assert(stubs_start_offset() <= output->buffer_sizing_data()->_code, "stubs are assumed to be emitted directly after code and code_size is a hard limit on where it can start");
+   assert(aarch64_test_and_branch_reachable(_fastpath_branch_offset, target_offset), "trampoline should be reachable");
+ #endif
+ 
+   // The next fastpath branch's offset is unknown, but it is greater than the
+   // current _fastpath_branch_offset.
+   const int next_branch_offset = _fastpath_branch_offset + NativeInstruction::instruction_size;
+ 
+   // If emitting the current stub directly does not interfere with emission of
+   // the next potential trampoline, then do it, to avoid executing an additional
+   // branch when coming from the fastpath.
+   if (aarch64_test_and_branch_reachable(next_branch_offset, target_offset + get_stub_size())) {
+     emit_code_actual(masm);
+   } else {
+     __ b(*BarrierStubC2::entry());
+     // By registering the stub again, after setting _skip_trampoline to true,
+     // we effectively cause the actual stub to be emitted the next time
+     // ::emit_code is called.
+     ShenandoahBarrierStubC2::register_stub(this);
+   }
+ }
+ 
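+ // Entry label used by the fastpath branch: the short-range trampoline label if
+ // a single test-and-branch instruction can reach it, otherwise the shared stub
+ // entry from BarrierStubC2.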
+ Label* ShenandoahBarrierStubC2::entry() {
+   if (_test_and_branch_reachable) {
+     return &_test_and_branch_reachable_entry;
+   }
+   return BarrierStubC2::entry();
+ }
+ 
+ int ShenandoahBarrierStubC2::get_stub_size() {
+   PhaseOutput* const output = Compile::current()->output();
+   assert(!output->in_scratch_emit_size(), "only used when emitting stubs");
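+   // Emit the stub into the scratch buffer and measure the resulting code size.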
+   BufferBlob* const blob = output->scratch_buffer_blob();
+   CodeBuffer cb(blob->content_begin(), (address)output->scratch_locs_memory() - blob->content_begin());
+   MacroAssembler masm(&cb);
+   output->set_in_scratch_emit_size(true);
+   emit_code_actual(masm);
+   output->set_in_scratch_emit_size(false);
+   return cb.insts_size();
+ }
+ 
+ void ShenandoahBarrierStubC2::emit_code_actual(MacroAssembler& masm) {
+   assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");
+ 
+   // Stub entry
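+   // (Not bound while measuring the stub size in the scratch buffer, see get_stub_size().)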
+   if (!Compile::current()->output()->in_scratch_emit_size()) {
+     __ bind(*BarrierStubC2::entry());
+   }
+ 
+   // If we need to load ourselves, do it here.
+   if (_do_load) {
+     // This does the load and the decode if necessary
+     __ load_heap_oop(_obj, _addr, noreg, noreg, AS_RAW);
+   } else if (_narrow) {
+     // If object is narrow, we need to decode it first: barrier checks need full oops.
+     if (_maybe_null) {
+       __ decode_heap_oop(_obj);
+     } else {
+       __ decode_heap_oop_not_null(_obj);
+     }
+   }
+ 
+   if (_do_load || _maybe_null) {
+     __ cbz(_obj, *continuation());
+   }
+ 
+   keepalive(&masm, _obj, rscratch1, rscratch2);
+ 
+   lrb(&masm, _obj, _addr, noreg);
+ 
+   // If the object is narrow, we need to encode it before exiting.
+   // For encoding, the value can only become null if we are dealing with weak
+   // loads; otherwise, we have already null-checked. We can skip all this if we
+   // performed the load ourselves, which means the value is not used by the caller.
+   if (_narrow && !_do_load) {
+     if (_needs_load_ref_weak_barrier) {
+       __ encode_heap_oop(_obj);
+     } else {
+       __ encode_heap_oop_not_null(_obj);
+     }
+   }
+ 
+   // Go back to fast path
+   __ b(*continuation());
+ }
+ 
+ #undef __
+ #define __ masm->
+ 
+ void ShenandoahBarrierStubC2::keepalive(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2) {
+   Address index(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+   Address buffer(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+   Label L_runtime;
+   Label L_done;
+ 
+   // The node does not need the keep-alive barrier at all; nothing else to check.
+   if (!_needs_keep_alive_barrier) {
+     return;
+   }
+ 
+   // If both LRB and keep-alive barriers are required (rare), check the GC state
+   // at runtime to see whether this barrier is actually enabled.
+   if (_needs_load_ref_barrier) {
+     Address gcs_addr(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+     __ ldrb(rscratch1, gcs_addr);
+     __ tbz(rscratch1, ShenandoahHeap::MARKING_BITPOS, L_done);
+   }
+ 
+   // If buffer is full, call into runtime.
+   __ ldr(tmp1, index);
+   __ cbz(tmp1, L_runtime);
+ 
+   // The buffer is not full, store value into it.
+   __ sub(tmp1, tmp1, wordSize);
+   __ str(tmp1, index);
+   __ ldr(tmp2, buffer);
+   __ str(obj, Address(tmp2, tmp1));
+   __ b(L_done);
+ 
+   // Runtime call
+   __ bind(L_runtime);
+ 
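+   // obj may still be needed after the runtime call, so make sure it is
+   // preserved across the call.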
+   preserve(obj);
+   {
+     SaveLiveRegisters save_registers(masm, this);
+     __ mov(c_rarg0, obj);
+     __ mov(tmp1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre));
+     __ blr(tmp1);
+   }
+ 
+   __ bind(L_done);
+ }
+ 
+ void ShenandoahBarrierStubC2::lrb(MacroAssembler* masm, Register obj, Address addr, Register tmp) {
+   Label L_done;
+ 
+   // The node does not need the LRB at all; nothing else to check.
+   if (!_needs_load_ref_barrier) {
+     return;
+   }
+ 
+   if ((_node->barrier_data() & ShenandoahBitStrong) != 0) {
+     // If both LRB and keep-alive barriers are required (rare), check the GC
+     // state at runtime to see whether this barrier is actually enabled.
+     if (_needs_keep_alive_barrier) {
+       Address gcs_addr(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+       __ ldrb(rscratch1, gcs_addr);
+       if (_needs_load_ref_weak_barrier) {
+         __ orr(rscratch1, rscratch1, rscratch1, Assembler::LSR, ShenandoahHeap::WEAK_ROOTS_BITPOS);
+       }
+       __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, L_done);
+     }
+ 
+     // Weak/phantom loads always need to go to the runtime. For strong refs we
+     // check if the object is in the cset; if it is not, then we are done with the LRB.
+     __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
+     __ lsr(rscratch1, obj, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+     __ ldrb(rscratch2, Address(rscratch2, rscratch1));
+     __ cbz(rscratch2, L_done);
+   }
+ 
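+   // obj is either overwritten with the LRB result below or not used again, so
+   // it does not need to be preserved across the runtime call.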
+   dont_preserve(obj);
+   {
+     SaveLiveRegisters save_registers(masm, this);
+ 
+     // Shuffle in the arguments. The end result should be:
+     //   c_rarg0 <-- obj
+     //   c_rarg1 <-- lea(addr)
+     if (c_rarg0 == obj) {
+       __ lea(c_rarg1, addr);
+     } else if (c_rarg1 == obj) {
+       // Set up the arguments in reverse order, then swap them.
+       __ lea(c_rarg0, addr);
+       __ mov(rscratch1, c_rarg0);
+       __ mov(c_rarg0, c_rarg1);
+       __ mov(c_rarg1, rscratch1);
+     } else {
+       assert_different_registers(c_rarg1, obj);
+       __ lea(c_rarg1, addr);
+       __ mov(c_rarg0, obj);
+     }
+ 
+     // Get address of runtime LRB entry and call it
+     __ mov(rscratch1, lrb_runtime_entry_addr());
+     __ blr(rscratch1);
+ 
+     // If we loaded the object ourselves in this stub, the fastpath does not use
+     // the value, so there is no need to copy the LRB result back.
+     if (!_do_load) {
+       __ mov(obj, r0);
+     }
+   }
+ 
+   __ bind(L_done);
+ }
+ 
+ #endif // COMPILER2
+ 
  void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                       Register start, Register count, Register scratch) {
    assert(ShenandoahCardBarrier, "Should have been checked by caller");
  
    Label L_loop, L_done;