
src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp

@@ -40,10 +40,16 @@
  #ifdef COMPILER1
  #include "c1/c1_LIRAssembler.hpp"
  #include "c1/c1_MacroAssembler.hpp"
  #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  #endif
+ #ifdef COMPILER2
+ #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #include "opto/output.hpp"
+ #include "utilities/population_count.hpp"
+ #include "utilities/powerOfTwo.hpp"
+ #endif
  
  #define __ masm->
  
  void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                         Register src, Register dst, Register count, RegSet saved_regs) {

@@ -766,5 +772,386 @@
  }
  
  #undef __
  
  #endif // COMPILER1
+ 
+ #ifdef COMPILER2
+ 
+ #undef __
+ #define __ masm.
+ 
+ int ShenandoahBarrierStubC2::available_gp_registers() {
+   return Register::number_of_registers;
+ }
+ 
+ bool ShenandoahBarrierStubC2::is_special_register(Register r) {
+   return r == fp || r == sp ||
+          r == xheapbase || r == xthread ||
+          r == t0 || r == t1 || r == zr;
+ }
+ 
+ void ShenandoahBarrierStubC2::enter_if_gc_state(MacroAssembler& masm, const char test_state, Register tmp) {
+   Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
+ 
+   Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(test_state)));
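+   // Load the precomputed byte for this state mask from the per-thread
+   // gc-state fast table: non-zero means at least one of the tested states
+   // is active; zero means we can stay on the fast path.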
+   __ lbu(t0, gc_state_fast);
+   __ beqz(t0, *continuation());
+   __ j(*entry());
+ 
+   // This is where the slow-path stub will return to, or where the code
+   // above will jump to if the checks fail.
+   __ bind(*continuation());
+ }
+ 
+ #undef __
+ #define __ masm->
+ 
+ void ShenandoahBarrierSetAssembler::compare_and_set_c2(const MachNode* node, MacroAssembler* masm, Register res, Register addr,
+     Register oldval, Register newval, Register tmp, bool exchange, bool narrow, bool is_acquire) {
+   const Assembler::Aqrl acquire = is_acquire ? Assembler::aq : Assembler::relaxed;
+   const Assembler::Aqrl release = Assembler::rl;
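+   // RISC-V AMO/LR/SC ordering bits: release (rl) is applied unconditionally
+   // to order the store of newval; acquire (aq) only when the access needs it.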
+ 
+   // Pre-barrier covers several things:
+   //  a. Avoids false negatives when CAS encounters from-space pointers in memory.
+   //  b. Satisfies the need for LRB on the CAE result.
+   //  c. Records the old value for the sake of SATB.
+   //
+   // (a) and (b) are covered because the load barrier fixes up the memory location.
+   // (c) is covered by KA on the current memory value.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, Address(addr, 0), narrow, /* do_load: */ true);
+     char check = 0;
+     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)   ? ShenandoahHeap::HAS_FORWARDED : 0;
+     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for CAS/CAE");
+     stub->enter_if_gc_state(*masm, check);
+   }
+ 
+   // The existing RISC-V cmpxchg_oop already handles Shenandoah's forwarded-value retry logic.
+   // It returns:
+   //   - boolean 0/1 for CAS (!exchange)
+   //   - loaded/current value for CAE (exchange)
+   ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, addr, oldval, newval, acquire, release, exchange /* is_cae */, res);
+ 
+   // Post-barrier deals with card updates.
+   card_barrier_c2(node, masm, Address(addr, 0));
+ }
+ 
+ void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register preval,
+     Register newval, Register addr, Register tmp, bool is_acquire) {
+   const bool is_narrow = node->bottom_type()->isa_narrowoop() != nullptr;
+ 
+   // Pre-barrier covers several things:
+   //  a. Satisfies the need for LRB on the GAS result.
+   //  b. Records the old value for the sake of SATB.
+   //
+   // (a) is covered because the load barrier fixes up the memory location.
+   // (b) is covered by KA on the current memory value.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, Address(addr, 0), is_narrow, /* do_load: */ true);
+     char check = 0;
+     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)   ? ShenandoahHeap::HAS_FORWARDED : 0;
+     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for GAS");
+     stub->enter_if_gc_state(*masm, check);
+   }
+ 
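+   // amoswap performs the exchange in a single instruction. The 'al' variants
+   // add acquire/release ordering; the 'wu' variants operate on 32-bit narrow
+   // oops and zero-extend the previous value.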
+   if (is_narrow) {
+     if (is_acquire) {
+       __ atomic_xchgalwu(preval, newval, addr);
+     } else {
+       __ atomic_xchgwu(preval, newval, addr);
+     }
+   } else {
+     if (is_acquire) {
+       __ atomic_xchgal(preval, newval, addr);
+     } else {
+       __ atomic_xchg(preval, newval, addr);
+     }
+   }
+ 
+   // Post-barrier deals with card updates.
+   card_barrier_c2(node, masm, Address(addr, 0));
+ }
+ 
+ void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm, Address dst, bool dst_narrow,
+     Register src, bool src_narrow, Register tmp) {
+ 
+   // Pre-barrier: SATB / keep-alive on current value in memory.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier(node), "Should not be required for stores");
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, dst, dst_narrow, /* do_load: */ true);
+     stub->enter_if_gc_state(*masm, ShenandoahHeap::MARKING);
+   }
+ 
+   // Do the actual store
+   if (dst_narrow) {
+     if (!src_narrow) {
+       // Need to encode into tmp, because we cannot clobber src.
+       assert(tmp != noreg, "need temp register");
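+       // encode_heap_oop tolerates a null src; the not_null variant elides
+       // the null check when the compiler has proven src non-null.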
+       if (ShenandoahBarrierStubC2::maybe_null(node)) {
+         __ encode_heap_oop(tmp, src);
+       } else {
+         __ encode_heap_oop_not_null(tmp, src);
+       }
+       src = tmp;
+     }
+     __ sw(src, dst);
+   } else {
+     __ sd(src, dst);
+   }
+ 
+   // Post-barrier: card updates.
+   card_barrier_c2(node, masm, dst);
+ }
+ 
+ void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src, bool is_narrow) {
+   // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
+   if (is_narrow) {
+     __ lwu(dst, src);
+   } else {
+     __ ld(dst, src);
+   }
+ 
+   // Post-barrier: LRB / KA / weak-root processing.
+   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, dst, src, is_narrow, /* do_load: */ false);
+     char check = 0;
+     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node)    ? ShenandoahHeap::MARKING : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)      ? ShenandoahHeap::HAS_FORWARDED : 0;
+     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
+     stub->enter_if_gc_state(*masm, check);
+   }
+ }
+ 
+ void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Address address) {
+   if (!ShenandoahBarrierStubC2::needs_card_barrier(node)) {
+     return;
+   }
+ 
+   assert(CardTable::dirty_card_val() == 0, "must be");
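+   // Dirty is encoded as zero, so we can mark a card by storing zr (x0)
+   // without materializing a constant.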
+   Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+ 
+   // t0 = card table base (holder)
+   Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
+   __ ld(t0, curr_ct_holder_addr);
+ 
+   // t1 = effective address
+   __ la(t1, address);
+ 
+   // t1 = &card_table[ addr >> CardTable::card_shift() ] ; card index
+   __ srli(t1, t1, CardTable::card_shift());
+   __ add(t1, t1, t0);
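+   // For example, with the default 512-byte cards (card_shift() == 9), a
+   // store to address A dirties the byte at card_table_base + (A >> 9).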
+ 
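+   // With UseCondCardMark, test the card first and store only if it is not
+   // already dirty. This trades a load for avoiding redundant write traffic.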
+   if (UseCondCardMark) {
+     Label L_already_dirty;
+     __ lbu(t0, Address(t1));
+     __ beqz(t0, L_already_dirty);
+     __ sb(zr, Address(t1));
+     __ bind(L_already_dirty);
+   } else {
+     __ sb(zr, Address(t1));
+   }
+ }
+ 
+ #undef __
+ #define __ masm.
+ 
+ void ShenandoahBarrierStubC2::post_init() {
+   // If we are in scratch emit mode, we assume the worst case
+   // and force the use of trampolines.
+   PhaseOutput* const output = Compile::current()->output();
+   if (output->in_scratch_emit_size()) {
+     return;
+   }
+ }
+ 
+ void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
+   Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
+   assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");
+ 
+   __ bind(*entry());
+ 
+   // If we need to load ourselves, do it here.
+   if (_do_load) {
+     if (_narrow) {
+       __ lwu(_obj, _addr);
+     } else {
+       __ ld(_obj, _addr);
+     }
+   }
+ 
+   // If the object is null, there is no point in applying barriers.
+   maybe_far_jump_if_zero(masm, _obj, continuation());
+ 
+   // Apply the barriers. A barrier can return straight to the continuation,
+   // as long as no other barrier needs to run after it.
+   if (_needs_keep_alive_barrier && _needs_load_ref_barrier) {
+     keepalive(masm, nullptr);
+     lrb(masm, continuation());
+   } else if (_needs_keep_alive_barrier) {
+     keepalive(masm, continuation());
+   } else if (_needs_load_ref_barrier) {
+     lrb(masm, continuation());
+   } else {
+     ShouldNotReachHere();
+   }
+ }
+ 
+ void ShenandoahBarrierStubC2::maybe_far_jump_if_zero(MacroAssembler& masm, Register reg, Label* L_done) {
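+   // Conditional branches (beqz/bnez) only reach +-4 KiB, which may not span
+   // from stub code back to the main body. Invert the test and branch over an
+   // unconditional jump, which reaches +-1 MiB.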
+   Label L_short_jump;
+   __ bnez(reg, L_short_jump);
+   __ j(*L_done);
+   __ bind(L_short_jump);
+ }
+ 
+ void ShenandoahBarrierStubC2::keepalive(MacroAssembler& masm, Label* L_done) {
+   Address index(xthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+   Address buffer(xthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+ 
+   Label L_through, L_slowpath;
+ 
+   Register tmp1 = t0;
+   Register tmp2 = t1;
+   assert_different_registers(tmp1, tmp2, _obj, _addr.base(), _addr.index());
+ 
+   // If another barrier is enabled as well, do a runtime check for this specific barrier.
+   if (_needs_load_ref_barrier) {
+     Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(ShenandoahHeap::MARKING)));
+     __ lbu(tmp1, gc_state_fast);
+     if (L_done != nullptr) {
+       maybe_far_jump_if_zero(masm, tmp1, L_done);
+     } else {
+       __ beqz(tmp1, L_through);
+     }
+   }
+ 
+   // Fast-path: put object into buffer.
+   // If buffer is already full, go slow.
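+   // The queue index is a byte offset that counts down from the buffer
+   // capacity; zero means the buffer is full.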
+   __ ld(tmp1, index);
+   __ beqz(tmp1, L_slowpath);
+   __ subi(tmp1, tmp1, wordSize);
+   __ sd(tmp1, index);
+   __ ld(tmp2, buffer);
+ 
+   __ add(tmp1, tmp1, tmp2);
+   // If the object is narrow, we need to unpack it before inserting it into the buffer.
+   if (_narrow) {
+     __ decode_heap_oop_not_null(tmp2, _obj);
+     __ sd(tmp2, Address(tmp1));
+   } else {
+     __ sd(_obj, Address(tmp1));
+   }
+ 
+   // Fast-path exits here.
+   if (L_done != nullptr) {
+     __ j(*L_done);
+   } else {
+     __ j(L_through);
+   }
+ 
+   // Slow-path: call runtime to handle.
+   __ bind(L_slowpath);
+ 
+   // If this stub also supports LRB then we need to preserve _obj to use it there.
+   if (_needs_load_ref_barrier) {
+     preserve(_obj);
+   } else {
+     dont_preserve(_obj);
+   }
+ 
+   {
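+     // SaveLiveRegisters spills the caller-saved registers that are live at
+     // this stub, honoring the preserve/dont_preserve hints above.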
+     SaveLiveRegisters slr(&masm, this);
+ 
+     // Go to runtime and handle the rest there.
+     __ mv(c_rarg0, _obj);
+     __ rt_call(keepalive_runtime_entry_addr());
+   }
+ 
+   if (L_done != nullptr) {
+     __ j(*L_done);
+   } else {
+     __ bind(L_through);
+   }
+ }
+ 
+ void ShenandoahBarrierStubC2::lrb(MacroAssembler& masm, Label* L_done) {
+   assert(L_done != nullptr, "Must be set");
+ 
+   Label L_slow;
+ 
+   Register tmp1 = t0;
+   Register tmp2 = t1;
+   assert_different_registers(tmp1, tmp2, _obj, _addr.base(), _addr.index());
+ 
+   // If another barrier is enabled as well, do a runtime check for a specific barrier.
+   if (_needs_keep_alive_barrier) {
+     char state_to_check = ShenandoahHeap::HAS_FORWARDED | (_needs_load_ref_weak_barrier ? ShenandoahHeap::WEAK_ROOTS : 0);
+     Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(state_to_check)));
+     __ lbu(tmp1, gc_state_fast);
+     maybe_far_jump_if_zero(masm, tmp1, L_done);
+   }
+ 
+   // If weak references are being processed, weak/phantom loads need to go slow,
+   // regardless of their cset status.
+   if (_needs_load_ref_weak_barrier) {
+     Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(ShenandoahHeap::WEAK_ROOTS)));
+     __ lbu(tmp1, gc_state_fast);
+     __ bnez(tmp1, L_slow);
+   }
+ 
+   // Cset check: fall through to the slow path if the object is in the collection set.
+   if (_narrow) {
+     __ decode_heap_oop_not_null(tmp2, _obj);
+   } else {
+     __ mv(tmp2, _obj);
+   }
+ 
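+   // The cset fast-test table has one byte per heap region, indexed by
+   // (address >> region_size_shift); non-zero means the region is in the
+   // collection set.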
+   __ mv(tmp1, ShenandoahHeap::in_cset_fast_test_addr());
+   __ srli(tmp2, tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+   __ add(tmp1, tmp1, tmp2);
+   __ lbu(tmp1, Address(tmp1, 0));
+   maybe_far_jump_if_zero(masm, tmp1, L_done);
+ 
+   // Slow path
+   __ bind(L_slow);
+ 
+   // _obj receives the call result; temporarily stop preserving it, otherwise
+   // the register restore would clobber the result.
+   dont_preserve(_obj);
+   {
+     SaveLiveRegisters slr(&masm, this);
+ 
+     // Shuffle in the arguments. The end result should be:
+     //   c_rarg0 <- obj
+     //   c_rarg1 <- lea(addr)
+     if (c_rarg0 == _obj) {
+       __ la(c_rarg1, _addr);
+     } else if (c_rarg1 == _obj) {
+       // Set up the arguments in reverse, then flip them:
+       __ la(c_rarg0, _addr);
+       __ mv(t0, c_rarg0);
+       __ mv(c_rarg0, c_rarg1);
+       __ mv(c_rarg1, t0);
+     } else {
+       assert_different_registers(c_rarg1, _obj);
+       __ la(c_rarg1, _addr);
+       __ mv(c_rarg0, _obj);
+     }
+ 
+     // Go to runtime and handle the rest there.
+     __ rt_call(lrb_runtime_entry_addr());
+ 
+     // x10 (a0) holds the call result; move it into _obj, zero-extending
+     // narrow oops.
+     if (_narrow) {
+       __ zext_w(_obj, x10);
+     } else {
+       __ mv(_obj, x10);
+     }
+   }
+   preserve(_obj);
+ 
+   __ j(*L_done);
+ }
+ 
+ #endif // COMPILER2