src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
+ #ifdef COMPILER2
+ #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #endif
#define __ masm->
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
}
#undef __
#endif // COMPILER1
+
+ #ifdef COMPILER2
+
+ #undef __
+ #define __ masm->
+
+ void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src, bool narrow) {
+ // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
+ if (narrow) {
+ __ movl(dst, src);
+ } else {
+ __ movq(dst, src);
+ }
+
+ // Post-barrier: keep-alive (SATB) and/or LRB, depending on the gc state.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, dst, src, narrow, false);
+ char check = 0;
+ check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
+ stub->enter_if_gc_state(*masm, check);
+ }
+ }
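+
+ // Illustrative sketch: the combined gc-state mask lets a single check cover every barrier
+ // this load may need. Conceptually, the out-of-line stub is entered when
+ //
+ //   char gc_state = ShenandoahThreadLocalData::gc_state(thread);
+ //   bool enter_stub = (gc_state & check) != 0;   // any relevant GC phase active?
+ //
+ // A plain strong load only needs HAS_FORWARDED; a weak load (e.g. Reference.get-style)
+ // may additionally set MARKING and WEAK_ROOTS.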
+
+ void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm,
+ Address dst, bool dst_narrow,
+ Register src, bool src_narrow,
+ Register tmp) {
+
+ // Pre-barrier: SATB keep-alive of the current memory value.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier(node), "Should not be required for stores");
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, dst, dst_narrow, true);
+ stub->enter_if_gc_state(*masm, ShenandoahHeap::MARKING);
+ }
+
+ // Need to encode into tmp, because we cannot clobber src.
+ if (dst_narrow && !src_narrow) {
+ __ movq(tmp, src);
+ if (ShenandoahBarrierStubC2::maybe_null(node)) {
+ __ encode_heap_oop(tmp);
+ } else {
+ __ encode_heap_oop_not_null(tmp);
+ }
+ src = tmp;
+ }
+
+ // Do the actual store
+ if (dst_narrow) {
+ __ movl(dst, src);
+ } else {
+ __ movq(dst, src);
+ }
+
+ // Post-barrier: card updates.
+ if (ShenandoahBarrierStubC2::needs_card_barrier(node)) {
+ card_barrier_c2(masm, dst, tmp);
+ }
+ }
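+
+ // Illustrative sketch: a reference store with all barriers enabled therefore expands to
+ // roughly this sequence (pseudo-C++ over the code emitted above):
+ //
+ //   if (gc_state & ShenandoahHeap::MARKING) satb_enqueue(*dst);  // pre: keep old value alive
+ //   *dst = src;                                                  // the actual store
+ //   card_table[uintptr_t(dst) >> card_shift] = dirty_card_val(); // post: dirty the card
+ //
+ // satb_enqueue() is shorthand for the keepalive() stub path further down.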
+
+ void ShenandoahBarrierSetAssembler::compare_and_set_c2(const MachNode* node, MacroAssembler* masm,
+ Register res, Address addr,
+ Register oldval, Register newval, Register tmp,
+ bool narrow) {
+
+ assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
+
+ // Oldval and newval can be in the same register, but all other registers should be
+ // distinct for extra safety, as we shuffle register values around.
+ assert_different_registers(oldval, tmp, addr.base(), addr.index());
+ assert_different_registers(newval, tmp, addr.base(), addr.index());
+
+ // Pre-barrier covers several things:
+ // a. Avoids spurious CAS failures when the memory location still holds a from-space pointer.
+ // b. Satisfies the need for LRB on the CAE result.
+ // c. Records the old value for the sake of SATB.
+ //
+ // (a) and (b) are covered because the load barrier fixes up the memory location to to-space.
+ // (c) is covered by the keep-alive (KA) barrier on the current memory value.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, narrow, true);
+ char check = 0;
+ check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
+ assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for CAS");
+ stub->enter_if_gc_state(*masm, check);
+ }
+
+ // CAS!
+ __ lock();
+ if (narrow) {
+ __ cmpxchgl(newval, addr);
+ } else {
+ __ cmpxchgptr(newval, addr);
+ }
+
+ // If we need a boolean result out of CAS, materialize the comparison flag into the result register.
+ if (res != noreg) {
+ __ setcc(Assembler::equal, res);
+ }
+
+ // Post-barrier deals with card updates.
+ if (ShenandoahBarrierStubC2::needs_card_barrier(node)) {
+ card_barrier_c2(masm, addr, tmp);
+ }
+ }
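+
+ // Illustrative sketch: why the pre-barrier fixup makes the CAS reliable. Registers hold
+ // to-space pointers (to-space invariant), but memory may still hold a from-space pointer
+ // to the same object:
+ //
+ //   *addr == from_space(obj) && oldval == to_space(obj)  // would spuriously fail
+ //   // LRB first fixes *addr to to_space(obj), so the compare now matches
+ //
+ // After that, the lock cmpxchg can only fail on a genuine value change.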
+
+ void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register newval, Address addr, Register tmp, bool narrow) {
+ assert_different_registers(newval, tmp, addr.base(), addr.index());
+
+ // Pre-barrier covers several things:
+ // a. Satisfies the need for LRB on the GAS result.
+ // b. Records the old value for the sake of SATB.
+ //
+ // (a) is covered because the load barrier fixes up the memory location to to-space.
+ // (b) is covered by the keep-alive (KA) barrier on the current memory value.
+ if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
+ ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, narrow, true);
+ char check = 0;
+ check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
+ check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
+ assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for GAS");
+ stub->enter_if_gc_state(*masm, check);
+ }
+
+ if (narrow) {
+ __ xchgl(newval, addr);
+ } else {
+ __ xchgq(newval, addr);
+ }
+
+ // Post-barrier deals with card updates.
+ if (ShenandoahBarrierStubC2::needs_card_barrier(node)) {
+ card_barrier_c2(masm, addr, tmp);
+ }
+ }
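+
+ // Illustrative sketch: xchg leaves the previous memory value in 'newval', so the
+ // pre-barrier's fixup of the memory location is what guarantees the returned value
+ // already satisfies the to-space invariant:
+ //
+ //   oop old = *addr;    // fixed up by the pre-barrier if needed
+ //   *addr = newval;
+ //   return old;         // safe to hand back without further barriers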
+
+ void ShenandoahBarrierSetAssembler::card_barrier_c2(MacroAssembler* masm, Address dst, Register tmp) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+
+ __ lea(tmp, dst);
+ __ shrptr(tmp, CardTable::card_shift());
+ __ addptr(tmp, Address(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset())));
+ Address card_address(tmp, 0);
+
+ assert(CardTable::dirty_card_val() == 0, "Encoding assumption");
+ Label L_done;
+ if (UseCondCardMark) {
+ __ cmpb(card_address, 0);
+ __ jccb(Assembler::equal, L_done);
+ }
+ if (UseCompressedOops && CompressedOops::base() == nullptr) {
+ // With zero-based compressed oops, r12 (heapbase) is known to hold zero, so storing
+ // its low byte writes dirty_card_val() == 0 without an immediate operand.
+ __ movb(card_address, r12);
+ } else {
+ __ movb(card_address, 0);
+ }
+ __ bind(L_done);
+ }
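+
+ // Illustrative sketch: the card address computed above, in plain C++ (card_shift() is
+ // typically 9, i.e. 512-byte cards; the accessor name below is assumed):
+ //
+ //   volatile jbyte* base = ShenandoahThreadLocalData::card_table(thread);  // assumed accessor
+ //   base[uintptr_t(dst_addr) >> CardTable::card_shift()] = CardTable::dirty_card_val();
+ //
+ // The per-thread base is pre-biased, so indexing it with the shifted address lands
+ // directly on the right card.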
+
+ #undef __
+ #define __ masm.
+
+ void ShenandoahBarrierStubC2::enter_if_gc_state(MacroAssembler& masm, const char test_state, Register tmp) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
+
+ Address gc_state_fast(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(test_state)));
+ __ cmpb(gc_state_fast, 0);
+ __ jcc(Assembler::notEqual, *entry());
+ __ bind(*continuation());
+ }
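+
+ // Illustrative sketch: gc_state_fast_array_offset(test_state) appears to index a small
+ // per-thread array keyed by the state mask, precomputed on gc-state transitions so that
+ // a single cmpb answers "is any bit of test_state set in gc_state":
+ //
+ //   // assumed maintenance on every gc-state change:
+ //   for (int mask = 0; mask < (1 << StateBits); mask++) {
+ //     fast_array[mask] = ((gc_state & mask) != 0) ? 1 : 0;
+ //   }
+ //
+ // trading a little per-thread memory for a test that needs no extra and/test pair.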
+
+ void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
+
+ __ align(InteriorEntryAlignment);
+ __ bind(*entry());
+
+ // If we need to load ourselves, do it here.
+ if (_do_load) {
+ if (_narrow) {
+ __ movl(_obj, _addr);
+ } else {
+ __ movq(_obj, _addr);
+ }
+ }
+
+ // If the object is null, there is no point in applying barriers.
+ if (_narrow) {
+ __ testl(_obj, _obj);
+ } else {
+ __ testq(_obj, _obj);
+ }
+ __ jcc(Assembler::zero, *continuation());
+
+ // We need to make sure that values loaded by callers survive across slow-path calls.
+ // For self-loads, this only matters when both KA and LRB are enabled (rare), since the
+ // KA runtime call could otherwise clobber the value that LRB still needs.
+ if (!_do_load || (_needs_keep_alive_barrier && _needs_load_ref_barrier)) {
+ preserve(_obj);
+ }
+
+ // Apply the barriers. If both KA and LRB are needed (rare), do additional gc-state
+ // checks to determine which one is currently required. Note that KA and LRB are *not*
+ // mutually exclusive, because marking and evacuation can overlap in generational mode.
+ if (_needs_keep_alive_barrier && _needs_load_ref_barrier) {
+ Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+
+ Label L_skip_keepalive;
+ __ testb(gc_state, ShenandoahHeap::MARKING);
+ __ jcc(Assembler::zero, L_skip_keepalive);
+ keepalive(masm, nullptr);
+ __ bind(L_skip_keepalive);
+
+ __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | (_needs_load_ref_weak_barrier ? ShenandoahHeap::WEAK_ROOTS : 0));
+ __ jcc(Assembler::zero, *continuation());
+ lrb(masm, continuation());
+ } else if (_needs_keep_alive_barrier) {
+ keepalive(masm, continuation());
+ } else if (_needs_load_ref_barrier) {
+ lrb(masm, continuation());
+ } else {
+ ShouldNotReachHere();
+ }
+ }
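+
+ // Illustrative sketch: the stub's overall control flow when both KA and LRB were
+ // requested (the rare generational overlap case):
+ //
+ //   if (obj == nullptr) goto continuation;              // nulls need no barrier
+ //   if (gc_state & MARKING)       keepalive(obj);       // SATB enqueue
+ //   if (gc_state & HAS_FORWARDED) obj = lrb(obj, addr); // plus WEAK_ROOTS for weak loads
+ //   goto continuation;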
+
+ void ShenandoahBarrierStubC2::keepalive(MacroAssembler& masm, Label* L_done) {
+ Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
+ Label L_pop_and_slow;
+
+ // Need temp to work, allocate one now.
+ bool tmp_live;
+ Register tmp = select_temp_register(tmp_live);
+ if (tmp_live) {
+ __ push(tmp);
+ }
+
+ // Fast-path: put object into buffer.
+ // If buffer is already full, go slow.
+ __ movptr(tmp, index);
+ __ subptr(tmp, wordSize);
+ __ jccb(Assembler::below, L_pop_and_slow);
+ __ movptr(index, tmp);
+ __ addptr(tmp, buffer);
+
+ // If the object is narrow, we need to unpack it before inserting it into the buffer,
+ // and pack it back afterwards. We can skip the pack-back when the object is not
+ // preserved, since nothing reads it after this point.
+ if (_narrow) {
+ __ decode_heap_oop_not_null(_obj);
+ }
+ __ movptr(Address(tmp, 0), _obj);
+ if (_narrow && is_preserved(_obj)) {
+ __ encode_heap_oop_not_null(_obj);
+ }
+
+ // Fast-path exits here.
+ if (tmp_live) {
+ __ pop(tmp);
+ }
+
+ Label L_fallthrough;
+ if (L_done != nullptr) {
+ __ jmp(*L_done);
+ } else {
+ __ jmp(L_fallthrough);
+ }
+
+ // Slow-path: call runtime to handle.
+ // Need to pop tmp immediately for stack to remain aligned.
+ __ bind(L_pop_and_slow);
+ if (tmp_live) {
+ __ pop(tmp);
+ }
+ {
+ SaveLiveRegisters slr(&masm, this);
+
+ // Shuffle in the arguments. The end result should be:
+ // c_rarg0 <-- obj
+ if (c_rarg0 != _obj) {
+ __ mov(c_rarg0, _obj);
+ }
+
+ // Go to runtime and handle the rest there.
+ __ call(RuntimeAddress(keepalive_runtime_entry_addr()));
+ }
+ if (L_done != nullptr) {
+ __ jmp(*L_done);
+ } else {
+ __ bind(L_fallthrough);
+ }
+ }
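+
+ // Illustrative sketch: the fast path above is the classic SATB enqueue, roughly:
+ //
+ //   intptr_t index = queue->index();                 // byte offset of first used slot
+ //   if (index < wordSize) {
+ //     runtime_slow_path(obj);                        // buffer full: hand off to runtime
+ //   } else {
+ //     index -= wordSize;
+ //     queue->set_index(index);
+ //     *reinterpret_cast<oop*>(queue->buffer() + index) = obj;  // store the wide oop
+ //   }
+ //
+ // The index is kept in bytes, hence the wordSize arithmetic.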
+
+ void ShenandoahBarrierStubC2::lrb(MacroAssembler& masm, Label* L_done) {
+ assert(L_done != nullptr, "Must be set");
+
+ Label L_pop_and_slow, L_slow;
+
+ // If weak references are being processed, weak/phantom loads need to go slow,
+ // regardless of their cset status.
+ if (_needs_load_ref_weak_barrier) {
+ Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ __ testb(gc_state, ShenandoahHeap::WEAK_ROOTS);
+ __ jccb(Assembler::notZero, L_slow);
+ }
+
+ // Need temp to work, allocate one now.
+ bool tmp_live;
+ Register tmp = select_temp_register(tmp_live);
+ if (tmp_live) {
+ __ push(tmp);
+ }
+
+ // Compute the cset bitmap index
+ if (_narrow) {
+ __ decode_heap_oop_not_null(tmp, _obj);
+ } else {
+ __ movptr(tmp, _obj);
+ }
+ __ shrptr(tmp, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+
+ Address cset_addr_arg;
+ intptr_t cset_addr = reinterpret_cast<intptr_t>(ShenandoahHeap::in_cset_fast_test_addr());
+ if ((cset_addr >> 3) < INT32_MAX) {
+ // Cset bitmap is at an easily encodable address. Just use it as the displacement.
+ assert(is_aligned(cset_addr, 8), "Sanity");
+ cset_addr_arg = Address(tmp, cset_addr >> 3, Address::times_8);
+ } else {
+ // Cset bitmap is beyond our encoding limit. Materialize its full address and add it.
+ bool tmp2_live;
+ Register tmp2 = select_temp_register(tmp2_live, /* skip_reg1 = */ tmp);
+ if (tmp2_live) {
+ __ push(tmp2);
+ }
+ __ movptr(tmp2, cset_addr);
+ __ addptr(tmp, tmp2);
+ if (tmp2_live) {
+ __ pop(tmp2);
+ }
+ cset_addr_arg = Address(tmp, 0);
+ }
+
+ // Cset check. Go to the slow path if the object is in the collection set.
+ __ cmpb(cset_addr_arg, 0);
+ if (tmp_live) {
+ __ jccb(Assembler::notEqual, L_pop_and_slow);
+ __ pop(tmp);
+ __ jmp(*L_done);
+ } else {
+ // Nothing else to do, jump back
+ __ jcc(Assembler::equal, *L_done);
+ }
+
+ // Slow path
+ __ bind(L_pop_and_slow);
+ // Need to pop tmp immediately for stack to remain aligned.
+ if (tmp_live) {
+ __ pop(tmp);
+ }
+ __ bind(L_slow);
+
+ // Obj is the result, need to temporarily stop preserving it.
+ bool is_obj_preserved = is_preserved(_obj);
+ if (is_obj_preserved) {
+ dont_preserve(_obj);
+ }
+ {
+ SaveLiveRegisters slr(&masm, this);
+
+ assert_different_registers(rax, c_rarg0, c_rarg1);
+
+ // Shuffle in the arguments. The end result should be:
+ // c_rarg0 <-- obj
+ // c_rarg1 <-- lea(addr)
+ if (_obj == c_rarg0) {
+ __ lea(c_rarg1, _addr);
+ } else if (_obj == c_rarg1) {
+ // Set up arguments in reverse, and then flip them
+ __ lea(c_rarg0, _addr);
+ __ xchgptr(c_rarg0, c_rarg1);
+ } else {
+ assert_different_registers(_obj, c_rarg0, c_rarg1);
+ __ lea(c_rarg1, _addr);
+ __ movptr(c_rarg0, _obj);
+ }
+
+ // Go to runtime and handle the rest there.
+ __ call(RuntimeAddress(lrb_runtime_entry_addr()));
+
+ // Save the result where needed.
+ if (_obj != rax) {
+ __ movptr(_obj, rax);
+ }
+ }
+ if (is_obj_preserved) {
+ preserve(_obj);
+ }
+
+ __ jmp(*L_done);
+ }
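+
+ // Illustrative sketch: the in-cset fast test above, in plain C++. The table holds one
+ // byte per heap region, indexed by the region number of the object's address:
+ //
+ //   const jbyte* cset_map = (const jbyte*) ShenandoahHeap::in_cset_fast_test_addr();
+ //   size_t idx = uintptr_t(obj) >> ShenandoahHeapRegion::region_size_bytes_shift_jint();
+ //   bool in_cset = cset_map[idx] != 0;
+ //
+ // Only objects in the collection set can have to-space copies; everything else returns
+ // on the fast path.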
+
+ int ShenandoahBarrierStubC2::available_gp_registers() {
+ return Register::available_gp_registers();
+ }
+
+ bool ShenandoahBarrierStubC2::is_special_register(Register r) {
+ return r == rsp || r == rbp || r == r12_heapbase || r == r15_thread;
+ }
+
+ void ShenandoahBarrierStubC2::post_init() {
+ // Do nothing.
+ }
+ #undef __
+ #endif // COMPILER2