src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
+ #ifdef COMPILER2
+ #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
+ #endif
#define __ masm->
static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
if (handle_gpr) {
//
// Before reaching to resolve sequence, see if we can avoid the whole shebang
// with filters.
// Filter: when offending in-memory value is null, the failure is definitely legitimate
! __ testptr(oldval, oldval);
__ jcc(Assembler::zero, L_failure);
// Filter: when heap is stable, the failure is definitely legitimate
const Register thread = r15_thread;
Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
//
// Before reaching to resolve sequence, see if we can avoid the whole shebang
// with filters.
// Filter: when offending in-memory value is null, the failure is definitely legitimate
! if (UseCompressedOops) {
+ __ testl(oldval, oldval);
+ } else {
+ __ testptr(oldval, oldval);
+ }
__ jcc(Assembler::zero, L_failure);
// Filter: when heap is stable, the failure is definitely legitimate
const Register thread = r15_thread;
Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
__ movptr(res, 1);
__ bind(exit);
}
}
+ #ifdef COMPILER2
+ void ShenandoahBarrierSetAssembler::gc_state_check_c2(MacroAssembler* masm, const char test_state, BarrierStubC2* slow_stub) {
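+ // This check must encode to a fixed size: with ShenandoahNopGCState it is replaced
+ // by an equally sized nop, and the assert below verifies the emitted size.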
+ const int size = 11;
+ if (ShenandoahNopGCState) {
+ __ nop(size);
+ return;
+ }
+ #ifdef ASSERT
+ address start = __ pc();
+ #endif
+
+ Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ __ testb(gc_state, test_state);
+ __ jcc(Assembler::notZero, *slow_stub->entry());
+
+ #ifdef ASSERT
+ int actual_size = __ pc() - start;
+ assert(actual_size == size, "Should be: %d == %d", actual_size, size);
+ #endif
+ }
+
+ void ShenandoahBarrierSetAssembler::load_ref_barrier_c2(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp1, Register tmp2, Register tmp3, bool narrow) {
+ if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
+ return;
+ }
+ Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+
+ ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp1, tmp2, tmp3, narrow);
+ stub->dont_preserve(obj); // set at the end, no need to save
+ if (tmp1 != noreg) {
+ stub->dont_preserve(tmp1); // temp, no need to save
+ }
+ if (tmp2 != noreg) {
+ stub->dont_preserve(tmp2); // temp, no need to save
+ }
+ if (tmp3 != noreg) {
+ stub->dont_preserve(tmp3); // temp, no need to save
+ }
+
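+ // Strong references only need the barrier while forwarded objects may exist;
+ // non-strong (weak/phantom) references must also take the slow path while
+ // WEAK_ROOTS is set in the gc state.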
+ int flags = ShenandoahHeap::HAS_FORWARDED;
+ if ((node->barrier_data() & ShenandoahBarrierStrong) == 0) {
+ flags |= ShenandoahHeap::WEAK_ROOTS;
+ }
+ gc_state_check_c2(masm, flags, stub);
+ __ bind(*stub->continuation());
+ }
+
+ void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm,
+ Register dst,
+ Address src,
+ bool narrow,
+ Register tmp) {
+ // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
+ if (narrow) {
+ __ movl(dst, src);
+ } else {
+ __ movq(dst, src);
+ }
+
+ // Emit barrier if needed
+ if (ShenandoahLoadBarrierStubC2::needs_barrier(node)) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+
+ ShenandoahLoadBarrierStubC2* const stub = ShenandoahLoadBarrierStubC2::create(node, dst, src, narrow, tmp);
+ stub->dont_preserve(tmp); // temp, no need to save
+
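+ // Build a single gc-state mask covering all barriers this load needs, so the
+ // fast path costs one testb + jcc.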
+ char check = 0;
+ check |= ShenandoahLoadBarrierStubC2::needs_satb_barrier(node) ? ShenandoahHeap::MARKING : 0;
+ check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
+ check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
+ gc_state_check_c2(masm, check, stub);
+ __ bind(*stub->continuation());
+ }
+ }
+
+ void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm,
+ Address dst, bool dst_narrow,
+ Register src, bool src_narrow,
+ Register tmp) {
+ // Emit barrier if needed
+ if (ShenandoahStoreBarrierStubC2::needs_barrier(node)) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+
+ if (ShenandoahStoreBarrierStubC2::needs_satb_barrier(node)) {
+ ShenandoahStoreBarrierStubC2* const stub = ShenandoahStoreBarrierStubC2::create(node, dst, dst_narrow, src, src_narrow, tmp);
+ stub->dont_preserve(tmp); // temp, no need to preserve it
+
+ gc_state_check_c2(masm, ShenandoahHeap::MARKING, stub);
+ __ bind(*stub->continuation());
+ }
+
+ if (ShenandoahStoreBarrierStubC2::needs_card_barrier(node)) {
+ // Card table barrier is not conditional on GC state.
+ // You might think this needs to be a post-barrier, but it does not appear to:
+ // the card table updates and the stores have no ordering requirement between them.
+ // As long as there is no safepoint between these stores, we are free to do them
+ // in any order.
+
+ // So it is convenient to pull the card table update here. It also follows the
+ // stencil we want: there should be a single gc-state check for every possible
+ // fast path. If the card table barrier needed a gc-state check, we would have
+ // commoned it with the gc-state check for the SATB barrier above, and _then_
+ // called into the slow path.
+
+ // Using this address compute sequence allows us to use only one temp register.
+ // TODO: Upstream this separately; mainline Shenandoah might already benefit from it.
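+ // The shift turns the full store address into a card index, and adding the
+ // thread-local card table base turns that into the card address, all within tmp.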
+ __ lea(tmp, dst);
+ __ shrptr(tmp, CardTable::card_shift());
+ __ addptr(tmp, Address(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset())));
+
+ int dirty = CardTable::dirty_card_val();
+ if (UseCondCardMark) {
+ Label L_already_dirty;
+ __ cmpb(Address(tmp, 0), dirty);
+ __ jccb(Assembler::equal, L_already_dirty);
+ __ movb(Address(tmp, 0), dirty);
+ __ bind(L_already_dirty);
+ } else {
+ __ movb(Address(tmp, 0), dirty);
+ }
+ }
+ }
+
+ // Need to encode into tmp, because we cannot clobber src.
+ // TODO: Maybe there is a way to ask the matcher whether src is unused after this?
+ if (dst_narrow && !src_narrow) {
+ __ movq(tmp, src);
+ if (ShenandoahStoreBarrierStubC2::src_not_null(node)) {
+ __ encode_heap_oop_not_null(tmp);
+ } else {
+ __ encode_heap_oop(tmp);
+ }
+ src = tmp;
+ }
+
+ // Do the actual store
+ if (dst_narrow) {
+ __ movl(dst, src);
+ } else {
+ __ movq(dst, src);
+ }
+ }
+
+ void ShenandoahBarrierSetAssembler::satb_barrier_c2(const MachNode* node, MacroAssembler* masm,
+ Register addr, Register preval, Register tmp) {
+ if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
+ return;
+ }
+ ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, preval, tmp, /* TODO: */ false);
+ Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+
+ gc_state_check_c2(masm, ShenandoahHeap::MARKING, stub);
+ __ bind(*stub->continuation());
+ }
+
+ void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm,
+ Register addr, Register addr_tmp, Register tmp) {
+ if ((node->barrier_data() & ShenandoahBarrierCardMark) == 0) {
+ return;
+ }
+ Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
+ if (addr != noreg) {
+ __ mov(addr_tmp, addr);
+ }
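+ // If addr is noreg, addr_tmp is expected to already hold the address whose card
+ // is to be dirtied.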
+ __ shrptr(addr_tmp, CardTable::card_shift());
+
+ Address curr_ct_holder_addr(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
+ __ movptr(tmp, curr_ct_holder_addr);
+ Address card_addr(tmp, addr_tmp, Address::times_1);
+
+ int dirty = CardTable::dirty_card_val();
+ if (UseCondCardMark) {
+ Label L_already_dirty;
+ __ cmpb(card_addr, dirty);
+ __ jccb(Assembler::equal, L_already_dirty);
+ __ movb(card_addr, dirty);
+ __ bind(L_already_dirty);
+ } else {
+ __ movb(card_addr, dirty);
+ }
+ }
+
+ void ShenandoahBarrierSetAssembler::cmpxchg_oop_c2(const MachNode* node, MacroAssembler* masm,
+ Register res, Address addr, Register oldval, Register newval, Register tmp1, Register tmp2,
+ bool exchange) {
+ assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
+ assert_different_registers(oldval, tmp1, tmp2);
+ assert_different_registers(newval, tmp1, tmp2);
+
+ // Remember oldval for the retry logic in the slow path. We need to do it here,
+ // because it will be overwritten by the fast-path CAS.
+ if (ShenandoahCASBarrier) {
+ __ movptr(tmp2, oldval);
+ }
+
+ // Fast-path: Try to CAS optimistically. If successful, then we are done.
+ __ lock();
+ if (UseCompressedOops) {
+ __ cmpxchgl(newval, addr);
+ } else {
+ __ cmpxchgptr(newval, addr);
+ }
+
+ // If we need a boolean result out of CAS, set the flag appropriately and promote the result.
+ // This would be the final result if we do not go slow.
+ if (!exchange) {
+ assert(res != noreg, "need result register");
+ __ setcc(Assembler::equal, res);
+ } else {
+ assert(res == noreg, "no result expected");
+ }
+
+ if (ShenandoahCASBarrier) {
+ ShenandoahCASBarrierSlowStubC2* const stub =
+ ShenandoahCASBarrierSlowStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, exchange);
+ if (res != noreg) {
+ stub->dont_preserve(res); // set at the end, no need to save
+ }
+ stub->dont_preserve(oldval); // saved explicitly
+ stub->dont_preserve(tmp1); // temp, no need to save
+ stub->dont_preserve(tmp2); // temp, no need to save
+
+ // On success, we do not need any additional handling.
+ __ jccb(Assembler::equal, *stub->continuation());
+
+ // If GC is in progress, it is likely we need additional handling for false negatives.
+ // Slow stub re-enters with result set correctly.
+ gc_state_check_c2(masm, ShenandoahHeap::HAS_FORWARDED, stub);
+ __ bind(*stub->continuation());
+ }
+ }
+
+ #undef __
+ #define __ masm.
+
+ void ShenandoahLoadBarrierStubC2::emit_code(MacroAssembler& masm) {
+ __ bind(*entry());
+
+ assert_different_registers(_tmp, _dst);
+
+ Label L_end;
+
+ // If the object is null, there is no point in applying barriers.
+ if (_narrow) {
+ __ testl(_dst, _dst);
+ } else {
+ __ testptr(_dst, _dst);
+ }
+ __ jcc(Assembler::equal, *continuation());
+
+ // If object is narrow, we need to decode it first.
+ if (_narrow) {
+ __ decode_heap_oop_not_null(_dst);
+ }
+
+ if (_needs_load_ref_barrier) {
+ Label L_lrb_done;
+
+ bool is_weak = (_node->barrier_data() & ShenandoahBarrierStrong) == 0;
+
+ // Runtime check for LRB
+ Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | (is_weak ? ShenandoahHeap::WEAK_ROOTS : 0));
+ __ jcc(Assembler::zero, L_lrb_done);
+
+ // Weak/phantom loads always need to go to runtime.
+ if (!is_weak) {
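+ // Inline cset check: index the in_cset_fast_test table by region number;
+ // a zero byte means the object is not in the collection set.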
+ __ movptr(_tmp, _dst);
+ __ shrptr(_tmp, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+ __ addptr(_tmp, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
+ __ testb(Address(_tmp, 0), 0xFF);
+ __ jcc(Assembler::zero, L_lrb_done);
+ }
+
+ dont_preserve(_dst); // For LRB we must not preserve _dst
+ {
+ SaveLiveRegisters save_registers(&masm, this);
+
+ // Shuffle in the arguments. The end result should be:
+ // c_rarg0 <-- _dst
+ // c_rarg1 <-- lea(_src)
+ if (c_rarg0 == _dst) {
+ __ lea(c_rarg1, _src);
+ } else if (c_rarg1 == _dst) {
+ // Set up arguments in reverse, and then flip them
+ __ lea(c_rarg0, _src);
+ __ xchgptr(c_rarg0, c_rarg1);
+ } else {
+ assert_different_registers(c_rarg1, _dst);
+ __ lea(c_rarg1, _src);
+ __ movptr(c_rarg0, _dst);
+ }
+
+ address entry = nullptr;
+ if (_narrow) {
+ if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
+ } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
+ } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
+ }
+ } else {
+ if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
+ } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
+ } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
+ }
+ }
+ __ call(RuntimeAddress(entry), rax);
+ assert(!save_registers.contains(_dst), "must not save result register");
+ __ movptr(_dst, rax);
+ }
+
+ // Paranoia: if LRB returns null for a weak access, do NOT feed it into SATB, which does not accept null pointers.
+ __ testptr(_dst, _dst);
+ __ jcc(Assembler::equal, L_end);
+
+ __ bind(L_lrb_done);
+ }
+
+ if (_needs_satb_barrier) {
+ // Push obj to SATB, if needed.
+
+ Label L_satb_done, L_satb_runtime;
+
+ // Runtime check for SATB
+ Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+ __ testb(gc_state, ShenandoahHeap::MARKING);
+ __ jcc(Assembler::zero, L_satb_done);
+
+ // Can we store a value in the given thread's buffer?
+ // (The index field is typed as size_t.)
+ Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
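+ // SATB queue fast path: the index counts down in bytes from the buffer end;
+ // zero means the buffer is full and the runtime must flush it.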
+ __ movptr(_tmp, index);
+ __ testptr(_tmp, _tmp);
+ __ jccb(Assembler::zero, L_satb_runtime);
+ // The buffer is not full, store value into it.
+ __ subptr(_tmp, wordSize);
+ __ movptr(index, _tmp);
+ __ addptr(_tmp, buffer);
+ __ movptr(Address(_tmp, 0), _dst);
+ __ jmp(L_satb_done);
+
+ __ bind(L_satb_runtime);
+
+ preserve(_dst); // For SATB we must preserve _dst
+ {
+ SaveLiveRegisters save_registers(&masm, this);
+ if (c_rarg0 != _dst) {
+ __ mov(c_rarg0, _dst);
+ }
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
+ }
+
+ __ bind(L_satb_done);
+ }
+
+ __ bind(L_end);
+ if (_narrow) {
+ __ encode_heap_oop(_dst);
+ }
+
+ __ jmp(*continuation());
+ }
+
+ void ShenandoahStoreBarrierStubC2::emit_code(MacroAssembler& masm) {
+ __ bind(*entry());
+
+ Label L_runtime, L_preval_null;
+
+ // We need 2 temp registers for this code to work.
+ // _tmp is already allocated and will carry preval for the call.
+ // Allocate the other one now.
+ Register tmp2 = noreg;
+ for (int i = 0; i < 8; i++) {
+ Register r = as_Register(i);
+ if (r != rsp && r != rbp && r != _src && r != _tmp) {
+ tmp2 = r;
+ break;
+ }
+ }
+
+ assert(tmp2 != noreg, "tmp2 allocated");
+ assert_different_registers(_tmp, tmp2, _src);
+
+ Register preval = _tmp;
+ Register slot = tmp2;
+
+ // Load value from memory
+ if (_dst_narrow) {
+ __ movl(preval, _dst);
+ } else {
+ __ movq(preval, _dst);
+ }
+
+ // Is the previous value null?
+ __ cmpptr(preval, NULL_WORD);
+ __ jccb(Assembler::equal, L_preval_null);
+
+ if (_dst_narrow) {
+ __ decode_heap_oop_not_null(preval);
+ }
+
+ // Can we store a value in the given thread's buffer?
+ // (The index field is typed as size_t.)
+ Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
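+ // tmp2 may hold a live value in compiled code; save and restore it around its
+ // use as the SATB slot pointer.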
+ __ push(tmp2);
+ __ movptr(slot, index);
+ __ testptr(slot, slot);
+ __ jccb(Assembler::zero, L_runtime);
+ // The buffer is not full, store value into it.
+ __ subptr(slot, wordSize);
+ __ movptr(index, slot);
+ __ addptr(slot, buffer);
+ __ movptr(Address(slot, 0), preval);
+
+ // Pop temps and exit
+ __ pop(tmp2);
+ __ bind(L_preval_null);
+ __ jmp(*continuation());
+
+ __ bind(L_runtime);
+ __ pop(tmp2);
+ {
+ SaveLiveRegisters save_registers(&masm, this);
+ if (c_rarg0 != preval) {
+ __ mov(c_rarg0, preval);
+ }
+ // rax is a caller-saved, non-argument-passing register, so it does not
+ // interfere with c_rarg0 or c_rarg1. If it contained any live value before
+ // entering this stub, it is saved at this point, and restored after the
+ // call. If it did not contain any live value, it is free to be used. In
+ // either case, it is safe to use it here as a call scratch register.
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
+ }
+ __ jmp(*continuation());
+ }
+
+ void ShenandoahLoadRefBarrierStubC2::emit_code(MacroAssembler& masm) {
+ Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
+ __ bind(*entry());
+
+ Register obj = _obj;
+ if (_narrow) {
+ __ movl(_tmp1, _obj);
+ __ decode_heap_oop(_tmp1);
+ obj = _tmp1;
+ }
+
+ // Weak/phantom loads always need to go to runtime.
+ if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
+ __ movptr(_tmp2, obj);
+ __ shrptr(_tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+ __ movptr(_tmp3, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
+ __ movbool(_tmp2, Address(_tmp2, _tmp3, Address::times_1));
+ __ testbool(_tmp2);
+ __ jcc(Assembler::zero, *continuation());
+ }
+
+ {
+ SaveLiveRegisters save_registers(&masm, this);
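+ // Shuffle arguments into place: c_rarg0 <- obj, c_rarg1 <- _addr. If _addr
+ // lives in c_rarg0, move it to a temp first so it is not clobbered.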
+ if (c_rarg0 != obj) {
+ if (c_rarg0 == _addr) {
+ __ movptr(_tmp2, _addr);
+ _addr = _tmp2;
+ }
+ __ movptr(c_rarg0, obj);
+ }
+ if (c_rarg1 != _addr) {
+ __ movptr(c_rarg1, _addr);
+ }
+
+ address entry = nullptr;
+ if (_narrow) {
+ if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
+ } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
+ } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
+ }
+ } else {
+ if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
+ } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
+ } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
+ entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
+ }
+ }
+ __ call(RuntimeAddress(entry), rax);
+ assert(!save_registers.contains(_obj), "must not save result register");
+ __ movptr(_obj, rax);
+ }
+ if (_narrow) {
+ __ encode_heap_oop(_obj);
+ }
+
+ __ jmp(*continuation());
+ }
+
+ void ShenandoahSATBBarrierStubC2::emit_code(MacroAssembler& masm) {
+ __ bind(*entry());
+ Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
+
+ Label runtime;
+
+ // Do we need to load the previous value?
+ if (_addr != noreg) {
+ __ load_heap_oop(_preval, Address(_addr, 0), noreg, AS_RAW);
+ }
+ // Is the previous value null?
+ __ cmpptr(_preval, NULL_WORD);
+ __ jcc(Assembler::equal, *continuation());
+
+ // Can we store a value in the given thread's buffer?
+ // (The index field is typed as size_t.)
+ __ movptr(_tmp, index);
+ __ testptr(_tmp, _tmp);
+ __ jccb(Assembler::zero, runtime);
+ // The buffer is not full, store value into it.
+ __ subptr(_tmp, wordSize);
+ __ movptr(index, _tmp);
+ __ addptr(_tmp, buffer);
+ __ movptr(Address(_tmp, 0), _preval);
+
+ __ jmp(*continuation());
+
+ __ bind(runtime);
+ {
+ SaveLiveRegisters save_registers(&masm, this);
+ if (c_rarg0 != _preval) {
+ __ mov(c_rarg0, _preval);
+ }
+ // rax is a caller-saved, non-argument-passing register, so it does not
+ // interfere with c_rarg0 or c_rarg1. If it contained any live value before
+ // entering this stub, it is saved at this point, and restored after the
+ // call. If it did not contain any live value, it is free to be used. In
+ // either case, it is safe to use it here as a call scratch register.
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
+ }
+ __ jmp(*continuation());
+ }
+
+ void ShenandoahCASBarrierSlowStubC2::emit_code(MacroAssembler& masm) {
+ __ bind(*entry());
+
+ // CAS has failed because the value held at addr does not match expected.
+ // This may be a false negative: the version in memory might be the from-space
+ // version of the same object for which we currently hold a to-space reference.
+ //
+ // To resolve this, we need to pass the location through the LRB fixup, which
+ // makes sure the location holds only to-space pointers.
+ // To avoid calling into the runtime too often, we cset-check the object first.
+ // We could inline most of the work here, but there is little point, as CAS
+ // failures over cset locations must be rare. This fast-slow split matches what
+ // we do for the normal LRB.
+
+ assert(_expected == rax, "expected must be rax");
+
+ // Non-strong references should always go to runtime. We do not expect
+ // CASes over non-strong locations.
+ assert((_node->barrier_data() & ShenandoahBarrierStrong) != 0, "Only strong references for CASes");
+
+ Label L_final;
+
+ // The fast path stashed the original oldval in tmp2 for us. We need to save it
+ // for the final retry; this frees up tmp2 for the cset check below.
+ __ push(_tmp2);
+
+ // (Compressed) failure witness is in _expected.
+ // Unpack it and check if it is in collection set.
+ __ movptr(_tmp1, _expected);
+ if (UseCompressedOops) {
+ __ decode_heap_oop(_tmp1);
+ }
+ __ shrptr(_tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
+ __ movptr(_tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
+ __ movbool(_tmp1, Address(_tmp1, _tmp2, Address::times_1));
+ __ testbool(_tmp1);
+ __ jcc(Assembler::zero, L_final);
+
+ {
+ SaveLiveRegisters save_registers(&masm, this);
+ // Load up failure witness again.
+ if (c_rarg0 != _expected) {
+ __ movptr(c_rarg0, _expected);
+ }
+ if (UseCompressedOops) {
+ __ decode_heap_oop(c_rarg0);
+ }
+ __ lea(c_rarg1, _addr);
+
+ if (UseCompressedOops) {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), 2);
+ } else {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), 2);
+ }
+ // We have called LRB to fix up the heap location. We do not care about its result,
+ // as we will just try to CAS the location again.
+ }
+
+ __ bind(L_final);
+
+ // Try to CAS again with the original expected value.
+ // At this point, there can no longer be false negatives.
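+ // Pop the stashed original oldval (pushed from _tmp2 above) directly into
+ // _expected, which is rax, as cmpxchg implicitly requires.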
+ __ pop(_expected);
+ __ lock();
+ if (UseCompressedOops) {
+ __ cmpxchgl(_new_val, _addr);
+ } else {
+ __ cmpxchgptr(_new_val, _addr);
+ }
+ if (!_cae) {
+ assert(_result != noreg, "need result register");
+ __ setcc(Assembler::equal, _result);
+ } else {
+ assert(_result == noreg, "no result expected");
+ }
+ __ jmp(*continuation());
+ }
+
+ #undef __
+ #define __ masm->
+ #endif
+
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif