src/cpu/aarch64/vm/aarch64.ad
@@ -925,10 +925,13 @@
// definitions necessary in the rest of the architecture description
source_hpp %{
#include "opto/addnode.hpp"
+#if INCLUDE_ALL_GCS
+#include "shenandoahBarrierSetAssembler_aarch64.hpp"
+#endif
class CallStubImpl {
//--------------------------------------------------------------
//---< Used for optimization in Compile::shorten_branches >---
@@ -2796,10 +2799,19 @@
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true);
%}
+ enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{
+ MacroAssembler _masm(&cbuf);
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ Register tmp = $tmp$$Register;
+ __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+ ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register);
+ %}
+
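For context, the cmpxchg_oop call in the encoding above is what makes it Shenandoah-specific: a plain CAS may fail spuriously when the slot holds a from-space copy of the expected object. The sketch below shows the retry scheme in rough C++ form; resolve_forwardee() is an invented stand-in for following an object's forwarding pointer, and the real barrier is emitted as hand-written assembly rather than C++.

    // Sketch only, not the actual ShenandoahBarrierSetAssembler code.
    oop witnessed = CAS(addr, expected, new_val);          // first attempt
    if (witnessed != expected) {
      // The failure may be a false negative: the slot may merely hold a
      // from-space copy of the object we expected.
      if (resolve_forwardee(witnessed) == resolve_forwardee(expected)) {
        // Same logical object, so retry against the value actually in memory.
        CAS(addr, witnessed, new_val);
      }
    }
    // With is_cae == false, $res only reports success or failure.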
// The only difference between aarch64_enc_cmpxchg and
// aarch64_enc_cmpxchg_acq is that we use load-acquire in the
// CompareAndSwap sequence to serve as a barrier on acquiring a
// lock.
enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
@@ -2814,10 +2826,20 @@
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true);
%}
+ enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{
+ MacroAssembler _masm(&cbuf);
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ Register tmp = $tmp$$Register;
+ __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+ ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ /*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false,
+ $res$$Register);
+ %}
+
// auxiliary used for CompareAndSwapX to set result register
enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
MacroAssembler _masm(&cbuf);
Register res_reg = as_Register($res$$reg);
__ cset(res_reg, Assembler::EQ);
@@ -4212,12 +4234,12 @@
// Card Table Byte Map Base
operand immByteMapBase()
%{
// Get base of card map
- predicate((jbyte*)n->get_ptr() ==
- ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
+ predicate(!UseShenandoahGC && // TODO: Should really check for BS::is_a, see JDK-8193193
+ (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
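The added !UseShenandoahGC guard is needed because Shenandoah's barrier set is not a CardTableModRefBS, so the unconditional cast in this predicate would read byte_map_base from an object that is not actually a CardTableModRefBS. The TODO presumably has a type check rather than a flag check in mind, along these lines (a sketch that assumes BarrierSet::is_a and the CardTableModRef kind are available in this code base):

    predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
              (jbyte*)n->get_ptr() ==
              ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);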
@@ -6937,11 +6959,57 @@
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "str $src, $mem\t# ptr" %}
- ins_encode(aarch64_enc_str(src, mem));
+ ins_encode %{
+ int opcode = $mem->opcode();
+ Register base = as_Register($mem$$base);
+ int index = $mem$$index;
+ int size = $mem$$scale;
+ int disp = $mem$$disp;
+ Register reg = as_Register($src$$reg);
+
+ // we sometimes get asked to store the stack pointer into the
+ // current thread -- we cannot do that directly on AArch64
+ if (reg == r31_sp) {
+ MacroAssembler _masm(&cbuf);
+ assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
+ __ mov(rscratch2, sp);
+ reg = rscratch2;
+ }
+ Address::extend scale;
+
+ // Hooboy, this is fugly. We need a way to communicate to the
+ // encoder that the index needs to be sign extended, so we have to
+ // enumerate all the cases.
+ switch (opcode) {
+ case INDINDEXSCALEDOFFSETI2L:
+ case INDINDEXSCALEDI2L:
+ case INDINDEXSCALEDOFFSETI2LN:
+ case INDINDEXSCALEDI2LN:
+ case INDINDEXOFFSETI2L:
+ case INDINDEXOFFSETI2LN:
+ scale = Address::sxtw(size);
+ break;
+ default:
+ scale = Address::lsl(size);
+ }
+ Address adr;
+ if (index == -1) {
+ adr = Address(base, disp);
+ } else {
+ if (disp == 0) {
+ adr = Address(base, as_Register(index), scale);
+ } else {
+ __ lea(rscratch1, Address(base, disp));
+ adr = Address(rscratch1, as_Register(index), scale);
+ }
+ }
+
+ __ str(reg, adr);
+ %}
ins_pipe(istore_reg_mem);
%}
// Store Pointer
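The switch over the INDINDEX...I2L opcodes in the expanded encoding exists because an index that was a 32-bit int before conversion to long has to be sign-extended when folded into the addressing mode; treating it as a plain 64-bit index would turn a negative index into a huge unsigned offset. A minimal illustration of the two address forms, using hypothetical registers rather than anything taken from the patch:

    // Hypothetical example mirroring the scale selection above.
    Register base = r2, idx = r3, val = r1;
    Address i2l_form  = Address(base, idx, Address::sxtw(3)); // str x1, [x2, w3, sxtw #3]
    Address long_form = Address(base, idx, Address::lsl(3));  // str x1, [x2, x3, lsl #3]
    __ str(val, i2l_form);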
@@ -8061,10 +8129,11 @@
ins_pipe(pipe_slow);
%}
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR);
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
@@ -8077,12 +8146,30 @@
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
+instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+
+ predicate(UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypePtr::NULL_PTR);
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ effect(TEMP tmp, KILL cr);
+
+ format %{
+ "cmpxchg_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp, res));
+
+ ins_pipe(pipe_slow);
+%}
+
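The NULL_PTR clauses in the two predicates above partition CompareAndSwapP nodes between the plain and the Shenandoah-aware instructs: a statically-null expected value can never be a stale from-space reference, so the unbarriered CAS remains correct for that case. The split can be read roughly as the following sketch (not code from the patch):

    // 'expected' is the statically known type of oldval, i.e. n->in(3)->in(1).
    const Type* expected = n->in(3)->in(1)->bottom_type();
    bool plain_cas_ok = !UseShenandoahGC || !ShenandoahCASBarrier ||
                        expected == TypePtr::NULL_PTR;   // null has no from-space copy
    bool needs_cas_barrier = !plain_cas_ok;              // matched by compareAndSwapP_shenandoah

The narrow-oop and acquiring variants below apply the same split, with TypeNarrowOop::NULL_PTR and needs_acquiring_load_exclusive(n) added to the conditions.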
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
+ predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypeNarrowOop::NULL_PTR);
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
@@ -8095,10 +8182,30 @@
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
+instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+
+ predicate(UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypeNarrowOop::NULL_PTR);
+ match(Set res (CompareAndSwapN mem (Binary oldval newval)));
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ effect(TEMP tmp, KILL cr);
+
+ format %{
+ "cmpxchgw_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+ %}
+
+ ins_encode %{
+ Register tmp = $tmp$$Register;
+ __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+ ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register);
+ %}
+
+ ins_pipe(pipe_slow);
+%}
// alternative CompareAndSwapX when we are eliding barriers
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
@@ -8138,11 +8245,11 @@
ins_pipe(pipe_slow);
%}
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- predicate(needs_acquiring_load_exclusive(n));
+ predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR));
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
@@ -8155,13 +8262,30 @@
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
+instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
+
+ predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypePtr::NULL_PTR);
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+
+ effect(TEMP tmp, KILL cr);
+
+ format %{
+ "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp, res));
+
+ ins_pipe(pipe_slow);
+%}
+
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
- predicate(needs_acquiring_load_exclusive(n));
+ predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypeNarrowOop::NULL_PTR));
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
@@ -8174,10 +8298,30 @@
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
+instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
+
+ predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypeNarrowOop::NULL_PTR);
+ match(Set res (CompareAndSwapN mem (Binary oldval newval)));
+ ins_cost(VOLATILE_REF_COST);
+
+ effect(TEMP tmp, KILL cr);
+
+ format %{
+ "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
+ %}
+
+ ins_encode %{
+ Register tmp = $tmp$$Register;
+ __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
+ ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ /*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register);
+ %}
+
+ ins_pipe(pipe_slow);
+%}
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
match(Set prev (GetAndSetI mem newv));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchgw $prev, $newv, [$mem]" %}