/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "macroAssembler_aarch64.hpp"
#include "shenandoahBarrierSetAssembler_aarch64.hpp"
#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
#include "gc_implementation/shenandoah/shenandoahForwarding.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/thread.hpp"

ShenandoahBarrierSetAssembler* ShenandoahBarrierSetAssembler::bsasm() {
  return ShenandoahBarrierSet::barrier_set()->bsasm();
}

#define __ masm->

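// Arraycopy prologue: if concurrent marking is in progress (SATB) or the heap
// may contain forwarded objects (LRB), hand the copied range (src, dst, count)
// off to the runtime for pre-processing before the actual copy runs.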
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, bool dest_uninitialized,
                                                       Register src, Register dst, Register count) {
  if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahStoreValEnqueueBarrier || ShenandoahLoadRefBarrier) {

    Label done;

    // Avoid calling runtime if count == 0
    __ cbz(count, done);

    // Is GC active?
    Address gc_state(rthread, in_bytes(JavaThread::gc_state_offset()));
    __ ldrb(rscratch1, gc_state);
    if (ShenandoahSATBBarrier && dest_uninitialized) {
      __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
    } else {
      __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
      __ tst(rscratch1, rscratch2);
      __ br(Assembler::EQ, done);
    }

    __ push_call_clobbered_registers();
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), src, dst, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
    }
    __ pop_call_clobbered_registers();
    __ bind(done);
  }
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahCASBarrier, "should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
  // The code below loads the mark word, checks whether the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this directly is surprisingly awkward. It is done here by:
  // - Inverting the mark word
  // - Testing whether the lowest two bits are == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
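  //
  // For example, with a mark word of 0x5003 (forwardee 0x5000, lowest two
  // bits 0b11): inverting gives 0xFFFF...AFFC, whose lowest two bits are 0,
  // so the object is forwarded; or-ing in 0b11 gives 0xFFFF...AFFF, and
  // inverting back yields 0x5000, the forwardee address, which is copied
  // into dst.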

  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markOopDesc::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markOopDesc::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

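// Load-reference barrier: given a just-loaded oop in dst and the address it
// was loaded from in load_addr, ensure dst refers to the canonical copy of
// the object. Fast paths: the heap is stable (no forwarded objects), or the
// object is not in the collection set. Otherwise call the runtime with the
// object in r0 and the load address in r1; the result comes back in r0.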
void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr) {
  if (!ShenandoahLoadRefBarrier) {
    return;
  }

  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  bool is_narrow = UseCompressedOops;

  Label heap_stable, not_cset;
  __ enter();
  Address gc_state(rthread, in_bytes(JavaThread::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);

  // r1 is used for the load address below; move dst out of the way if it aliases r1.
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless one of them is the output register.
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

  // Test for in-cset
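  // The collection set is recorded as a byte map indexed by region number:
  // shift the oop right by the region size shift to get its region index,
  // load the corresponding byte, and test its low bit; a set bit means the
  // region is in the collection set.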
  __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(rscratch2, Address(rscratch2, rscratch1));
  __ tbz(rscratch2, 0, not_cset);

  __ push_call_clobbered_registers();
  if (is_narrow) {
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
  } else {
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ bind(not_cset);

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(heap_stable);
  __ leave();
}

void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
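    // Enqueue dst in the SATB buffer, reusing the G1 pre-barrier code path
    // (dst is passed as the pre-value; there is no object/field address).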
    // Save possibly live regs.
    RegSet live_regs = RegSet::range(r0, r4) - dst;
    __ push(live_regs, sp);
    __ strd(v0, __ pre(sp, 2 * -wordSize));

    __ g1_write_barrier_pre(noreg, dst, rthread, tmp, true, false);

    // Restore possibly live regs.
    __ ldrd(v0, __ post(sp, 2 * wordSize));
    __ pop(live_regs, sp);
  }
}

void ShenandoahBarrierSetAssembler::load_heap_oop(MacroAssembler* masm, Register dst, Address src) {
  Register result_dst = dst;

  // Preserve src location for LRB
  if (dst == src.base() || dst == src.index()) {
    dst = rscratch1;
  }
  assert_different_registers(dst, src.base(), src.index());

  if (UseCompressedOops) {
    __ ldrw(dst, src);
    __ decode_heap_oop(dst);
  } else {
    __ ldr(dst, src);
  }

  load_reference_barrier(masm, dst, src);

  if (dst != result_dst) {
    __ mov(result_dst, dst);
    dst = result_dst;
  }
}

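// CAS on a heap oop. A plain bitwise CAS can fail spuriously under
// Shenandoah: the memory location may hold the from-space copy of an object
// while 'expected' holds the to-space copy (or vice versa). On failure we
// therefore resolve both values through the forwarding pointer and retry if
// they turn out to denote the same object. With is_cae the witnessed value
// is returned in 'result'; otherwise 'result' is a success flag.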
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                                                bool acquire, bool release, bool weak, bool is_cae,
                                                Register result) {

  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;

  assert_different_registers(addr, expected, new_val, tmp1, tmp2);

  Label retry, done, fail;

  // CAS, using LL/SC pair.
  __ bind(retry);
  __ load_exclusive(tmp1, addr, size, acquire);
  if (is_narrow) {
    __ cmpw(tmp1, expected);
  } else {
    __ cmp(tmp1, expected);
  }
  __ br(Assembler::NE, fail);
  __ store_exclusive(tmp2, new_val, addr, size, release);
  if (weak) {
    __ cmpw(tmp2, 0u); // If the store fails, return NE to our caller
  } else {
    __ cbnzw(tmp2, retry);
  }
  __ b(done);

  __ bind(fail);
  // The CAS failed. Check whether it was a false negative: 'expected' and the
  // witnessed value may be different (from-space vs. to-space) copies of the
  // same object. Resolve both through the forwarding pointer and compare again.
  // Shuffle registers so that the memory value is ready as the next expected.
  __ mov(tmp2, expected);
  __ mov(expected, tmp1);
  if (is_narrow) {
    __ decode_heap_oop(tmp1, tmp1);
    __ decode_heap_oop(tmp2, tmp2);
  }
  resolve_forward_pointer(masm, tmp1);
  resolve_forward_pointer(masm, tmp2);
  __ cmp(tmp1, tmp2);
  // Retry with expected now being the value we just loaded from addr.
  __ br(Assembler::EQ, retry);
  if (is_cae && is_narrow) {
    // For cmp-and-exchange and narrow oops, we need to restore
    // the compressed old-value into tmp1, so that it ends up in 'result'
    // below. We moved it to 'expected' a few lines up.
    __ mov(tmp1, expected);
  }
  __ bind(done);

  if (is_cae) {
    __ mov(result, tmp1);
  } else {
    __ cset(result, Assembler::EQ);
  }
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  __ bind(*stub->entry());

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  // Check for null.
  __ cbz(res, *stub->continuation());

  // Check for object in cset.
  __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(tmp2, Address(tmp2, tmp1));
  __ cbz(tmp2, *stub->continuation());

  // Check if object is already forwarded.
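  // (Same inverted-mark-word trick as in resolve_forward_pointer_not_null above.)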
  Label slow_path;
  __ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp1, tmp1, zr);
  __ ands(zr, tmp1, markOopDesc::lock_mask_in_place);
  __ br(Assembler::NE, slow_path);

  // Decode forwarded object.
  __ orr(tmp1, tmp1, markOopDesc::marked_value);
  __ eon(res, tmp1, zr);
  __ b(*stub->continuation());

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::shenandoah_lrb_slow_id)));

  __ b(*stub->continuation());
}

#undef __

#endif // COMPILER1