/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ cbz(count, done);

      // Is GC active?
      Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gc_state);
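      // With an uninitialized destination no SATB barrier is required, so
      // only evacuation (HAS_FORWARDED) matters; otherwise either active
      // marking or evacuation forces the runtime call.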
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
      } else {
        __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ tst(rscratch1, rscratch2);
        __ br(Assembler::EQ, done);
      }

      __ push(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop), src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop), src, dst, count);
      }
      __ pop(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register count, Register tmp) {
  if (ShenandoahCardBarrier && is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp1,
                                                           Register tmp2,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
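  // The SATB queue fills downward: 'index' is the byte offset of the first
  // occupied slot, so index == 0 means the buffer is full and must be
  // flushed by the runtime.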

  // Is marking active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp1, gc_state);
  __ tbz(tmp1, ShenandoahHeap::MARKING_BITPOS, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp1, index);                      // tmp := *index_adr
  __ cbz(tmp1, runtime);                    // tmp == 0?
                                            // If yes, goto runtime

  __ sub(tmp1, tmp1, wordSize);             // tmp := tmp - wordSize
  __ str(tmp1, index);                      // *index_adr := tmp
  __ ldr(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);                 // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp1, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  }

  __ pop(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks whether the lowest two bits
  // are set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing whether the lowest two bits == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
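  //
  // Illustrative trace (assuming markWord::marked_value == 0b11 and a
  // forwarding pointer fwd with clear low bits, i.e. mark == fwd | 0b11):
  //   tmp = ~mark                 // low two bits of tmp are now 0b00
  //   tmp & lock_mask == 0        // so the "forwarded" path is taken
  //   tmp |= 0b11; dst = ~tmp     // dst == mark & ~0b11 == fwd
  // If the object is not forwarded, the low bits of ~mark are non-zero and
  // dst is left unchanged.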

  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markWord::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markWord::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;
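  // Off-heap (native) locations always hold uncompressed oops, so the
  // narrow runtime entries are only used for in-heap compressed references.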

  Label heap_stable, not_cset;
  __ enter(/*strip_ret_addr*/true);
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
  } else {
    Label lrb;
    __ tbnz(rscratch2, ShenandoahHeap::WEAK_ROOTS_BITPOS, lrb);
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
    __ bind(lrb);
  }

  // use r1 for load address
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless it is an output register
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

  // Test for in-cset
  if (is_strong) {
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbz(rscratch2, 0, not_cset);
  }

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    // AOT saved adapters need relocation for this call.
    __ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
  }
  __ blr(lr);
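  // Shuffle the result through rscratch1, which survives the register
  // restore below (the scratch registers are not covered by
  // push/pop_call_clobbered_registers()).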
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ bind(not_cset);

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(heap_stable);
  __ leave();
}

//
// Arguments:
//
// Inputs:
//   src:        oop location to load from, might be clobbered
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   rscratch1 (scratch reg)
//
// Alias:
//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      dst = rscratch1;
    }
    assert_different_registers(dst, src.base(), src.index());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mov(result_dst, dst);
      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter(/*strip_ret_addr*/true);
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp1 */,
                           tmp2 /* tmp2 */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  __ lsr(obj, obj, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");
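  // Because the dirty value is 0, a single strb of zr dirties the card at
  // (card table base + (obj >> card_shift)).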

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(rscratch1, curr_ct_holder_addr);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ ldrb(rscratch2, Address(obj, rscratch1));
    __ cbz(rscratch2, L_already_dirty);
    __ strb(zr, Address(obj, rscratch1));
    __ bind(L_already_dirty);
  } else {
    __ strb(zr, Address(obj, rscratch1));
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               tmp3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1  /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
  if (needs_post_barrier) {
    store_check(masm, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ cbz(obj, done);

  assert(obj != rscratch2, "need rscratch2");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
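  // jni_env sits at a fixed offset inside JavaThread, so the gc_state byte
  // can be addressed relative to jni_env without materializing rthread.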
  __ lea(rscratch2, gc_state);
  __ ldrb(rscratch2, Address(rscratch2));

  // Check for heap in evacuation phase
  __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation.  The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected.  There are two
// races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object.  In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers rscratch1, rscratch2
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                bool acquire, bool release,
                                                bool is_cae,
                                                Register result) {
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
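  // With compressed oops the CAS operates on 32-bit words, otherwise on
  // full 64-bit doublewords.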

  assert_different_registers(addr, expected, tmp1, tmp2);
  assert_different_registers(addr, new_val,  tmp1, tmp2);

  Label step4, done;

  // There are two ways to reach this label.  Initial entry into the
  // cmpxchg_oop code expansion starts at step1 (which is equivalent
  // to label step4).  Additionally, in the rare case that four steps
  // are required to perform the requested operation, the fourth step
  // is the same as the first.  On a second pass through step 1,
  // control may flow through step 2 on its way to failure.  It will
  // not flow from step 2 to step 3 since we are assured that the
  // memory at addr no longer holds a from-space pointer.
  //
  // The comments that immediately follow the step4 label apply only
  // to the case in which control reaches this label by branch from
  // step 3.

  __ bind(step4);

  // Step 4. CAS has failed because the value most recently fetched
  // from addr is no longer the from-space pointer held in tmp2.  If a
  // different thread replaced the in-memory value with its equivalent
  // to-space pointer, then CAS may still be able to succeed.  The
  // value held in the expected register has not changed.
  //
  // It is extremely rare we reach this point.  For this reason, the
  // implementation opts for smaller rather than potentially faster
  // code.  Ultimately, smaller code for this rare case most likely
  // delivers higher overall throughput by enabling improved icache
  // performance.

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments.  If successful, then we are done.
  //
  // No label required for step 1.

  __ cmpxchg(addr, expected, new_val, size, acquire, release, false, tmp2);
  // EQ flag set iff success.  tmp2 holds value fetched.

  // If expected equals null but tmp2 does not equal null, the
  // following branches to done to report failure of CAS.  If both
  // expected and tmp2 equal null, the following branches to done to
  // report success of CAS.  There's no need for a special test of
  // expected equal to null.

  __ br(Assembler::EQ, done);
  // if CAS failed, fall through to step 2

  // Step 2. CAS has failed because the value held at addr does not
  // match expected.  This may be a false negative because the value fetched
  // from addr (now held in tmp2) may be a from-space pointer to the
  // original copy of same object referenced by to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with fetched value.  If this matches expected, retry CAS with new
  // parameters.  If this mismatches, then we have a legitimate
  // failure, and we're done.
  //
  // No need for step2 label.

  // overwrite tmp1 with from-space pointer fetched from memory
  __ mov(tmp1, tmp2);

  if (is_narrow) {
    // Decode tmp1 in order to resolve its forward pointer
    __ decode_heap_oop(tmp1, tmp1);
  }
  resolve_forward_pointer(masm, tmp1);
  // Encode tmp1 to compare against expected.
  __ encode_heap_oop(tmp1, tmp1);

  // Does forwarded value of fetched from-space pointer match original
  // value of expected?  If tmp1 holds null, this comparison will fail
  // because we know from step1 that expected is not null.  There is
  // no need for a separate test for tmp1 (the value originally held
  // in memory) equal to null.
  __ cmp(tmp1, expected);

  // If not, then the failure was legitimate and we're done.
  // Branching to done with NE condition denotes failure.
  __ br(Assembler::NE, done);

  // Fall through to step 3.  No need for step3 label.

  // Step 3.  We've confirmed that the value originally held in memory
  // (now held in tmp2) pointed to from-space version of original
  // expected value.  Try the CAS again with the from-space expected
  // value.  If it now succeeds, we're good.
  //
  // Note: tmp2 holds encoded from-space pointer that matches to-space
  // object residing at expected.  tmp2 is the new "expected".

  // Note that the macro implementation of __ cmpxchg cannot use the same
  // register tmp2 for result and expected, since it overwrites result
  // before it compares result with expected.
  __ cmpxchg(addr, tmp2, new_val, size, acquire, release, false, noreg);
  // EQ flag set iff success.  tmp2 holds value fetched, tmp1 (rscratch1) clobbered.

  // If fetched value did not equal the new expected, this could
  // still be a false negative because some other thread may have
  // newly overwritten the memory value with its to-space equivalent.
  __ br(Assembler::NE, step4);

  if (is_cae) {
    // We're falling through to done to indicate success.  Success
    // with is_cae is denoted by returning the value of expected as
    // result.
    __ mov(tmp2, expected);
  }

  __ bind(done);
  // At entry to done, the Z (EQ) flag is on iff the CAS
  // operation was successful.  Additionally, if is_cae, tmp2 holds
  // the value most recently fetched from addr. In this case, success
  // is denoted by tmp2 matching expected.

  if (is_cae) {
    __ mov(result, tmp2);
  } else {
    __ cset(result, Assembler::EQ);
  }
}

#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::load_ref_barrier_c2(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp, bool narrow, bool maybe_null) {
  assert_different_registers(obj, addr, tmp);
  BLOCK_COMMENT("load_ref_barrier_c2 {");
  if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp, noreg, noreg, narrow);

  // Don't preserve obj across the runtime call; we overwrite it with the return value anyway.
  stub->dont_preserve(obj);
  if (tmp != noreg) {
    stub->dont_preserve(tmp); // temp, no need to save
  }

  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch1, gc_state);

  // Check if GC marking is in progress or we are handling a weak reference; otherwise we don't have to do anything.
  bool is_strong = (node->barrier_data() & ShenandoahBarrierStrong) != 0;
  if (is_strong) {
    __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
    __ b(*stub->entry());
  } else {
    static_assert(ShenandoahHeap::HAS_FORWARDED_BITPOS == 0, "Relied on in LRB check below.");
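    // Fold the weak-roots flag down onto bit 0: after the shifted OR, bit 0
    // of tmp is HAS_FORWARDED | WEAK_ROOTS, so the single tbz below tests
    // both conditions at once.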
    __ orr(tmp, rscratch1, rscratch1, Assembler::LSR, ShenandoahHeap::WEAK_ROOTS_BITPOS);
    __ tbz(tmp, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
    __ b(*stub->entry());
  }

  __ bind(*stub->continuation());
  BLOCK_COMMENT("} load_ref_barrier_c2");
}

void ShenandoahBarrierSetAssembler::load_ref_barrier_c3(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp, bool narrow, bool maybe_null, Register gc_state) {
  BLOCK_COMMENT("load_ref_barrier_c3 {");
  if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp, noreg, noreg, narrow);

  // Don't preserve obj across the runtime call; we overwrite it with the return value anyway.
  stub->dont_preserve(obj);

  // Check if GC marking is in progress or we are handling a weak reference;
  // otherwise we don't have to do anything. The code below is optimized to
  // use as few registers and instructions as possible, at the expense of
  // always executing a branch instruction. We use this particular branch
  // scheme because the stub entry may be too far away for a tbnz to reach.
  bool is_strong = (node->barrier_data() & ShenandoahBarrierStrong) != 0;
  if (is_strong) {
    __ tbz(gc_state, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
    __ b(*stub->entry());
  } else {
    static_assert(ShenandoahHeap::HAS_FORWARDED_BITPOS == 0, "Relied on in LRB check below.");
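    // Same bit-folding trick as in load_ref_barrier_c2: bit 0 of tmp becomes
    // HAS_FORWARDED | WEAK_ROOTS, so one tbz tests both conditions.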
    __ orr(tmp, gc_state, gc_state, Assembler::LSR, ShenandoahHeap::WEAK_ROOTS_BITPOS);
    __ tbz(tmp, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
    __ b(*stub->entry());
  }

  __ bind(*stub->continuation());
  BLOCK_COMMENT("} load_ref_barrier_c3");
}

void ShenandoahBarrierSetAssembler::satb_barrier_c3(const MachNode* node, MacroAssembler* masm, Register addr, Register pre_val, Register gc_state) {
  assert_different_registers(addr, pre_val);
  if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
    return;
  }

  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, pre_val);

  // Check if GC marking is in progress; otherwise we don't have to do anything.
  __ tstw(gc_state, ShenandoahHeap::MARKING);
  __ br(Assembler::NE, *stub->entry());
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::satb_barrier_c2(const MachNode* node, MacroAssembler* masm, Register addr, Register pre_val) {
  assert_different_registers(addr, pre_val);
  if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, pre_val);

  // Check if GC marking is in progress; otherwise we don't have to do anything.
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch1, gc_state);
  __ tstw(rscratch1, ShenandoahHeap::MARKING);
  __ br(Assembler::NE, *stub->entry());
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Register addr, Register tmp) {
  if (!ShenandoahCardBarrier ||
      (node->barrier_data() & (ShenandoahBarrierCardMark | ShenandoahBarrierCardMarkNotNull)) == 0) {
    return;
  }

  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  __ lsr(tmp, addr, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(rscratch1, curr_ct_holder_addr);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ ldrb(rscratch2, Address(tmp, rscratch1));
    __ cbz(rscratch2, L_already_dirty);
    __ strb(zr, Address(tmp, rscratch1));
    __ bind(L_already_dirty);
  } else {
    __ strb(zr, Address(tmp, rscratch1));
  }
}

void ShenandoahBarrierSetAssembler::cmpxchg_oop_c2(const MachNode* node,
                                                   MacroAssembler* masm,
                                                   Register addr, Register oldval,
                                                   Register newval, Register res,
                                                   Register tmp1, Register tmp2,
                                                   bool acquire, bool release, bool weak, bool exchange) {
  BLOCK_COMMENT("cmpxchg_oop_c2 {");
  assert(res != noreg, "need result register");
  assert_different_registers(oldval, addr, res, tmp1, tmp2);
  assert_different_registers(newval, addr, res, tmp1, tmp2);

  // Fast-path: Try to CAS optimistically. If successful, then we are done.
  // EQ flag set iff success. 'tmp2' holds value fetched.
  Assembler::operand_size size = UseCompressedOops ? Assembler::word : Assembler::xword;
  __ cmpxchg(addr, oldval, newval, size, acquire, release, weak, tmp2);

  // If we need a boolean result out of CAS, set the flag appropriately.  This
  // would be the final result if we do not go slow.
  if (!exchange) {
    __ cset(res, Assembler::EQ);
  } else {
    __ mov(res, tmp2);
  }

  if (ShenandoahCASBarrier) {
    ShenandoahCASBarrierSlowStubC2* const slow_stub =
      ShenandoahCASBarrierSlowStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, exchange, acquire, release, weak);

    slow_stub->dont_preserve(res);    // set at the end, no need to save
    slow_stub->dont_preserve(oldval); // saved explicitly
    slow_stub->dont_preserve(tmp1);   // temp, no need to save
    slow_stub->dont_preserve(tmp2);   // temp, no need to save

    // On success, we do not need any additional handling.
    __ br(Assembler::EQ, *slow_stub->continuation());

    // If GC is in progress, it is likely we need additional handling for false negatives.
    Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    __ ldrb(tmp1, gc_state);
    __ tbz(tmp1, ShenandoahHeap::HAS_FORWARDED_BITPOS, *slow_stub->continuation());
    __ b(*slow_stub->entry());

    // Slow stub re-enters with result set correctly.
    __ bind(*slow_stub->continuation());
  }

  BLOCK_COMMENT("} cmpxchg_oop_c2");
}

#undef __
#define __ masm.

void ShenandoahLoadRefBarrierStubC2::emit_code(MacroAssembler& masm) {
  BLOCK_COMMENT("ShenandoahLoadRefBarrierStubC2::emit_code {");
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());
  Register obj = _obj;
  if (_narrow) {
    __ decode_heap_oop(_tmp1, _obj);
    obj = _tmp1;
  }
  // Weak/phantom loads always need to go to runtime.
  if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
    // Check for object in cset.
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, obj, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ cbz(rscratch2, *continuation());
  }
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != obj) {
      if (c_rarg0 == _addr) {
        __ mov(rscratch1, _addr);
        _addr = rscratch1;
      }
      __ mov(c_rarg0, obj);
    }
    __ mov(c_rarg1, _addr);

    if (_narrow) {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow));
      }
    } else {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
      }
    }
    __ blr(rscratch1);
    __ mov(_obj, r0);
  }
  if (_narrow) {
    __ encode_heap_oop(_obj);
  }
  __ b(*continuation());
  BLOCK_COMMENT("} ShenandoahLoadRefBarrierStubC2::emit_code");
}

void ShenandoahSATBBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());
  // Do we need to load the previous value?
  if (_addr != noreg) {
    __ load_heap_oop(_preval, Address(_addr, 0), noreg, noreg, AS_RAW);
  }

  Address index(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
  Label runtime;
  __ ldr(rscratch1, index);
  // If buffer is full, call into runtime.
  __ cbz(rscratch1, runtime);

  // The buffer is not full, store value into it.
  __ sub(rscratch1, rscratch1, wordSize);
  __ str(rscratch1, index);
  __ ldr(rscratch2, buffer);
  __ str(_preval, Address(rscratch2, rscratch1));
  __ b(*continuation());

  // Runtime call
  __ bind(runtime);
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != _preval) {
      __ mov(c_rarg0, _preval);
    }
    __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2));
    __ blr(rscratch1);
  }
  __ b(*continuation());
}

void ShenandoahCASBarrierMidStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());

  // Check if CAS result is null. If it is, then we must have a legitimate failure.
  // This makes loading the fwdptr in the slow-path simpler.
  __ tst(_result, _result);
  // In case of !CAE, this has the correct value for legitimate failure (0/false)
  // in result register.
  __ br(Assembler::EQ, *continuation());

  // Check if GC is in progress; otherwise we must have a legitimate failure.
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(_tmp, gc_state);
  __ tstw(_tmp, ShenandoahHeap::HAS_FORWARDED);
  __ br(Assembler::NE, *_slow_stub->entry());

  if (!_cae) {
    __ mov(_result, 0); // result = false
  }
  __ b(*continuation());
}

void ShenandoahCASBarrierSlowStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  // CAS has failed because the value held at addr does not match expected.
  // This may be a false negative, because the version in memory might be
  // the from-space copy of the same object that we currently hold a
  // to-space reference for.
  //
  // To resolve this, we need to pass the location through the LRB fixup;
  // this will make sure that the location holds only to-space pointers.
  // To avoid calling into runtime often, we cset-check the object first.
  // We can inline most of the work here, but there is little point,
  // as CAS failures over cset locations must be rare. This fast-slow split
  // matches what we do for normal LRB.

  // Non-strong references should always go to runtime. We do not expect
  // CASes over non-strong locations.
  assert((_node->barrier_data() & ShenandoahBarrierStrong) != 0, "Only strong references for CASes");

  Label L_final;

  // The (compressed) failure witness is in _tmp2.
  // Unpack it and check whether it is in the collection set.
  // We need to back up the compressed version to use in the LRB.
  __ mov(_result, _tmp2);
  if (UseCompressedOops) {
    __ decode_heap_oop(_tmp2);
  }

  __ mov(_tmp1, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(_tmp2, _tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(_tmp1, Address(_tmp1, _tmp2));
  __ cbz(_tmp1, L_final);

  {
    SaveLiveRegisters save_registers(&masm, this);
    // Load up failure witness again.
    __ mov(c_rarg0, _result);
    if (UseCompressedOops) {
      __ decode_heap_oop(c_rarg0);
    }
    __ mov(c_rarg1, _addr_reg);

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), 2);
    }
    // We have called LRB to fix up the heap location. We do not care about its
    // result, as we will just try to CAS the location again.
  }

  __ bind(L_final);

  Assembler::operand_size size = UseCompressedOops ? Assembler::word : Assembler::xword;
  __ cmpxchg(_addr_reg, _expected, _new_val, size, _acquire, _release, _weak, _result);

  if (!_cae) {
    __ cset(_result, Assembler::EQ);
  }
  __ b(*continuation());
}
#undef __
#define __ masm->
#endif // COMPILER2

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register scratch) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;

  // Zero count? Nothing to do.
  __ cbz(count, L_done);

  // end = start + count << LogBytesPerHeapOop
  // last element address to make inclusive
  __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop)));
  __ sub(end, end, BytesPerHeapOop);
  __ lsr(start, start, CardTable::card_shift());
  __ lsr(end, end, CardTable::card_shift());
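  // start and end now hold the first and last (inclusive) card indices.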

  // number of card table bytes to dirty, minus one (the loop below is inclusive)
  __ sub(count, end, start);

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(scratch, curr_ct_holder_addr);
  __ add(start, start, scratch);
  __ bind(L_loop);
  __ strb(zr, Address(start, count));
  __ subs(count, count, 1);
  __ br(Assembler::GE, L_loop);
  __ bind(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(tmp2, Address(tmp2, tmp1));
    __ cbz(tmp2, *stub->continuation());
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ tbz(tmp, ShenandoahHeap::MARKING_BITPOS, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, r0);
  __ load_parameter(1, r1);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    } else {
      if (UseCompressedOops) {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
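  // Shuffle the result through rscratch1 so it survives the restore of the
  // call-clobbered registers.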
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ epilogue();
}

#undef __

#endif // COMPILER1