/*
 * Copyright (c) 2018, 2024, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "interpreter/interpreter.hpp"
#include "macroAssembler_ppc.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
                                                       Register base, RegisterOrConstant ind_or_offs,
                                                       Register tmp1, Register tmp2, Register tmp3,
                                                       MacroAssembler::PreservationLevel preservation_level) {
  if (ShenandoahSATBBarrier) {
    __ block_comment("satb_write_barrier (shenandoahgc) {");
    satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
    __ block_comment("} satb_write_barrier (shenandoahgc)");
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler *masm, DecoratorSet decorators,
                                                           Register base, RegisterOrConstant ind_or_offs,
                                                           Register dst,
                                                           Register tmp1, Register tmp2,
                                                           MacroAssembler::PreservationLevel preservation_level) {
  if (ShenandoahLoadRefBarrier) {
    __ block_comment("load_reference_barrier (shenandoahgc) {");
    load_reference_barrier_impl(masm, decorators, base, ind_or_offs, dst, tmp1, tmp2, preservation_level);
    __ block_comment("} load_reference_barrier (shenandoahgc)");
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count,
                                                       Register preserve1, Register preserve2) {
  Register R11_tmp = R11_scratch1;

  assert_different_registers(src, dst, count, R11_tmp, noreg);
  if (preserve1 != noreg) {
    // Technically not required, but likely to indicate an error.
    assert_different_registers(preserve1, preserve2);
  }

  /* ==== Check whether barrier is required (optimizations) ==== */
  // Fast path: Component type of array is not a reference type.
  if (!is_reference_type(type)) {
    return;
  }

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  // Fast path: No barrier is required if every barrier type is either disabled or would not store
  // any useful information.
  if ((!ShenandoahSATBBarrier || dest_uninitialized) && !ShenandoahLoadRefBarrier) {
    return;
  }

  __ block_comment("arraycopy_prologue (shenandoahgc) {");
  Label skip_prologue;

  // Fast path: Array is of length zero.
  __ cmpdi(CCR0, count, 0);
  __ beq(CCR0, skip_prologue);

  /* ==== Check whether barrier is required (gc state) ==== */
  __ lbz(R11_tmp, in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
         R16_thread);

  // The set of garbage collection states requiring barriers depends on the available barrier types and the
  // type of the reference in question.
  // For instance, SATB barriers may be skipped if it is certain that the overwritten values are not relevant
  // to the garbage collector.
  const int required_states = ShenandoahSATBBarrier && dest_uninitialized
                              ? ShenandoahHeap::HAS_FORWARDED
                              : ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;

  __ andi_(R11_tmp, R11_tmp, required_states);
  __ beq(CCR0, skip_prologue);

  /* ==== Invoke runtime ==== */
  // Save to-be-preserved registers.
  int highest_preserve_register_index = 0;
  {
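    // The registers are spilled into the area just below the current SP; the frame pushed
    // afterwards covers these slots, keeping them safe across the runtime call.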
    if (preserve1 != noreg && preserve1->is_volatile()) {
      __ std(preserve1, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    }
    if (preserve2 != noreg && preserve2 != preserve1 && preserve2->is_volatile()) {
      __ std(preserve2, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    }

    __ std(src, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    __ std(dst, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    __ std(count, -BytesPerWord * ++highest_preserve_register_index, R1_SP);

    __ save_LR(R11_tmp);
    __ push_frame_reg_args(BytesPerWord * highest_preserve_register_index,
                           R11_tmp);
  }

  // Invoke runtime.
  address jrt_address = nullptr;
  if (UseCompressedOops) {
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
  } else {
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop);
  }
  assert(jrt_address != nullptr, "jrt routine cannot be found");

  __ call_VM_leaf(jrt_address, src, dst, count);

  // Restore to-be-preserved registers.
  {
    __ pop_frame();
    __ restore_LR(R11_tmp);

    __ ld(count, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    __ ld(dst, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    __ ld(src, -BytesPerWord * highest_preserve_register_index--, R1_SP);

    if (preserve2 != noreg && preserve2 != preserve1 && preserve2->is_volatile()) {
      __ ld(preserve2, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    }
    if (preserve1 != noreg && preserve1->is_volatile()) {
      __ ld(preserve1, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    }
  }

  __ bind(skip_prologue);
  __ block_comment("} arraycopy_prologue (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register dst, Register count,
                                                       Register preserve) {
  if (ShenandoahCardBarrier && is_reference_type(type)) {
    __ block_comment("arraycopy_epilogue (shenandoahgc) {");
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, preserve);
    __ block_comment("} arraycopy_epilogue (shenandoahgc)");
  }
}

// The to-be-enqueued value can either be determined
// - dynamically by passing the reference's address information (load mode) or
// - statically by passing a register in which the value is stored (preloaded mode)
//   - for performance optimizations in cases where the previous value is known (currently not implemented) and
//   - for incremental-update barriers.
//
// decorators:  The previous value's decorator set.
//              In "load mode", the value must equal '0'.
// base:        Base register of the reference's address (load mode).
//              In "preloaded mode", the register must equal 'noreg'.
// ind_or_offs: Index or offset of the reference's address (load mode).
//              If 'base' equals 'noreg' (preloaded mode), the passed value is ignored.
// pre_val:     Register holding the to-be-stored value (preloaded mode).
//              In "load mode", this register acts as a temporary register and must
//              thus not be 'noreg'.  In "preloaded mode", its contents are preserved.
// tmp1/tmp2:   Temporary registers, one of which must be non-volatile in "preloaded mode".
void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
                                                            Register base, RegisterOrConstant ind_or_offs,
                                                            Register pre_val,
                                                            Register tmp1, Register tmp2,
                                                            MacroAssembler::PreservationLevel preservation_level) {
  assert_different_registers(tmp1, tmp2, pre_val, noreg);

  Label skip_barrier;

  /* ==== Determine necessary runtime invocation preservation measures ==== */
  const bool needs_frame           = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
  const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;

  // Check whether marking is active.
  __ lbz(tmp1, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);

  __ andi_(tmp1, tmp1, ShenandoahHeap::MARKING);
  __ beq(CCR0, skip_barrier);

  /* ==== Determine the reference's previous value ==== */
  bool preloaded_mode = base == noreg;
  Register pre_val_save = noreg;

  if (preloaded_mode) {
    // The previous value has been passed to the method, so it does not need to be loaded here.
    // In case 'pre_val' is a volatile register, it must be saved across the C-call
    // as callers may depend on its value.
    // Unless the general-purpose registers are saved anyway, one of the temporary registers
    // (i.e., 'tmp1' or 'tmp2') is used to preserve 'pre_val'.
    if (!preserve_gp_registers && pre_val->is_volatile()) {
      pre_val_save = !tmp1->is_volatile() ? tmp1 : tmp2;
      assert(!pre_val_save->is_volatile(), "at least one of the temporary registers must be non-volatile");
    }

    if ((decorators & IS_NOT_NULL) != 0) {
#ifdef ASSERT
      __ cmpdi(CCR0, pre_val, 0);
      __ asm_assert_ne("null oop is not allowed");
#endif // ASSERT
    } else {
      __ cmpdi(CCR0, pre_val, 0);
      __ beq(CCR0, skip_barrier);
    }
  } else {
    // Load from the reference address to determine the reference's current value (before the store is performed).
    // Unlike the value passed in "preloaded mode", it does not need to be preserved.
    assert(decorators == 0, "decorator set must be empty");
    assert(base != noreg, "base must be a register");
    assert(!ind_or_offs.is_register() || ind_or_offs.as_register() != noreg, "ind_or_offs must be a register");
    if (UseCompressedOops) {
      __ lwz(pre_val, ind_or_offs, base);
    } else {
      __ ld(pre_val, ind_or_offs, base);
    }

    __ cmpdi(CCR0, pre_val, 0);
    __ beq(CCR0, skip_barrier);

    if (UseCompressedOops) {
      __ decode_heap_oop_not_null(pre_val);
    }
  }

  /* ==== Try to enqueue the to-be-stored value directly into the thread's local SATB mark queue ==== */
  {
    Label runtime;
    Register Rbuffer = tmp1, Rindex = tmp2;

    // Check whether the queue has enough capacity to store another oop.
    // If not, jump to the runtime to commit the buffer and to allocate a new one.
    // (The buffer's index corresponds to the amount of remaining free space.)
    __ ld(Rindex, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
    __ cmpdi(CCR0, Rindex, 0);
    __ beq(CCR0, runtime); // If index == 0 (buffer is full), goto runtime.

    // Capacity suffices.  Decrement the queue's index by the size of one oop.
    // (The buffer is filled downwards, i.e., contrary to the heap's direction of growth.)
    __ addi(Rindex, Rindex, -wordSize);
    __ std(Rindex, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);

    // Enqueue the previous value and skip the invocation of the runtime.
    __ ld(Rbuffer, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);
    __ stdx(pre_val, Rbuffer, Rindex);
    __ b(skip_barrier);

    __ bind(runtime);
  }

  /* ==== Invoke runtime to commit SATB mark queue to the GC and allocate a new buffer ==== */
  // Save to-be-preserved registers.
  int nbytes_save = 0;

  if (needs_frame) {
    if (preserve_gp_registers) {
      nbytes_save = (preserve_fp_registers
                     ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
                     : MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
      __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
    }

    __ save_LR(tmp1);
    __ push_frame_reg_args(nbytes_save, tmp2);
  }

  if (!preserve_gp_registers && preloaded_mode && pre_val->is_volatile()) {
    assert(pre_val_save != noreg, "pre_val_save must not be noreg");

    // The 'pre_val' register must be saved manually unless general-purpose registers are preserved anyway.
    __ mr(pre_val_save, pre_val);
  }

  // Invoke runtime.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, R16_thread);

  // Restore to-be-preserved registers.
  if (!preserve_gp_registers && preloaded_mode && pre_val->is_volatile()) {
    __ mr(pre_val, pre_val_save);
  }

  if (needs_frame) {
    __ pop_frame();
    __ restore_LR(tmp1);

    if (preserve_gp_registers) {
      __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
    }
  }

  __ bind(skip_barrier);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler *masm, Register dst, Register tmp) {
  __ block_comment("resolve_forward_pointer_not_null (shenandoahgc) {");

  Register tmp1 = tmp,
           R0_tmp2 = R0;
  assert_different_registers(dst, tmp1, R0_tmp2, noreg);

  // If the object has been evacuated, the mark word layout is as follows:
  // | forwarding pointer (62-bit) | '11' (2-bit) |

  // The invariant that object pointers have the lowest two bits cleared permits retrieving
  // the forwarding pointer simply by inverting the lowest two bits.
  // This invariant follows directly from HotSpot's minimum object alignment.
  assert(markWord::marked_value <= (unsigned long) MinObjAlignmentInBytes,
         "marked value must not be higher than hotspot's minimal alignment");

  Label done;

  // Load the object's mark word.
  __ ld(tmp1, oopDesc::mark_offset_in_bytes(), dst);

  // Load the bit mask for the lock bits.
  __ li(R0_tmp2, markWord::lock_mask_in_place);

  // Check whether all bits matching the bit mask are set.
  // If that is the case, the object has been evacuated and the most significant bits form the forward pointer.
  __ andc_(R0_tmp2, R0_tmp2, tmp1);

  assert(markWord::lock_mask_in_place == markWord::marked_value,
         "marked value must equal the value obtained when all lock bits are being set");
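  // Clearing the two set lock bits recovers the forwarding pointer.  As both bits are '1',
  // an xori with the lock mask performs exactly that; use a branch-free isel where available.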
  if (VM_Version::has_isel()) {
    __ xori(tmp1, tmp1, markWord::lock_mask_in_place);
    __ isel(dst, CCR0, Assembler::equal, false, tmp1);
  } else {
    __ bne(CCR0, done);
    __ xori(dst, tmp1, markWord::lock_mask_in_place);
  }

  __ bind(done);
  __ block_comment("} resolve_forward_pointer_not_null (shenandoahgc)");
}

// base:        Base register of the reference's address.
// ind_or_offs: Index or offset of the reference's address (load mode).
// dst:         Register holding the reference.  In case the object has been evacuated, this will
//              be the to-space version of that object once the barrier has been applied.
void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
    MacroAssembler *masm, DecoratorSet decorators,
    Register base, RegisterOrConstant ind_or_offs,
    Register dst,
    Register tmp1, Register tmp2,
    MacroAssembler::PreservationLevel preservation_level) {
  if (ind_or_offs.is_register()) {
    assert_different_registers(tmp1, tmp2, base, ind_or_offs.as_register(), dst, noreg);
  } else {
    assert_different_registers(tmp1, tmp2, base, dst, noreg);
  }

  Label skip_barrier;

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
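  // Note that native (off-heap) reference slots always hold uncompressed oops.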
  bool is_narrow  = UseCompressedOops && !is_native;

  /* ==== Check whether heap is stable ==== */
  __ lbz(tmp2, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);

  if (is_strong) {
    // For strong references, the heap is considered stable if "has forwarded" is not active.
    __ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
    __ beq(CCR0, skip_barrier);
#ifdef ASSERT
    // "evacuation" -> (implies) "has forwarded".  If we reach this code, "has forwarded" must thus be set.
    __ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
    __ asm_assert_ne("'has forwarded' is missing");
#endif // ASSERT
  } else {
    // For all non-strong references, the heap is considered stable if none of "has forwarded",
    // "root set processing", and "weak reference processing" is active.
    // The additional phase conditions are in place to avoid the resurrection of weak references (see JDK-8266440).
    Label skip_fastpath;
    __ andi_(tmp1, tmp2, ShenandoahHeap::WEAK_ROOTS);
    __ bne(CCR0, skip_fastpath);

    __ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
    __ beq(CCR0, skip_barrier);
#ifdef ASSERT
    // "evacuation" -> (implies) "has forwarded".  If we reach this code, "has forwarded" must thus be set.
    __ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
    __ asm_assert_ne("'has forwarded' is missing");
#endif // ASSERT

    __ bind(skip_fastpath);
  }

  /* ==== Check whether region is in collection set ==== */
  if (is_strong) {
    // Shenandoah stores metadata on regions in a contiguous area of memory in which a single byte corresponds to
    // an entire region of the Shenandoah heap.  At present, only the least significant bit is of significance
    // and indicates whether the region is part of the collection set.
    //
    // All regions are of the same size and are always aligned by a power of two.
    // Any address can thus be shifted by a fixed number of bits to retrieve the address prefix shared by
    // all objects within that region (region identification bits).
    //
    //  | unused bits | region identification bits | object identification bits |
    //  (The region size depends on a number of criteria, such as page size, user-provided arguments, and the
    //   maximum heap size.  The number of object identification bits can thus not be determined at compile time.)
    //
    // -------------------------------------------------------  <--- cs (collection set) base address
    // | lost space due to heap space base address                   -> 'ShenandoahHeap::in_cset_fast_test_addr()'
    // | (region identification bits contain heap base offset)
    // |------------------------------------------------------  <--- cs base address + (heap_base >> region size shift)
    // | the collection set proper                                   -> shift: 'region_size_bytes_shift_jint()'
    // |
    // |------------------------------------------------------  <--- cs base address + (heap_base >> region size shift)
    //                                                                               + number of regions
    __ load_const_optimized(tmp2, ShenandoahHeap::in_cset_fast_test_addr(), tmp1);
    __ srdi(tmp1, dst, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ lbzx(tmp2, tmp1, tmp2);
    __ andi_(tmp2, tmp2, 1);
    __ beq(CCR0, skip_barrier);
  }

  /* ==== Invoke runtime ==== */
  // Save to-be-preserved registers.
  int nbytes_save = 0;

  const bool needs_frame           = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
  const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;

  if (needs_frame) {
    if (preserve_gp_registers) {
      nbytes_save = (preserve_fp_registers
                     ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
                     : MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
      __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
    }

    __ save_LR(tmp1);
    __ push_frame_reg_args(nbytes_save, tmp1);
  }

  // Calculate the reference's absolute address.
  __ add(R4_ARG2, ind_or_offs, base);

  // Invoke runtime.
  address jrt_address = nullptr;

  if (is_strong) {
    if (is_narrow) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    }
  } else if (is_weak) {
    if (is_narrow) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  assert(jrt_address != nullptr, "jrt routine cannot be found");

  __ call_VM_leaf(jrt_address, dst /* reference */, R4_ARG2 /* reference address */);

  // Restore to-be-preserved registers.
  if (preserve_gp_registers) {
    __ mr(R0, R3_RET);
  } else {
    __ mr_if_needed(dst, R3_RET);
  }

  if (needs_frame) {
    __ pop_frame();
    __ restore_LR(tmp1);

    if (preserve_gp_registers) {
      __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
      __ mr(dst, R0);
    }
  }

  __ bind(skip_barrier);
}

// base:           Base register of the reference's address.
// ind_or_offs:    Index or offset of the reference's address.
// L_handle_null:  An optional label that will be jumped to if the reference is null.
void ShenandoahBarrierSetAssembler::load_at(
    MacroAssembler *masm, DecoratorSet decorators, BasicType type,
    Register base, RegisterOrConstant ind_or_offs, Register dst,
    Register tmp1, Register tmp2,
    MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
  // Registers must not clash, except for 'base' and 'dst'.
  if (ind_or_offs.is_register()) {
    if (base != noreg) {
      assert_different_registers(tmp1, tmp2, base, ind_or_offs.register_or_noreg(), R0, noreg);
    }
    assert_different_registers(tmp1, tmp2, dst, ind_or_offs.register_or_noreg(), R0, noreg);
  } else {
    if (base != noreg) {
      assert_different_registers(tmp1, tmp2, base, R0, noreg);
    }
    assert_different_registers(tmp1, tmp2, dst, R0, noreg);
  }

  /* ==== Apply load barrier, if required ==== */
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    assert(is_reference_type(type), "need_load_reference_barrier must check whether type is a reference type");

    // If 'dst' clashes with either 'base' or 'ind_or_offs', use an intermediate result register
    // to keep those values alive until the load reference barrier is applied.
    Register intermediate_dst = (dst == base || (ind_or_offs.is_register() && dst == ind_or_offs.as_register()))
                                ? tmp2
                                : dst;

    BarrierSetAssembler::load_at(masm, decorators, type,
                                 base, ind_or_offs,
                                 intermediate_dst,
                                 tmp1, noreg,
                                 preservation_level, L_handle_null);

    load_reference_barrier(masm, decorators,
                           base, ind_or_offs,
                           intermediate_dst,
                           tmp1, R0,
                           preservation_level);

    __ mr_if_needed(dst, intermediate_dst);
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type,
                                 base, ind_or_offs,
                                 dst,
                                 tmp1, tmp2,
                                 preservation_level, L_handle_null);
  }

  /* ==== Apply keep-alive barrier, if required (e.g., to inhibit weak reference resurrection) ==== */
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    if (ShenandoahSATBBarrier) {
      __ block_comment("keep_alive_barrier (shenandoahgc) {");
      satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
      __ block_comment("} keep_alive_barrier (shenandoahgc)");
    }
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  ShenandoahBarrierSet* ctbs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = ctbs->card_table();
  assert_different_registers(base, tmp, R0);

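  // Compute the reference's absolute address.  Note that 'base' is clobbered in the process.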
  if (ind_or_offs.is_constant()) {
    __ add_const_optimized(base, base, ind_or_offs.as_constant(), tmp);
  } else {
    __ add(base, ind_or_offs.as_register(), base);
  }

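  // Mark the card covering the reference's address as dirty: shift the address down to its
  // card index and store the dirty value into the corresponding card table entry.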
  __ load_const_optimized(tmp, (address)ct->byte_map_base(), R0);
  __ srdi(base, base, CardTable::card_shift());
  __ li(R0, CardTable::dirty_card_val());
  __ stbx(R0, tmp, base);
}

// base:        Base register of the reference's address.
// ind_or_offs: Index or offset of the reference's address.
// val:         To-be-stored value/reference's new value.
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet decorators, BasicType type,
                                             Register base, RegisterOrConstant ind_or_offs, Register val,
                                             Register tmp1, Register tmp2, Register tmp3,
                                             MacroAssembler::PreservationLevel preservation_level) {
  if (is_reference_type(type)) {
    if (ShenandoahSATBBarrier) {
      satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
    }
  }

  BarrierSetAssembler::store_at(masm, decorators, type,
                                base, ind_or_offs,
                                val,
                                tmp1, tmp2, tmp3,
                                preservation_level);

  // No need for post barrier if storing NULL
  if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
    store_check(masm, base, ind_or_offs, tmp1);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler *masm,
                                                                  Register dst, Register jni_env, Register obj,
                                                                  Register tmp, Label &slowpath) {
  __ block_comment("try_resolve_jobject_in_native (shenandoahgc) {");

  assert_different_registers(jni_env, obj, tmp);

  Label done;

  // Fast path: Reference is null (JNI tags are zero for null pointers).
  __ cmpdi(CCR0, obj, 0);
  __ beq(CCR0, done);

  // Resolve jobject using standard implementation.
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);

  // Check whether heap is stable.
  __ lbz(tmp,
         in_bytes(ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset()),
         jni_env);

  __ andi_(tmp, tmp, ShenandoahHeap::EVACUATION | ShenandoahHeap::HAS_FORWARDED);
  __ bne(CCR0, slowpath);

  __ bind(done);
  __ block_comment("} try_resolve_jobject_in_native (shenandoahgc)");
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation.  That is, the CAS operation is intended to succeed in
// the following scenarios (success criteria):
//  s1) The reference pointer ('base_addr') equals the expected ('expected') pointer.
//  s2) The reference pointer refers to the from-space version of an already-evacuated
//      object, whereas the expected pointer refers to the to-space version of the same object.
// Situations in which the reference pointer refers to the to-space version of an object
// and the expected pointer refers to the from-space version of the same object cannot occur due to
// Shenandoah's strong to-space invariant.  This also implies that the reference stored in 'new_val'
// cannot refer to the from-space version of an already-evacuated object.
//
// To guarantee correct behavior in concurrent environments, two races must be addressed:
//  r1) A concurrent thread may heal the reference pointer (i.e., it is no longer referring to the
//      from-space version but to the to-space version of the object in question).
//      In this case, the CAS operation should succeed.
//  r2) A concurrent thread may mutate the reference (i.e., the reference pointer refers to an entirely different object).
//      In this case, the CAS operation should fail.
//
// By default, the value held in the 'result' register is zero to indicate failure of CAS,
// non-zero to indicate success.  If 'is_cae' is set, the result is the most recently fetched
// value from 'base_addr' rather than a boolean success indicator.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register base_addr,
                                                Register expected, Register new_val, Register tmp1, Register tmp2,
                                                bool is_cae, Register result) {
  __ block_comment("cmpxchg_oop (shenandoahgc) {");

  assert_different_registers(base_addr, new_val, tmp1, tmp2, result, R0);
  assert_different_registers(base_addr, expected, tmp1, tmp2, result, R0);

  // A potential clash of 'success_flag' and 'tmp' is accounted for.
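  // In CAE mode, the fetched value is the result; otherwise, 'result' doubles as the boolean
  // success flag and 'tmp1' receives the fetched value.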
  Register success_flag  = is_cae ? noreg  : result,
           current_value = is_cae ? result : tmp1,
           tmp           = is_cae ? tmp1   : result,
           initial_value = tmp2;

  Label done, step_four;

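  // 'step_four' is bound at the initial CAS so that step 4 can branch back and reuse it
  // instead of emitting a third CAS operation (see the comment at step 4 below).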
  __ bind(step_four);

  /* ==== Step 1 ("Standard" CAS) ==== */
  // Fast path: The values stored in 'expected' and 'base_addr' are equal.
  // Given that 'expected' must refer to the to-space object of an evacuated object (strong to-space invariant),
  // no special processing is required.
  if (UseCompressedOops) {
    __ cmpxchgw(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag, nullptr, true);
  } else {
    __ cmpxchgd(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag, nullptr, true);
  }

  // Skip the rest of the barrier if the CAS operation succeeds immediately.
  // If it does not, the value stored at the address is either the from-space pointer of the
  // referenced object (success criteria s2)) or simply another object.
  __ beq(CCR0, done);

  /* ==== Step 2 (Null check) ==== */
  // Success criterion s2) cannot be met by a null pointer
  // (null pointers cannot be subject to concurrent evacuation).  The failure of the CAS operation is thus legitimate.
  __ cmpdi(CCR0, current_value, 0);
  __ beq(CCR0, done);

  /* ==== Step 3 (reference pointer refers to from-space version; success criteria s2)) ==== */
  // To check whether the reference pointer refers to the from-space version, the forward
  // pointer of the object referred to by the reference is resolved and compared against the expected pointer.
  // If this check succeeds, another CAS operation is issued with the from-space pointer being the expected pointer.
  //
  // Save the potential from-space pointer.
  __ mr(initial_value, current_value);

  // Resolve forward pointer.
  if (UseCompressedOops) { __ decode_heap_oop_not_null(current_value); }
  resolve_forward_pointer_not_null(masm, current_value, tmp);
  if (UseCompressedOops) { __ encode_heap_oop_not_null(current_value); }

  if (!is_cae) {
    // 'success_flag' was overwritten by the call to 'resolve_forward_pointer_not_null'.
    // Load zero into the register for the potential failure case.
    __ li(success_flag, 0);
  }
  __ cmpd(CCR0, current_value, expected);
  __ bne(CCR0, done);

  // Discard fetched value as it might be a reference to the from-space version of an object.
  if (UseCompressedOops) {
    __ cmpxchgw(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag);
  } else {
    __ cmpxchgd(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag);
  }

  /* ==== Step 4 (Retry CAS with to-space pointer (success criteria s2) under race r1)) ==== */
  // The reference pointer could have been healed whilst the previous CAS operation was being performed.
  // Another CAS operation must thus be issued with the to-space pointer being the expected pointer.
  // If that CAS operation fails as well, race r2) must have occurred, indicating that
  // the operation failure is legitimate.
  //
  // To keep the code's size small and thus improve instruction-cache performance, this highly
  // unlikely case should be handled by the smallest possible code.  Instead of emitting a third,
  // explicit CAS operation, the code jumps back and reuses the first CAS operation (step 1)
  // (passed arguments are identical).
  //
  // A failure of the CAS operation in step 1 would imply that the overall CAS operation is supposed
  // to fail.  Jumping back to step 1 requires, however, that step 2 and step 3 are re-executed as well.
  // It is thus important to ensure that a re-execution of those steps does not put program correctness
  // at risk:
  // - Step 2: Either terminates in failure (desired result) or falls through to step 3.
  // - Step 3: Terminates if the comparison between the forwarded, fetched pointer and the expected value
  //           fails.  Unless the reference has been updated in the meanwhile once again, this is
  //           guaranteed to be the case.
  //           In case of a concurrent update, the CAS would be retried again. This is legitimate
  //           in terms of program correctness (even though it is not desired).
  __ bne(CCR0, step_four);

  __ bind(done);
  __ block_comment("} cmpxchg_oop (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count, Register preserve) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = bs->card_table();
  assert_different_registers(addr, count, R0);

  Label L_skip_loop, L_store_loop;

  __ sldi_(count, count, LogBytesPerHeapOop);

  // Zero length? Skip.
  __ beq(CCR0, L_skip_loop);

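  // Compute the range of cards spanned by the oop array: 'addr' becomes the address of the
  // first card in the card table, 'count' the number of cards to be dirtied.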
  __ addi(count, count, -BytesPerHeapOop);
  __ add(count, addr, count);
  // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
  __ srdi(addr, addr, CardTable::card_shift());
  __ srdi(count, count, CardTable::card_shift());
  __ subf(count, addr, count);
  __ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
  __ addi(count, count, 1);
  __ li(R0, 0);
  __ mtctr(count);

  // Byte store loop
  __ bind(L_store_loop);
  __ stb(R0, 0, addr);
  __ addi(addr, addr, 1);
  __ bdnz(L_store_loop);
  __ bind(L_skip_loop);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler *ce, ShenandoahPreBarrierStub *stub) {
  __ block_comment("gen_pre_barrier_stub (shenandoahgc) {");

  ShenandoahBarrierSetC1 *bs = (ShenandoahBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  // GC status has already been verified by 'ShenandoahBarrierSetC1::pre_barrier'.
  // This stub is the slowpath of that function.

  assert(stub->pre_val()->is_register(), "pre_val must be a register");
  Register pre_val = stub->pre_val()->as_register();

  // If 'do_load()' returns false, the to-be-stored value is already available in 'stub->pre_val()'
  // ("preloaded mode" of the store barrier).
  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false);
  }

  // Fast path: Reference is null.
  __ cmpdi(CCR0, pre_val, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  // Argument passing via the stack.
  __ std(pre_val, -8, R1_SP);

  __ load_const_optimized(R0, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ call_stub(R0);

  __ b(*stub->continuation());
  __ block_comment("} gen_pre_barrier_stub (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler *ce,
                                                                    ShenandoahLoadReferenceBarrierStub *stub) {
  __ block_comment("gen_load_reference_barrier_stub (shenandoahgc) {");

  ShenandoahBarrierSetC1 *bs = (ShenandoahBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(addr, res, tmp1, tmp2);

#ifdef ASSERT
  // Ensure that 'res' is 'R3_ARG1' and contains the same value as 'obj' to reduce the number of required
  // copy instructions.
  assert(R3_RET == res, "res must be r3");
  __ cmpd(CCR0, res, obj);
  __ asm_assert_eq("result register must contain the reference stored in obj");
#endif

  DecoratorSet decorators = stub->decorators();

  /* ==== Check whether region is in collection set ==== */
  // GC status (unstable) has already been verified by 'ShenandoahBarrierSetC1::load_reference_barrier_impl'.
  // This stub is the slowpath of that function.

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  if (is_strong) {
    // Check whether object is in collection set.
    __ load_const_optimized(tmp2, ShenandoahHeap::in_cset_fast_test_addr(), tmp1);
    __ srdi(tmp1, obj, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ lbzx(tmp2, tmp1, tmp2);

    __ andi_(tmp2, tmp2, 1);
    __ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());
  }

  address blob_addr = nullptr;

  if (is_strong) {
    if (is_native) {
      blob_addr = bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin();
    } else {
      blob_addr = bs->load_reference_barrier_strong_rt_code_blob()->code_begin();
    }
  } else if (is_weak) {
    blob_addr = bs->load_reference_barrier_weak_rt_code_blob()->code_begin();
  } else {
    assert(is_phantom, "only remaining strength");
    blob_addr = bs->load_reference_barrier_phantom_rt_code_blob()->code_begin();
  }

  assert(blob_addr != nullptr, "code blob cannot be found");

  // Argument passing via the stack.  'obj' is passed implicitly (as asserted above).
  __ std(addr, -8, R1_SP);

  __ load_const_optimized(tmp1, blob_addr, tmp2);
  __ call_stub(tmp1);

  // 'res' is 'R3_RET'.  The result is thus already in the correct register.

  __ b(*stub->continuation());
  __ block_comment("} gen_load_reference_barrier_stub (shenandoahgc)");
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler *sasm) {
  __ block_comment("generate_c1_pre_barrier_runtime_stub (shenandoahgc) {");

  Label runtime, skip_barrier;
  BarrierSet *bs = BarrierSet::barrier_set();

  // Argument passing via the stack.
  const int caller_stack_slots = 3;

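  // Fetch the to-be-enqueued value, which was spilled by the stub's caller at '-8(R1_SP)',
  // and preserve the two scratch registers used as temporaries (restored before returning).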
  Register R0_pre_val = R0;
  __ ld(R0, -8, R1_SP);
  Register R11_tmp1 = R11_scratch1;
  __ std(R11_tmp1, -16, R1_SP);
  Register R12_tmp2 = R12_scratch2;
  __ std(R12_tmp2, -24, R1_SP);

  /* ==== Check whether marking is active ==== */
  // Even though gc status was checked in 'ShenandoahBarrierSetAssembler::gen_pre_barrier_stub',
  // another check is required as a safepoint might have been reached in the meantime (JDK-8140588).
  __ lbz(R12_tmp2, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);

  __ andi_(R12_tmp2, R12_tmp2, ShenandoahHeap::MARKING);
  __ beq(CCR0, skip_barrier);

  /* ==== Add previous value directly to thread-local SATB mark queue ==== */
  // Check queue's capacity.  Jump to runtime if no free slot is available.
  __ ld(R12_tmp2, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, R12_tmp2, 0);
  __ beq(CCR0, runtime);

  // Capacity suffices.  Decrement the queue's index by one slot (the size of one oop).
  __ addi(R12_tmp2, R12_tmp2, -wordSize);
  __ std(R12_tmp2, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);

  // Enqueue the previous value and skip the runtime invocation.
  __ ld(R11_tmp1, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);
  __ stdx(R0_pre_val, R11_tmp1, R12_tmp2);
  __ b(skip_barrier);

  __ bind(runtime);

  /* ==== Invoke runtime to commit SATB mark queue to the GC and allocate a new buffer ==== */
  // Save to-be-preserved registers.
  const int nbytes_save = (MacroAssembler::num_volatile_regs + caller_stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save);
  __ save_LR(R11_tmp1);
  __ push_frame_reg_args(nbytes_save, R11_tmp1);

  // Invoke runtime.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), R0_pre_val, R16_thread);

  // Restore to-be-preserved registers.
  __ pop_frame();
  __ restore_LR(R11_tmp1);
  __ restore_volatile_gprs(R1_SP, -nbytes_save);

  __ bind(skip_barrier);

  // Restore spilled registers.
  __ ld(R11_tmp1, -16, R1_SP);
  __ ld(R12_tmp2, -24, R1_SP);

  __ blr();
  __ block_comment("} generate_c1_pre_barrier_runtime_stub (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler *sasm,
                                                                                    DecoratorSet decorators) {
  __ block_comment("generate_c1_load_reference_barrier_runtime_stub (shenandoahgc) {");

  // Argument passing via the stack.
  const int caller_stack_slots = 1;

  // Save to-be-preserved registers.
  const int nbytes_save = (MacroAssembler::num_volatile_regs - 1 // 'R3_ARG1' is skipped
                           + caller_stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save, true, false);

  // Load arguments from stack.
  // No load required, as assured by assertions in 'ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub'.
  Register R3_obj = R3_ARG1;
  Register R4_load_addr = R4_ARG2;
  __ ld(R4_load_addr, -8, R1_SP);

  Register R11_tmp = R11_scratch1;

  /* ==== Invoke runtime ==== */
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  address jrt_address = nullptr;

  if (is_strong) {
    if (is_native) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    } else {
      if (UseCompressedOops) {
        jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else {
        jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak load reference barrier must not be called off-heap");
    if (UseCompressedOops) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "reference type must be phantom");
    assert(is_native, "phantom load reference barrier must be called off-heap");
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  assert(jrt_address != nullptr, "load reference barrier runtime routine cannot be found");

  __ save_LR(R11_tmp);
  __ push_frame_reg_args(nbytes_save, R11_tmp);

  // Invoke runtime.  Arguments are already stored in the corresponding registers.
  __ call_VM_leaf(jrt_address, R3_obj, R4_load_addr);

  // Restore to-be-preserved registers.
  __ pop_frame();
  __ restore_LR(R11_tmp);
  __ restore_volatile_gprs(R1_SP, -nbytes_save, true, false); // Skip 'R3_RET' register.

  __ blr();
  __ block_comment("} generate_c1_load_reference_barrier_runtime_stub (shenandoahgc)");
}

#undef __

#endif // COMPILER1