/*
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

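// Generates the pre-copy barrier for oop arraycopies. When the SATB, IU, or
// load-reference barrier is enabled and the GC is in a relevant phase
// (marking, or with forwarded objects present), this calls into the
// Shenandoah runtime so the source range is processed before the copy runs.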
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ beqz(count, done);

      // Is GC active?
      Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      assert_different_registers(src, dst, count, t0);

      __ lbu(t0, gc_state);
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED);
        __ beqz(t0, done);
      } else {
        __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ beqz(t0, done);
      }

      __ push_reg(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop_reg(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

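// Generates the SATB pre-barrier. If marking is active, this loads the
// previous value at obj (unless pre_val was already loaded by the caller),
// filters out null, and records the value in the thread-local SATB queue,
// calling into the runtime when the queue buffer is full (index == 0).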
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
  assert(thread == xthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, t0);
  assert(pre_val != noreg && tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwu(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbu(tmp, in_progress);
  }
  __ beqz(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ beqz(pre_val, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
  __ ld(tmp, index);                        // tmp := *index_adr
  __ beqz(tmp, runtime);                    // tmp == 0? If yes, goto runtime

  __ sub(tmp, tmp, wordSize);               // tmp := tmp - wordSize
  __ sd(tmp, index);                        // *index_adr := tmp
  __ ld(t0, buffer);
  __ add(tmp, tmp, t0);                     // tmp := tmp + *buffer_adr

  // Record the previous value
  __ sd(pre_val, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(x10);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push_reg(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_reg(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");

  Label is_null;
  __ beqz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even t0 and t1, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks if the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing whether the lowest two bits are now 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
  RegSet saved_regs = RegSet::of(t2);
  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = t0;
    if (tmp == dst) {
      tmp = t1;
    }
    saved_regs += RegSet::of(tmp);
  }

  assert_different_registers(tmp, dst, t2);
  __ push_reg(saved_regs, sp);

  Label done;
  __ ld(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ xori(tmp, tmp, -1); // invert the mark word (xori with -1 is bitwise NOT)
  __ andi(t2, tmp, markWord::lock_mask_in_place);
  __ bnez(t2, done);
  __ ori(tmp, tmp, markWord::marked_value);
  __ xori(dst, tmp, -1); // invert back, writing the forwardee into dst
  __ bind(done);

  __ pop_reg(saved_regs, sp);
}

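// Generates the load-reference barrier (LRB). If the heap is unstable (it
// has forwarded objects, or weak roots for non-strong accesses), the loaded
// oop is tested for collection-set membership (for strong accesses) and, if
// needed, the matching Shenandoah runtime entry is called to obtain the
// to-space copy. The result ends up in dst.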
void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
                                                           Register dst,
                                                           Address load_addr,
                                                           DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != t1 && load_addr.base() != t1, "need t1");
  assert_different_registers(load_addr.base(), t0, t1);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
  __ enter();
  Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lbu(t1, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ andi(t1, t1, ShenandoahHeap::HAS_FORWARDED);
    __ beqz(t1, heap_stable);
  } else {
    Label lrb;
    __ andi(t0, t1, ShenandoahHeap::WEAK_ROOTS);
    __ bnez(t0, lrb);
    __ andi(t0, t1, ShenandoahHeap::HAS_FORWARDED);
    __ beqz(t0, heap_stable);
    __ bind(lrb);
  }

  // x11 will hold the load address, so move dst out of the way if it is x11
  Register result_dst = dst;
  if (dst == x11) {
    __ mv(t1, dst);
    dst = t1;
  }

  // Save x10 and x11, unless one of them is the result register
  RegSet saved_regs = RegSet::of(x10, x11) - result_dst;
  __ push_reg(saved_regs, sp);
  __ la(x11, load_addr);
  __ mv(x10, dst);

  // Test for in-cset
  if (is_strong) {
    __ li(t1, (uint64_t)ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(t0, x10, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(t1, t1, t0);
    __ lbu(t1, Address(t1));
    __ andi(t0, t1, 1);
    __ beqz(t0, not_cset);
  }

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ jalr(ra);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);
  __ bind(not_cset);
  __ mv(result_dst, x10);
  __ pop_reg(saved_regs, sp);

  __ bind(heap_stable);
  __ leave();
}

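// Generates the IU (incremental-update) barrier: the new value is enqueued
// through the same SATB queue machinery used by the pre-barrier, so
// concurrent marking will visit it.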
void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahIUBarrier) {
    __ push_call_clobbered_registers();

    satb_write_barrier_pre(masm, noreg, dst, xthread, tmp, true, false);

    __ pop_call_clobbered_registers();
  }
}

//
// Arguments:
//
// Inputs:
//   src:        oop location to load from, might be clobbered
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   t0, t1 (scratch registers)
//
// Alias:
//   dst: x28 or x29 (might be used as a temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
                                            DecoratorSet decorators,
                                            BasicType type,
                                            Register dst,
                                            Address src,
                                            Register tmp1,
                                            Register tmp_thread) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    RegSet saved_regs;
    if (dst == src.base()) {
      dst = (src.base() == x28) ? x29 : x28;
      saved_regs = RegSet::of(dst);
      __ push_reg(saved_regs, sp);
    }
    assert_different_registers(dst, src.base());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mv(result_dst, dst);
      dst = result_dst;
    }

    if (saved_regs.bits() != 0) {
      __ pop_reg(saved_regs, sp);
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter();
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           xthread /* thread */,
                           tmp1 /* tmp */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

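// Oop stores get the full write-barrier sequence: flatten the destination
// address into x13, run the SATB pre-barrier on the previous value, apply
// the IU barrier to the new value, then perform the actual store.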
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
    return;
  }

  // flatten object address if needed
  if (dst.offset() == 0) {
    if (dst.base() != x13) {
      __ mv(x13, dst.base());
    }
  } else {
    __ la(x13, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               x13 /* obj */,
                               tmp2 /* pre_val */,
                               xthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), noreg, noreg, noreg);
  } else {
    iu_barrier(masm, val, tmp1);
    // Keep an uncompressed copy of the oop. This pattern is inherited from
    // the G1 barrier, which needs the uncompressed oop for its region cross
    // check; new_val is not used further here.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = t1;
      __ mv(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(x13, 0), val, noreg, noreg);
  }
}

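// Resolves a jobject from native code. After the common resolution, a
// non-null result is sent to the slow path if the heap is in the evacuation
// phase, since the reference may still point to a from-space copy.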
void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ beqz(obj, done);

  assert(obj != t1, "need t1");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lbu(t1, gc_state);

  // Check for heap in evacuation phase
  __ andi(t0, t1, ShenandoahHeap::EVACUATION);
  __ bnez(t0, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation.  The operation is more complex than a
// traditional CAS because it is intended to succeed if the reference at
// addr exactly matches expected or if the reference at addr holds a
// pointer to a from-space object that has been relocated to the location
// named by expected.  There are two races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object.  In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success.  If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers t0, t1.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                Assembler::Aqrl acquire,
                                                Assembler::Aqrl release,
                                                bool is_cae,
                                                Register result) {
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::uint32 : Assembler::int64;

  assert_different_registers(addr, expected, t0, t1);
  assert_different_registers(addr, new_val, t0, t1);

  Label retry, success, fail, done;

  __ bind(retry);

  // Step 1: Try to CAS.
  __ cmpxchg(addr, expected, new_val, size, acquire, release, /* result */ t1);

  // If success, then we are done.
  __ beq(expected, t1, success);

  // Step 2: CAS failed, check the forwarded pointer.
  __ mv(t0, t1);

  if (is_narrow) {
    __ decode_heap_oop(t0, t0);
  }
  resolve_forward_pointer(masm, t0);
  if (is_narrow) {
    __ encode_heap_oop(t0, t0);
  }

  // Report failure when the forwarded oop was not expected.
  __ bne(t0, expected, fail);

  // Step 3: CAS again using the forwarded oop.
  __ cmpxchg(addr, t1, new_val, size, acquire, release, /* result */ t0);

  // Retry when failed.
  __ bne(t0, t1, retry);

  __ bind(success);
  if (is_cae) {
    __ mv(result, expected);
  } else {
    __ addi(result, zr, 1);
  }
  __ j(done);

  __ bind(fail);
  if (is_cae) {
    __ mv(result, t0);
  } else {
    __ mv(result, zr);
  }

  __ bind(done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */, false /* unaligned */);
  }
  __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ j(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce,
                                                                    ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == x10, "result must arrive in x10");
  assert_different_registers(tmp1, tmp2, t0);

  if (res != obj) {
    __ mv(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mv(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(tmp2, tmp2, tmp1);
    __ lbu(tmp2, Address(tmp2));
    __ beqz(tmp2, *stub->continuation(), true /* is_far */);
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);

  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ j(*stub->continuation());
}

#undef __

#define __ sasm->

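// C1 slow-path stub for the SATB pre-barrier. This shared runtime blob
// re-checks that marking is still active, then enqueues the previous value
// (passed as arg0), falling back to a VM leaf call when the thread-local
// SATB buffer is full.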
void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = x10;
  const Register thread = xthread;
  const Register tmp = t0;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lb(tmp, gc_state);
  __ andi(tmp, tmp, ShenandoahHeap::MARKING);
  __ beqz(tmp, done);

  // Can we store the original value in the thread's buffer?
  __ ld(tmp, queue_index);
  __ beqz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ sd(tmp, queue_index);
  __ ld(t1, buffer);
  __ add(tmp, tmp, t1);
  __ load_parameter(0, t1);
  __ sd(t1, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

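// C1 slow-path stub for the load-reference barrier. The object and its load
// address arrive as stub parameters; the stub dispatches to the runtime
// entry matching the access strength (strong/weak/phantom) and the oop
// encoding in use.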
void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm,
                                                                                    DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved
  // arg1 : load address

  __ push_call_clobbered_registers();
  __ load_parameter(0, x10);
  __ load_parameter(1, x11);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong);
    } else {
      if (UseCompressedOops) {
        __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else {
        __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_strong);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ li(ra, (int64_t)(uintptr_t)ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ jalr(ra);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);

  __ epilogue();
}

#undef __

#endif // COMPILER1