/*
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ beqz(count, done);

      // Is GC active?
      Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      assert_different_registers(src, dst, count, t0);

      __ lbu(t0, gc_state);
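      // An uninitialized destination has no previous values for the SATB
      // barrier to enqueue, so only evacuation (HAS_FORWARDED) forces the
      // slow path; otherwise concurrent marking does as well.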
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS);
        __ beqz(t0, done);
      } else {
        __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ beqz(t0, done);
      }

      __ push_reg(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop_reg(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp1,
                                                           Register tmp2,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
  assert(thread == xthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwu(tmp1, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbu(tmp1, in_progress);
  }
  __ beqz(tmp1, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ beqz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
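  // The index counts down in bytes from the buffer capacity, so a zero
  // index means the buffer is full and must be flushed by the runtime.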
  __ ld(tmp1, index);                  // tmp := *index_adr
  __ beqz(tmp1, runtime);              // tmp == 0? If yes, goto runtime

  __ sub(tmp1, tmp1, wordSize);        // tmp := tmp - wordSize
  __ sd(tmp1, index);                  // *index_adr := tmp
  __ ld(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ sd(pre_val, Address(tmp1, 0));
  __ j(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(x10);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push_reg(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks whether *(rfp+frame::interpreter_frame_last_sp) is null.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.
  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_reg(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");

  Label is_null;
  __ beqz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even t0 and t1, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks if the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing if the lowest two bits are == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
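  //
  // Worked example (markWord::marked_value == 0b11): a forwarded object's
  // mark is fwd_ptr | 0b11, so the inverted mark has low bits 0b00 and the
  // branch below falls through; (~mark | 0b11) inverted again yields
  // fwd_ptr with the low bits cleared. Any other mark leaves dst untouched.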
  RegSet saved_regs = RegSet::of(t2);
  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = t0;
    if (tmp == dst) {
      tmp = t1;
    }
    saved_regs += RegSet::of(tmp);
  }

  assert_different_registers(tmp, dst, t2);
  __ push_reg(saved_regs, sp);

  Label done;
  __ ld(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ xori(tmp, tmp, -1); // invert: XOR with all-ones is bitwise NOT
  __ andi(t2, tmp, markWord::lock_mask_in_place);
  __ bnez(t2, done);
  __ ori(tmp, tmp, markWord::marked_value);
  __ xori(dst, tmp, -1); // invert back
  __ bind(done);

  __ pop_reg(saved_regs, sp);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
                                                           Register dst,
                                                           Address load_addr,
                                                           DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != t1 && load_addr.base() != t1, "need t1");
  assert_different_registers(load_addr.base(), t0, t1);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;
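  // Native (off-heap) roots always hold uncompressed oops, so only
  // in-heap accesses can use the narrow-oop runtime entries.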

  Label heap_stable, not_cset;
  __ enter();
  Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lbu(t1, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ test_bit(t1, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
    __ beqz(t1, heap_stable);
  } else {
    Label lrb;
    __ test_bit(t0, t1, ShenandoahHeap::WEAK_ROOTS_BITPOS);
    __ bnez(t0, lrb);
    __ test_bit(t0, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
    __ beqz(t0, heap_stable);
    __ bind(lrb);
  }

  // use x11 for load address
  Register result_dst = dst;
  if (dst == x11) {
    __ mv(t1, dst);
    dst = t1;
  }

  // Save x10 and x11, unless one of them is the result register
  RegSet saved_regs = RegSet::of(x10, x11) - result_dst;
  __ push_reg(saved_regs, sp);
  __ la(x11, load_addr);
  __ mv(x10, dst);

  // Test for in-cset
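  // The collection-set map has one byte per heap region, indexed by
  // oop >> region-size-shift; a non-zero byte marks an in-cset region.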
  if (is_strong) {
    __ mv(t1, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(t0, x10, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(t1, t1, t0);
    __ lbu(t1, Address(t1));
    __ test_bit(t0, t1, 0);
    __ beqz(t0, not_cset);
  }

  __ push_call_clobbered_registers();
  address target = nullptr;
  if (is_strong) {
    if (is_narrow) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    }
  } else if (is_weak) {
    if (is_narrow) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ call(target);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);
  __ bind(not_cset);
  __ mv(result_dst, x10);
  __ pop_reg(saved_regs, sp);

  __ bind(heap_stable);
  __ leave();
}

//
// Arguments:
//
// Inputs:
//   src:        oop location to load from, might be clobbered
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   x28 or x29 (temp reg)
//
// Alias:
//   dst: x28 or x29 (might be used as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
                                            DecoratorSet decorators,
                                            BasicType type,
                                            Register dst,
                                            Address src,
                                            Register tmp1,
                                            Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    RegSet saved_regs;
    if (dst == src.base()) {
      dst = (src.base() == x28) ? x29 : x28;
      saved_regs = RegSet::of(dst);
      __ push_reg(saved_regs, sp);
    }
    assert_different_registers(dst, src.base());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mv(result_dst, dst);
      dst = result_dst;
    }

    if (saved_regs.bits() != 0) {
      __ pop_reg(saved_regs, sp);
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter();
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           xthread /* thread */,
                           tmp1 /* tmp1 */,
                           tmp2 /* tmp2 */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // flatten object address if needed
  if (dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mv(tmp3, dst.base());
    }
  } else {
    __ la(tmp3, dst);
  }
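  // tmp3 now holds the exact field address; the pre-barrier below loads
  // the previous value from it for SATB enqueueing.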

  shenandoah_write_barrier_pre(masm,
                               tmp3 /* obj */,
                               tmp2 /* pre_val */,
                               xthread /* thread */,
                               tmp1  /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    // Barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = t1;
      __ mv(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ beqz(obj, done);

  assert(obj != t1, "need t1");
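  // jni_env points at the thread's JNIEnv field; subtracting that field's
  // offset lets us address gc_state relative to jni_env without xthread.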
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lbu(t1, gc_state);

  // Check for heap in evacuation phase
  __ test_bit(t0, t1, ShenandoahHeap::EVACUATION_BITPOS);
  __ bnez(t0, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation.  The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected.  There are two
// races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object.  In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers t0, t1
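//
// In pseudocode, the emitted sequence is roughly:
//
//   retry:
//     witness = CAS(addr, expected, new_val);
//     if (witness == expected) goto success;             // Step 1
//     if (resolve_forward_pointer(witness) != expected)
//       goto fail;                                       // Step 2: truly different object
//     if (CAS(addr, witness, new_val) != witness)        // Step 3: witness was a stale
//       goto retry;                                      //   from-space copy of expected
//   success:
//     ...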
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                Assembler::Aqrl acquire,
                                                Assembler::Aqrl release,
                                                bool is_cae,
                                                Register result) {
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::uint32 : Assembler::int64;

  assert_different_registers(addr, expected, t0, t1);
  assert_different_registers(addr, new_val, t0, t1);

  Label retry, success, fail, done;

  __ bind(retry);

  // Step 1: Try to CAS.
  __ cmpxchg(addr, expected, new_val, size, acquire, release, /* result */ t1);

  // If success, then we are done.
  __ beq(expected, t1, success);

  // Step 2: CAS failed, check the forwarded pointer.
  __ mv(t0, t1);

  if (is_narrow) {
    // Decode the witness in order to resolve its forward pointer, then
    // encode it again to compare against the (narrow) expected value.
    __ decode_heap_oop(t0, t0);
    resolve_forward_pointer(masm, t0);
    __ encode_heap_oop(t0, t0);
  } else {
    resolve_forward_pointer(masm, t0);
  }

  // Report failure when the forwarded oop was not expected.
  __ bne(t0, expected, fail);

  // Step 3: CAS again using the forwarded oop.
  __ cmpxchg(addr, t1, new_val, size, acquire, release, /* result */ t0);

  // Retry when failed.
  __ bne(t0, t1, retry);

  __ bind(success);
  if (is_cae) {
    __ mv(result, expected);
  } else {
    __ mv(result, 1);
  }
  __ j(done);

  __ bind(fail);
  if (is_cae) {
    __ mv(result, t0);
  } else {
    __ mv(result, zr);
  }

  __ bind(done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */);
  }
  __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ j(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce,
                                                                    ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == x10, "result must arrive in x10");
  assert_different_registers(tmp1, tmp2, t0);

  if (res != obj) {
    __ mv(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mv(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(tmp2, tmp2, tmp1);
    __ lbu(tmp2, Address(tmp2));
    __ beqz(tmp2, *stub->continuation(), true /* is_far */);
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);

  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ j(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = x10;
  const Register thread = xthread;
  const Register tmp = t0;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lb(tmp, gc_state);
  __ test_bit(tmp, tmp, ShenandoahHeap::MARKING_BITPOS);
  __ beqz(tmp, done);

  // Can we store original value in the thread's buffer?
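  // As in satb_write_barrier_pre: the queue index counts down in bytes,
  // and a zero index means the buffer is full.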
  __ ld(tmp, queue_index);
  __ beqz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ sd(tmp, queue_index);
  __ ld(t1, buffer);
  __ add(tmp, tmp, t1);
  __ load_parameter(0, t1);
  __ sd(t1, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm,
                                                                                    DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, x10);
  __ load_parameter(1, x11);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  address target  = nullptr;
  if (is_strong) {
    if (is_native) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    } else {
      if (UseCompressedOops) {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ call(target);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);

  __ epilogue();
}

#undef __

#endif // COMPILER1