< prev index next >

src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp

Print this page

  25  */
  26 
  27 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  28 #include "gc/shenandoah/mode/shenandoahMode.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  31 #include "gc/shenandoah/shenandoahForwarding.hpp"
  32 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  34 #include "gc/shenandoah/shenandoahRuntime.hpp"
  35 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  36 #include "interpreter/interp_masm.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "runtime/javaThread.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #ifdef COMPILER1
  41 #include "c1/c1_LIRAssembler.hpp"
  42 #include "c1/c1_MacroAssembler.hpp"
  43 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  44 #endif






  45 
  46 #define __ masm->
  47 
  48 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
  49                                                        Register src, Register dst, Register count, RegSet saved_regs) {
  50   if (is_oop) {
  51     bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  52     if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
  53 
  54       Label done;
  55 
  56       // Avoid calling runtime if count == 0
  57       __ beqz(count, done);
  58 
  59       // Is GC active?
  60       Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  61       assert_different_registers(src, dst, count, t0);
  62 
  63       __ lbu(t0, gc_state);
  64       if (ShenandoahSATBBarrier && dest_uninitialized) {

 760       target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
 761     } else {
 762       target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
 763     }
 764   } else {
 765     assert(is_phantom, "only remaining strength");
 766     assert(is_native, "phantom must only be called off-heap");
 767     target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
 768   }
 769   __ rt_call(target);
 770   __ mv(t0, x10);
 771   __ pop_call_clobbered_registers();
 772   __ mv(x10, t0);
 773 
 774   __ epilogue();
 775 }
 776 
 777 #undef __
 778 
 779 #endif // COMPILER1

























































































































































































































































































































































































































































  25  */
  26 
  27 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  28 #include "gc/shenandoah/mode/shenandoahMode.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  31 #include "gc/shenandoah/shenandoahForwarding.hpp"
  32 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  34 #include "gc/shenandoah/shenandoahRuntime.hpp"
  35 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  36 #include "interpreter/interp_masm.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "runtime/javaThread.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #ifdef COMPILER1
  41 #include "c1/c1_LIRAssembler.hpp"
  42 #include "c1/c1_MacroAssembler.hpp"
  43 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  44 #endif
  45 #ifdef COMPILER2
  46 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  47 #include "opto/output.hpp"
  48 #include "utilities/population_count.hpp"
  49 #include "utilities/powerOfTwo.hpp"
  50 #endif
  51 
  52 #define __ masm->
  53 
  54 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
  55                                                        Register src, Register dst, Register count, RegSet saved_regs) {
  56   if (is_oop) {
  57     bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  58     if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
  59 
  60       Label done;
  61 
  62       // Avoid calling runtime if count == 0
  63       __ beqz(count, done);
  64 
  65       // Is GC active?
  66       Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  67       assert_different_registers(src, dst, count, t0);
  68 
  69       __ lbu(t0, gc_state);
  70       if (ShenandoahSATBBarrier && dest_uninitialized) {

 766       target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
 767     } else {
 768       target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
 769     }
 770   } else {
 771     assert(is_phantom, "only remaining strength");
 772     assert(is_native, "phantom must only be called off-heap");
 773     target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
 774   }
 775   __ rt_call(target);
 776   __ mv(t0, x10);
 777   __ pop_call_clobbered_registers();
 778   __ mv(x10, t0);
 779 
 780   __ epilogue();
 781 }
 782 
 783 #undef __
 784 
 785 #endif // COMPILER1
 786 
 787 #ifdef COMPILER2
 788 
 789 #undef __
 790 #define __ masm.
 791 
 792 bool ShenandoahBarrierStubC2::push_save_register_if_live(MacroAssembler& masm, Register reg) {
 793   if (is_live(reg)) {
 794     push_save_register(masm, reg);
 795     return true;
 796   } else {
 797     return false;
 798   }
 799 }
 800 
 801 void ShenandoahBarrierStubC2::push_save_register(MacroAssembler& masm, Register reg) {
 802   __ sw(reg, Address(sp, push_save_slot()));
 803 }
 804 
// Restores the full 64-bit contents of 'reg' from its stub save slot.
// The load width must match the width used by push_save_register() for the
// same slot, and pops must balance pushes on every runtime path.
void ShenandoahBarrierStubC2::pop_save_register(MacroAssembler& masm, Register reg) {
  __ ld(reg, Address(sp, pop_save_slot()));
}
 808 
// Conservatively reports that vector registers may be live, so callers always
// save/restore them around runtime calls.
bool ShenandoahBarrierStubC2::has_live_vector_registers() {
  // TODO: Implement; currently assumes vector registers.
  return true;
}
 813 
 814 bool ShenandoahBarrierStubC2::is_live(Register reg) {
 815   // TODO: Precompute the generic register map for faster lookups.
 816   RegMaskIterator rmi(preserve_set());
 817   while (rmi.has_next()) {
 818     const OptoReg::Name opto_reg = rmi.next();
 819     const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
 820     if (vm_reg->is_Register() && reg == vm_reg->as_Register()) {
 821       return true;
 822     }
 823   }
 824   return false;
 825 }
 826 
// Selects a general-purpose temp register that does not alias 'reg1' or any
// component of 'addr'. Prefers a register that is not live per the stub's
// preserve set; if none exists, falls back to a live one and sets
// 'selected_live' so the caller knows it must save/restore the selection.
Register ShenandoahBarrierStubC2::select_temp_register(bool& selected_live, Address addr, Register reg1) {
  Register tmp = noreg;
  Register fallback_live = noreg;

  // Try to select non-live first:
  for (int i = 0; i < Register::number_of_registers; i++) {
    Register r = as_Register(i);
    // Exclude reserved/fixed registers (fp, sp, heap base, thread, scratch
    // t0/t1, zero) and anything aliasing the caller-supplied operands.
    // NOTE(review): ra/x1 is not excluded here — confirm it can never be
    // selected while holding a live return address.
    if (r != fp && r != sp &&
        r != xheapbase && r != xthread &&
        r != t0 && r != t1 && r != zr &&
        r != reg1 && r != addr.base() && r != addr.index()) {
      if (!is_live(r)) {
        // A dead register can be clobbered freely; take the first one.
        tmp = r;
        break;
      } else if (fallback_live == noreg) {
        // Remember the first acceptable live candidate as a fallback.
        fallback_live = r;
      }
    }
  }

  // If we could not find a non-live register, select the live fallback:
  if (tmp == noreg) {
    tmp = fallback_live;
    selected_live = true;
  } else {
    selected_live = false;
  }

  assert(tmp != noreg, "successfully selected");
  assert_different_registers(tmp, reg1);
  assert_different_registers(tmp, addr.base());
  assert_different_registers(tmp, addr.index());
  return tmp;
}
 861 
// Emits the fast-path check for this stub: tests the per-thread fast gc-state
// bit corresponding to 'test_state' and enters the stub when it is set;
// otherwise execution falls through at the continuation label.
void ShenandoahBarrierStubC2::enter_if_gc_state(MacroAssembler& masm, const char test_state) {
  int bit_to_check = ShenandoahThreadLocalData::gc_state_to_fast_bit(test_state);
  Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_offset()));
  __ lbu(t0, gc_state_fast);
  __ test_bit(t0, t0, bit_to_check);

  // A conditional branch (bnez) cannot jump further than +/-4Kb, and the stub
  // entry may be out of range; so branch over an unconditional jump instead.
  __ beqz(t0, *continuation());
  __ j(*entry());

  // Fast path falls through here when the barrier is not needed.
  __ bind(*continuation());
}
 875 
 876 #undef __
 877 #define __ masm->
 878 
// Emits an atomic compare-and-set (exchange == false: boolean result in res)
// or compare-and-exchange (exchange == true: loaded value in res) on an oop
// field, wrapped in the Shenandoah barriers the node requires.
void ShenandoahBarrierSetAssembler::compare_and_set_c2(const MachNode* node, MacroAssembler* masm, Register res, Register addr,
    Register oldval, Register newval, Register tmp, bool exchange, bool maybe_null, bool narrow, bool weak, bool is_acquire) {
  const Assembler::Aqrl acquire = is_acquire ? Assembler::aq : Assembler::relaxed;
  const Assembler::Aqrl release = Assembler::rl;

  // Pre-barrier covers several things:
  //  a. Avoids false positives from CAS encountering to-space memory values.
  //  b. Satisfies the need for LRB for the CAE result.
  //  c. Records old value for the sake of SATB.
  //
  // (a) and (b) are covered because load barrier does memory location fixup.
  // (c) is covered by KA on the current memory value.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    // do_load == true: the stub reloads the current value from memory itself.
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, Address(addr, 0), narrow, /* do_load: */ true);
    // Build the gc-state mask: which states require entering the stub.
    char check = 0;
    check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)   ? ShenandoahHeap::HAS_FORWARDED : 0;
    assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for CAS/CAE");
    stub->enter_if_gc_state(*masm, check);
  }

  // Existing RISCV cmpxchg_oop already handles Shenandoah forwarded-value retry logic.
  // It returns:
  //   - boolean 0/1 for CAS (!exchange)
  //   - loaded/current value for CAE (exchange)
  ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, addr, oldval, newval, acquire, release, exchange /* is_cae */, res);

  // Post-barrier deals with card updates.
  card_barrier_c2(node, masm, Address(addr, 0));
}
 909 
 910 void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register preval,
 911     Register newval, Register addr, Register tmp, bool is_acquire) {
 912   const bool narrow = node->bottom_type()->isa_narrowoop();
 913 
 914   // Pre-barrier covers several things:
 915   //  a. Satisfies the need for LRB for the GAS result.
 916   //  b. Records old value for the sake of SATB.
 917   //
 918   // (a) is covered because load barrier does memory location fixup.
 919   // (b) is covered by KA on the current memory value.
 920   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
 921     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, Address(addr, 0), narrow, /* do_load: */ true);
 922     char check = 0;
 923     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
 924     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)   ? ShenandoahHeap::HAS_FORWARDED : 0;
 925     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for GAS");
 926     stub->enter_if_gc_state(*masm, check);
 927   }
 928 
 929   if (narrow) {
 930     if (is_acquire) {
 931       __ atomic_xchgalwu(preval, newval, addr);
 932     } else {
 933       __ atomic_xchgwu(preval, newval, addr);
 934     }
 935   } else {
 936     if (is_acquire) {
 937       __ atomic_xchgal(preval, newval, addr);
 938     } else {
 939       __ atomic_xchg(preval, newval, addr);
 940     }
 941   }
 942 
 943   // Post-barrier deals with card updates.
 944   card_barrier_c2(node, masm, Address(addr, 0));
 945 }
 946 
 947 void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm, Address dst, bool dst_narrow,
 948     Register src, bool src_narrow, Register tmp, bool is_volatile) {
 949 
 950   // Pre-barrier: SATB / keep-alive on current value in memory.
 951   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
 952     assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier(node), "Should not be required for stores");
 953     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, dst, dst_narrow, /* do_load: */ true);
 954     stub->enter_if_gc_state(*masm, ShenandoahHeap::MARKING);
 955   }
 956 
 957   // Do the actual store
 958   if (dst_narrow) {
 959     Register actual_src = src;
 960     if (!src_narrow) {
 961       assert(tmp != noreg, "need temp register");
 962       __ mv(tmp, src);
 963       if (ShenandoahBarrierStubC2::maybe_null(node)) {
 964         __ encode_heap_oop(tmp, tmp);
 965       } else {
 966         __ encode_heap_oop_not_null(tmp, tmp);
 967       }
 968       actual_src = tmp;
 969     }
 970 
 971     if (is_volatile) {
 972       __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
 973       __ sw(actual_src, dst);
 974     } else {
 975       __ sw(actual_src, dst);
 976     }
 977   } else {
 978     if (is_volatile) {
 979       __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore);
 980       __ sd(src, dst);
 981     } else {
 982       __ sd(src, dst);
 983     }
 984   }
 985 
 986   // Post-barrier: card updates.
 987   card_barrier_c2(node, masm, dst);
 988 }
 989 
 990 void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src, bool is_acquire) {
 991   const bool narrow = node->bottom_type()->isa_narrowoop();
 992 
 993   // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
 994   if (narrow) {
 995     __ lwu(dst, src);
 996     if (is_acquire) {
 997       __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
 998     }
 999   } else {
1000     __ ld(dst, src);
1001     if (is_acquire) {
1002       __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
1003     }
1004   }
1005 
1006   // Post-barrier: LRB / KA / weak-root processing.
1007   if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
1008     ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, dst, src, narrow, /* do_load: */ false);
1009     char check = 0;
1010     check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node)    ? ShenandoahHeap::MARKING : 0;
1011     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node)      ? ShenandoahHeap::HAS_FORWARDED : 0;
1012     check |= ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
1013     stub->enter_if_gc_state(*masm, check);
1014   }
1015 }
1016 
// Emits the post-store card-marking barrier for 'address', if the node needs
// one. Clobbers t0 and t1 (scratch only).
void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Address address) {
  if (!ShenandoahBarrierStubC2::needs_card_barrier(node)) {
    return;
  }

  // We store zr below to dirty the card, so dirty must be encoded as 0.
  assert(CardTable::dirty_card_val() == 0, "must be");
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

  // t1 = effective address
  __ la(t1, address);

  // t1 = card index
  __ srli(t1, t1, CardTable::card_shift());

  // t0 = card table base (read from thread-local data, so it tracks the
  // currently-active card table)
  Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ld(t0, curr_ct_holder_addr);

  // t1 = &card_table[card_index]
  __ add(t1, t1, t0);

  if (UseCondCardMark) {
    // Check first and skip the store when the card is already dirty (== 0),
    // avoiding needless write traffic on hot cards.
    Label L_already_dirty;
    __ lbu(t0, Address(t1));
    __ beqz(t0, L_already_dirty);
    __ sb(zr, Address(t1));
    __ bind(L_already_dirty);
  } else {
    __ sb(zr, Address(t1));
  }
}
1048 
1049 #undef __
1050 #define __ masm.
1051 
// Emits the out-of-line slow path for this barrier stub: (re)acquires the oop
// value, null-checks it where needed, then applies keep-alive (SATB) and
// load-reference barriers, and finally jumps back to the fast path.
void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);

  assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");

  Label L_done;

  // Stub entry
  __ bind(*BarrierStubC2::entry());

  // If needed, perform the load here so stub logic sees the current oop value.
  if (_do_load) {
    __ load_heap_oop(_obj, _addr, noreg, noreg, AS_RAW);
  } else if (_narrow) {
    // Decode narrow oop before barrier processing.
    if (_maybe_null) {
      __ decode_heap_oop(_obj, _obj);
    } else {
      __ decode_heap_oop_not_null(_obj, _obj);
    }
  }

  // A null oop needs no barrier work at all. When we loaded here, or the
  // caller says the value may be null, check and bail out early.
  if (_do_load || _maybe_null) {
    __ beqz(_obj, L_done);
  }

  // SATB / keep-alive barrier on the (non-null) value.
  keepalive(masm, _obj, t0);

  // Load-reference barrier: fix up _obj (and the memory location) if forwarded.
  lrb(masm, _obj, _addr, noreg);

  // If object is narrow, we need to encode it before exiting.
  // For encoding, dst can only turn null if we are dealing with weak loads.
  // Otherwise, we have already null-checked. We can skip all this if we performed
  // the load ourselves, which means the value is not used by caller.
  if (_narrow && !_do_load) {
    if (_needs_load_ref_weak_barrier) {
      __ encode_heap_oop(_obj, _obj);
    } else {
      __ encode_heap_oop_not_null(_obj, _obj);
    }
  }

  // Go back to fast path
  __ bind(L_done);
  __ j(*continuation());
}
1098 
1099 void ShenandoahBarrierStubC2::keepalive(MacroAssembler& masm, Register obj, Register tmp1, Label* L_done_unused) {
1100   Address index(xthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
1101   Address buffer(xthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
1102   Label L_runtime;
1103   Label L_done;
1104 
1105   // The node doesn't even need keepalive barrier, just don't check anything else
1106   if (!_needs_keep_alive_barrier) {
1107     return;
1108   }
1109 
1110   // If both LRB and KeepAlive barriers are required (rare), do a runtime check
1111   // for enabled barrier.
1112   if (_needs_load_ref_barrier) {
1113     Address gcs_addr(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
1114     __ lbu(t0, gcs_addr);
1115     __ test_bit(t0, t0, ShenandoahHeap::MARKING_BITPOS);
1116     __ beqz(t0, L_done);
1117   }
1118 
1119   // If the queue is full, go to runtime.
1120   __ ld(tmp1, index);
1121   __ beqz(tmp1, L_runtime);
1122 
1123   bool selected_live = false;
1124   Register tmp2 = select_temp_register(selected_live, _addr, obj);
1125   if (selected_live) {
1126     push_save_register(masm, tmp2);
1127   }
1128 
1129   // Push into SATB queue.
1130   __ subi(tmp1, tmp1, wordSize);
1131   __ sd(tmp1, index);
1132   __ ld(tmp2, buffer);
1133   __ add(tmp1, tmp1, tmp2);
1134   __ sd(obj, Address(tmp1, 0));
1135   __ j(L_done);
1136 
1137   // Runtime call
1138   __ bind(L_runtime);
1139 
1140   preserve(obj);
1141   {
1142     SaveLiveRegisters save_registers(&masm, this);
1143     __ mv(c_rarg0, obj);
1144     __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
1145   }
1146 
1147   __ bind(L_done);
1148 
1149   if (selected_live) {
1150     pop_save_register(masm, tmp2);
1151   }
1152 }
1153 
// Emits the load-reference barrier for 'obj' loaded from 'addr': for strong
// refs, a fast in-collection-set check may prove no work is needed; otherwise
// the LRB runtime entry is called to return the canonical (to-space) object.
void ShenandoahBarrierStubC2::lrb(MacroAssembler& masm, Register obj, Address addr, Register tmp, Label* L_done_unused) {
  Label L_done;

  // The node doesn't even need LRB barrier, just don't check anything else
  if (!_needs_load_ref_barrier) {
    return;
  }

  if ((_node->barrier_data() & ShenandoahBitStrong) != 0) {
    // If both LRB and KeepAlive barriers are required (rare), do a runtime
    // check for enabled barrier.
    if (_needs_keep_alive_barrier) {
      Address gcs_addr(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ lbu(t0, gcs_addr);

      if (_needs_load_ref_weak_barrier) {
        // Fold the WEAK_ROOTS bit down so the single test below fires for
        // either state. NOTE(review): this assumes the tested bit position
        // lines up after the shift (i.e. HAS_FORWARDED_BITPOS == 0) — confirm
        // against ShenandoahHeap's gc-state bit layout.
        __ srli(t1, t0, ShenandoahHeap::WEAK_ROOTS_BITPOS);
        __ orr(t0, t0, t1);
      }

      __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS);
      __ beqz(t0, L_done);
    }

    // Weak/phantom loads always need to go to runtime. For strong refs we
    // check if the object in cset, if they are not, then we are done with LRB.
    __ mv(t1, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(t0, obj, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(t1, t1, t0);
    __ lbu(t1, Address(t1));
    __ beqz(t1, L_done);
  }

  // obj is about to be replaced by the runtime result; it no longer needs to
  // be preserved across the call.
  dont_preserve(obj);
  {
    SaveLiveRegisters save_registers(&masm, this);

    // Runtime call wants:
    //   c_rarg0 <- obj
    //   c_rarg1 <- lea(addr)
    if (c_rarg0 == obj) {
      __ la(c_rarg1, addr);
    } else if (c_rarg1 == obj) {
      // Set up arguments in reverse, and then flip them
      __ la(c_rarg0, addr);
      // flip them
      __ mv(t0, c_rarg0);
      __ mv(c_rarg0, c_rarg1);
      __ mv(c_rarg1, t0);
    } else {
      assert_different_registers(c_rarg1, obj);
      __ la(c_rarg1, addr);
      __ mv(c_rarg0, obj);
    }

    // Get address of runtime LRB entry and call it
    __ rt_call(lrb_runtime_entry_addr());

    // If we loaded the object in the stub it means we don't need to return it
    // to fastpath, so no need to make this mov.
    if (!_do_load) {
      __ mv(obj, x10);
    }
  }

  __ bind(L_done);
}
1221 
// Platform hook invoked after stub initialization at code offset 'offset';
// the RISC-V port needs no additional work here.
void ShenandoahBarrierStubC2::post_init(int offset) {
  // Do nothing.
}
1225 #endif // COMPILER2
< prev index next >