/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"

#define __ masm->
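// Plain, barrier-free load. GC-specific BarrierSetAssembler subclasses
// override load_at where they need to wrap this access in a read barrier.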
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {
  assert_cond(masm != NULL);

  // RA is live. It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
    case T_OBJECT:  // fall through
    case T_ARRAY: {
      if (in_heap) {
        if (UseCompressedOops) {
          __ lwu(dst, src);
          if (is_not_null) {
            __ decode_heap_oop_not_null(dst);
          } else {
            __ decode_heap_oop(dst);
          }
        } else {
          __ ld(dst, src);
        }
      } else {
        assert(in_native, "why else?");
        __ ld(dst, src);
      }
      break;
    }
    case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
    case T_BYTE:    __ load_signed_byte   (dst, src); break;
    case T_CHAR:    __ load_unsigned_short(dst, src); break;
    case T_SHORT:   __ load_signed_short  (dst, src); break;
    case T_INT:     __ lw                 (dst, src); break;
    case T_LONG:    __ ld                 (dst, src); break;
    case T_ADDRESS: __ ld                 (dst, src); break;
    case T_FLOAT:   __ flw                (f10, src); break;
    case T_DOUBLE:  __ fld                (f10, src); break;
    default: Unimplemented();
  }
}
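// Plain store with no GC write barrier. Collectors that need pre- or
// post-write barriers (e.g. G1's SATB and card-marking barriers) override
// store_at and delegate the raw store to this implementation.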
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  assert_cond(masm != NULL);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY: {
      val = val == noreg ? zr : val;
      if (in_heap) {
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (val != zr) {
            __ encode_heap_oop(val);
          }
          __ sw(val, dst);
        } else {
          __ sd(val, dst);
        }
      } else {
        assert(in_native, "why else?");
        __ sd(val, dst);
      }
      break;
    }
    case T_BOOLEAN:
      __ andi(val, val, 0x1);  // boolean is true if LSB is 1
      __ sb(val, dst);
      break;
    case T_BYTE:    __ sb(val, dst); break;
    case T_CHAR:    __ sh(val, dst); break;
    case T_SHORT:   __ sh(val, dst); break;
    case T_INT:     __ sw(val, dst); break;
    case T_LONG:    __ sd(val, dst); break;
    case T_ADDRESS: __ sd(val, dst); break;
    case T_FLOAT:   __ fsw(f10,  dst); break;
    case T_DOUBLE:  __ fsd(f10,  dst); break;
    default: Unimplemented();
  }
}
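// Resolve a jobject in native code without calling into the VM: clear the
// weak tag bit and load straight through the handle. This base version never
// takes the slow path; collectors that need a read barrier here override the
// method and may branch to slowpath instead.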
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  assert_cond(masm != NULL);
  // If mask changes we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andi(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ld(obj, Address(obj, 0));             // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for tmp2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register tmp1,
                                        Register tmp2,
                                        Label& slow_case,
                                        bool is_far) {
  assert_cond(masm != NULL);
  assert_different_registers(obj, tmp2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = tmp2;
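  // Bump-pointer allocation within the TLAB:
  //   obj = tlab_top; end = obj + size; fail to slow_case if end passes
  //   tlab_end; otherwise publish end as the new tlab_top.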
  __ ld(obj, Address(xthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ la(end, Address(obj, con_size_in_bytes));
  } else {
    __ add(end, obj, var_size_in_bytes);
  }
  __ ld(t0, Address(xthread, JavaThread::tlab_end_offset()));
  __ bgtu(end, t0, slow_case, is_far);

  // update the tlab top pointer
  __ sd(end, Address(xthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
}

// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register tmp1,
                                        Label& slow_case,
                                        bool is_far) {
  assert_cond(masm != NULL);
  assert_different_registers(obj, var_size_in_bytes, tmp1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ j(slow_case);
  } else {
    Register end = tmp1;
    Label retry;
    __ bind(retry);

    // Get the current end of the heap
    ExternalAddress address_end((address) Universe::heap()->end_addr());
    {
      int32_t offset;
      __ la_patchable(t1, address_end, offset);
      __ ld(t1, Address(t1, offset));
    }

    // Get the current top of the heap
    ExternalAddress address_top((address) Universe::heap()->top_addr());
    {
      int32_t offset;
      __ la_patchable(t0, address_top, offset);
      __ addi(t0, t0, offset);
      __ lr_d(obj, t0, Assembler::aqrl);
    }
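    // obj now holds the current heap top, loaded with a reservation (lr.d).
    // The store-conditional below succeeds only if no other thread has moved
    // the top in the meantime; otherwise we loop back to retry.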

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ la(end, Address(obj, con_size_in_bytes));
    } else {
      __ add(end, obj, var_size_in_bytes);
    }

    // if end < obj then we wrapped around high memory
    __ bltu(end, obj, slow_case, is_far);

    __ bgtu(end, t1, slow_case, is_far);

    // If heap_top hasn't been changed by some other thread, update it.
    __ sc_d(t1, end, t0, Assembler::rl);
    __ bnez(t1, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, tmp1);
  }
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register tmp1) {
  assert_cond(masm != NULL);
  assert(tmp1->is_valid(), "need temp reg");

  __ ld(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(tmp1, tmp1, var_size_in_bytes);
  } else {
    __ add(tmp1, tmp1, con_size_in_bytes);
  }
  __ sd(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
}
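// nmethod entry barrier: compare the guard value embedded in the nmethod
// (at the 'guard' label below) against the per-thread disarmed value. If
// they match, the nmethod is disarmed and execution falls through; otherwise
// we call the method_entry_barrier stub, which processes the nmethod and
// can disarm it by patching the guard word.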
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == NULL) {
    return;
  }

  // RISCV atomic operations require that the memory address be naturally aligned.
  __ align(4);

  Label skip, guard;
  Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));
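  // Load the per-nmethod guard value, emitted inline at the 'guard' label below.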
  __ lwu(t0, guard);

  // Subsequent loads of oops must occur after load of guard value.
  // BarrierSetNMethod::disarm sets guard with release semantics.
  __ membar(MacroAssembler::LoadLoad);
  __ lwu(t1, thread_disarmed_addr);
  __ beq(t0, t1, skip);

  int32_t offset = 0;
  __ movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), offset);
  __ jalr(ra, t0, offset);
  __ j(skip);

  __ bind(guard);

  MacroAssembler::assert_alignment(__ pc());
  __ emit_int32(0); // nmethod guard value. Skipped over in common case.

  __ bind(skip);
}
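// c2i entry barrier: before entering a method from a c2i adapter, check that
// the method's holder is still alive. If the holder's ClassLoaderData is
// neither strong nor reachable through its weak handle, the class is being
// unloaded concurrently, so divert to the handle_wrong_method stub.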
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ beqz(xmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(t0, xmethod);

  // Is it a strong CLD?
  __ lwu(t1, Address(t0, ClassLoaderData::keep_alive_offset()));
  __ bnez(t1, method_live);

  // Is it a weak but alive CLD?
  __ push_reg(RegSet::of(x28, x29), sp);

  __ ld(x28, Address(t0, ClassLoaderData::holder_offset()));

  // Uses x28 & x29, so we must pass new temporaries.
  __ resolve_weak_handle(x28, x29);
  __ mv(t0, x28);

  __ pop_reg(RegSet::of(x28, x29), sp);

  __ bnez(t0, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}