/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"

#define __ masm->
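
// Plain (no GC barrier) load of a value of the given BasicType from src into dst.
// For oops and oop arrays in the heap, a compressed oop is loaded with lwu and then
// decoded; outside the heap (IN_NATIVE) the full word is loaded. Note that for
// T_FLOAT/T_DOUBLE the value is produced in f10 and dst is not used.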
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {
  assert_cond(masm != NULL);

  // LR is live. It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
    case T_OBJECT:  // fall through
    case T_ARRAY: {
      if (in_heap) {
        if (UseCompressedOops) {
          __ lwu(dst, src);
          if (is_not_null) {
            __ decode_heap_oop_not_null(dst);
          } else {
            __ decode_heap_oop(dst);
          }
        } else {
          __ ld(dst, src);
        }
      } else {
        assert(in_native, "why else?");
        __ ld(dst, src);
      }
      break;
    }
    case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
    case T_BYTE:    __ load_signed_byte   (dst, src); break;
    case T_CHAR:    __ load_unsigned_short(dst, src); break;
    case T_SHORT:   __ load_signed_short  (dst, src); break;
    case T_INT:     __ lw                 (dst, src); break;
    case T_LONG:    __ ld                 (dst, src); break;
    case T_ADDRESS: __ ld                 (dst, src); break;
    case T_FLOAT:   __ flw                (f10, src); break;
    case T_DOUBLE:  __ fld                (f10, src); break;
    default: Unimplemented();
  }
}
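
// Plain store of val (or f10 for T_FLOAT/T_DOUBLE) of the given BasicType to dst.
// For oops in the heap, the value is encoded and stored as a compressed oop when
// UseCompressedOops is enabled; a noreg val is stored as null (zr).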
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  assert_cond(masm != NULL);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY: {
      val = val == noreg ? zr : val;
      if (in_heap) {
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (val != zr) {
            __ encode_heap_oop(val);
          }
          __ sw(val, dst);
        } else {
          __ sd(val, dst);
        }
      } else {
        assert(in_native, "why else?");
        __ sd(val, dst);
      }
      break;
    }
    case T_BOOLEAN:
      __ andi(val, val, 0x1);  // boolean is true if LSB is 1
      __ sb(val, dst);
      break;
    case T_BYTE:    __ sb(val, dst); break;
    case T_CHAR:    __ sh(val, dst); break;
    case T_SHORT:   __ sh(val, dst); break;
    case T_INT:     __ sw(val, dst); break;
    case T_LONG:    __ sd(val, dst); break;
    case T_ADDRESS: __ sd(val, dst); break;
    case T_FLOAT:   __ fsw(f10, dst); break;
    case T_DOUBLE:  __ fsd(f10, dst); break;
    default: Unimplemented();
  }
}
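
// Resolve a jobject from native code without calling into the VM. This base
// implementation clears the weak tag bit and loads the object from the handle;
// GC-specific subclasses may branch to slowpath when extra barrier work is needed.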
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  assert_cond(masm != NULL);
  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate.
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andi(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ld(obj, Address(obj, 0));             // *obj
}
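
// Bump-pointer allocation of con_size_in_bytes (or var_size_in_bytes) bytes in the
// current thread's TLAB; branches to slow_case if the TLAB cannot satisfy the request.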
// Defines obj, preserves var_size_in_bytes, okay for tmp2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register tmp1,
                                        Register tmp2,
                                        Label& slow_case,
                                        bool is_far) {
  assert_cond(masm != NULL);
  assert_different_registers(obj, tmp2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = tmp2;

  __ ld(obj, Address(xthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ la(end, Address(obj, con_size_in_bytes));
  } else {
    __ add(end, obj, var_size_in_bytes);
  }
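  // Take the slow path if the new top would exceed the TLAB end.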
  __ ld(t0, Address(xthread, JavaThread::tlab_end_offset()));
  __ bgtu(end, t0, slow_case, is_far);

  // update the tlab top pointer
  __ sd(end, Address(xthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
}
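
// Lock-free bump-pointer allocation directly in eden, using an LR/SC retry loop on
// the shared heap top. Branches to slow_case if inline contiguous allocation is not
// supported or the heap is exhausted.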
// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register tmp1,
                                        Label& slow_case,
                                        bool is_far) {
  assert_cond(masm != NULL);
  assert_different_registers(obj, var_size_in_bytes, tmp1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ j(slow_case);
  } else {
    Register end = tmp1;
    Label retry;
    int32_t offset = 0;
    __ bind(retry);

    // Get the current top of the heap
    ExternalAddress address_top((address) Universe::heap()->top_addr());
    __ la_patchable(t2, address_top, offset);
    __ addi(t2, t2, offset);
    __ lr_d(obj, t2, Assembler::aqrl);

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ la(end, Address(obj, con_size_in_bytes));
    } else {
      __ add(end, obj, var_size_in_bytes);
    }

    // if end < obj then we wrapped around high memory
    __ bltu(end, obj, slow_case, is_far);

    Register heap_end = t1;
    // Get the current end of the heap
    ExternalAddress address_end((address) Universe::heap()->end_addr());
    offset = 0;
    __ la_patchable(heap_end, address_end, offset);
    __ ld(heap_end, Address(heap_end, offset));

    __ bgtu(end, heap_end, slow_case, is_far);

    // If heap_top hasn't been changed by some other thread, update it.
    __ sc_d(t1, end, t2, Assembler::rl);
    __ bnez(t1, retry);
    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, tmp1);
  }
}
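
// Accumulate the size of the new allocation into the per-thread allocated_bytes counter.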
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register tmp1) {
  assert_cond(masm != NULL);
  assert(tmp1->is_valid(), "need temp reg");

  __ ld(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(tmp1, tmp1, var_size_in_bytes);
  } else {
    __ add(tmp1, tmp1, con_size_in_bytes);
  }
  __ sd(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
}
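
// nmethod entry barrier: compare the nmethod's guard value (emitted after the
// barrier code below) against the per-thread disarmed value and call the method
// entry barrier stub when they differ.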
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == NULL) {
    return;
  }

  Label skip, guard;
  Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));

  __ lwu(t0, guard);

  // Subsequent loads of oops must occur after load of guard value.
  // BarrierSetNMethod::disarm sets guard with release semantics.
  __ membar(MacroAssembler::LoadLoad);
  __ lwu(t1, thread_disarmed_addr);
  __ beq(t0, t1, skip);
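
  // Guard and disarmed values differ: call the method entry barrier stub.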
  int32_t offset = 0;
  __ movptr_with_offset(t0, StubRoutines::riscv64::method_entry_barrier(), offset);
  __ jalr(lr, t0, offset);
  __ j(skip);

  __ bind(guard);

  __ emit_int32(0); // nmethod guard value. Skipped over in common case.

  __ bind(skip);
}
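
// c2i entry barrier: make sure the callee's holder class loader is still alive
// before entering a method that might be concurrently unloading; otherwise jump
// to the handle-wrong-method stub.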
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ beqz(xmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(t0, xmethod);

  // Is it a strong CLD?
  __ lwu(t1, Address(t0, ClassLoaderData::keep_alive_offset()));
  __ bnez(t1, method_live);

  // Is it a weak but alive CLD?
  __ push_reg(RegSet::of(x28, x29), sp);

  __ ld(x28, Address(t0, ClassLoaderData::holder_offset()));

  // Uses x28 & x29, so we must pass new temporaries.
  __ resolve_weak_handle(x28, x29);
  __ mv(t0, x28);

  __ pop_reg(RegSet::of(x28, x29), sp);
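
  // A non-null holder oop means the CLD is still alive, so the method will not be unloaded.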
  __ bnez(t0, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}