/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_riscv.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_riscv.inline.hpp"


#define __ ce->masm()->

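// Slow path of a safepoint poll: record the address of the poll in the thread's
// saved_exception_pc field, then jump to the shared polling page return handler.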
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
  __ relocate(safepoint_pc.rspec());
  __ la(t0, safepoint_pc.target());
  __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

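// Invocation/backedge counter overflow: pass the Method* and the bci to the
// counter_overflow runtime stub, then resume at the continuation.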
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(t0, m);
  ce->store_parameter(t0, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

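// Slow path of a failed array bounds check: either deoptimize (when the
// CodeEmitInfo requests it) or call the runtime stub that throws the
// exception, with the index in t0 and, for range checks, the array in t1.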
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mv(t0, _index->as_register());
  } else {
    __ mv(t0, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != LIR_Opr::nullOpr(), "sanity");
    __ mv(t1, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  int32_t off = 0;
  __ la_patchable(ra, RuntimeAddress(Runtime1::entry_for(stub_id)), off);
  __ jalr(ra, ra, off);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

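// A loop predicate failed: the predicate_failed_trap stub deoptimizes the
// frame so the method can be recompiled without the predicate.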
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

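// Record the offset of the faulting division in the implicit exception table
// (when known) and call the runtime stub that throws the ArithmeticException.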
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}

// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

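// Slow-path instance allocation: the klass is passed in x13 and the runtime
// stub returns the new object in x10.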
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mv(x13, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

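// Fixed calling convention for the array allocation stubs: length in x9,
// klass in x13; the new array is returned in x10. The object-array stub
// below uses the same registers.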
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg) {
  _info = new CodeEmitInfo(info);
}

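// Slow path of monitorenter: pass the object as a stack parameter and call
// the runtime; the _nofpu variant skips saving FPU state for methods that
// contain no FPU code.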
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}

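// Slow path of monitorexit: the runtime stub is a non-blocking leaf routine,
// so ra is simply pointed at _continuation and the stub returns straight there.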
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ la(ra, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "RISCV should not use C1 runtime patching");
}

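// Pass the trap request to the deoptimize runtime stub; execution never
// returns here.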
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

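// Target of an implicit null check: record the faulting offset in the
// implicit exception table, then either deoptimize or throw the
// NullPointerException, depending on the CodeEmitInfo.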
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a = NULL;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

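// Throw the exception selected by this stub's Runtime1 id; an argument
// object, when present, travels in t0 because every other register must be
// preserved.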
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a tmp register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mv(t0, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), t1);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

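// Slow path of an intrinsified arraycopy: marshal the five arguments
// according to the Java calling convention and make a static call, resolved
// at runtime through the resolve stub, to perform the copy.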
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // ---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  const int args_num = 5;
  VMRegPair args[args_num];
  BasicType signature[args_num] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, args_num);

  // push parameters
  Register r[args_num];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int j = 0; j < args_num; j++) {
    VMReg r_1 = args[j].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sd(r[j], Address(sp, st_off));
    } else {
      assert(r[j] == args[j].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ la(t1, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ incrementw(Address(t1));
  }
#endif

  __ j(_continuation);
}

#undef __