/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_riscv.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_riscv.inline.hpp"


#define __ ce->masm()->

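// C1SafepointPollStub: slow path of a safepoint poll at a method return.
// Record the pc of the poll (code begin + safepoint_offset()) in the thread's
// saved_exception_pc field, then tail-jump to the shared polling page return
// handler blob.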
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
  __ relocate(safepoint_pc.rspec(), [&] {
    int32_t offset;
    __ la_patchable(t0, safepoint_pc.target(), offset);
    __ addi(t0, t0, offset);
  });
  __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

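// CounterOverflowStub: called when an invocation/backedge counter overflows.
// Pass the method's Metadata* and the bci as stub parameters and call the
// counter_overflow runtime entry, which may trigger (re)compilation, then
// continue at the continuation.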
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(t0, m);
  ce->store_parameter(t0, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}

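// RangeCheckStub: slow path of an array bounds check. Either deoptimize
// (when the CodeEmitInfo requests deoptimize_on_exception) or throw the
// appropriate IndexOutOfBoundsException; the failing index is passed in t0
// and, for the range check case, the array in t1.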
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mv(t0, _index->as_register());
  } else {
    __ mv(t0, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != LIR_Opr::nullOpr(), "sanity");
    __ mv(t1, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  RuntimeAddress target(Runtime1::entry_for(stub_id));
  __ relocate(target.rspec(), [&] {
    int32_t offset;
    __ la_patchable(ra, target, offset);
    __ jalr(ra, ra, offset);
  });
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

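// DivByZeroStub: raise ArithmeticException for an integer division by zero.
// When _offset is valid, add an implicit exception table entry mapping the
// offset of the (potentially) faulting instruction to this stub before
// calling the throw_div0_exception runtime entry.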
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}

// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

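// Slow-path allocation. These allocation stubs share a simple calling
// convention: the klass is passed in x13, the array length (for the array
// stubs below) in x9, and the newly allocated object is returned in x10.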
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mv(x13, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}

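// MonitorEnterStub: slow path of monitorenter. Pass the object and the
// BasicObjectLock address as stub parameters; the nofpu variant is used
// when the method contains no FPU code, so the runtime stub can skip
// saving and restoring the floating point registers.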
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}

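// MonitorExitStub: slow path of monitorexit. The runtime stub is a
// non-blocking leaf routine, so instead of a call we load the continuation
// address into ra and jump; the stub then "returns" straight back into
// compiled code at the continuation.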
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ la(ra, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  // Currently not needed.
  Unimplemented();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "RISCV should not use C1 runtime patching");
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a = nullptr;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a tmp register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mv(t0, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), t1);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

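// ArrayCopyStub: slow case of an arraycopy intrinsic. Emit a regular static
// Java call (resolved lazily through the resolve stub) to the arraycopy
// method, passing (src, src_pos, dst, dst_pos, length) according to the
// standard Java calling convention.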
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // ---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  const int args_num = 5;
  VMRegPair args[args_num];
  BasicType signature[args_num] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, args_num);

  // push parameters
  Register r[args_num];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // Arguments assigned to stack slots by the calling convention are stored
  // to the stack here; register arguments are expected to be in place already.
  for (int j = 0; j < args_num; j++) {
    VMReg r_1 = args[j].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sd(r[j], Address(sp, st_off));
    } else {
      assert(r[j] == args[j].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == nullptr) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ la(t1, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ incrementw(Address(t1));
  }
#endif

  __ j(_continuation);
}

#undef __