/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#define __ ce->masm()->

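// Slow path for a safepoint poll at a method return. The pc of the poll
// (code buffer start plus safepoint_offset()) is recorded in the thread's
// saved_exception_pc field so the shared polling page return handler can
// locate the poll site; control then jumps to that handler to block for
// the safepoint.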
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
  __ adr(rscratch1, safepoint_pc);
  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

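// Called when an invocation or backedge counter overflows: the Method*
// metadata and the bci are passed as stub parameters to the counter_overflow
// runtime entry, which may trigger recompilation or OSR; execution then
// resumes at _continuation.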
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

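// Out-of-bounds access slow path. The failing index is passed in rscratch1
// and, for the range check case, the array oop in rscratch2; the AArch64
// Runtime1 throw stubs expect their arguments in these scratch registers.
// When deoptimize_on_exception() is set, the stub deoptimizes instead of
// throwing.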
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != LIR_Opr::nullOpr(), "sanity");
    __ mov(rscratch2, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
  __ blr(lr);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


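// The Runtime1 new_instance entry points on AArch64 take the klass in r3
// and return the newly allocated object in r0, as the asserts below check.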
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}

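// Slow-path monitor enter: the object and the BasicObjectLock address are
// spilled to the stub parameter area before calling the monitorenter runtime
// entry; the _nofpu variant skips saving FPU registers when the compiled
// method contains no FPU code.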
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


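// Slow-path monitor exit. lr is pointed at _continuation before the tail
// jump, so when the monitorexit runtime stub returns it lands directly at
// the continuation.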
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, re-execute the instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


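// Slow path for an implicit null check: the offset of the faulting access is
// mapped to this stub in the implicit exception table so the trap can be
// dispatched here; depending on deoptimize_on_exception() the stub either
// deoptimizes or throws NullPointerException.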
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


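// Throws the exception selected by _stub (for example a ClassCastException).
// The failing object travels in rscratch1, and the far_call uses rscratch2 as
// its temporary so that rscratch1 is left intact.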
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


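// Slow path for arraycopy: the five arguments are marshalled according to the
// Java calling convention (stack-passed values are stored relative to sp) and
// the copy is performed through an ordinary resolved static call.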
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5);

  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == nullptr) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ incrementw(Address(rscratch2));
  }
#endif

  __ b(_continuation);
}

#undef __