/*
 * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"

#define __ ce->masm()->

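// Safepoint poll stubs are not used by C1 on this platform (the return-path
// poll appears to be handled when the return sequence itself is emitted),
// so this stub should never be reached.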
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  ShouldNotReachHere();
}

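// CounterOverflowStub: call into the runtime when a method or backedge
// counter overflows. The bci and method are passed via store_parameter,
// i.e. on the stack, so that registers are preserved across the call
// (mirroring the argument passing of the other stubs in this file).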
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index on the stack because all registers must be preserved.
  ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // array goes in the second reserved slot, above the index at SP
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
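  // stmia stores the registers of its set in ascending register-number
  // order, lowest register at the lowest address, so the single-instruction
  // form is only correct when obj_reg has the lower register number;
  // otherwise use two separate stores.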
  if (obj_reg < lock_reg) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether this needs to be implemented
  ShouldNotReachHere();
#endif
}

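// PatchingStub: supports lazy patching of field accesses and of
// klass/mirror/appendix loads. In outline: a template of the instructions
// to be patched is emitted here together with a packed patch-record word,
// the first instruction at _pc_start is overwritten with a jump to the
// "call patch" entry below, and that entry calls the matching Runtime1
// patching routine, which resolves the constant, rewrites the patch site,
// and resumes execution at _patch_site_entry.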
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = 0;

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);


  if (is_load && !VM_Version::supports_movw()) {
    address start = __ pc();

    // The following sequence duplicates the code emitted by
    // MacroAssembler::patchable_mov_oop() without creating a relocation
    // info entry.

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
    __ ldr(_obj, Address(PC));
    // Extra nop to handle the case of a large offset to the oop placeholder (see NativeMovConstReg::set_data).
    __ nop();

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
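  // The patch-info record is one word packing four byte-sized fields:
  //   [7:0]   0xff marker
  //   [15:8]  distance from being_initialized_entry to the end of this word
  //   [23:16] bytes_to_skip, from end_of_patch to the end of this word
  //   [31:24] _bytes_to_copy, the size of the template at the patch site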
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for call to return just after patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


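// ImplicitNullCheckStub: slow path for a null check performed implicitly by
// letting the access fault on a null base. The faulting pc is mapped to
// this stub through the implicit exception table.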
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize rather than throwing the exception, because throwing it
    // here is probably wrong.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on the stack because all registers must be preserved.
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

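  // java_calling_convention has assigned each argument either a register or
  // a stack slot. Register arguments must already be in the expected
  // registers (asserted below); stack arguments are stored into their
  // SP-relative slots.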
  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

#undef __
--- EOF ---