/*
 * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"

#define __ ce->masm()->
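// The usual C1 shorthand: '__' expands to the current stub emitter's assembler.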
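// C1 on ARM does not use out-of-line safepoint poll stubs, so this stub
// should never be emitted.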
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  ShouldNotReachHere();
}

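// Slow path for profile counter overflow: hands the bci and method to the
// runtime (which may trigger recompilation) and resumes at _continuation.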
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(StubId::c1_counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}

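// Slow path for a failed array bounds check: either deoptimizes, or calls
// the runtime to throw the appropriate IndexOutOfBoundsException.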
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    __ call(Runtime1::entry_for(StubId::c1_predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    DEBUG_ONLY(__ should_not_reach_here());
    return;
  }
  // Pass the array index on the stack because all registers must be preserved
  ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(StubId::c1_throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // array is the second stack argument (SP + BytesPerWord)
    __ call(Runtime1::entry_for(StubId::c1_throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == StubId::c1_new_instance_id ||
         stub_id == StubId::c1_fast_new_instance_id ||
         stub_id == StubId::c1_fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_null_free) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_null_free = is_null_free; // null-free object arrays are not yet implemented on ARM
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
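  // stmia stores its register set in ascending register-number order, so it is
  // only usable when obj_reg has the lower encoding; otherwise store the two
  // words individually.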
  if (obj_reg->encoding() < lock_reg->encoding()) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }

  StubId enter_id = ce->compilation()->has_fpu_code() ?
                    StubId::c1_monitorenter_id :
                    StubId::c1_monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // lock_reg was destroyed by fast unlocking attempt => recompute it
  ce->monitor_address(_monitor_ix, _lock_reg);

  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  StubId exit_id = ce->compilation()->has_fpu_code() ?
                   StubId::c1_monitorexit_id :
                   StubId::c1_monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether this needs to be implemented
  ShouldNotReachHere();
#endif
}

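// The stub carries a copy of the instructions to be patched plus a packed
// patch-record word. NativeGeneralJump::insert_unconditional (below) redirects
// the main-line patch site into this stub until the patching runtime copies
// the (now resolved) template back over the patch site.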
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = 0;

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);


  if (is_load && !VM_Version::supports_movw()) {
    address start = __ pc();

    // The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop()
    // without creating a relocation info entry.

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
    __ ldr(_obj, Address(PC));
    // Extra nop to handle the case of a large offset to the oop placeholder (see NativeMovConstReg::set_data).
    __ nop();

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
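  // Patch-record word layout:
  //   [ 7: 0] 0xff marker
  //   [15: 8] distance from the end of this record back to the being_initialized entry
  //   [23:16] bytes to skip between end_of_patch and the end of this record
  //   [31:24] number of bytes to copy back over the patch site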
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for call to return just after patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

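// Passes the packed trap request to the runtime in the reserved argument area;
// the runtime deoptimizes and does not return here.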
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(StubId::c1_deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


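// Out-of-line continuation of an implicit null check; the faulting PC is
// mapped to this stub through the implicit exception table.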
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is
    // probably wrong to do it here.
    a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


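// Slow path for arraycopy: moves the five arguments into the Java calling
// convention and makes a resolvable static call to the arraycopy method.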
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

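// The Valhalla value-class stubs below are not implemented on ARM.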
// Implementation of SubstitutabilityCheckStub

SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  Unimplemented();
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

// Implementation of LoadFlattenedArrayStub

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  Unimplemented();
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  Unimplemented();
}

void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

#undef __