/*
 * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"

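// Shorthand used throughout this file: '__' expands to the stub's
// MacroAssembler, so the emit_code() bodies below read like assembly.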
#define __ ce->masm()->

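// Safepoint poll stubs are not emitted by C1 on this platform,
// so this code must never be reached.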
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  ShouldNotReachHere();
}

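// Slow path taken when a method's invocation or backedge counter overflows:
// passes the bci and the method to the runtime (which may trigger profiling
// or recompilation), then resumes at _continuation.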
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(StubId::c1_counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}

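// Slow path for a failed array bounds check: either deoptimizes (when the
// check was emitted for a compiler predicate) or calls into the runtime to
// throw IndexOutOfBoundsException / ArrayIndexOutOfBoundsException.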
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    __ call(Runtime1::entry_for(StubId::c1_predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    DEBUG_ONLY(__ should_not_reach_here());
    return;
  }
  // Pass the array index on the stack because all registers must be preserved
  ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(StubId::c1_throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // array is the second stack argument, one word above the index
    __ call(Runtime1::entry_for(StubId::c1_throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

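// Emitted when a compiler-inserted loop predicate fails at runtime;
// simply deoptimizes so that execution resumes in the interpreter.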
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

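// Records the offset of the faulting instruction in the implicit exception
// table (when known) and calls into the runtime to throw ArithmeticException.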
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

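// Slow-path allocation of an instance. The specific Runtime1 entry (plain,
// fast, or fast-with-initialization-check) is chosen at LIR generation time
// and remembered in _stub_id; the runtime returns the new object in R0.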
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == StubId::c1_new_instance_id                 ||
         stub_id == StubId::c1_fast_new_instance_id            ||
         stub_id == StubId::c1_fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

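// Slow-path allocation of a primitive array. The runtime entry expects the
// klass in R1 and the array length in R2, and returns the new array in R0.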
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(StubId::c1_new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

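// Slow path for monitorenter: the object and the lock are passed on the
// stack because all registers must be preserved across the call. stmia
// stores registers in ascending encoding order, so it can only be used when
// obj_reg encodes lower than lock_reg; otherwise two plain stores are used.
// The no-FPU runtime entry is selected when the method contains no FPU code,
// so the FP registers need not be saved and restored.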
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
  if (obj_reg->encoding() < lock_reg->encoding()) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }

  StubId enter_id = ce->compilation()->has_fpu_code() ?
                              StubId::c1_monitorenter_id :
                              StubId::c1_monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


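// Slow path for monitorexit. The fast-path unlock may have destroyed the
// lock register, so its address is recomputed from the monitor index. The
// runtime entry is a non-blocking leaf call, so no call info is recorded.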
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // lock_reg was destroyed by the fast unlocking attempt => recompute it
  ce->monitor_address(_monitor_ix, _lock_reg);

  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  StubId exit_id = ce->compilation()->has_fpu_code() ?
                             StubId::c1_monitorexit_id :
                             StubId::c1_monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// The call return address is directly after the patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether this needs to be implemented
  ShouldNotReachHere();
#endif
}

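// Layout of the out-of-line patching code emitted below:
//   - a copy of the instructions to be patched (the "patch template"),
//   - for load_mirror: a check that the holder class is fully initialized,
//   - a 4-byte patch record holding an 0xff marker in the low byte, then the
//     offset of the being-initialized entry, the number of bytes to skip to
//     the return address, and the number of bytes to copy to the patch site,
//   - the call into the patching runtime, with LR set up so that it returns
//     just after the patch record (matching _patch_info_offset above).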
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = 0;

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);

  if (is_load && !VM_Version::supports_movw()) {
    address start = __ pc();

    // The following sequence duplicates the code emitted by
    // MacroAssembler::patchable_mov_oop(), but without creating a
    // relocation info entry.

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
    __ ldr(_obj, Address(PC));
    // Extra nop to handle the case of a large offset to the oop placeholder
    // (see NativeMovConstReg::set_data).
    __ nop();

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for the call to return just after the patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

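// Passes the encoded trap request to the runtime on the stack and
// deoptimizes; control does not return here.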
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(StubId::c1_deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


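// Slow path entered via the implicit exception table when an access faults
// on a null oop: either deoptimizes or throws NullPointerException.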
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize rather than throwing the exception, because throwing it
    // here is probably wrong.
    a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


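// Generic exception-throwing slow path; _stub selects the Runtime1 entry.
// The offending object, when present, is passed on the stack because all
// registers must be preserved.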
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


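// Slow path for arraycopy: marshals the five arguments (src, src_pos, dst,
// dst_pos, length) according to the Java calling convention and then makes
// a patchable static call that resolves to the actual arraycopy method.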
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

#undef __