/*
 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"

#define __ ce->masm()->
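// Standard HotSpot shorthand: within the emitters below, `__` expands to
// ce->masm()->, so e.g. `__ mtctr(R0)` emits through the LIR_Assembler's
// MacroAssembler. The macro is #undef'd at the end of this file.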
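// With SIGTRAP-based safepoint polling (UseSIGTRAP), the poll instruction
// traps straight into the signal handler and no stub code is needed, so
// reaching this stub would be a bug (asserted in debug builds). Otherwise
// the stub branches to the shared polling-page return handler blob.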
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  if (UseSIGTRAP) {
    DEBUG_ONLY( __ should_not_reach_here("C1SafepointPollStub::emit_code"); )
  } else {
    __ bind(_entry);
    __ jump_to_polling_page_return_handler_blob(safepoint_offset());
  }
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
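    // Call pattern used throughout these stubs: materialize the runtime
    // entry as an offset from the global TOC (R29_TOC), which is usually
    // shorter than the full 64-bit constant load kept above for reference,
    // then call indirectly via CTR. bctrl leaves a return address that
    // add_call_info_here associates with the debug info and oop map.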
    //__ load_const_optimized(R0, a);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    DEBUG_ONLY(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(StubId::c1_throw_index_exception_id)
                                                      : Runtime1::entry_for(StubId::c1_throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0;
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }
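  // Hand the failing index (and the array oop, if present) to the runtime
  // in the scratch words just below SP; the corresponding Runtime1 stub is
  // expected to pick them up from there (compare the explicit "Parameter
  // 1/2" stores in CounterOverflowStub::emit_code below).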
  if (_array) {
    __ std(_array->as_pointer_register(), -8, R1_SP);
  }
  __ std(index, -16, R1_SP);

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ illtrap());
}


PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ illtrap());
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(StubId::c1_counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  address stub = Runtime1::entry_for(StubId::c1_throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ illtrap());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id);
  }

  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ illtrap());
}


// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  DEBUG_ONLY( __ illtrap(); )
}


// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == StubId::c1_new_instance_id                 ||
         stub_id == StubId::c1_fast_new_instance_id            ||
         stub_id == StubId::c1_fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(StubId::c1_new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_null_free) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
  _is_null_free = is_null_free; // unimplemented
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(StubId::c1_new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? StubId::c1_monitorenter_id : StubId::c1_monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // lock_reg was destroyed by fast unlocking attempt => recompute it
  ce->monitor_address(_monitor_ix, _lock_reg);

  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? StubId::c1_monitorexit_id : StubId::c1_monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

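// Fixed distance from the patch-info record back to the return address of
// the patching call: load_const32 (2 instructions) + add + mtctr + bctrl
// as emitted in PatchingStub::emit_code below, i.e. 5 instructions. The
// assert after the bctrl there checks that this stays in sync.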
int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
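  // The patch site must be large enough to be overwritten with the
  // unconditional jump that NativeGeneralJump::insert_unconditional()
  // installs below, and the byte count must fit into the 8-bit field of
  // the patch record emitted further down.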
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call, need %d", _bytes_to_copy);
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)nullptr, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)nullptr, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
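  // Record layout (one word, decoded by the patching runtime):
  //   byte 0: 0 (padding, see comment above)
  //   byte 1: being_initialized_entry_offset
  //   byte 2: bytes_to_skip
  //   byte 3: bytes_to_copy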
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(StubId::c1_load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  //__ load_const(R0, target); + mtctr + bctrl must have size -_patch_info_offset
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
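  // The instruction at _pc_start still contains a dummy (nullptr) constant.
  // Downgrade its oop/metadata relocation to relocInfo::none, presumably so
  // that nobody interprets the not-yet-patched value as a real oop or
  // Metadata*; patching installs the final value and relocation later.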
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(StubId::c1_deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ illtrap());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
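    // Increment Runtime1::_arraycopy_slowcase_cnt. With the last argument
    // (return_simm16_rest) true, load_const_optimized materializes only the
    // upper bits of the counter address and returns the remaining low
    // 16-bit displacement, which is folded into the lwz/stw below.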
    const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
    const Register tmp = R3, tmp2 = R4;
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(tmp2, simm16_offs, tmp);
    __ addi(tmp2, tmp2, 1);
    __ stw(tmp2, simm16_offs, tmp);
  }
#endif

  __ b(_continuation);
}

// Implementation of SubstitutabilityCheckStub
SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  Unimplemented();
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  Unimplemented();
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  Unimplemented();
}

void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}
#undef __