/*
 * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"

#define __ ce->masm()->
#undef  CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }

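// Note: on s390, the safepoint poll is implemented as a load from the
// thread-local polling page which traps when armed and is handled by the
// signal handler, so C1 never branches to a slow-path poll stub here.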
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  ShouldNotReachHere();
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (StubId::c1_predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    DEBUG_ONLY(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  StubId stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = StubId::c1_throw_index_exception_id;
  } else {
    stub_id = StubId::c1_throw_range_check_failed_id;
    __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (StubId::c1_predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
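  // Pass the method (just materialized in Z_R1_scratch) and the bci to the
  // runtime stub through stack slots (see LIR_Assembler::store_parameter).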
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (StubId::c1_counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (StubId::c1_throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (StubId::c1_predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (StubId::c1_throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == StubId::c1_new_instance_id                 ||
         stub_id == StubId::c1_fast_new_instance_id            ||
         stub_id == StubId::c1_fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (StubId::c1_new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_null_free) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
  _is_null_free = is_null_free; // unimplemented
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (StubId::c1_new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
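  // The _nofpu stub variants skip saving and restoring the FPU registers
  // around the runtime call; they may only be used when the method
  // contains no FPU code.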
  StubId enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = StubId::c1_monitorenter_id;
  } else {
    enter_id = StubId::c1_monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.

  // Lock_reg was destroyed by fast unlocking attempt => recompute it.
  ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));

  // Note: non-blocking leaf routine => no call info needed.
  StubId exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = StubId::c1_monitorexit_id;
  } else {
    exit_id = StubId::c1_monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

// Implementation of patching:
// - Copy the code at the given offset into an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - Call to the stub, jump to the runtime.
// - In the runtime: preserve all registers (especially objects, i.e., source and destination objects).
// - In the runtime: after initializing the class, restore the original code and re-execute the instruction.

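// Negative distance from the return address of the patching call back to
// the start of the call sequence: a 12-byte (unoptimized) load_const of
// the call target followed by a 2-byte BASR.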
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
  case access_field_id: bc = "patch site (access_field)"; break;
  case load_klass_id: bc = "patch site (load_klass)"; break;
  case load_mirror_id: bc = "patch site (load_mirror)"; break;
  case load_appendix_id: bc = "patch site (load_appendix)"; break;
  default: bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call, need %d", _bytes_to_copy);

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

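    // Verify that the instruction emitted above is byte-identical to the
    // original code at the patch site which it replicates.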
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We need only 3 bytes, but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: the opcode of the add followed by 3 bytes of patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
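  // Overwrite the original code at the patch site with an unconditional
  // jump to this stub.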
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = nullptr;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (StubId::c1_access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for (StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for (StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not be optimized, in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
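  // After the runtime call has patched the code, jump back to the patch
  // site entry and execute the now-patched instruction(s).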
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (StubId::c1_deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();
  CHECK_BAILOUT();

  // Prepend each BRASL with a nop so that its displacement field stays
  // aligned and can be patched atomically (see the assert above).
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
  }
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

// Implementation of SubstitutabilityCheckStub
SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  Unimplemented();
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

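// Implementation of LoadFlattenedArrayStub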
LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  Unimplemented();
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  Unimplemented();
}

void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}
#undef __