/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"

#define __ ce->masm()->
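// The usual HotSpot assembler shorthand: with this define, "__ instr(...)"
// expands to "ce->masm()->instr(...)", so each stub's emit_code() below reads
// like straight-line assembly emitted into the LIR_Assembler's code buffer.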

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  if (UseSIGTRAP) {
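    // With SIGTRAP-based safepoint polls the poll is a trap instruction and
    // the signal handler transfers control directly, so this stub should
    // never be reached (hence the debug-only guard below).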
    DEBUG_ONLY( __ should_not_reach_here("C1SafepointPollStub::emit_code"); )
  } else {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

    __ bind(_entry);
    // Use PC-relative address computation.
    {
      Label next_pc;
      __ bl(next_pc);
      __ bind(next_pc);
    }
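    // The bl to the directly following instruction never leaves this code;
    // its only effect is to deposit the address of next_pc in LR, which mflr
    // below reads back. Together they implement "read the current PC" on PPC.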
    int current_offset = __ offset();
    __ mflr(R12);
    __ add_const_optimized(R12, R12, safepoint_offset() - current_offset);
    __ std(R12, in_bytes(JavaThread::saved_exception_pc_offset()), R16_thread);

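    // Materialize the stub address relative to the global TOC base kept in
    // R29_TOC. This pattern recurs throughout this file: it is cheaper than
    // a full 64-bit constant load as long as the displacement fits the
    // optimized add sequence.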
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
    __ mtctr(R0);
    __ bctr();
  }
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    // May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
    DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
    //__ load_const_optimized(R0, a);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0;
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }
  if (_array) {
    __ std(_array->as_pointer_register(), -8, R1_SP);
  }
  __ std(index, -16, R1_SP);
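  // The failing index (and, for range checks, the array oop) are passed in
  // scratch slots just below SP rather than in argument registers; the
  // corresponding Runtime1 stubs presumably reload them from there when
  // constructing the exception.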

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
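  // The implicit exception table maps the code offset of the trapping
  // instruction (_offset) to the offset of this stub, so the signal handler
  // can continue execution here when the hardware trap fires.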
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
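  // Both null check variants rely on a hardware fault (SIGSEGV for implicit
  // checks, a trap instruction for TrapBasedNullChecks); the table entry lets
  // the signal handler redirect the faulting thread to this stub.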
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only( __ illtrap(); )
}


// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
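  // The _nofpu stub variants are selected when the compiled method contains
  // no FPU code; the runtime stub can then skip saving and restoring the
  // floating-point registers around the call.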
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "lock register must be R5_ARG3");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "lock register must be R4_ARG2");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  // Currently not needed.
  Unimplemented();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, re-execute instruction
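//
// A rough sketch of the stub layout produced by emit_code() below (inferred
// from the emission order, not normative):
//
//   [copied original code]            <- _bytes_to_copy bytes
//   [being_initialized check]         <- load_mirror_id only
//   [patch record: 4 bytes]           <- 0, being_initialized_entry_offset,
//                                        bytes_to_skip, _bytes_to_copy
//   [call_patch: load target + bctrl] <- 5 instructions, see _patch_info_offset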

int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
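// The five instructions counted here are the call sequence emitted at
// call_patch below: load_const32 (presumably lis + ori, i.e., two
// instructions), add, mtctr, and bctrl. The assert in emit_code() checks
// that this constant stays in sync with the emitted code.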

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call, need %d", _bytes_to_copy);
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CCR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CCR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ emit_int8(0);                              // padding to fill out the word
  __ emit_int8(being_initialized_entry_offset); // distance back to the being_initialized entry
  __ emit_int8(bytes_to_skip);                  // extra code (check + record) to skip over
  __ emit_int8(_bytes_to_copy);                 // size of the copied original code
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  // The call sequence below (load_const32 + add + mtctr + bctrl) must have size -_patch_info_offset.
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
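  // The constant embedded at the original patch site is not valid until the
  // runtime patches the real oop/metadata in, so (presumably to keep GC and
  // relocation processing from visiting a bogus value) the relocation at
  // _pc_start is downgraded to relocInfo::none here.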
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only(__ illtrap());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());
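  // Arguments are moved into the first five PPC64 argument registers
  // (R3..R7); positions and length are ints and get sign-extended to 64 bit
  // via extsw before the call.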

  ce->emit_static_call_stub();

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
    const Register tmp = R3, tmp2 = R4;
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(tmp2, simm16_offs, tmp);
    __ addi(tmp2, tmp2, 1);
    __ stw(tmp2, simm16_offs, tmp);
  }
#endif

  __ b(_continuation);
}

#undef __