/*
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"

#define __ ce->masm()->
#undef  CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
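  // Not used on s390: safepoint polls are not routed through this slow-path
  // stub here, so its code must never be emitted.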
  ShouldNotReachHere();
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index in Z_R1_scratch, which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
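    // The range check failure handler also needs the array; pass it in
    // Z_R0_scratch, which linear scan does not manage either.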
    __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
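  // Pass the method (materialized in Z_R1_scratch above) and the bci to the
  // runtime as outgoing stack parameters.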
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
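  // For an implicit division-by-zero check, map the pc of the trapping
  // instruction to this stub's entry so the signal handler can dispatch here.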
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
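  // Choose the runtime entry: the _nofpu variant skips saving and restoring
  // the floating point registers when the compilation contains no FPU code.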
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
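  // As for monitorenter, the _nofpu variant avoids saving the FPU registers.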
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();  // Only needed with compact object headers.
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - Call to stub, jump to runtime.
// - In runtime: preserve all registers (especially the object registers, i.e., source and destination object).
// - In runtime: after initializing the class, restore the original code and reexecute the instruction.

int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);
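// I.e. the negative distance from the pc right after the patch call (a 12-byte
// load_const followed by a 2-byte BASR) back to the patch info; see the
// matching assert in PatchingStub::emit_code().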

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
  case access_field_id: bc = "patch site (access_field)"; break;
  case load_klass_id: bc = "patch site (load_klass)"; break;
  case load_mirror_id: bc = "patch site (load_mirror)"; break;
  case load_appendix_id: bc = "patch site (load_appendix)"; break;
  default: bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call, need %d", _bytes_to_copy);

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being-initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being-initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
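    // _obj holds the mirror (java.lang.Class); load its Klass* and check
    // whether the current thread is the one recorded as initializing it.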
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes, but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
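  // Patch record layout (see the four emit_int8 calls below):
  //   byte 0: high-order byte of A_ZOPC (the add opcode, for the disassembler)
  //   byte 1: offset back to the being_initialized entry
  //   byte 2: number of bytes to skip (extra code plus this record)
  //   byte 3: number of bytes to copy back over the patch site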
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes of patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not optimize in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
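  // Move the five arraycopy arguments into the argument registers expected
  // by the static call below.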
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();

  // Prepend each BRASL with a nop.
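  // Together with the alignment established above, the nop keeps the BRASL
  // displacement at the offset NativeCall expects for a far pc-relative call,
  // so the call can be patched atomically.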
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

#undef __