/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#define __ ce->masm()->

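// Record the pc of the safepoint poll (code start + safepoint offset) in the
// thread's saved_exception_pc, then tail-jump to the shared polling-page
// return handler that services the safepoint.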
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
  __ adr(rscratch1, safepoint_pc);
  __ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  __ far_jump(RuntimeAddress(stub));
}

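// Pass the method (as metadata) and the bci to the counter_overflow runtime
// entry, which profiles the site and may trigger recompilation.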
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

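// The throw stubs take the failing index in rscratch1 and, for the range
// check, the array oop in rscratch2; that is why the call below branches via
// lr rather than using far_call, whose default scratch register is rscratch1.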
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != NULL, "sanity");
    __ mov(rscratch2, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
  __ blr(lr);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

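// When _offset is set, map the pc of the div instruction to this stub's
// entry in the implicit exception table.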
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}

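// Runtime calling convention for the new_instance stubs: the klass goes in
// r3 and the new object comes back in r0.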
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}

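// Pass the object and the BasicObjectLock to the monitorenter runtime entry.
// The _nofpu variant is chosen when the method contains no FPU code, so the
// runtime stub can skip saving and restoring the FPU registers.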
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
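  // Point lr at _continuation so that the runtime stub's return lands there
  // directly; the jump below never comes back to this stub.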
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

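// Slow path of klass decoding with compact object headers: the register
// holds a tagged pointer to an inflated ObjectMonitor, so the header (and
// with it the klass) must be reloaded from the monitor instead.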
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  assert(UseCompactObjectHeaders, "Only use with compact object headers");
  __ bind(_entry);
  Register d = _result->as_register();
  __ ldr(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ b(_continuation);
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call the stub, jump to the runtime
// - in the runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in the runtime: after initializing the class, restore the original code, reexecute the instruction

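// AArch64 does not use the runtime patching scheme described above; C1
// patchable accesses are handled via deoptimization instead, so emit_code
// below must never be reached.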
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}

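// Pass the encoded trap request to Runtime1::deoptimize, which deoptimizes
// the enclosing frame; control does not return here.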
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
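  // rscratch1 carries the object, so hand far_call rscratch2 as its scratch
  // register instead of the default rscratch1.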
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments assigned to stack slots get stored there; the rest must
  // already sit in the registers the calling convention assigned
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

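  // In non-product builds, count how often the arraycopy slow case is taken.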
#ifndef PRODUCT
  __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  __ incrementw(Address(rscratch2));
#endif

  __ b(_continuation);
}

#undef __