src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp

--- old/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp

193   _klass_reg = klass_reg;
194   _result = result;
195   _length = length;
196   _info = new CodeEmitInfo(info);
197 }
198 
199 
200 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
201   assert(__ rsp_offset() == 0, "frame size should be fixed");
202   __ bind(_entry);
203   assert(_length->as_register() == r19, "length must be in r19");
204   assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
205   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
206   ce->add_call_info_here(_info);
207   ce->verify_oop_map(_info);
208   assert(_result->as_register() == r0, "result must be in r0");
209   __ b(_continuation);
210 }
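
How these stubs are reached, for readers new to C1's CodeStub scheme: the fast path is emitted inline, a failing check branches to the stub's _entry label in the out-of-line stub section, the stub calls into the runtime, and __ b(_continuation) resumes behind the original code. A minimal C++ sketch of that control-flow shape; fast_alloc and runtime_alloc are illustrative stand-ins, not HotSpot APIs:

    // Sketch only: models the inline fast path / out-of-line slow path
    // split of a C1 CodeStub. All names here are hypothetical.
    #include <cstdio>
    #include <cstdlib>

    void* fast_alloc(std::size_t n) {       // stand-in for the inline TLAB path
      return n <= 4096 ? std::malloc(n) : nullptr;  // "fails" for large requests
    }
    void* runtime_alloc(std::size_t n) {    // stand-in for the Runtime1 entry
      return std::calloc(1, n);
    }
    void* allocate(std::size_t n) {
      void* p = fast_alloc(n);
      if (p == nullptr) {                   // branch to _entry
        p = runtime_alloc(n);               // the stub's far_call into the runtime
      }                                     // fall through == __ b(_continuation)
      return p;
    }
    int main() {
      std::printf("%p\n", allocate(8192));  // forces the slow path
      return 0;
    }
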
211 // Implementation of MonitorAccessStubs
212 
213 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
214 : MonitorAccessStub(obj_reg, lock_reg)
215 {
216   _info = new CodeEmitInfo(info);
217 }
218 
219 
220 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
221   assert(__ rsp_offset() == 0, "frame size should be fixed");
222   __ bind(_entry);
223   ce->store_parameter(_obj_reg->as_register(),  1);
224   ce->store_parameter(_lock_reg->as_register(), 0);
225   Runtime1::StubID enter_id;
226   if (ce->compilation()->has_fpu_code()) {
227     enter_id = Runtime1::monitorenter_id;
228   } else {
229     enter_id = Runtime1::monitorenter_nofpu_id;
230   }
231   __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
232   ce->add_call_info_here(_info);
233   ce->verify_oop_map(_info);
234   __ b(_continuation);
235 }
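
The monitorenter_id / monitorenter_nofpu_id split above (mirrored by the monitorexit pair below) lets compilations without FPU code skip spilling the floating-point register file around the runtime call. A hedged sketch of the idea; the save_* helpers are hypothetical stand-ins for the live-register spilling done by the Runtime1 stubs:

    // Illustrative only: models why two runtime entries exist per monitor op.
    #include <cstdio>

    void save_gp_registers() { std::puts("spill general-purpose registers"); }
    void save_fp_registers() { std::puts("spill floating-point registers"); }

    void monitorenter_runtime_stub(bool save_fpu /* mirrors has_fpu_code() */) {
      save_gp_registers();
      if (save_fpu) {               // monitorenter_id
        save_fp_registers();
      }                             // else: monitorenter_nofpu_id
      std::puts("enter the monitor");
      // ...restore in reverse order, then return...
    }

    int main() {
      monitorenter_runtime_stub(false);   // compilation had no FPU code
      return 0;
    }
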
236 
237 
238 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
239   __ bind(_entry);
240   if (_compute_lock) {
241     // lock_reg was destroyed by fast unlocking attempt => recompute it
242     ce->monitor_address(_monitor_ix, _lock_reg);
243   }
244   ce->store_parameter(_lock_reg->as_register(), 0);
245   // note: non-blocking leaf routine => no call info needed
246   Runtime1::StubID exit_id;
247   if (ce->compilation()->has_fpu_code()) {
248     exit_id = Runtime1::monitorexit_id;
249   } else {
250     exit_id = Runtime1::monitorexit_nofpu_id;
251   }
252   __ adr(lr, _continuation);
253   __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
254 }
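
Note the tail of MonitorExitStub::emit_code: instead of a far_call followed by a branch back, it pre-loads the link register with _continuation and tail-jumps, so the non-blocking leaf routine's ordinary return lands directly on the continuation. A rough C++ model of that transfer, with a function pointer standing in for lr:

    // Rough model: `lr` plays the role of the link register that
    // __ adr(lr, _continuation) installs before the far_jump.
    #include <cstdio>

    void continuation() { std::puts("resumed at _continuation"); }

    void monitorexit_leaf(void (*lr)()) {  // non-blocking leaf routine
      std::puts("unlock in the runtime");
      lr();                                // "ret" through the link register
    }

    int main() {
      monitorexit_leaf(&continuation);     // adr lr, _continuation; far_jump
      return 0;
    }
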
255 
256 
257 // Implementation of patching:
258 // - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
259 // - Replace original code with a call to the stub
260 // At Runtime:
261 // - call to stub, jump to runtime
262 // - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
263 // - in runtime: after initializing class, restore original code, reexecute instruction
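
Although this port never patches (PatchingStub::emit_code below simply asserts), the comment above describes the generic mechanism used elsewhere. A sketch of the inline buffer it mentions; the PatchBuffer layout is hypothetical, chosen only to illustrate "first the bytes, then the number of bytes":

    // Hypothetical layout, for illustration of the comment above only.
    #include <cstdint>
    #include <cstring>

    struct PatchBuffer {
      uint8_t bytes[15];   // copy of the original instruction bytes
      uint8_t length;      // byte count, stored after the bytes
    };

    void save_patch_site(const uint8_t* site, uint8_t n, PatchBuffer* buf) {
      std::memcpy(buf->bytes, site, n);   // first the bytes...
      buf->length = n;                    // ...then the number of bytes
      // the site itself is then overwritten with a call to the stub
    }

    void restore_patch_site(uint8_t* site, const PatchBuffer* buf) {
      std::memcpy(site, buf->bytes, buf->length);  // restore original code
      // the restored instruction is then reexecuted
    }

    int main() {
      uint8_t site[4] = {1, 2, 3, 4};
      PatchBuffer buf;
      save_patch_site(site, 4, &buf);
      std::memset(site, 0, sizeof site);  // site now holds the stub call
      restore_patch_site(site, &buf);     // class initialized: put code back
      return site[0] == 1 ? 0 : 1;
    }
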
264 
265 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
266 
267 void PatchingStub::align_patch_site(MacroAssembler* masm) {
268 }
269 
270 void PatchingStub::emit_code(LIR_Assembler* ce) {
271   assert(false, "AArch64 should not use C1 runtime patching");
272 }
273 
274 
275 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {

+++ new/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp

193   _klass_reg = klass_reg;
194   _result = result;
195   _length = length;
196   _info = new CodeEmitInfo(info);
197 }
198 
199 
200 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
201   assert(__ rsp_offset() == 0, "frame size should be fixed");
202   __ bind(_entry);
203   assert(_length->as_register() == r19, "length must be in r19");
204   assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
205   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
206   ce->add_call_info_here(_info);
207   ce->verify_oop_map(_info);
208   assert(_result->as_register() == r0, "result must be in r0");
209   __ b(_continuation);
210 }
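
A note on far_call/far_jump, used throughout these stubs: an AArch64 direct branch (bl/b) only reaches +-128 MiB (a 26-bit immediate scaled by 4), so when the runtime entry may sit outside that window the macro assembler materializes the address and branches through a scratch register instead. A sketch of the reachability check, assuming that +-128 MiB figure:

    // Sketch of the range check behind the near/far branch decision.
    #include <cstdint>
    #include <cstdio>

    bool in_branch_range(intptr_t pc, intptr_t target) {
      intptr_t off = target - pc;
      return off >= -(INT64_C(1) << 27) && off < (INT64_C(1) << 27);  // +-128 MiB
    }

    int main() {
      intptr_t pc = 0x100000;
      std::printf("near: %d, far: %d\n",
                  in_branch_range(pc, pc + (1 << 20)),                // direct branch
                  in_branch_range(pc, pc + (INT64_C(1) << 30)));      // via register
      return 0;
    }
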
211 // Implementation of MonitorAccessStubs
212 
213 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, CodeEmitInfo* info)
214 : MonitorAccessStub(obj_reg)
215 {
216   _info = new CodeEmitInfo(info);
217 }
218 
219 
220 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
221   assert(__ rsp_offset() == 0, "frame size should be fixed");
222   __ bind(_entry);
223   ce->store_parameter(_obj_reg->as_register(),  0);
224   Runtime1::StubID enter_id;
225   if (ce->compilation()->has_fpu_code()) {
226     enter_id = Runtime1::monitorenter_id;
227   } else {
228     enter_id = Runtime1::monitorenter_nofpu_id;
229   }
230   __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
231   ce->add_call_info_here(_info);
232   ce->verify_oop_map(_info);
233   __ b(_continuation);
234 }
235 
236 
237 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
238   __ bind(_entry);
239   ce->store_parameter(_obj_reg->as_register(), 0);
240   // note: non-blocking leaf routine => no call info needed
241   Runtime1::StubID exit_id;
242   if (ce->compilation()->has_fpu_code()) {
243     exit_id = Runtime1::monitorexit_id;
244   } else {
245     exit_id = Runtime1::monitorexit_nofpu_id;
246   }
247   __ adr(lr, _continuation);
248   __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
249 }
250 
251 void LoadKlassStub::emit_code(LIR_Assembler* ce) {
252   __ bind(_entry);
253   Register res = _result->as_register();
254   ce->store_parameter(_obj->as_register(), 0);
255   if (res != r0) {
256     // Note: we cannot push/pop r0 around the call, because that
257     // would mess with the stack pointer sp, and we need that to
258     // remain intact for store_parameter/load_argument to work correctly.
259     // We swap r0 and res instead, which preserves current r0 in res.
260     // The preserved value is later saved and restored around the
261     // call in Runtime1::load_klass_id.
262     __ mov(rscratch1, r0);
263     __ mov(r0, res);
264     __ mov(res, rscratch1);
265   }
266   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_klass_id)));
267   if (res != r0) {
268     // Swap back r0 and res. This brings the call return value
269     // from r0 into res, and the preserved value in res back into r0.
270     __ mov(rscratch1, r0);
271     __ mov(r0, res);
272     __ mov(res, rscratch1);
273   }
274   __ b(_continuation);
275 }
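
The two three-instruction sequences around the far_call above are a register swap through rscratch1, as the block comment explains. A plain-variable model of why swapping twice works; runtime_load_klass is a hypothetical stand-in for the Runtime1::load_klass_id call (the object argument travels via store_parameter, not via r0):

    // Plain ints standing in for r0, res and rscratch1; illustrative only.
    #include <cstdio>

    int runtime_load_klass() { return 42; }  // hypothetical; result lands "in r0"

    int main() {
      int r0  = 10;   // live value that must survive the call
      int res = -1;   // register designated to receive the result
      int rscratch1;

      rscratch1 = r0; r0 = res; res = rscratch1;  // park the old r0 in res
      r0 = runtime_load_klass();                  // stub preserves res, sets r0
      rscratch1 = r0; r0 = res; res = rscratch1;  // result -> res, old r0 -> r0

      std::printf("r0=%d res=%d\n", r0, res);     // r0=10 (preserved), res=42
      return 0;
    }
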
276 
277 // Implementation of patching:
278 // - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
279 // - Replace original code with a call to the stub
280 // At Runtime:
281 // - call to stub, jump to runtime
282 // - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
283 // - in runtime: after initializing class, restore original code, reexecute instruction
284 
285 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
286 
287 void PatchingStub::align_patch_site(MacroAssembler* masm) {
288 }
289 
290 void PatchingStub::emit_code(LIR_Assembler* ce) {
291   assert(false, "AArch64 should not use C1 runtime patching");
292 }
293 
294 
295 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {