
src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

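// Note the return idiom above: instead of a far_call followed by a branch
// back, the stub materializes _continuation into lr and tail-jumps, so the
// runtime stub's ret(lr) resumes directly at _continuation. A sketch of the
// two equivalent shapes (illustrative only; "entry" is a hypothetical
// runtime address):
//
//   __ far_call(entry);           // lr := next instruction; runtime returns here
//   __ b(_continuation);          //   ...then branch to the continuation
//
// versus what is emitted here:
//
//   __ adr(lr, _continuation);    // lr := address of _continuation
//   __ far_jump(entry);           // tail-jump; the runtime's ret(lr) lands
//                                 // on _continuation, saving one branch
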
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Register res = _result->as_register();
  ce->store_parameter(_obj->as_register(), 0);
  if (res != r0) {
    // The runtime stub returns the klass in r0; preserve the caller's r0.
    __ push(RegSet::of(r0), sp);
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_klass_id)));
  if (res != r0) {
    __ mov(res, r0);
    __ pop(RegSet::of(r0), sp);
  }
  __ b(_continuation);
}

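// Slow-path flow of LoadKlassStub, assuming the standard C1 convention that
// the runtime stub returns its result in r0 (illustrative trace, not code):
//
//   _entry:      store _obj as outgoing parameter 0
//   res != r0:   push r0                   // caller's r0 must survive the call
//                far_call load_klass_id    // klass comes back in r0
//   res != r0:   mov res, r0; pop r0       // move result out, restore r0
//                b _continuation
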
// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing the class, restore the original code, re-execute the instruction
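//
// Sketched layout of a patch site under this protocol (illustrative only;
// the path is unused on AArch64, see the assert in PatchingStub::emit_code
// below):
//
//   compiled code:   <jump to stub>           ; overwrites the original instruction
//   stub buffer:     <copied original bytes> <byte count>
//
// The runtime initializes the class, copies the saved bytes back over the
// jump, and resumes execution at the restored instruction.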

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // Patching is unused on AArch64 (see the assert in emit_code below),
  // so there is no patch site to align.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {