void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
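  // Pass the lock address to the runtime stub as parameter 0.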
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
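  // Set lr to _continuation so the runtime stub returns straight to the
  // continuation, then tail-jump to its entry point.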
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Register res = _result->as_register();
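  // Pass the object whose klass is to be loaded to the runtime stub as parameter 0.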
  ce->store_parameter(_obj->as_register(), 0);
  if (res != r0) {
    // Note: we cannot push/pop r0 around the call, because that
    // would mess with the stack pointer sp, and we need that to
    // remain intact for store_parameter/load_argument to work correctly.
    // We swap r0 and res instead, which preserves current r0 in res.
    // The preserved value is later saved and restored around the
    // call in Runtime1::load_klass_id.
    __ mov(rscratch1, r0);
    __ mov(r0, res);
    __ mov(res, rscratch1);
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_klass_id)));
  if (res != r0) {
    // Swap back r0 and res. This brings the call return value
    // from r0 into res, and the preserved value in res back into r0.
    __ mov(rscratch1, r0);
    __ mov(r0, res);
    __ mov(res, rscratch1);
  }
  __ b(_continuation);
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

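// Defined here to satisfy the shared PatchingStub declaration; C1 runtime
// patching is not used on AArch64 (see PatchingStub::emit_code below).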
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
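  // Nothing to do: AArch64 does not use C1 runtime patching (see emit_code below).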
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {