
src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Register res = _result->as_register();
  ce->store_parameter(_obj->as_register(), 0);
  if (res != rax) {
    // This preserves rax and allows it to be used as return-register,
    // without messing with the stack.
    __ xchgptr(rax, res);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_klass_id)));
  if (res != rax) {
    // Swap back rax, and move result to correct register.
    __ xchgptr(rax, res);
  }
  __ jmp(_continuation);
}
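
The newly added LoadKlassStub passes the object on the stack via store_parameter and receives the klass back from the runtime stub in rax; the two xchgptr instructions exist only to keep rax's original value alive across the call when the result register is something else. Below is a minimal standalone sketch of that swap-around-the-call idea, with plain variables standing in for registers (stub_call, call_preserving_rax and the constants are illustrative names, not HotSpot code):

#include <cassert>
#include <utility>

// Stand-in for the runtime stub: it leaves its result in the fixed "rax" slot.
long stub_call() { return 0xdecaf; }

// Park rax's old value in the (dead) result register, let the stub clobber
// rax, then swap back so res holds the result and rax is restored.
void call_preserving_rax(long& rax, long& res) {
  if (&res != &rax) std::swap(rax, res);   // first xchgptr(rax, res)
  rax = stub_call();                       // call(...): the result arrives in rax
  if (&res != &rax) std::swap(rax, res);   // second xchgptr(rax, res)
}

int main() {
  long rax = 1234, res = 0;
  call_preserving_rax(rax, res);
  assert(res == 0xdecaf);   // the result ended up in res
  assert(rax == 1234);      // rax kept its original value across the call
  return 0;
}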

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing the class, restore the original code, re-execute the instruction
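
As a hedged illustration of the copy-and-restore protocol described in the comment above, here is the same idea reduced to plain byte copies (SavedPatchSite, save_patch_site and restore_patch_site are made-up names for the sketch; the real stub inlines the buffer into the emitted code, first the bytes and then the byte count):

#include <cstdint>
#include <cstring>

// Illustrative stand-in for the inlined buffer: the saved instruction bytes,
// followed by the number of bytes, matching the comment above.
struct SavedPatchSite {
  std::uint8_t bytes[8];
  std::uint8_t length;
};

// Before patching: remember the original instruction bytes at the patch site.
void save_patch_site(const std::uint8_t* site, std::uint8_t len, SavedPatchSite* buf) {
  std::memcpy(buf->bytes, site, len);
  buf->length = len;
}

// In the runtime, once the class is initialized: put the original bytes back
// so the patched-over instruction can be re-executed.
void restore_patch_site(std::uint8_t* site, const SavedPatchSite* buf) {
  std::memcpy(site, buf->bytes, buf->length);
}

int main() {
  std::uint8_t code[8] = {0x0F, 0x1F, 0x44, 0x00, 0x00};  // original 5-byte instruction (a nop here)
  SavedPatchSite saved;
  save_patch_site(code, 5, &saved);
  std::memset(code, 0xE9, 5);          // "patch": overwrite the site with something else
  restore_patch_site(code, &saved);    // the runtime puts the original bytes back
  return 0;
}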

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel simply to invalidate other
  // processors' caches, and since they may do aggressive prefetching it is
  // very hard to guess what code might be in the icache.
  // Force the instruction to be double-word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}
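
For concreteness, a small self-contained example of the alignment arithmetic; align_up is re-implemented locally with the usual round-up-to-a-power-of-two formula, and the concrete sizes (a 5-byte jump, an 8-byte word) are assumptions for the sketch rather than values pulled from the HotSpot headers:

#include <cassert>
#include <cstddef>

// Local re-implementation of round-up-to-a-power-of-two alignment
// (the same arithmetic, not HotSpot's align_up).
static std::size_t align_up(std::size_t value, std::size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const std::size_t jump_size = 5;   // assumed NativeGeneralJump::instruction_size on x86
  const std::size_t word_size = 8;   // assumed wordSize on 64-bit x86

  // The patch site gets aligned to the next word boundary.
  assert(align_up(jump_size, word_size) == 8);

  // A 5-7 byte instruction that starts on an 8-byte boundary fits inside one
  // 8-byte word, and cache-line boundaries (multiples of 64) are themselves
  // multiples of 8, so the patched instruction can never straddle a line.
  return 0;
}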