src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp

283 
284 
285 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
286   __ bind(_entry);
287   if (_compute_lock) {
288     // lock_reg was destroyed by fast unlocking attempt => recompute it
289     ce->monitor_address(_monitor_ix, _lock_reg);
290   }
291   ce->store_parameter(_lock_reg->as_register(), 0);
292   // note: non-blocking leaf routine => no call info needed
293   Runtime1::StubID exit_id;
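      // (the _nofpu variant skips saving FPU registers, which is safe here
      //  because the compiled method contains no FPU code)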
294   if (ce->compilation()->has_fpu_code()) {
295     exit_id = Runtime1::monitorexit_id;
296   } else {
297     exit_id = Runtime1::monitorexit_nofpu_id;
298   }
299   __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
300   __ jmp(_continuation);
301 }
302 
303 void LoadKlassStub::emit_code(LIR_Assembler* ce) {
304   __ bind(_entry);
305 #ifdef _LP64
306   Register res = _result->as_register();
307   ce->store_parameter(_obj->as_register(), 0);
308   if (res != rax) {
309     // This preserves rax and allows it to be used as the return
310     // register, without messing with the stack.
311     __ xchgptr(rax, res);
312   }
313   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_klass_id)));
314   if (res != rax) {
315     // Swap rax back, and move the result to the correct register.
316     __ xchgptr(rax, res);
317   }
318   __ jmp(_continuation);
319 #else
320   __ should_not_reach_here();
321 #endif
322 }
323 
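The xchgptr dance above can be modeled with ordinary variables. In the
minimal sketch below (not HotSpot code), runtime_call() is a hypothetical
stand-in for the Runtime1 stub, which always hands its result back in rax:
swapping before and after the call leaves rax's live value intact and the
result in res.

#include <cassert>
#include <cstdint>
#include <utility>

// Hypothetical stand-in for the Runtime1 stub: the result comes back in "rax".
uint64_t runtime_call() { return 0xC0FFEE; }

int main() {
  uint64_t rax = 7;  // live value that must survive the call
  uint64_t res = 0;  // the register the result should end up in

  std::swap(rax, res);   // first xchgptr: park rax's live value in res
  rax = runtime_call();  // the call clobbers rax with the result
  std::swap(rax, res);   // second xchgptr: rax restored, result now in res

  assert(rax == 7 && res == 0xC0FFEE);
  return 0;
}
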
324 // Implementation of patching:
325 // - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
326 // - Replace the original code with a call to the stub
327 // At runtime:
328 // - call to stub, jump to runtime
329 // - in runtime: preserve all registers (especially objects, i.e., the source and destination objects)
330 // - in runtime: after initializing the class, restore the original code, then re-execute the instruction
331 
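As a rough sketch of that scheme (with a made-up PatchSite layout, and
ignoring the instruction-cache flushing and cross-thread ordering that the
real code must also handle):

#include <cstdint>
#include <cstring>

// "Inlined buffer": first the saved bytes, then the number of bytes.
struct PatchSite {
  uint8_t bytes[8];
  uint8_t len;
};

// Save the original instruction bytes, then overwrite the site so execution
// reaches the stub instead (jump_bytes stands in for the encoded jump).
void install_patch(uint8_t* code, const uint8_t* jump_bytes, uint8_t len,
                   PatchSite* site) {
  std::memcpy(site->bytes, code, len);  // first the bytes...
  site->len = len;                      // ...then the number of bytes
  std::memcpy(code, jump_bytes, len);   // replace original code with the jump
}

// In the runtime, after the class has been initialized: restore the original
// code so the patched instruction can be re-executed.
void restore_patch(uint8_t* code, const PatchSite* site) {
  std::memcpy(code, site->bytes, site->len);
}
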
332 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
333 
334 void PatchingStub::align_patch_site(MacroAssembler* masm) {
335   // We're patching a 5-7 byte instruction on Intel, and we need to
336   // make sure that no processor sees a partially patched
337   // instruction.  It appears mostly impossible on Intel to simply
338   // invalidate other processors' caches, and since they may
339   // prefetch aggressively it's very hard to guess what code might
340   // already be in the icache.  Force the instruction to be double
341   // word aligned so that it doesn't span a cache line.
342   masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
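
align_up here is the usual round-up-to-a-power-of-two helper (assuming the
standard HotSpot definition), so a 5-byte jump-sized patch site is padded
out to a word-size boundary. A minimal definition consistent with that use:

#include <cstdint>

// Round x up to the next multiple of a power-of-two alignment.
constexpr int align_up(int x, int alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

static_assert(align_up(5, 8) == 8, "a 5-byte jump pads out to 8 bytes");
static_assert(align_up(8, 8) == 8, "already-aligned sizes are unchanged");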