src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp

Old version:

  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
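  // Note: these registers are not arbitrary; the slow-path entry expects
  // the length in rbx and the klass in rdx, and returns the new array in
  // rax, which is what the asserts here and below check.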
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
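  // The _nofpu stub variants skip saving and restoring FPU registers
  // around the runtime call; that is only safe when the compiled method
  // contains no FPU code, hence the has_fpu_code() check below.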
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
// - in runtime: after initializing class, restore original code, reexecute instruction
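// For example (hypothetical numbers): patching a 7-byte mov would save its
// 7 original bytes into the buffer followed by the count byte 7, and the
// site itself is then overwritten with the jump to this stub.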

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that no processor sees a partial instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetching
  // it's very hard to guess what code might be in the icache.
  // Force the instruction to be double-word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));

New version:

  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  0);
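  // Only the object is passed now (slot 0); with the lock register gone,
  // the runtime stub is presumably responsible for locating or inflating
  // the monitor itself.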
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 0);
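  // The stub now receives the object rather than a lock address, so the
  // old _compute_lock recomputation of lock_reg is no longer needed.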
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
#ifdef _LP64
  Register res = _result->as_register();
  ce->store_parameter(_obj->as_register(), 0);
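  // The runtime stub takes the object from parameter slot 0 and returns
  // the Klass* in rax; the xchg dance below lets an arbitrary result
  // register reuse that convention without spilling anything.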
  if (res != rax) {
    // This preserves rax and allows it to be used as the return register,
    // without touching the stack.
    __ xchgptr(rax, res);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_klass_id)));
  if (res != rax) {
    // Swap rax back and move the result into the correct register.
    __ xchgptr(rax, res);
  }
  __ jmp(_continuation);
#else
  __ should_not_reach_here();
#endif
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that no processor sees a partial instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetching
  // it's very hard to guess what code might be in the icache.
  // Force the instruction to be double-word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
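
As a quick sanity check of the alignment reasoning above, here is a minimal
standalone sketch (align_up_int is a hypothetical stand-in for HotSpot's
align_up and assumes a power-of-two alignment, which wordSize is):

#include <cassert>

// Round value up to the next multiple of a power-of-two alignment.
static inline int align_up_int(int value, int alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  // A NativeGeneralJump (jmp rel32) is 5 bytes on x86; rounding the patch
  // site up to the 8-byte word boundary keeps a 5-7 byte instruction inside
  // a single word, so it can never straddle a cache line.
  assert(align_up_int(5, 8) == 8);   // instruction_size -> word-aligned site
  assert(align_up_int(9, 8) == 16);
  return 0;
}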