< prev index next >

src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp

Print this page

236   __ bind(_entry);
237   // Move address of the BasicObjectLock into Z_R1_scratch.
238   if (_compute_lock) {
239     // Lock_reg was destroyed by fast unlocking attempt => recompute it.
240     ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
241   } else {
242     __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
243   }
244   // Note: non-blocking leaf routine => no call info needed.
245   Runtime1::StubID exit_id;
246   if (ce->compilation()->has_fpu_code()) {
247     exit_id = Runtime1::monitorexit_id;
248   } else {
249     exit_id = Runtime1::monitorexit_nofpu_id;
250   }
251   ce->emit_call_c(Runtime1::entry_for (exit_id));
252   CHECK_BAILOUT();
253   __ branch_optimized(Assembler::bcondAlways, _continuation);
254 }
255 





256 // Implementation of patching:
257 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
258 // - Replace original code with a call to the stub.
259 // At Runtime:
260 // - call to stub, jump to runtime.
261 // - in runtime: Preserve all registers (especially objects, i.e., source and destination object).
262 // - in runtime: After initializing class, restore original code, reexecute instruction.
263 
// Negative byte distance from the patch-info location back to the start of
// the patch-site call sequence: a 12-byte load-constant followed by a
// 2-byte BASR, as annotated inline below.
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);
265 
266 void PatchingStub::align_patch_site(MacroAssembler* masm) {
267 #ifndef PRODUCT
268   const char* bc;
269   switch (_id) {
270   case access_field_id: bc = "patch site (access_field)"; break;
271   case load_klass_id: bc = "patch site (load_klass)"; break;
272   case load_mirror_id: bc = "patch site (load_mirror)"; break;
273   case load_appendix_id: bc = "patch site (load_appendix)"; break;
274   default: bc = "patch site (unknown patch id)"; break;
275   }

236   __ bind(_entry);
237   // Move address of the BasicObjectLock into Z_R1_scratch.
238   if (_compute_lock) {
239     // Lock_reg was destroyed by fast unlocking attempt => recompute it.
240     ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
241   } else {
242     __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
243   }
244   // Note: non-blocking leaf routine => no call info needed.
245   Runtime1::StubID exit_id;
246   if (ce->compilation()->has_fpu_code()) {
247     exit_id = Runtime1::monitorexit_id;
248   } else {
249     exit_id = Runtime1::monitorexit_nofpu_id;
250   }
251   ce->emit_call_c(Runtime1::entry_for (exit_id));
252   CHECK_BAILOUT();
253   __ branch_optimized(Assembler::bcondAlways, _continuation);
254 }
255 
// Slow-path stub for a klass load. No s390 implementation exists yet
// ("currently not needed"), so reaching this stub is a hard error.
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  // Currently not needed.
  Unimplemented();
}
260 
261 // Implementation of patching:
262 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
263 // - Replace original code with a call to the stub.
264 // At Runtime:
265 // - call to stub, jump to runtime.
266 // - in runtime: Preserve all registers (especially objects, i.e., source and destination object).
267 // - in runtime: After initializing class, restore original code, reexecute instruction.
268 
// Negative byte distance from the patch-info location back to the start of
// the patch-site call sequence: a 12-byte load-constant followed by a
// 2-byte BASR, as annotated inline below.
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);
270 
271 void PatchingStub::align_patch_site(MacroAssembler* masm) {
272 #ifndef PRODUCT
273   const char* bc;
274   switch (_id) {
275   case access_field_id: bc = "patch site (access_field)"; break;
276   case load_klass_id: bc = "patch site (load_klass)"; break;
277   case load_mirror_id: bc = "patch site (load_mirror)"; break;
278   case load_appendix_id: bc = "patch site (load_appendix)"; break;
279   default: bc = "patch site (unknown patch id)"; break;
280   }
< prev index next >