< prev index next >

src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp

Print this page

226 
227 
// Slow-path stub for monitor exit: passes the lock address to the
// Runtime1 monitorexit routine and resumes at _continuation.
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // Lock address is not yet in _lock_reg; materialize it from the
    // monitor index.
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  // Hand the lock address to the runtime through the reserved
  // outgoing-argument slot at the top of the stack.
  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed.
  // Presumably the nofpu variant skips saving FPU state when the
  // compilation has no FPU code -- verify against Runtime1.
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}
245 




246 
// Offset from the patching-call return address back to the patch word.
// On ARM the call return is directly after the patch word, hence 0.
int PatchingStub::_patch_info_offset = 0;
249 
// Intentionally a no-op on ARM; the disabled block below records an
// open question about whether alignment is ever required here.
void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we are required to implement this
    ShouldNotReachHere();
#endif
}
256 
257 void PatchingStub::emit_code(LIR_Assembler* ce) {
258   const int patchable_instruction_offset = 0;
259 
260   assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
261          "not enough room for call");
262   assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
263   Label call_patch;
264   bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);
265 

226 
227 
228 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
229   __ bind(_entry);
230   if (_compute_lock) {
231     ce->monitor_address(_monitor_ix, _lock_reg);
232   }
233   const Register lock_reg = _lock_reg->as_pointer_register();
234 
235   ce->verify_reserved_argument_area_size(1);
236   __ str(lock_reg, Address(SP));
237 
238   // Non-blocking leaf routine - no call info needed
239   Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
240                              Runtime1::monitorexit_id :
241                              Runtime1::monitorexit_nofpu_id;
242   __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
243   __ b(_continuation);
244 }
245 
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  // Currently not needed on this platform; guard against accidental use.
  Unimplemented();
}
250 
// Offset from the patching-call return address back to the patch word.
// On ARM the call return is directly after the patch word, hence 0.
int PatchingStub::_patch_info_offset = 0;
253 
// Intentionally a no-op on ARM; the disabled block below records an
// open question about whether alignment is ever required here.
void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we are required to implement this
    ShouldNotReachHere();
#endif
}
260 
261 void PatchingStub::emit_code(LIR_Assembler* ce) {
262   const int patchable_instruction_offset = 0;
263 
264   assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
265          "not enough room for call");
266   assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
267   Label call_patch;
268   bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);
269 
< prev index next >