
src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}

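The only choice made here is between the monitorexit_id and monitorexit_nofpu_id runtime entries: the _nofpu variant is selected when the compiled method contains no FPU code, so the runtime stub can skip saving and restoring the floating-point registers around the leaf call. Below is a minimal, self-contained sketch of that selection pattern; the function names and messages are illustrative stand-ins, not HotSpot APIs.

// Illustrative sketch only -- not HotSpot code. It mirrors the pattern above:
// pick the _nofpu variant of a runtime entry when the compiled method has no
// FPU code, so the stub can save fewer registers.
#include <cstdio>

using StubEntry = void (*)();

static void monitorexit_full()  { std::puts("monitorexit: saves GP and FP registers"); }
static void monitorexit_nofpu() { std::puts("monitorexit_nofpu: saves GP registers only"); }

// Hypothetical stand-in for ce->compilation()->has_fpu_code().
static StubEntry select_monitorexit_entry(bool has_fpu_code) {
  return has_fpu_code ? monitorexit_full : monitorexit_nofpu;
}

int main() {
  select_monitorexit_entry(false)();  // method with no FPU code -> cheaper stub
  select_monitorexit_entry(true)();   // method with FPU code -> full save/restore
  return 0;
}
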
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  // Currently not needed.
  Unimplemented();
}

// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we need to implement this
  ShouldNotReachHere();
#endif
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = 0;

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);
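The two asserts at the top of PatchingStub::emit_code bound the patch site: it must be at least one native call wide, its size must fit in a single byte (<= 0xFF), and it must be a whole number of 4-byte words. A small stand-alone sketch of the same checks follows; the call-instruction size used here is a hypothetical constant standing in for NativeCall::instruction_size.

// Illustrative sketch only -- not HotSpot code. It restates the bounds checked
// by the asserts above on the number of bytes to copy at a patch site.
#include <cassert>
#include <cstdint>

// Hypothetical size of one native call instruction; the real value comes from
// NativeCall::instruction_size.
constexpr uint32_t kNativeCallSize = 4;

static bool patch_site_ok(uint32_t bytes_to_copy) {
  return bytes_to_copy >= kNativeCallSize &&   // enough room for the call
         bytes_to_copy <= 0xFF &&              // size must fit in one byte
         (bytes_to_copy & 3u) == 0;            // a whole number of 4-byte words
}

int main() {
  assert(patch_site_ok(8));       // two words: acceptable patch site
  assert(!patch_site_ok(10));     // not a multiple of four bytes
  assert(!patch_site_ok(0x104));  // too large to encode in one byte
  return 0;
}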