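// Slow path for monitorexit: recompute the address of the lock if
// necessary, pass it to the runtime stub on the stack, and branch
// back to _continuation afterwards.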
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
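  // Pass the lock address as the single stack argument at SP.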
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
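  // The _nofpu variant does not save FPU registers; it can be used
  // when the compiled method contains no FPU code.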
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}

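// Only reached with compact object headers, which this port does not implement.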
void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  Unimplemented(); // Only needed with compact object headers.
}

// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we need to implement this
  ShouldNotReachHere();
#endif
}

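// Emit the out-of-line patching code. The instructions being patched
// must be at least as large as a native call and a multiple of four
// bytes, since they are copied word by word.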
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = 0;

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);