}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
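  // Pass the BasicObjectLock address to the runtime as stub parameter 0.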
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
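  // The _nofpu variant of the runtime stub skips saving FPU registers and is
  // used when the compiled method contains no FPU code.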
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
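  // Load the continuation address into the return-address register so the
  // runtime stub returns directly to _continuation after the tail jump below.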
  __ la(ra, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}

void LoadKlassStub::emit_code(LIR_Assembler* ce) {
  // Currently not needed.
  Unimplemented();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At Runtime:
//   - call to stub, jump to runtime
//   - in runtime: preserve all registers (especially objects, i.e., source and destination object)
//   - in runtime: after initializing class, restore original code, reexecute instruction

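// Kept for the shared C1 PatchingStub interface; runtime patching code is
// never emitted on RISC-V (see PatchingStub::emit_code below).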
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

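// Nothing to align: C1 patch sites are not emitted on RISC-V.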
void PatchingStub::align_patch_site(MacroAssembler* masm) {}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "RISCV should not use C1 runtime patching");
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
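  // Pass the deoptimization trap request to the runtime entry as stub parameter 0.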
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}