< prev index next > src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
Print this page
__ membar(Assembler::AnyAny);
__ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
return entry;
}
+ // Adapter stub used when a continuation (virtual thread) resumes execution in
+ // the interpreter: the thawed frame is in place, but the interpreter's
+ // register state (bcp, locals, cp cache, esp, sp, method, dispatch table)
+ // must be rebuilt from the frame before execution can continue.
+ // Returns nullptr (no stub generated) when continuations are disabled.
+ address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
+ if (!Continuations::enabled()) return nullptr;
+ address start = __ pc();
+
+ // Reload bytecode pointer and locals base from the interpreter frame.
+ __ restore_bcp();
+ __ restore_locals();
+
+ // Restore constant pool cache
+ __ ldr(rcpool, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
+
+ // Restore Java expression stack pointer
+ // (last_sp is held as an rfp-relative offset in stack-element units, hence
+ // the scaled lea below rather than a plain load)
+ __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
+ // and NULL it as marker that esp is now tos until next java call
+ __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+
+ // Restore machine SP
+ // (extended_sp is likewise stored as an rfp-relative word offset)
+ __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
+ __ lea(sp, Address(rfp, rscratch1, Address::lsl(LogBytesPerWord)));
+
+ // Restore method
+ __ ldr(rmethod, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
+
+ // Restore dispatch
+ // adrp materializes the 4K page of the table; add the low bits to complete it.
+ uint64_t offset;
+ __ adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
+ __ add(rdispatch, rdispatch, offset);
+
+ // Return to the resume pc supplied by the caller in lr.
+ __ ret(lr);
+
+ return start;
+ }
+
+
// Helpers for commoning out cases in the various type of method entries.
//
// increment invocation count & check for overflow
// result handler is in r0
// set result handler
__ mov(result_handler, r0);
+ // Save it in the frame in case of preemption; we cannot rely on callee saved registers.
+ __ str(r0, Address(rfp, frame::interpreter_frame_result_handler_offset * wordSize));
+
// pass mirror handle if static call
{
Label L;
__ ldrw(t, Address(rmethod, Method::access_flags_offset()));
__ tbz(t, exact_log2(JVM_ACC_STATIC), L);
}
// pass JNIEnv
__ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
- // Set the last Java PC in the frame anchor to be the return address from
- // the call to the native method: this will allow the debugger to
- // generate an accurate stack trace.
+ // It is enough that the pc() points into the right code
+ // segment. It does not have to be the correct return pc.
+ // For convenience we use the pc we want to resume to in
+ // case of preemption on Object.wait.
Label native_return;
__ set_last_Java_frame(esp, rfp, native_return, rscratch1);
// change thread state
#ifdef ASSERT
// Change state to native
__ mov(rscratch1, _thread_in_native);
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);
+ // Paired with pop_cont_fastpath() after the call returns.
+ __ push_cont_fastpath();
+
// Call the native method.
__ blr(r10);
- __ bind(native_return);
+
+ __ pop_cont_fastpath();
+
__ get_method(rmethod);
// result potentially in r0 or v0
// Restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
// change thread state
__ mov(rscratch1, _thread_in_Java);
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);
+ if (LockingMode != LM_LEGACY) {
+ // Check preemption for Object.wait()
+ // A non-zero preempt_alternate_return means the thread was preempted during
+ // the native call: clear the field and continue at that alternate pc instead
+ // of falling through.
+ Label not_preempted;
+ __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+ __ cbz(rscratch1, not_preempted);
+ __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+ __ br(rscratch1);
+ // Execution resumes here when the thread is mounted again — this is the pc
+ // that was installed as last_Java_pc via set_last_Java_frame above.
+ __ bind(native_return);
+ __ restore_after_resume(true /* is_native */);
+ // reload result_handler
+ // (registers did not survive the preemption; it was saved in the frame above)
+ __ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
+ __ bind(not_preempted);
+ } else {
+ // any pc will do so just use this one for LM_LEGACY to keep code together.
+ __ bind(native_return);
+ }
+
// reset_last_Java_frame
__ reset_last_Java_frame(true);
if (CheckJNICalls) {
// clear_pending_jni_exception_check
< prev index next >