    __ cast_primitive_type(type, x10);
  }

  __ ret();                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
                                                                address runtime_entry) {
  assert_cond(runtime_entry != nullptr);
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(xthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(xthread);
  __ membar(MacroAssembler::AnyAny);
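  // AnyAny emits a full two-way fence (ordering reads and writes on both
  // sides), so the re-dispatch below observes any state updated while we
  // were stopped at the safepoint.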
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ restore_bcp();
  __ restore_locals();

  // Restore constant pool cache
  __ ld(xcpool, Address(fp, frame::interpreter_frame_cache_offset * wordSize));

  // Restore Java expression stack pointer
  __ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ shadd(esp, t0, fp, t0, Interpreter::logStackElementSize);
  // and null it as a marker that esp is now tos until the next Java call
  __ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));

  // Restore machine SP
  __ ld(t0, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ shadd(sp, t0, fp, t0, LogBytesPerWord);
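  // Note: last_sp and extended_sp are saved in the frame as word offsets
  // relative to fp (not absolute pointers), which is why each shadd above
  // reconstructs the pointer as fp + (saved_offset << shift).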

  // Restore method
  __ ld(xmethod, Address(fp, frame::interpreter_frame_method_offset * wordSize));

  // Restore dispatch
  __ la(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()));

  __ ret();

  return start;
}


// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for a negative value instead of overflow
// so that we have a 'sticky' overflow test
//
// xmethod: method
//
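// A sketch of the idea (assuming the InvocationCounter layout, with the count
// kept in the upper bits of the counter word): once the incremented, masked
// counter value turns negative it stays negative across subsequent
// increments, so a "branch if negative" test keeps reporting overflow on
// every later check instead of firing only at the exact crossing point.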
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in the MDO,
  // depending on whether we're profiling or not.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ld(x10, Address(xmethod, Method::method_data_offset()));
    __ beqz(x10, no_mdo);

// ...

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == xlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t0,
         "adjust this code");

  // The generated handlers do not touch xmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ jalr(t);
  __ get_method(xmethod);  // slow path can do a GC, reload xmethod

  // result handler is in x10
  // set result handler
  __ mv(result_handler, x10);
  // Save it in the frame in case of preemption; we cannot rely on callee saved registers.
  __ sd(x10, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));

  // pass mirror handle if static call
  {
    Label L;
    __ lwu(t, Address(xmethod, Method::access_flags_offset()));
    __ test_bit(t0, t, exact_log2(JVM_ACC_STATIC));
    __ beqz(t0, L);
    // get mirror
    __ load_mirror(t, xmethod, x28, t1);
    // copy mirror into activation frame
    __ sd(t, Address(fp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ addi(c_rarg1, fp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }
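  // Note: JNI receives a handle (the address of an oop slot), never a raw
  // oop. Keeping the mirror in the activation frame means the GC can find
  // and, if necessary, update it; c_rarg1 is simply the address of that slot.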

  // get native function entry point in x28
  {
    Label L;
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ la(t, unsatisfied);
    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8

    __ bne(x28, t1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               xmethod);
    __ get_method(xmethod);
    __ ld(x28, Address(xmethod, Method::native_function_offset()));
    __ bind(L);
  }
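  // While the method is unlinked, its entry point is the shared stub that
  // throws UnsatisfiedLinkError. If we see that stub, call
  // InterpreterRuntime::prepare_native_call to look the function up and link
  // it; if lookup fails, the throwing stub remains and fires when invoked.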

  // pass JNIEnv
  __ add(c_rarg0, xthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  // For convenience we use the pc we want to resume to in
  // case of preemption on Object.wait.
  Label native_return;
  __ set_last_Java_frame(esp, fp, native_return, x30);
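  // set_last_Java_frame publishes sp/fp/pc in the thread's frame anchor so
  // that stack walkers (safepoints, exceptions, profilers) can traverse this
  // frame while the thread executes native code. The JNIEnv passed above is
  // embedded in the JavaThread, hence the simple add to compute its address.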

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lwu(t, Address(xthread, JavaThread::thread_state_offset()));
    __ addi(t0, zr, (u1)_thread_in_Java);
    __ beq(t, t0, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ la(t1, Address(xthread, JavaThread::thread_state_offset()));
  __ mv(t0, _thread_in_native);
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  __ sw(t0, Address(t1));
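  // The LoadStore|StoreStore fence orders all of this thread's prior accesses
  // before the state store, so a concurrent observer that reads
  // _thread_in_native can rely on the frame contents published above.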

  __ push_cont_fastpath();

  // Call the native method.
  __ jalr(x28);

  __ pop_cont_fastpath();
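  // push/pop_cont_fastpath bracket the native call: they record a stack
  // watermark in the JavaThread so that, should a mounted continuation be
  // frozen while we are in native code, freezing takes the slow path for
  // these frames (a sketch of the intent; the exact policy lives in the
  // continuation code).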

  __ get_method(xmethod);
  // result potentially in x10 or f10

  // Restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(t0);

  // make room for the pushes we're about to do
  __ sub(t0, esp, 4 * wordSize);
  __ andi(sp, t0, -16);
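  // Reserve four words below esp and round the machine SP down to a 16-byte
  // boundary (andi with -16), as the RISC-V ABI requires, before pushing the
  // two result slots below.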

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes, or anything else is added to the stack, then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

// ...

    // Don't use call_VM: it would see a possible pending exception, forward
    // it, and never return here, preventing us from clearing _last_native_pc
    // further down. So we make the runtime call by hand.
    __ mv(c_rarg0, xthread);
    __ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ get_method(xmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);

  __ mv(t0, _thread_in_Java);
  __ sw(t0, Address(xthread, JavaThread::thread_state_offset()));

  if (LockingMode != LM_LEGACY) {
    // Check preemption for Object.wait()
    Label not_preempted;
    __ ld(t0, Address(xthread, JavaThread::preempt_alternate_return_offset()));
    __ beqz(t0, not_preempted);
    __ sd(zr, Address(xthread, JavaThread::preempt_alternate_return_offset()));
    __ jr(t0);
    __ bind(native_return);
    __ restore_after_resume(true /* is_native */);
    // reload result_handler
    __ ld(result_handler, Address(fp, frame::interpreter_frame_result_handler_offset * wordSize));
    __ bind(not_preempted);
  } else {
    // any pc will do, so just use this one for LM_LEGACY to keep the code together.
    __ bind(native_return);
  }
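  // If the VM preempted this thread during the native call (e.g. a virtual
  // thread in Object.wait()), preempt_alternate_return holds the pc to unwind
  // to: clear it and jump there. When the continuation is remounted later,
  // execution resumes at native_return, the pc we published via
  // set_last_Java_frame, and restore_after_resume rebuilds the interpreter
  // state; result_handler is reloaded from the frame because registers did
  // not survive.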

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ sd(zr, Address(xthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ld(t, Address(xthread, JavaThread::active_handles_offset()));
  __ sd(zr, Address(t, JNIHandleBlock::top_offset()));
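  // Resetting the block's top releases, in a single store, every JNI local
  // handle created during the call; the block itself stays cached for reuse.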

  // If the result is an oop, unbox it and store it in the frame, where the GC
  // will see it and the result handler will pick it up.

  {
    Label no_oop;
    __ la(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ bne(t, result_handler, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.