    break;
  default : ShouldNotReachHere();
  }
  __ ret(lr); // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
    TosState state,
    address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ push_cont_fastpath(rthread);
  __ call_VM(noreg, runtime_entry);
  __ pop_cont_fastpath(rthread);
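  // Full two-way fence so that all memory effects of the runtime call are
  // ordered before we resume bytecode dispatch below.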
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

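// Entry used when a frozen interpreter frame is thawed and resumes execution:
// rebuilds the interpreter's register state (bcp, locals, constant pool cache,
// expression stack pointer, machine sp, method, dispatch table) from the frame.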
address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
  if (!Continuations::enabled()) return nullptr;
  address start = __ pc();

  __ restore_bcp();
  __ restore_locals();

  // Restore constant pool cache
  __ ldr(rcpool, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));

  // Restore Java expression stack pointer
  __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
  // and null it as a marker that esp is now tos until the next Java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  // Restore machine SP
  __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ lea(sp, Address(rfp, rscratch1, Address::lsl(LogBytesPerWord)));

  // Restore method
  __ ldr(rmethod, Address(rfp, frame::interpreter_frame_method_offset * wordSize));

  // Restore dispatch
  uint64_t offset;
  __ adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
  __ add(rdispatch, rdispatch, offset);

  __ ret(lr);

  return start;
}


// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling.
  int increment = InvocationCounter::count_increment;
  Label no_mdo;
  if (ProfileInterpreter) {
    // Are we profiling?
    __ ldr(r0, Address(rmethod, Method::method_data_offset()));
    __ cbz(r0, no_mdo);

  // ... (intervening code elided; the excerpt resumes inside generate_native_entry)

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ blr(t);
  __ get_method(rmethod); // slow path can do a GC, reload rmethod

  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  __ str(r0, Address(rfp, frame::interpreter_frame_result_handler_offset * wordSize));
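  // The handler is also saved in the frame: if this frame is preempted and
  // later resumed, register contents are lost, so the result handler is
  // reloaded from this slot after the native call (see the oop-unboxing
  // code below).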

  // pass mirror handle if static call
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
    // get mirror
    __ load_mirror(t, rmethod, r10, rscratch2);
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }
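  // Note: JNI receives the mirror as a handle (the address of an oop slot in
  // the frame), not as a raw oop, so the GC can find and update the oop while
  // the thread is in native code.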

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ lea(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
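  // JNIEnv* is the first argument of every JNI function; it is embedded at a
  // fixed offset inside the JavaThread, so its address can be computed directly.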

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  // For convenience we use the pc we want to resume to in
  // case of preemption on Object.wait.
  Label native_return;
  __ set_last_Java_frame(esp, rfp, native_return, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, (u1)_thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
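  // stlrw is a store-release: all prior writes (e.g. the frame anchor) become
  // visible to other threads before the state changes to _thread_in_native.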

  __ push_cont_fastpath();

  // Call the native method.
  __ blr(r10);

  __ pop_cont_fastpath();

  __ get_method(rmethod);
  // result potentially in r0 or v0

  // Restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);
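  // The AArch64 ABI requires sp to stay 16-byte aligned; and'ing with -16
  // rounds the new stack pointer down to that alignment.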

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes changes or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  __ verify_sve_vector_length();

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Force this write out before the read below
  if (!UseSystemMemoryBarrier) {
    __ dmb(Assembler::ISH);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label L, Continue;

    // No need for acquire as Java threads always disarm themselves.
    __ safepoint_poll(L, true /* at_return */, false /* acquire */, false /* in_nmethod */);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM: it will see the possible pending exception and
    // forward it, never returning here, which would keep us from clearing
    // _last_native_pc down below. So we do the runtime call by hand.
    __ mov(c_rarg0, rthread);
    __ lea(rscratch2, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ blr(rscratch2);
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

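  // With non-legacy locking a virtual thread may have been preempted while in
  // Object.wait(): the preemption machinery then installs an alternate return
  // address. If one is set, clear it and jump there; the resumed frame
  // re-enters at native_return and restores its state.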
  if (LockingMode != LM_LEGACY) {
    // Check preemption for Object.wait()
    Label not_preempted;
    __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
    __ cbz(rscratch1, not_preempted);
    __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
    __ br(rscratch1);
    __ bind(native_return);
    __ restore_after_resume(true /* is_native */);
    __ bind(not_preempted);
  } else {
    // Any pc will do, so just use this one for LM_LEGACY to keep the code together.
    __ bind(native_return);
  }

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  if (CheckJNICalls) {
    // clear_pending_jni_exception_check
    __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset()));
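  // Zeroing the block's top index bulk-frees every JNI handle the native
  // method allocated, without walking them individually.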

  // If the result is an oop, unbox it and store it in the frame where the GC
  // will see it; the result handler will then pick it up.

  {
    Label no_oop;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ resolve_jobject(r0, t, rscratch2);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing the oop, which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

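  // If a stack overflow during the native call unprotected the yellow guard
  // zone, re-enable it before returning to Java.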
  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrw(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
    __ br(Assembler::NE, no_reguard);

    __ push_call_clobbered_registers();
    __ mov(c_rarg0, rthread);