< prev index next >

src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp

Print this page

 595     break;
 596   default       : ShouldNotReachHere();
 597   }
 598   __ ret(lr);                                  // return from result handler
 599   return entry;
 600 }
 601 
 602 address TemplateInterpreterGenerator::generate_safept_entry_for(
 603         TosState state,
 604         address runtime_entry) {
 605   address entry = __ pc();                     // start address of the generated stub
 606   __ push(state);                              // spill the TOS-cached value (per TosState) onto the expression stack
 607   __ push_cont_fastpath(rthread);              // NOTE(review): presumably marks frame for continuation fast path -- confirm
 608   __ call_VM(noreg, runtime_entry);            // call the runtime safepoint entry; no result expected (noreg)
 609   __ pop_cont_fastpath(rthread);               // undo the fast-path marking after the VM call returns
 610   __ membar(Assembler::AnyAny);                // full memory barrier before resuming bytecode execution
 611   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));  // TOS is on the stack, so dispatch with vtos
 612   return entry;
 613 }
 614 



































 615 // Helpers for commoning out cases in the various type of method entries.
 616 //
 617 
 618 
 619 // increment invocation count & check for overflow
 620 //
 621 // Note: checking for negative value instead of overflow
 622 //       so we have a 'sticky' overflow test
 623 //
 624 // rmethod: method
 625 //
 626 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
 627   Label done;
 628   // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
 629   int increment = InvocationCounter::count_increment;
 630   Label no_mdo;
 631   if (ProfileInterpreter) {
 632     // Are we profiling?
 633     __ ldr(r0, Address(rmethod, Method::method_data_offset()));
 634     __ cbz(r0, no_mdo);

1302 
1303   // call signature handler
1304   assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
1305          "adjust this code");
1306   assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
1307          "adjust this code");
1308   assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
1309           "adjust this code");
1310 
1311   // The generated handlers do not touch rmethod (the method).
1312   // However, large signatures cannot be cached and are generated
1313   // each time here.  The slow-path generator can do a GC on return,
1314   // so we must reload it after the call.
1315   __ blr(t);
1316   __ get_method(rmethod);        // slow path can do a GC, reload rmethod
1317 
1318 
1319   // result handler is in r0
1320   // set result handler
1321   __ mov(result_handler, r0);


1322   // pass mirror handle if static call
1323   {
1324     Label L;
1325     __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
1326     __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
1327     // get mirror
1328     __ load_mirror(t, rmethod, r10, rscratch2);
1329     // copy mirror into activation frame
1330     __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
1331     // pass handle to mirror
1332     __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
1333     __ bind(L);
1334   }
1335 
1336   // get native function entry point in r10
1337   {
1338     Label L;
1339     __ ldr(r10, Address(rmethod, Method::native_function_offset()));
1340     address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1341     __ mov(rscratch2, unsatisfied);
1342     __ ldr(rscratch2, rscratch2);
1343     __ cmp(r10, rscratch2);
1344     __ br(Assembler::NE, L);
1345     __ call_VM(noreg,
1346                CAST_FROM_FN_PTR(address,
1347                                 InterpreterRuntime::prepare_native_call),
1348                rmethod);
1349     __ get_method(rmethod);
1350     __ ldr(r10, Address(rmethod, Method::native_function_offset()));
1351     __ bind(L);
1352   }
1353 
1354   // pass JNIEnv
1355   __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
1356 
1357   // Set the last Java PC in the frame anchor to be the return address from
1358   // the call to the native method: this will allow the debugger to
1359   // generate an accurate stack trace.
1360   Label native_return;
1361   __ set_last_Java_frame(esp, rfp, native_return, rscratch1);
1362 
1363   // change thread state
1364 #ifdef ASSERT
1365   {
1366     Label L;
1367     __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
1368     __ cmp(t, (u1)_thread_in_Java);
1369     __ br(Assembler::EQ, L);
1370     __ stop("Wrong thread state in native stub");
1371     __ bind(L);
1372   }
1373 #endif
1374 
1375   // Change state to native
1376   __ mov(rscratch1, _thread_in_native);
1377   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1378   __ stlrw(rscratch1, rscratch2);
1379 
1380   // Call the native method.
1381   __ blr(r10);
1382   __ bind(native_return);
1383   __ get_method(rmethod);
1384   // result potentially in r0 or v0
1385 
1386   // Restore cpu control state after JNI call
1387   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1388 
1389   // make room for the pushes we're about to do
1390   __ sub(rscratch1, esp, 4 * wordSize);
1391   __ andr(sp, rscratch1, -16);
1392 
1393   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1394   // in order to extract the result of a method call. If the order of these
1395   // pushes change or anything else is added to the stack then the code in
1396   // interpreter_frame_result must also change.
1397   __ push(dtos);
1398   __ push(ltos);
1399 
1400   __ verify_sve_vector_length();
1401 
1402   // change thread state

1427     __ bind(L);
1428 
1429     // Don't use call_VM as it will see a possible pending exception
1430     // and forward it and never return here preventing us from
1431     // clearing _last_native_pc down below. So we do a runtime call by
1432     // hand.
1433     //
1434     __ mov(c_rarg0, rthread);
1435     __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1436     __ blr(rscratch2);
1437     __ get_method(rmethod);
1438     __ reinit_heapbase();
1439     __ bind(Continue);
1440   }
1441 
1442   // change thread state
1443   __ mov(rscratch1, _thread_in_Java);
1444   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1445   __ stlrw(rscratch1, rscratch2);
1446 












1447   // reset_last_Java_frame
1448   __ reset_last_Java_frame(true);
1449 
1450   if (CheckJNICalls) {
1451     // clear_pending_jni_exception_check
1452     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1453   }
1454 
1455   // reset handle block
1456   __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
1457   __ str(zr, Address(t, JNIHandleBlock::top_offset()));
1458 
1459   // If result is an oop unbox and store it in frame where gc will see it
1460   // and result handler will pick it up
1461 
1462   {
1463     Label no_oop;
1464     __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));

1465     __ cmp(t, result_handler);
1466     __ br(Assembler::NE, no_oop);
1467     // Unbox oop result, e.g. JNIHandles::resolve result.
1468     __ pop(ltos);
1469     __ resolve_jobject(r0, t, rscratch2);
1470     __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
1471     // keep stack depth as expected by pushing oop which will eventually be discarded
1472     __ push(ltos);
1473     __ bind(no_oop);
1474   }
1475 
1476   {
1477     Label no_reguard;
1478     __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
1479     __ ldrw(rscratch1, Address(rscratch1));
1480     __ cmp(rscratch1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
1481     __ br(Assembler::NE, no_reguard);
1482 
1483     __ push_call_clobbered_registers();
1484     __ mov(c_rarg0, rthread);

 595     break;
 596   default       : ShouldNotReachHere();
 597   }
 598   __ ret(lr);                                  // return from result handler
 599   return entry;
 600 }
 601 
 602 address TemplateInterpreterGenerator::generate_safept_entry_for(
 603         TosState state,
 604         address runtime_entry) {
 605   address entry = __ pc();                     // start address of the generated stub
 606   __ push(state);                              // spill the TOS-cached value (per TosState) onto the expression stack
 607   __ push_cont_fastpath(rthread);              // NOTE(review): presumably marks frame for continuation fast path -- confirm
 608   __ call_VM(noreg, runtime_entry);            // call the runtime safepoint entry; no result expected (noreg)
 609   __ pop_cont_fastpath(rthread);               // undo the fast-path marking after the VM call returns
 610   __ membar(Assembler::AnyAny);                // full memory barrier before resuming bytecode execution
 611   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));  // TOS is on the stack, so dispatch with vtos
 612   return entry;
 613 }
 614 
 615 address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter() {
 616   if (!Continuations::enabled()) return nullptr;  // no adapter needed when continuations are disabled
 617   address start = __ pc();                        // start address of the generated adapter stub
 618 
 619   // Restore rfp first since we need it to restore rest of registers
 620   __ leave();
 621 
 622   // Restore constant pool cache
 623   __ ldr(rcpool, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 624 
 625   // Restore Java expression stack pointer (last_sp is stored as an
 626   // element-count offset from rfp, hence the scaled lea below)
 627   __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 628   __ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
 629   // and null out last_sp as a marker that esp is now tos until the next Java call
 630   __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 631 
 632   // Restore machine SP (extended_sp is stored as a word-count offset from rfp)
 633   __ ldr(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
 634   __ lea(sp, Address(rfp, rscratch1, Address::lsl(LogBytesPerWord)));
 635 
 636   // Prepare for adjustment on return to call_VM_leaf_base()
 637   __ ldr(rmethod, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
 638   __ stp(rscratch1, rmethod, Address(__ pre(sp, -2 * wordSize)));  // pre-decrement sp and store the pair below it
 639 
 640   // Restore dispatch table base register (adrp+add materializes the
 641   // page-relative address of the normal dispatch table)
 642   uint64_t offset;
 643   __ adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
 644   __ add(rdispatch, rdispatch, offset);
 645 
 646   __ ret(lr);                                     // resume execution at the caller's return address
 647 
 648   return start;
 649 }
 648 
 649 
 650 // Helpers for commoning out cases in the various type of method entries.
 651 //
 652 
 653 
 654 // increment invocation count & check for overflow
 655 //
 656 // Note: checking for negative value instead of overflow
 657 //       so we have a 'sticky' overflow test
 658 //
 659 // rmethod: method
 660 //
 661 void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
 662   Label done;
 663   // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
 664   int increment = InvocationCounter::count_increment;
 665   Label no_mdo;
 666   if (ProfileInterpreter) {
 667     // Are we profiling?
 668     __ ldr(r0, Address(rmethod, Method::method_data_offset()));
 669     __ cbz(r0, no_mdo);

1337 
1338   // call signature handler
1339   assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
1340          "adjust this code");
1341   assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
1342          "adjust this code");
1343   assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
1344           "adjust this code");
1345 
1346   // The generated handlers do not touch rmethod (the method).
1347   // However, large signatures cannot be cached and are generated
1348   // each time here.  The slow-path generator can do a GC on return,
1349   // so we must reload it after the call.
1350   __ blr(t);
1351   __ get_method(rmethod);        // slow path can do a GC, reload rmethod
1352 
1353 
1354   // result handler is in r0
1355   // set result handler
1356   __ mov(result_handler, r0);
1357   __ str(r0, Address(rfp, frame::interpreter_frame_result_handler_offset * wordSize));
1358 
1359   // pass mirror handle if static call
1360   {
1361     Label L;
1362     __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
1363     __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
1364     // get mirror
1365     __ load_mirror(t, rmethod, r10, rscratch2);
1366     // copy mirror into activation frame
1367     __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
1368     // pass handle to mirror
1369     __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
1370     __ bind(L);
1371   }
1372 
1373   // get native function entry point in r10
1374   {
1375     Label L;
1376     __ ldr(r10, Address(rmethod, Method::native_function_offset()));
1377     address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1378     __ mov(rscratch2, unsatisfied);
1379     __ ldr(rscratch2, rscratch2);
1380     __ cmp(r10, rscratch2);
1381     __ br(Assembler::NE, L);
1382     __ call_VM(noreg,
1383                CAST_FROM_FN_PTR(address,
1384                                 InterpreterRuntime::prepare_native_call),
1385                rmethod);
1386     __ get_method(rmethod);
1387     __ ldr(r10, Address(rmethod, Method::native_function_offset()));
1388     __ bind(L);
1389   }
1390 
1391   // pass JNIEnv
1392   __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
1393 
1394   // Set the last Java PC in the frame anchor to be the return address from
1395   // the call to the native method: this will allow the debugger to
1396   // generate an accurate stack trace.
1397   Label resume_pc;
1398   __ set_last_Java_frame(esp, rfp, resume_pc, rscratch1);
1399 
1400   // change thread state
1401 #ifdef ASSERT
1402   {
1403     Label L;
1404     __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
1405     __ cmp(t, (u1)_thread_in_Java);
1406     __ br(Assembler::EQ, L);
1407     __ stop("Wrong thread state in native stub");
1408     __ bind(L);
1409   }
1410 #endif
1411 
1412   // Change state to native
1413   __ mov(rscratch1, _thread_in_native);
1414   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1415   __ stlrw(rscratch1, rscratch2);
1416 
1417   // Call the native method.
1418   __ blr(r10);
1419 
1420   __ get_method(rmethod);
1421   // result potentially in r0 or v0
1422 
1423   // Restore cpu control state after JNI call
1424   __ restore_cpu_control_state_after_jni(rscratch1, rscratch2);
1425 
1426   // make room for the pushes we're about to do
1427   __ sub(rscratch1, esp, 4 * wordSize);
1428   __ andr(sp, rscratch1, -16);
1429 
1430   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1431   // in order to extract the result of a method call. If the order of these
1432   // pushes change or anything else is added to the stack then the code in
1433   // interpreter_frame_result must also change.
1434   __ push(dtos);
1435   __ push(ltos);
1436 
1437   __ verify_sve_vector_length();
1438 
1439   // change thread state

1464     __ bind(L);
1465 
1466     // Don't use call_VM as it will see a possible pending exception
1467     // and forward it and never return here preventing us from
1468     // clearing _last_native_pc down below. So we do a runtime call by
1469     // hand.
1470     //
1471     __ mov(c_rarg0, rthread);
1472     __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1473     __ blr(rscratch2);
1474     __ get_method(rmethod);
1475     __ reinit_heapbase();
1476     __ bind(Continue);
1477   }
1478 
1479   // change thread state
1480   __ mov(rscratch1, _thread_in_Java);
1481   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1482   __ stlrw(rscratch1, rscratch2);
1483 
1484   // Check preemption for Object.wait()
1485   Label not_preempted;
1486   __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1487   __ cbz(rscratch1, not_preempted);
1488   __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
1489   __ br(rscratch1);
1490   __ bind(resume_pc);
1491   // On resume we need to set up stack as expected
1492   __ push(dtos);
1493   __ push(ltos);
1494   __ bind(not_preempted);
1495 
1496   // reset_last_Java_frame
1497   __ reset_last_Java_frame(true);
1498 
1499   if (CheckJNICalls) {
1500     // clear_pending_jni_exception_check
1501     __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
1502   }
1503 
1504   // reset handle block
1505   __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
1506   __ str(zr, Address(t, JNIHandleBlock::top_offset()));
1507 
1508   // If result is an oop unbox and store it in frame where gc will see it
1509   // and result handler will pick it up
1510 
1511   {
1512     Label no_oop;
1513     __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1514     __ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
1515     __ cmp(t, result_handler);
1516     __ br(Assembler::NE, no_oop);
1517     // Unbox oop result, e.g. JNIHandles::resolve result.
1518     __ pop(ltos);
1519     __ resolve_jobject(r0, t, rscratch2);
1520     __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
1521     // keep stack depth as expected by pushing oop which will eventually be discarded
1522     __ push(ltos);
1523     __ bind(no_oop);
1524   }
1525 
1526   {
1527     Label no_reguard;
1528     __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
1529     __ ldrw(rscratch1, Address(rscratch1));
1530     __ cmp(rscratch1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
1531     __ br(Assembler::NE, no_reguard);
1532 
1533     __ push_call_clobbered_registers();
1534     __ mov(c_rarg0, rthread);
< prev index next >