< prev index next >

src/hotspot/cpu/s390/sharedRuntime_s390.cpp

Print this page

1305     vmIntrinsics::ID iid = method->intrinsic_id();
1306     intptr_t start = (intptr_t) __ pc();
1307     int vep_offset = ((intptr_t) __ pc()) - start;
1308 
1309     gen_special_dispatch(masm, total_in_args,
1310                          method->intrinsic_id(), in_sig_bt, in_regs);
1311 
1312     int frame_complete = ((intptr_t)__ pc()) - start; // Not complete, period.
1313 
1314     __ flush();
1315 
1316     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // No out slots at all, actually.
1317 
1318     return nmethod::new_native_nmethod(method,
1319                                        compile_id,
1320                                        masm->code(),
1321                                        vep_offset,
1322                                        frame_complete,
1323                                        stack_slots / VMRegImpl::slots_per_word,
1324                                        in_ByteSize(-1),
1325                                        in_ByteSize(-1),
1326                                        (OopMapSet *) NULL);
1327   }
1328 
1329 
1330   ///////////////////////////////////////////////////////////////////////
1331   //
1332   //  Precalculations before generating any code
1333   //
1334   ///////////////////////////////////////////////////////////////////////
1335 
1336   address native_func = method->native_function();
1337   assert(native_func != NULL, "must have function");
1338 
1339   //---------------------------------------------------------------------
1340   // We have received a description of where all the java args are located
1341   // on entry to the wrapper. We need to convert these args to where
1342   // the jni function will expect them. To figure out where they go
1343   // we convert the java signature to a C signature by inserting
1344   // the hidden arguments as arg[0] and possibly arg[1] (static method).
1345   //

1412   // - 7) filler slots for alignment
1413   //---------------------------------------------------------------------
1414   // Here is what the space we have allocated will look like.
1415   // Since we use resize_frame, we do not create a new stack frame,
1416   // but just extend the one we got with our own data area.
1417   //
1418   // If an offset or pointer name points to a separator line, it is
1419   // assumed that addressing with offset 0 selects storage starting
1420   // at the first byte above the separator line.
1421   //
1422   //
1423   //     ...                   ...
1424   //      | caller's frame      |
1425   // FP-> |---------------------|
1426   //      | filler slots, if any|
1427   //     7| #slots == mult of 2 |
1428   //      |---------------------|
1429   //      | work space          |
1430   //     6| 2 slots = 8 bytes   |
1431   //      |---------------------|
1432   //     5| lock box (if sync)  |
1433   //      |---------------------| <- lock_slot_offset
1434   //     4| klass (if static)   |
1435   //      |---------------------| <- klass_slot_offset
1436   //     3| oopHandle area      |
1437   //      |                     |
1438   //      |                     |
1439   //      |---------------------| <- oop_handle_offset
1440   //     2| outbound memory     |
1441   //     ...                   ...
1442   //      | based arguments     |
1443   //      |---------------------|
1444   //      | vararg              |
1445   //     ...                   ...
1446   //      | area                |
1447   //      |---------------------| <- out_arg_slot_offset
1448   //     1| out_preserved_slots |
1449   //     ...                   ...
1450   //      | (z_abi spec)        |
1451   // SP-> |---------------------| <- FP_slot_offset (back chain)
1452   //     ...                   ...
1453   //

1456   // *_slot_offset indicates offset from SP in #stack slots
1457   // *_offset      indicates offset from SP in #bytes
1458 
1459   int stack_slots = c_calling_convention(out_sig_bt, out_regs, /*regs2=*/NULL, total_c_args) + // 1+2
1460                     SharedRuntime::out_preserve_stack_slots(); // see c_calling_convention
1461 
1462   // Now the space for the inbound oop handle area.
1463   int total_save_slots = RegisterImpl::number_of_arg_registers * VMRegImpl::slots_per_word;
1464 
1465   int oop_handle_slot_offset = stack_slots;
1466   stack_slots += total_save_slots;                                        // 3)
1467 
1468   int klass_slot_offset = 0;
1469   int klass_offset      = -1;
1470   if (method_is_static) {                                                 // 4)
1471     klass_slot_offset  = stack_slots;
1472     klass_offset       = klass_slot_offset * VMRegImpl::stack_slot_size;
1473     stack_slots       += VMRegImpl::slots_per_word;
1474   }
1475 
1476   int lock_slot_offset = 0;
1477   int lock_offset      = -1;
1478   if (method->is_synchronized()) {                                        // 5)
1479     lock_slot_offset   = stack_slots;
1480     lock_offset        = lock_slot_offset * VMRegImpl::stack_slot_size;
1481     stack_slots       += VMRegImpl::slots_per_word;
1482   }
1483 
1484   int workspace_slot_offset= stack_slots;                                 // 6)
1485   stack_slots         += 2;
1486 
1487   // Now compute actual number of stack words we need.
1488   // Round to align stack properly.
1489   stack_slots = align_up(stack_slots,                                     // 7)
1490                          frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
1491   int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
1492 
1493 
1494   ///////////////////////////////////////////////////////////////////////
1495   // Now we can start generating code
1496   ///////////////////////////////////////////////////////////////////////
1497 
1498   unsigned int wrapper_CodeStart  = __ offset();
1499   unsigned int wrapper_UEPStart;
1500   unsigned int wrapper_VEPStart;
1501   unsigned int wrapper_FrameDone;
1502   unsigned int wrapper_CRegsSet;
1503   Label     handle_pending_exception;

1692 
1693   // We use the same pc/oopMap repeatedly when we call out.
1694   oop_maps->add_gc_map((int)(wrapper_CRegsSet-wrapper_CodeStart), map);
1695 
1696   // Lock a synchronized method.
1697 
1698   if (method->is_synchronized()) {
1699 
1700     // ATTENTION: args and Z_R10 must be preserved.
1701     Register r_oop  = Z_R11;
1702     Register r_box  = Z_R12;
1703     Register r_tmp1 = Z_R13;
1704     Register r_tmp2 = Z_R7;
1705     Label done;
1706 
1707     // Load the oop for the object or class. R_carg2_classorobject contains
1708     // either the handlized oop from the incoming arguments or the handlized
1709     // class mirror (if the method is static).
1710     __ z_lg(r_oop, 0, Z_ARG2);
1711 
1712     lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
1713     // Get the lock box slot's address.
1714     __ add2reg(r_box, lock_offset, Z_SP);
1715 
1716     // Try fastpath for locking.
1717     // Fast_lock kills r_temp_1, r_temp_2. (Don't use R1 as temp, won't work!)
1718     __ compiler_fast_lock_object(r_oop, r_box, r_tmp1, r_tmp2);
1719     __ z_bre(done);
1720 
1721     //-------------------------------------------------------------------------
1722     // None of the above fast optimizations worked so we have to get into the
1723     // slow case of monitor enter. Inline a special case of call_VM that
1724     // disallows any pending_exception.
1725     //-------------------------------------------------------------------------
1726 
1727     Register oldSP = Z_R11;
1728 
1729     __ z_lgr(oldSP, Z_SP);
1730 
1731     RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers);
1732 
1733     // Prepare arguments for call.
1734     __ z_lg(Z_ARG1, 0, Z_ARG2); // Unboxed class mirror or unboxed object.
1735     __ add2reg(Z_ARG2, lock_offset, oldSP);
1736     __ z_lgr(Z_ARG3, Z_thread);
1737 
1738     __ set_last_Java_frame(oldSP, Z_R10 /* gc map pc */);
1739 
1740     // Do the call.
1741     __ load_const_optimized(Z_R1_scratch, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C));
1742     __ call(Z_R1_scratch);
1743 
1744     __ reset_last_Java_frame();
1745 
1746     RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
1747 #ifdef ASSERT
1748     { Label L;
1749       __ load_and_test_long(Z_R0, Address(Z_thread, Thread::pending_exception_offset()));
1750       __ z_bre(L);
1751       __ stop("no pending exception allowed on exit from IR::monitorenter");
1752       __ bind(L);
1753     }
1754 #endif
1755     __ bind(done);

1890   // No pending exceptions for now.
1891   //--------------------------------------------------------------------
1892   // Handle possibly pending exception (will unlock if necessary).
1893   // Native result is, if any is live, in Z_FRES or Z_RES.
1894   //--------------------------------------------------------------------
1895   // Unlock
1896   //--------------------------------------------------------------------
1897   if (method->is_synchronized()) {
1898     const Register r_oop        = Z_R11;
1899     const Register r_box        = Z_R12;
1900     const Register r_tmp1       = Z_R13;
1901     const Register r_tmp2       = Z_R7;
1902     Label done;
1903 
1904     // Get unboxed oop of class mirror or object ...
1905     int   offset = method_is_static ? klass_offset : receiver_offset;
1906 
1907     assert(offset != -1, "");
1908     __ z_lg(r_oop, offset, Z_SP);
1909 
1910     // ... and address of lock object box.
1911     __ add2reg(r_box, lock_offset, Z_SP);
1912 
1913     // Try fastpath for unlocking.
1914     __ compiler_fast_unlock_object(r_oop, r_box, r_tmp1, r_tmp2); // Don't use R1 as temp.
1915     __ z_bre(done);
1916 
1917     // Slow path for unlocking.
1918     // Save and restore any potential method result value around the unlocking operation.
1919     const Register R_exc = Z_R11;
1920 
1921     save_native_result(masm, ret_type, workspace_slot_offset);
1922 
1923     // Must save pending exception around the slow-path VM call. Since it's a
1924     // leaf call, the pending exception (if any) can be kept in a register.
1925     __ z_lg(R_exc, Address(Z_thread, Thread::pending_exception_offset()));
1926     assert(R_exc->is_nonvolatile(), "exception register must be non-volatile");
1927 
1928     // Must clear pending-exception before re-entering the VM. Since this is
1929     // a leaf call, pending-exception-oop can be safely kept in a register.
1930     __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), sizeof(intptr_t));
1931 
1932     // Inline a special case of call_VM that disallows any pending_exception.
1933 
1934     // Get locked oop from the handle we passed to jni.
1935     __ z_lg(Z_ARG1, offset, Z_SP);
1936     __ add2reg(Z_ARG2, lock_offset, Z_SP);
1937     __ z_lgr(Z_ARG3, Z_thread);
1938 
1939     __ load_const_optimized(Z_R1_scratch, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1940 
1941     __ call(Z_R1_scratch);
1942 
1943 #ifdef ASSERT
1944     {
1945       Label L;
1946       __ load_and_test_long(Z_R0, Address(Z_thread, Thread::pending_exception_offset()));
1947       __ z_bre(L);
1948       __ stop("no pending exception allowed on exit from IR::monitorexit");
1949       __ bind(L);
1950     }
1951 #endif
1952 
1953     // Check_forward_pending_exception jumps to forward_exception if any pending
1954     // exception is set. The forward_exception routine expects to see the
1955     // exception in pending_exception and not in a register. Kind of clumsy,
1956     // since all folks who branch to forward_exception must have tested
1957     // pending_exception first and hence have it in a register already.

2024 
2025   //---------------------------------------------------------------------
2026   // Handler for a cache miss (out-of-line)
2027   //---------------------------------------------------------------------
2028   __ call_ic_miss_handler(ic_miss, 0x77, 0, Z_R1_scratch);
2029   __ flush();
2030 
2031 
2032   //////////////////////////////////////////////////////////////////////
2033   // end of code generation
2034   //////////////////////////////////////////////////////////////////////
2035 
2036 
2037   nmethod *nm = nmethod::new_native_nmethod(method,
2038                                             compile_id,
2039                                             masm->code(),
2040                                             (int)(wrapper_VEPStart-wrapper_CodeStart),
2041                                             (int)(wrapper_FrameDone-wrapper_CodeStart),
2042                                             stack_slots / VMRegImpl::slots_per_word,
2043                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2044                                             in_ByteSize(lock_offset),
2045                                             oop_maps);
2046 
2047   return nm;
2048 }
2049 
2050 static address gen_c2i_adapter(MacroAssembler  *masm,
2051                                int total_args_passed,
2052                                int comp_args_on_stack,
2053                                const BasicType *sig_bt,
2054                                const VMRegPair *regs,
2055                                Label &skip_fixup) {
2056   // Before we get into the guts of the C2I adapter, see if we should be here
2057   // at all. We've come from compiled code and are attempting to jump to the
2058   // interpreter, which means the caller made a static call to get here
2059   // (vcalls always get a compiled target if there is one). Check for a
2060   // compiled target. If there is one, we need to patch the caller's call.
2061 
2062   // These two defs MUST MATCH code in gen_i2c2i_adapter!
2063   const Register ientry = Z_R11;
2064   const Register code   = Z_R11;

1305     vmIntrinsics::ID iid = method->intrinsic_id();
1306     intptr_t start = (intptr_t) __ pc();
1307     int vep_offset = ((intptr_t) __ pc()) - start;
1308 
1309     gen_special_dispatch(masm, total_in_args,
1310                          method->intrinsic_id(), in_sig_bt, in_regs);
1311 
1312     int frame_complete = ((intptr_t)__ pc()) - start; // Not complete, period.
1313 
1314     __ flush();
1315 
1316     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // No out slots at all, actually.
1317 
1318     return nmethod::new_native_nmethod(method,
1319                                        compile_id,
1320                                        masm->code(),
1321                                        vep_offset,
1322                                        frame_complete,
1323                                        stack_slots / VMRegImpl::slots_per_word,
1324                                        in_ByteSize(-1),

1325                                        (OopMapSet *) NULL);
1326   }
1327 
1328 
1329   ///////////////////////////////////////////////////////////////////////
1330   //
1331   //  Precalculations before generating any code
1332   //
1333   ///////////////////////////////////////////////////////////////////////
1334 
1335   address native_func = method->native_function();
1336   assert(native_func != NULL, "must have function");
1337 
1338   //---------------------------------------------------------------------
1339   // We have received a description of where all the java args are located
1340   // on entry to the wrapper. We need to convert these args to where
1341   // the jni function will expect them. To figure out where they go
1342   // we convert the java signature to a C signature by inserting
1343   // the hidden arguments as arg[0] and possibly arg[1] (static method).
1344   //

1411   // - 7) filler slots for alignment
1412   //---------------------------------------------------------------------
1413   // Here is what the space we have allocated will look like.
1414   // Since we use resize_frame, we do not create a new stack frame,
1415   // but just extend the one we got with our own data area.
1416   //
1417   // If an offset or pointer name points to a separator line, it is
1418   // assumed that addressing with offset 0 selects storage starting
1419   // at the first byte above the separator line.
1420   //
1421   //
1422   //     ...                   ...
1423   //      | caller's frame      |
1424   // FP-> |---------------------|
1425   //      | filler slots, if any|
1426   //     7| #slots == mult of 2 |
1427   //      |---------------------|
1428   //      | work space          |
1429   //     6| 2 slots = 8 bytes   |
1430   //      |---------------------|


1431   //     4| klass (if static)   |
1432   //      |---------------------| <- klass_slot_offset
1433   //     3| oopHandle area      |
1434   //      |                     |
1435   //      |                     |
1436   //      |---------------------| <- oop_handle_offset
1437   //     2| outbound memory     |
1438   //     ...                   ...
1439   //      | based arguments     |
1440   //      |---------------------|
1441   //      | vararg              |
1442   //     ...                   ...
1443   //      | area                |
1444   //      |---------------------| <- out_arg_slot_offset
1445   //     1| out_preserved_slots |
1446   //     ...                   ...
1447   //      | (z_abi spec)        |
1448   // SP-> |---------------------| <- FP_slot_offset (back chain)
1449   //     ...                   ...
1450   //

1453   // *_slot_offset indicates offset from SP in #stack slots
1454   // *_offset      indicates offset from SP in #bytes
1455 
1456   int stack_slots = c_calling_convention(out_sig_bt, out_regs, /*regs2=*/NULL, total_c_args) + // 1+2
1457                     SharedRuntime::out_preserve_stack_slots(); // see c_calling_convention
1458 
1459   // Now the space for the inbound oop handle area.
1460   int total_save_slots = RegisterImpl::number_of_arg_registers * VMRegImpl::slots_per_word;
1461 
1462   int oop_handle_slot_offset = stack_slots;
1463   stack_slots += total_save_slots;                                        // 3)
1464 
1465   int klass_slot_offset = 0;
1466   int klass_offset      = -1;
1467   if (method_is_static) {                                                 // 4)
1468     klass_slot_offset  = stack_slots;
1469     klass_offset       = klass_slot_offset * VMRegImpl::stack_slot_size;
1470     stack_slots       += VMRegImpl::slots_per_word;
1471   }
1472 








1473   int workspace_slot_offset= stack_slots;                                 // 6)
1474   stack_slots         += 2;
1475 
1476   // Now compute actual number of stack words we need.
1477   // Round to align stack properly.
1478   stack_slots = align_up(stack_slots,                                     // 7)
1479                          frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
1480   int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
1481 
1482 
1483   ///////////////////////////////////////////////////////////////////////
1484   // Now we can start generating code
1485   ///////////////////////////////////////////////////////////////////////
1486 
1487   unsigned int wrapper_CodeStart  = __ offset();
1488   unsigned int wrapper_UEPStart;
1489   unsigned int wrapper_VEPStart;
1490   unsigned int wrapper_FrameDone;
1491   unsigned int wrapper_CRegsSet;
1492   Label     handle_pending_exception;

1681 
1682   // We use the same pc/oopMap repeatedly when we call out.
1683   oop_maps->add_gc_map((int)(wrapper_CRegsSet-wrapper_CodeStart), map);
1684 
1685   // Lock a synchronized method.
1686 
1687   if (method->is_synchronized()) {
1688 
1689     // ATTENTION: args and Z_R10 must be preserved.
1690     Register r_oop  = Z_R11;
1691     Register r_box  = Z_R12;
1692     Register r_tmp1 = Z_R13;
1693     Register r_tmp2 = Z_R7;
1694     Label done;
1695 
1696     // Load the oop for the object or class. R_carg2_classorobject contains
1697     // either the handlized oop from the incoming arguments or the handlized
1698     // class mirror (if the method is static).
1699     __ z_lg(r_oop, 0, Z_ARG2);
1700 




1701     // Try fastpath for locking.
1702     // Fast_lock kills r_temp_1, r_temp_2. (Don't use R1 as temp, won't work!)
1703     __ compiler_fast_lock_object(r_oop, r_box, r_tmp1, r_tmp2);
1704     __ z_bre(done);
1705 
1706     //-------------------------------------------------------------------------
1707     // None of the above fast optimizations worked so we have to get into the
1708     // slow case of monitor enter. Inline a special case of call_VM that
1709     // disallows any pending_exception.
1710     //-------------------------------------------------------------------------
1711 
1712     Register oldSP = Z_R11;
1713 
1714     __ z_lgr(oldSP, Z_SP);
1715 
1716     RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers);
1717 
1718     // Prepare arguments for call.
1719     __ z_lg(Z_ARG1, 0, Z_ARG2); // Unboxed class mirror or unboxed object.

1720     __ z_lgr(Z_ARG3, Z_thread);
1721 
1722     __ set_last_Java_frame(oldSP, Z_R10 /* gc map pc */);
1723 
1724     // Do the call.
1725     __ load_const_optimized(Z_R1_scratch, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C));
1726     __ call(Z_R1_scratch);
1727 
1728     __ reset_last_Java_frame();
1729 
1730     RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
1731 #ifdef ASSERT
1732     { Label L;
1733       __ load_and_test_long(Z_R0, Address(Z_thread, Thread::pending_exception_offset()));
1734       __ z_bre(L);
1735       __ stop("no pending exception allowed on exit from IR::monitorenter");
1736       __ bind(L);
1737     }
1738 #endif
1739     __ bind(done);

1874   // No pending exceptions for now.
1875   //--------------------------------------------------------------------
1876   // Handle possibly pending exception (will unlock if necessary).
1877   // Native result is, if any is live, in Z_FRES or Z_RES.
1878   //--------------------------------------------------------------------
1879   // Unlock
1880   //--------------------------------------------------------------------
1881   if (method->is_synchronized()) {
1882     const Register r_oop        = Z_R11;
1883     const Register r_box        = Z_R12;
1884     const Register r_tmp1       = Z_R13;
1885     const Register r_tmp2       = Z_R7;
1886     Label done;
1887 
1888     // Get unboxed oop of class mirror or object ...
1889     int   offset = method_is_static ? klass_offset : receiver_offset;
1890 
1891     assert(offset != -1, "");
1892     __ z_lg(r_oop, offset, Z_SP);
1893 



1894     // Try fastpath for unlocking.
1895     __ compiler_fast_unlock_object(r_oop, r_box, r_tmp1, r_tmp2); // Don't use R1 as temp.
1896     __ z_bre(done);
1897 
1898     // Slow path for unlocking.
1899     // Save and restore any potential method result value around the unlocking operation.
1900     const Register R_exc = Z_R11;
1901 
1902     save_native_result(masm, ret_type, workspace_slot_offset);
1903 
1904     // Must save pending exception around the slow-path VM call. Since it's a
1905     // leaf call, the pending exception (if any) can be kept in a register.
1906     __ z_lg(R_exc, Address(Z_thread, Thread::pending_exception_offset()));
1907     assert(R_exc->is_nonvolatile(), "exception register must be non-volatile");
1908 
1909     // Must clear pending-exception before re-entering the VM. Since this is
1910     // a leaf call, pending-exception-oop can be safely kept in a register.
1911     __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), sizeof(intptr_t));
1912 
1913     // Inline a special case of call_VM that disallows any pending_exception.
1914 
1915     // Get locked oop from the handle we passed to jni.
1916     __ z_lg(Z_ARG1, offset, Z_SP);
1917     __ z_lgr(Z_ARG2, Z_thread);

1918 
1919     __ load_const_optimized(Z_R1_scratch, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C));
1920 
1921     __ call(Z_R1_scratch);
1922 
1923 #ifdef ASSERT
1924     {
1925       Label L;
1926       __ load_and_test_long(Z_R0, Address(Z_thread, Thread::pending_exception_offset()));
1927       __ z_bre(L);
1928       __ stop("no pending exception allowed on exit from IR::monitorexit");
1929       __ bind(L);
1930     }
1931 #endif
1932 
1933     // Check_forward_pending_exception jumps to forward_exception if any pending
1934     // exception is set. The forward_exception routine expects to see the
1935     // exception in pending_exception and not in a register. Kind of clumsy,
1936     // since all folks who branch to forward_exception must have tested
1937     // pending_exception first and hence have it in a register already.

2004 
2005   //---------------------------------------------------------------------
2006   // Handler for a cache miss (out-of-line)
2007   //---------------------------------------------------------------------
2008   __ call_ic_miss_handler(ic_miss, 0x77, 0, Z_R1_scratch);
2009   __ flush();
2010 
2011 
2012   //////////////////////////////////////////////////////////////////////
2013   // end of code generation
2014   //////////////////////////////////////////////////////////////////////
2015 
2016 
2017   nmethod *nm = nmethod::new_native_nmethod(method,
2018                                             compile_id,
2019                                             masm->code(),
2020                                             (int)(wrapper_VEPStart-wrapper_CodeStart),
2021                                             (int)(wrapper_FrameDone-wrapper_CodeStart),
2022                                             stack_slots / VMRegImpl::slots_per_word,
2023                                             (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),

2024                                             oop_maps);
2025 
2026   return nm;
2027 }
2028 
2029 static address gen_c2i_adapter(MacroAssembler  *masm,
2030                                int total_args_passed,
2031                                int comp_args_on_stack,
2032                                const BasicType *sig_bt,
2033                                const VMRegPair *regs,
2034                                Label &skip_fixup) {
2035   // Before we get into the guts of the C2I adapter, see if we should be here
2036   // at all. We've come from compiled code and are attempting to jump to the
2037   // interpreter, which means the caller made a static call to get here
2038   // (vcalls always get a compiled target if there is one). Check for a
2039   // compiled target. If there is one, we need to patch the caller's call.
2040 
2041   // These two defs MUST MATCH code in gen_i2c2i_adapter!
2042   const Register ientry = Z_R11;
2043   const Register code   = Z_R11;
< prev index next >