
src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp


1310                                                 VMRegPair* in_regs,
1311                                                 BasicType ret_type) {
1312   if (method->is_method_handle_intrinsic()) {
1313     vmIntrinsics::ID iid = method->intrinsic_id();
1314     intptr_t start = (intptr_t)__ pc();
1315     int vep_offset = ((intptr_t)__ pc()) - start;
1316     gen_special_dispatch(masm,
1317                          method,
1318                          in_sig_bt,
1319                          in_regs);
1320     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1321     __ flush();
1322     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1323     return nmethod::new_native_nmethod(method,
1324                                        compile_id,
1325                                        masm->code(),
1326                                        vep_offset,
1327                                        frame_complete,
1328                                        stack_slots / VMRegImpl::slots_per_word,
1329                                        in_ByteSize(-1),
1330                                        in_ByteSize(-1),
1331                                        (OopMapSet*)NULL);
1332   }
1333   address native_func = method->native_function();
1334   assert(native_func != NULL, "must have function");
1335 
1336   // An OopMap for lock (and class if static)
1337   OopMapSet *oop_maps = new OopMapSet();
1338 
1339   // We have received a description of where all the Java args are located
1340   // on entry to the wrapper. We need to convert these args to where
1341   // the JNI function will expect them. To figure out where they go
1342   // we convert the Java signature to a C signature by inserting
1343   // the hidden arguments as arg[0] and possibly arg[1] (static method).
1344 
1345   const int total_in_args = method->size_of_parameters();
1346   int  total_c_args       = total_in_args + (method->is_static() ? 2 : 1);
1347 
1348   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1349   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1350   BasicType* in_elem_bt = NULL;
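       // For illustration only (hypothetical names): the hidden arguments are
       // the JNIEnv* plus the jclass for a static method, or the receiver
       // jobject for an instance method. A static native `int f(int x)` is
       // therefore called as
       //   jint JNICALL Java_pkg_Cls_f(JNIEnv* env, jclass clazz, jint x);
       // which is why total_c_args is total_in_args + 2 for static methods
       // and + 1 otherwise.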

1365   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1366 
1367   // Compute framesize for the wrapper.  We need to handlize all oops in
1368   // registers (a max of 2 on x86).
1369 
1370   // Calculate the total number of stack slots we will need.
1371 
1372   // First count the ABI requirement plus all of the outgoing args
1373   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1374 
1375   // Now the space for the inbound oop handle area
1376   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1377 
1378   int oop_handle_offset = stack_slots;
1379   stack_slots += total_save_slots;
1380 
1381   // Now any space we need for handlizing a klass if this is a static method
1382 
1383   int klass_slot_offset = 0;
1384   int klass_offset = -1;
1385   int lock_slot_offset = 0;
1386   bool is_static = false;
1387 
1388   if (method->is_static()) {
1389     klass_slot_offset = stack_slots;
1390     stack_slots += VMRegImpl::slots_per_word;
1391     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1392     is_static = true;
1393   }
1394 
1395   // Plus a lock if needed
1396 
1397   if (method->is_synchronized()) {
1398     lock_slot_offset = stack_slots;
1399     stack_slots += VMRegImpl::slots_per_word;
1400   }
1401 
1402   // Now a place (+2) to save return values or temps during shuffling,
1403   // + 2 for the return address (which we own) and the saved rbp
1404   stack_slots += 4;
1405 
1406   // OK, the space we have allocated will look like:
1407   //
1408   //
1409   // FP-> |                     |
1410   //      |---------------------|
1411   //      | 2 slots for moves   |
1412   //      |---------------------|
1413   //      | lock box (if sync)  |
1414   //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
1415   //      | klass (if static)   |
1416   //      |---------------------| <- klass_slot_offset
1417   //      | oopHandle area      |
1418   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1419   //      | outbound memory     |
1420   //      | based arguments     |
1421   //      |                     |
1422   //      |---------------------|
1423   //      |                     |
1424   // SP-> | out_preserved_slots |
1425   //
1426   //
1427   // ****************************************************************************
1428   // WARNING - on Windows, Java natives use a callee-pops (stdcall/pascal)
1429   // calling convention and pop the arguments off of the stack after the JNI
1430   // call. Before the call we can use SP-relative instructions; after the JNI
1431   // call we switch to FP-relative instructions instead of re-adjusting the stack.
1432   // ****************************************************************************
1433 
1434 
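       // A rough worked example (assuming out_preserve_stack_slots() is 0 here
       // and VMRegImpl::slots_per_word == 1 in 32-bit mode): a static
       // synchronized native taking one jint has total_c_args == 3, so
       // out_arg_slots == 3; adding the 2-slot oop handle area, 1 klass slot,
       // 1 lock slot and the 4 fixed slots gives stack_slots == 11, before the
       // stack-alignment round-up performed later.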

1500     __ xabort(0);
1501   }
1502 
1503   // Calculate the difference between rsp and rbp. We need to know it
1504   // after the native call because on Windows Java natives will pop
1505   // the arguments and it is painful to do rsp-relative addressing
1506   // in a platform independent way. So after the call we switch to
1507   // rbp-relative addressing.
1508 
1509   int fp_adjustment = stack_size - 2*wordSize;
1510 
1511 #ifdef COMPILER2
1512   // C2 may leave the stack dirty if not in SSE2+ mode
1513   if (UseSSE >= 2) {
1514     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1515   } else {
1516     __ empty_FPU_stack();
1517   }
1518 #endif /* COMPILER2 */
1519 
1520   // Compute the rbp, offset for any slots used after the jni call
1521 
1522   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
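       // Put differently: once the prolog has run, rbp == rsp + fp_adjustment,
       // so Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size) and
       // Address(rbp, lock_slot_rbp_offset) name the same slot - and the rbp
       // form keeps working after a Windows native pops the outgoing args.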
1523 
1524   // We use rdi as the thread pointer because it is callee-saved and,
1525   // once loaded, is usable through the entire wrapper
1526   const Register thread = rdi;
1527 
1528   // We use rsi as the oop handle for the receiver/klass.
1529   // It is callee-saved, so it survives the call to native.
1530 
1531   const Register oop_handle_reg = rsi;
1532 
1533   __ get_thread(thread);
1534 
1535   //
1536   // We immediately shuffle the arguments so that, for any VM call we
1537   // have to make from here on out (sync slow path, jvmti, etc.), we will
1538   // have captured the oops from our caller and have a valid oopMap for
1539   // them.
1540 
1541   // -----------------
1542   // The Grand Shuffle
1543   //

1638 
1639   {
1640     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1641     __ mov_metadata(rax, method());
1642     __ call_VM_leaf(
1643          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1644          thread, rax);
1645   }
1646 
1647   // RedefineClasses() tracing support for obsolete method entry
1648   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1649     __ mov_metadata(rax, method());
1650     __ call_VM_leaf(
1651          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1652          thread, rax);
1653   }
1654 
1655   // These are register definitions we need for locking/unlocking
1656   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
1657   const Register obj_reg  = rcx;  // Will contain the oop
1658   const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1659 
1660   Label slow_path_lock;
1661   Label lock_done;
1662 
1663   // Lock a synchronized method
1664   if (method->is_synchronized()) {
1665     Label count_mon;
1666 
1667     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1668 
1669     // Get the handle (the 2nd argument)
1670     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1671 
1672     // Get address of the box
1673 
1674     __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1675 
1676     // Load the oop from the handle
1677     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1678 
1679     if (!UseHeavyMonitors) {
1680       // Load immediate 1 into swap_reg %rax
1681       __ movptr(swap_reg, 1);
1682 
1683       // Load (object->mark() | 1) into swap_reg %rax
1684       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1685 
1686       // Save (object->mark() | 1) into BasicLock's displaced header
1687       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1688 
1689       // src -> dest iff dest == rax; else rax <- dest
1690       // i.e. *obj_reg = lock_reg iff *obj_reg == rax, else rax = *obj_reg
1691       __ lock();
1692       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1693       __ jcc(Assembler::equal, count_mon);
1694 
1695       // Test if the oopMark is an obvious stack pointer, i.e.,
1696       //  1) (mark & 3) == 0, and
1697       //  2) rsp <= mark < rsp + os::vm_page_size()
1698       // These three tests can be done by evaluating the following
1699       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1700       // assuming both the stack pointer and the page size have their
1701       // least significant 2 bits clear.
1702       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
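           // For illustration (assuming a 4096-byte page): 3 - 4096 == -4093,
           // i.e. the 32-bit mask 0xfffff003, so the andptr below keeps bits
           // 0-1 and 12-31 of (mark - rsp). The result is zero exactly when
           // the mark is 4-byte aligned and 0 <= mark - rsp < 4096 - the
           // recursive stack-lock case.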
1703 
1704       __ subptr(swap_reg, rsp);
1705       __ andptr(swap_reg, 3 - os::vm_page_size());
1706 
1707       // Save the test result; for the recursive case, the result is zero
1708       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1709       __ jcc(Assembler::notEqual, slow_path_lock);
1710     } else {
1711       __ jmp(slow_path_lock);
1712     }
1713     __ bind(count_mon);
1714     __ inc_held_monitor_count();
1715 
1716     // Slow path will re-enter here
1717     __ bind(lock_done);
1718   }
1719 
1720 
1721   // Finally just about ready to make the JNI call
1722 
1723   // get JNIEnv* which is first argument to native
1724   __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1725   __ movptr(Address(rsp, 0), rdx);
1726 
1727   // Now set thread in native
1728   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1729 
1730   __ call(RuntimeAddress(native_func));
1731 
1732   // Verify or restore cpu control state after JNI call
1733   __ restore_cpu_control_state_after_jni(noreg);

1813   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
1814   __ jcc(Assembler::equal, reguard);
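       // (If the native code ran into the yellow zone, the guard page was
       // disabled and must be re-armed before returning to Java; that happens
       // out of line at the reguard label.)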
1815 
1816   // slow path reguard  re-enters here
1817   __ bind(reguard_done);
1818 
1819   // Handle possible exception (will unlock if necessary)
1820 
1821   // The native result, if any, is live
1822 
1823   // Unlock
1824   Label slow_path_unlock;
1825   Label unlock_done;
1826   if (method->is_synchronized()) {
1827 
1828     Label fast_done;
1829 
1830     // Get locked oop from the handle we passed to jni
1831     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1832 
1833     if (!UseHeavyMonitors) {
1834       Label not_recur;
1835       // Simple recursive lock?
1836       __ cmpptr(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
1837       __ jcc(Assembler::notEqual, not_recur);
1838       __ dec_held_monitor_count();
1839       __ jmpb(fast_done);
1840       __ bind(not_recur);
1841     }
1842 
1843     // Must save rax if it is live now because cmpxchg must use it
1844     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1845       save_native_result(masm, ret_type, stack_slots);
1846     }
1847 
1848     if (!UseHeavyMonitors) {
1849       //  get old displaced header
1850       __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
1851 
1852       // get address of the stack lock
1853       __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1854 
1855       // Atomic swap old header if oop still contains the stack lock
1856       // src -> dest iff dest == rax; else rax <- dest
1857       // *obj_reg = rbx iff *obj_reg == rax, else rax = *obj_reg
1858       __ lock();
1859       __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1860       __ jcc(Assembler::notEqual, slow_path_unlock);
1861       __ dec_held_monitor_count();
1862     } else {
1863       __ jmp(slow_path_unlock);
1864     }
1865 
1866     // slow path re-enters here
1867     __ bind(unlock_done);
1868     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1869       restore_native_result(masm, ret_type, stack_slots);
1870     }
1871 
1872     __ bind(fast_done);
1873   }
1874 
1875   {
1876     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1877     // Tell dtrace about this method exit
1878     save_native_result(masm, ret_type, stack_slots);
1879     __ mov_metadata(rax, method());
1880     __ call_VM_leaf(

1927     }
1928   }
1929 
1930   // Return
1931 
1932   __ leave();
1933   __ ret(0);
1934 
1935   // Unexpected paths are out of line and go here
1936 
1937   // Slow path locking & unlocking
1938   if (method->is_synchronized()) {
1939 
1940     // BEGIN Slow path lock
1941 
1942     __ bind(slow_path_lock);
1943 
1944     // last_Java_frame is set up; no exceptions, so do a vanilla call, not call_VM
1945     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1946     __ push(thread);
1947     __ push(lock_reg);
1948     __ push(obj_reg);
1949     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1950     __ addptr(rsp, 3*wordSize);
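         // Note the push order above: thread, lock, obj. cdecl arguments are
         // pushed right-to-left, so obj_reg lands at the lowest address and is
         // the first C argument; the addptr then pops all three words again.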
1951 
1952 #ifdef ASSERT
1953     { Label L;
1954     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1955     __ jcc(Assembler::equal, L);
1956     __ stop("no pending exception allowed on exit from monitorenter");
1957     __ bind(L);
1958     }
1959 #endif
1960     __ jmp(lock_done);
1961 
1962     // END Slow path lock
1963 
1964     // BEGIN Slow path unlock
1965     __ bind(slow_path_unlock);
1966     __ vzeroupper();
1967     // Slow path unlock
1968 
1969     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1970       save_native_result(masm, ret_type, stack_slots);
1971     }
1972     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1973 
1974     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1975     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1976 
1977 
1978     // should be a peal
1979     // +wordSize because of the push above
1980     // args are (oop obj, BasicLock* lock, JavaThread* thread)
1981     __ push(thread);
1982     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1983     __ push(rax);
1984 
1985     __ push(obj_reg);
1986     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
1987     __ addptr(rsp, 3*wordSize);
1988 #ifdef ASSERT
1989     {
1990       Label L;
1991       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1992       __ jcc(Assembler::equal, L);
1993       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1994       __ bind(L);
1995     }
1996 #endif /* ASSERT */
1997 
1998     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1999 
2000     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2001       restore_native_result(masm, ret_type, stack_slots);
2002     }
2003     __ jmp(unlock_done);
2004     // END Slow path unlock
2005 
2006   }
2007 

2022   // Forward the exception
2023   __ bind(exception_pending);
2024 
2025   // remove possible return value from FPU register stack
2026   __ empty_FPU_stack();
2027 
2028   // pop our frame
2029   __ leave();
2030   // and forward the exception
2031   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2032 
2033   __ flush();
2034 
2035   nmethod *nm = nmethod::new_native_nmethod(method,
2036                                             compile_id,
2037                                             masm->code(),
2038                                             vep_offset,
2039                                             frame_complete,
2040                                             stack_slots / VMRegImpl::slots_per_word,
2041                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2042                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2043                                             oop_maps);
2044 
2045   return nm;
2046 
2047 }
2048 
2049 // This function returns the adjustment size (in words) to a c2i adapter
2050 // activation, for use during deoptimization
2051 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2052   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2053 }
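// A quick instance of the formula: a callee with 2 parameters and 5 locals
// needs (5 - 2) * Interpreter::stackElementWords == 3 extra words, taking
// stackElementWords as 1 on this platform.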
2054 
2055 
2056 // Number of stack slots between incoming argument block and the start of
2057 // a new frame.  The PROLOG must add this many slots to the stack.  The
2058 // EPILOG must remove this many slots.  Intel needs one slot for the
2059 // return address and one for rbp (rbp must be saved)
2060 uint SharedRuntime::in_preserve_stack_slots() {
2061   return 2+VerifyStackAtCalls;
2062 }

1310                                                 VMRegPair* in_regs,
1311                                                 BasicType ret_type) {
1312   if (method->is_method_handle_intrinsic()) {
1313     vmIntrinsics::ID iid = method->intrinsic_id();
1314     intptr_t start = (intptr_t)__ pc();
1315     int vep_offset = ((intptr_t)__ pc()) - start;
1316     gen_special_dispatch(masm,
1317                          method,
1318                          in_sig_bt,
1319                          in_regs);
1320     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1321     __ flush();
1322     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1323     return nmethod::new_native_nmethod(method,
1324                                        compile_id,
1325                                        masm->code(),
1326                                        vep_offset,
1327                                        frame_complete,
1328                                        stack_slots / VMRegImpl::slots_per_word,
1329                                        in_ByteSize(-1),

1330                                        (OopMapSet*)NULL);
1331   }
1332   address native_func = method->native_function();
1333   assert(native_func != NULL, "must have function");
1334 
1335   // An OopMap for lock (and class if static)
1336   OopMapSet *oop_maps = new OopMapSet();
1337 
1338   // We have received a description of where all the Java args are located
1339   // on entry to the wrapper. We need to convert these args to where
1340   // the JNI function will expect them. To figure out where they go
1341   // we convert the Java signature to a C signature by inserting
1342   // the hidden arguments as arg[0] and possibly arg[1] (static method).
1343 
1344   const int total_in_args = method->size_of_parameters();
1345   int  total_c_args       = total_in_args + (method->is_static() ? 2 : 1);
1346 
1347   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1348   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1349   BasicType* in_elem_bt = NULL;

1364   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1365 
1366   // Compute framesize for the wrapper.  We need to handlize all oops in
1367   // registers (a max of 2 on x86).
1368 
1369   // Calculate the total number of stack slots we will need.
1370 
1371   // First count the ABI requirement plus all of the outgoing args
1372   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1373 
1374   // Now the space for the inbound oop handle area
1375   int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
1376 
1377   int oop_handle_offset = stack_slots;
1378   stack_slots += total_save_slots;
1379 
1380   // Now any space we need for handlizing a klass if this is a static method
1381 
1382   int klass_slot_offset = 0;
1383   int klass_offset = -1;

1384   bool is_static = false;
1385 
1386   if (method->is_static()) {
1387     klass_slot_offset = stack_slots;
1388     stack_slots += VMRegImpl::slots_per_word;
1389     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1390     is_static = true;
1391   }
1392 
1393   // Plus a lock if needed
1394 
1395   if (method->is_synchronized()) {

1396     stack_slots += VMRegImpl::slots_per_word;
1397   }
1398 
1399   // Now a place (+2) to save return values or temps during shuffling,
1400   // + 2 for the return address (which we own) and the saved rbp
1401   stack_slots += 4;
1402 
1403   // OK, the space we have allocated will look like:
1404   //
1405   //
1406   // FP-> |                     |
1407   //      |---------------------|
1408   //      | 2 slots for moves   |
1409   //      |---------------------|


1410   //      | klass (if static)   |
1411   //      |---------------------| <- klass_slot_offset
1412   //      | oopHandle area      |
1413   //      |---------------------| <- oop_handle_offset (a max of 2 registers)
1414   //      | outbound memory     |
1415   //      | based arguments     |
1416   //      |                     |
1417   //      |---------------------|
1418   //      |                     |
1419   // SP-> | out_preserved_slots |
1420   //
1421   //
1422   // ****************************************************************************
1423   // WARNING - on Windows, Java natives use a callee-pops (stdcall/pascal)
1424   // calling convention and pop the arguments off of the stack after the JNI
1425   // call. Before the call we can use SP-relative instructions; after the JNI
1426   // call we switch to FP-relative instructions instead of re-adjusting the stack.
1427   // ****************************************************************************
1428 
1429 

1495     __ xabort(0);
1496   }
1497 
1498   // Calculate the difference between rsp and rbp. We need to know it
1499   // after the native call because on Windows Java natives will pop
1500   // the arguments and it is painful to do rsp-relative addressing
1501   // in a platform independent way. So after the call we switch to
1502   // rbp-relative addressing.
1503 
1504   int fp_adjustment = stack_size - 2*wordSize;
1505 
1506 #ifdef COMPILER2
1507   // C2 may leave the stack dirty if not in SSE2+ mode
1508   if (UseSSE >= 2) {
1509     __ verify_FPU(0, "c2i transition should have clean FPU stack");
1510   } else {
1511     __ empty_FPU_stack();
1512   }
1513 #endif /* COMPILER2 */
1514 




1515   // We use rdi as the thread pointer because it is callee-saved and,
1516   // once loaded, is usable through the entire wrapper
1517   const Register thread = rdi;
1518 
1519   // We use rsi as the oop handle for the receiver/klass.
1520   // It is callee-saved, so it survives the call to native.
1521 
1522   const Register oop_handle_reg = rsi;
1523 
1524   __ get_thread(thread);
1525 
1526   //
1527   // We immediately shuffle the arguments so that, for any VM call we
1528   // have to make from here on out (sync slow path, jvmti, etc.), we will
1529   // have captured the oops from our caller and have a valid oopMap for
1530   // them.
1531 
1532   // -----------------
1533   // The Grand Shuffle
1534   //

1629 
1630   {
1631     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1632     __ mov_metadata(rax, method());
1633     __ call_VM_leaf(
1634          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1635          thread, rax);
1636   }
1637 
1638   // RedefineClasses() tracing support for obsolete method entry
1639   if (log_is_enabled(Trace, redefine, class, obsolete)) {
1640     __ mov_metadata(rax, method());
1641     __ call_VM_leaf(
1642          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1643          thread, rax);
1644   }
1645 
1646   // These are register definitions we need for locking/unlocking
1647   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
1648   const Register obj_reg  = rcx;  // Will contain the oop
1649   const Register tmp      = rdx;
1650 
1651   Label slow_path_lock;
1652   Label lock_done;
1653 
1654   // Lock a synchronized method
1655   if (method->is_synchronized()) {




1656     // Get the handle (the 2nd argument)
1657     __ movptr(oop_handle_reg, Address(rsp, wordSize));
1658 




1659     // Load the oop from the handle
1660     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1661 
1662     if (!UseHeavyMonitors) {
1663       // Load object header
1664       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1665       __ fast_lock_impl(obj_reg, swap_reg, thread, tmp, noreg, slow_path_lock);
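           // A hedged reading based on the operands visible here:
           // fast_lock_impl attempts the lightweight-lock fast path on
           // obj_reg using the mark word just loaded into swap_reg, with tmp
           // as scratch, and branches to slow_path_lock whenever the fast
           // path cannot acquire the lock.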



























1666     } else {
1667       __ jmp(slow_path_lock);
1668     }

1669     __ inc_held_monitor_count();
1670 
1671     // Slow path will re-enter here
1672     __ bind(lock_done);
1673   }
1674 
1675 
1676   // Finally just about ready to make the JNI call
1677 
1678   // get JNIEnv* which is first argument to native
1679   __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1680   __ movptr(Address(rsp, 0), rdx);
1681 
1682   // Now set thread in native
1683   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1684 
1685   __ call(RuntimeAddress(native_func));
1686 
1687   // Verify or restore cpu control state after JNI call
1688   __ restore_cpu_control_state_after_jni(noreg);

1768   __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
1769   __ jcc(Assembler::equal, reguard);
1770 
1771   // slow path reguard  re-enters here
1772   __ bind(reguard_done);
1773 
1774   // Handle possible exception (will unlock if necessary)
1775 
1776   // The native result, if any, is live
1777 
1778   // Unlock
1779   Label slow_path_unlock;
1780   Label unlock_done;
1781   if (method->is_synchronized()) {
1782 
1783     Label fast_done;
1784 
1785     // Get locked oop from the handle we passed to jni
1786     __ movptr(obj_reg, Address(oop_handle_reg, 0));
1787 










1788     // Must save rax if it is live now because cmpxchg must use it
1789     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1790       save_native_result(masm, ret_type, stack_slots);
1791     }
1792 
1793     if (!UseHeavyMonitors) {
1794       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1795       __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
1796       __ fast_unlock_impl(obj_reg, swap_reg, tmp, slow_path_unlock);
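           // Here swap_reg holds the mark with markWord::lock_mask_in_place
           // cleared; fast_unlock_impl uses it for the lightweight-unlock
           // fast path and branches to slow_path_unlock if that fast path
           // fails (e.g. the monitor was inflated).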









1797       __ dec_held_monitor_count();
1798     } else {
1799       __ jmp(slow_path_unlock);
1800     }
1801 
1802     // slow path re-enters here
1803     __ bind(unlock_done);
1804     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1805       restore_native_result(masm, ret_type, stack_slots);
1806     }
1807 
1808     __ bind(fast_done);
1809   }
1810 
1811   {
1812     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0, noreg);
1813     // Tell dtrace about this method exit
1814     save_native_result(masm, ret_type, stack_slots);
1815     __ mov_metadata(rax, method());
1816     __ call_VM_leaf(

1863     }
1864   }
1865 
1866   // Return
1867 
1868   __ leave();
1869   __ ret(0);
1870 
1871   // Unexpected paths are out of line and go here
1872 
1873   // Slow path locking & unlocking
1874   if (method->is_synchronized()) {
1875 
1876     // BEGIN Slow path lock
1877 
1878     __ bind(slow_path_lock);
1879 
1880     // last_Java_frame is set up; no exceptions, so do a vanilla call, not call_VM
1881     // args are (oop obj, JavaThread* thread)
1882     __ push(thread);

1883     __ push(obj_reg);
1884     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1885     __ addptr(rsp, 2*wordSize);
1886 
1887 #ifdef ASSERT
1888     { Label L;
1889     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1890     __ jcc(Assembler::equal, L);
1891     __ stop("no pending exception allowed on exit from monitorenter");
1892     __ bind(L);
1893     }
1894 #endif
1895     __ jmp(lock_done);
1896 
1897     // END Slow path lock
1898 
1899     // BEGIN Slow path unlock
1900     __ bind(slow_path_unlock);
1901     __ vzeroupper();
1902     // Slow path unlock
1903 
1904     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1905       save_native_result(masm, ret_type, stack_slots);
1906     }
1907     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1908 
1909     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1910     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1911 
1912 
1913     // should be a peal
1914     // +wordSize because of the push above
1915     // args are (oop obj, JavaThread* thread)
1916     __ push(thread);


1917 
1918     __ push(obj_reg);
1919     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
1920     __ addptr(rsp, 2*wordSize);
1921 #ifdef ASSERT
1922     {
1923       Label L;
1924       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1925       __ jcc(Assembler::equal, L);
1926       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1927       __ bind(L);
1928     }
1929 #endif /* ASSERT */
1930 
1931     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1932 
1933     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1934       restore_native_result(masm, ret_type, stack_slots);
1935     }
1936     __ jmp(unlock_done);
1937     // END Slow path unlock
1938 
1939   }
1940 

1955   // Forward the exception
1956   __ bind(exception_pending);
1957 
1958   // remove possible return value from FPU register stack
1959   __ empty_FPU_stack();
1960 
1961   // pop our frame
1962   __ leave();
1963   // and forward the exception
1964   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1965 
1966   __ flush();
1967 
1968   nmethod *nm = nmethod::new_native_nmethod(method,
1969                                             compile_id,
1970                                             masm->code(),
1971                                             vep_offset,
1972                                             frame_complete,
1973                                             stack_slots / VMRegImpl::slots_per_word,
1974                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),

1975                                             oop_maps);
1976 
1977   return nm;
1978 
1979 }
1980 
1981 // This function returns the adjustment size (in words) to a c2i adapter
1982 // activation, for use during deoptimization
1983 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
1984   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
1985 }
1986 
1987 
1988 // Number of stack slots between incoming argument block and the start of
1989 // a new frame.  The PROLOG must add this many slots to the stack.  The
1990 // EPILOG must remove this many slots.  Intel needs one slot for the
1991 // return address and one for rbp (rbp must be saved)
1992 uint SharedRuntime::in_preserve_stack_slots() {
1993   return 2+VerifyStackAtCalls;
1994 }