src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

1613     if (method->is_continuation_enter_intrinsic()) {
1614       assert(interpreted_entry_offset != -1, "Must be set");
1615       assert(exception_offset != -1,         "Must be set");
1616     } else {
1617       assert(interpreted_entry_offset == -1, "Must be unset");
1618       assert(exception_offset == -1,         "Must be unset");
1619     }
1620     assert(frame_complete != -1,    "Must be set");
1621     assert(stack_slots != -1,       "Must be set");
1622     assert(vep_offset != -1,        "Must be set");
1623 #endif
1624 
1625     __ flush();
1626     nmethod* nm = nmethod::new_native_nmethod(method,
1627                                               compile_id,
1628                                               masm->code(),
1629                                               vep_offset,
1630                                               frame_complete,
1631                                               stack_slots,
1632                                               in_ByteSize(-1),
1633                                               in_ByteSize(-1),
1634                                               oop_maps,
1635                                               exception_offset);
1636     if (method->is_continuation_enter_intrinsic()) {
1637       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1638     } else if (method->is_continuation_yield_intrinsic()) {
1639       _cont_doYield_stub = nm;
1640     }
1641     return nm;
1642   }
1643 
1644   if (method->is_method_handle_intrinsic()) {
1645     vmIntrinsics::ID iid = method->intrinsic_id();
1646     intptr_t start = (intptr_t)__ pc();
1647     int vep_offset = ((intptr_t)__ pc()) - start;
1648     gen_special_dispatch(masm,
1649                          method,
1650                          in_sig_bt,
1651                          in_regs);
1652     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1653     __ flush();
1654     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1655     return nmethod::new_native_nmethod(method,
1656                                        compile_id,
1657                                        masm->code(),
1658                                        vep_offset,
1659                                        frame_complete,
1660                                        stack_slots / VMRegImpl::slots_per_word,
1661                                        in_ByteSize(-1),
1662                                        in_ByteSize(-1),
1663                                        (OopMapSet*)NULL);
1664   }
1665   address native_func = method->native_function();
1666   assert(native_func != NULL, "must have function");
1667 
1668   // An OopMap for lock (and class if static)
1669   OopMapSet *oop_maps = new OopMapSet();
1670   intptr_t start = (intptr_t)__ pc();
1671 
1672   // We have received a description of where all the java args are located
1673   // on entry to the wrapper. We need to convert these args to where
1674   // the jni function will expect them. To figure out where they go
1675   // we convert the java signature to a C signature by inserting
1676   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1677 
1678   const int total_in_args = method->size_of_parameters();
1679   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1680 
1681   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1682   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
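The arithmetic above reflects the JNI calling convention: the native entry always receives a hidden JNIEnv* as arg[0], and a static method additionally receives its jclass as arg[1], so the C argument count is the Java slot count plus one or two. A minimal sketch of that relationship (hypothetical helper, not part of the wrapper):

  // Illustrative only: how total_c_args above relates to the native signature.
  // 'java_arg_slots' plays the role of method->size_of_parameters() (receiver
  // included for instance methods, longs/doubles counting as two slots).
  static int c_arg_count(int java_arg_slots, bool is_static) {
    // JNIEnv* is always hidden arg[0]; a static method also gets its jclass as arg[1].
    return java_arg_slots + (is_static ? 2 : 1);
  }
  // e.g. instance void put(Object k, Object v): 3 slots -> 4 C args (JNIEnv*, this, k, v)
  //      static  void log(int level)          : 1 slot  -> 3 C args (JNIEnv*, jclass, level)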

1698   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1699 
1700   // Compute framesize for the wrapper.  We need to handlize all oops in
1701   // incoming registers
1702 
1703   // Calculate the total number of stack slots we will need.
1704 
1705   // First count the abi requirement plus all of the outgoing args
1706   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1707 
1708   // Now the space for the inbound oop handle area
1709   int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
1710 
1711   int oop_handle_offset = stack_slots;
1712   stack_slots += total_save_slots;
1713 
1714   // Now any space we need for handlizing a klass if static method
1715 
1716   int klass_slot_offset = 0;
1717   int klass_offset = -1;
1718   int lock_slot_offset = 0;
1719   bool is_static = false;
1720 
1721   if (method->is_static()) {
1722     klass_slot_offset = stack_slots;
1723     stack_slots += VMRegImpl::slots_per_word;
1724     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1725     is_static = true;
1726   }
1727 
1728   // Plus a lock if needed
1729 
1730   if (method->is_synchronized()) {
1731     lock_slot_offset = stack_slots;
1732     stack_slots += VMRegImpl::slots_per_word;
1733   }
1734 
1735   // Now a place (+2) to save return values or temp during shuffling
1736   // + 4 for return address (which we own) and saved rbp
1737   stack_slots += 6;
1738 
1739   // Ok The space we have allocated will look like:
1740   //
1741   //
1742   // FP-> |                     |
1743   //      |---------------------|
1744   //      | 2 slots for moves   |
1745   //      |---------------------|
1746   //      | lock box (if sync)  |
1747   //      |---------------------| <- lock_slot_offset
1748   //      | klass (if static)   |
1749   //      |---------------------| <- klass_slot_offset
1750   //      | oopHandle area      |
1751   //      |---------------------| <- oop_handle_offset (6 java arg registers)
1752   //      | outbound memory     |
1753   //      | based arguments     |
1754   //      |                     |
1755   //      |---------------------|
1756   //      |                     |
1757   // SP-> | out_preserved_slots |
1758   //
1759   //
1760 
1761 
1762   // Now compute the actual number of stack slots we need, rounding up to keep
1763   // the stack properly aligned.
1764   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1765 
1766   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1767 
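A worked example of the slot accounting above, with illustrative numbers only (assuming VMRegImpl::slots_per_word == 2 and StackAlignmentInSlots == 4 on x86_64):

  // Static synchronized native method with no stack-passed outgoing arguments:
  //   out_preserve + out_arg_slots :  0
  //   oop handle area              : 12   (6 argument registers * 2 slots)
  //   klass handle (static)        :  2
  //   lock slot (synchronized)     :  2
  //   moves + return addr + rbp    :  6
  //   total                        : 22 -> align_up(22, 4) == 24 slots == 96 bytes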

2004     restore_args(masm, total_c_args, c_arg, out_regs);
2005   }
2006 
2007   // RedefineClasses() tracing support for obsolete method entry
2008   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2009     // protect the args we've loaded
2010     save_args(masm, total_c_args, c_arg, out_regs);
2011     __ mov_metadata(c_rarg1, method());
2012     __ call_VM_leaf(
2013       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2014       r15_thread, c_rarg1);
2015     restore_args(masm, total_c_args, c_arg, out_regs);
2016   }
2017 
2018   // Lock a synchronized method
2019 
2020   // Register definitions used by locking and unlocking
2021 
2022   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
2023   const Register obj_reg  = rbx;  // Will contain the oop
2024   const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2025   const Register old_hdr  = r13;  // value of old header at unlock time
2026 
2027   Label slow_path_lock;
2028   Label lock_done;
2029 
2030   if (method->is_synchronized()) {
2031     Label count_mon;
2032 
2033     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2034 
2035     // Get the handle (the 2nd argument)
2036     __ mov(oop_handle_reg, c_rarg1);
2037 
2038     // Get address of the box
2039 
2040     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2041 
2042     // Load the oop from the handle
2043     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2044 
2045     if (!UseHeavyMonitors) {
2046 
2047       // Load immediate 1 into swap_reg %rax
2048       __ movl(swap_reg, 1);
2049 
2050       // Load (object->mark() | 1) into swap_reg %rax
2051       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2052 
2053       // Save (object->mark() | 1) into BasicLock's displaced header
2054       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2055 
2056       // src -> dest iff dest == rax else rax <- dest
2057       __ lock();
2058       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2059       __ jcc(Assembler::equal, count_mon);
2060 
2061       // Hmm should this move to the slow path code area???
2062 
2063       // Test if the oopMark is an obvious stack pointer, i.e.,
2064       //  1) (mark & 3) == 0, and
2065       //  2) rsp <= mark < rsp + os::pagesize()
2066       // These 3 tests can be done by evaluating the following
2067       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2068       // assuming both stack pointer and pagesize have their
2069       // least significant 2 bits clear.
2070       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2071 
2072       __ subptr(swap_reg, rsp);
2073       __ andptr(swap_reg, 3 - os::vm_page_size());
2074 
2075       // Save the test result, for recursive case, the result is zero
2076       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2077       __ jcc(Assembler::notEqual, slow_path_lock);
2078     } else {
2079       __ jmp(slow_path_lock);
2080     }
2081     __ bind(count_mon);
2082     __ inc_held_monitor_count();
2083 
2084     // Slow path will re-enter here
2085     __ bind(lock_done);
2086   }
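For the stack-locked test above, a hedged C++ sketch of the predicate that the subptr/andptr pair evaluates (illustrative only, not VM code; it assumes rsp and the page size have their low two bits clear, as the comment in the wrapper notes):

  #include <cstdint>

  // Returns true iff (mark - rsp) has its low two bits clear and is less than
  // page_size; given an aligned rsp, that means the mark word has its lock bits
  // clear and points into [rsp, rsp + page_size), i.e. it is our own stack lock
  // (the recursive case).
  static bool stack_locked_by_current_frame(uintptr_t mark, uintptr_t rsp,
                                            uintptr_t page_size /* e.g. 4096 */) {
    return ((mark - rsp) & (3 - page_size)) == 0;
  }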
2087 
2088   // Finally just about ready to make the JNI call
2089 
2090   // get JNIEnv* which is first argument to native
2091   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2092 
2093   // Now set thread in native
2094   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2095 
2096   __ call(RuntimeAddress(native_func));
2097 
2098   // Verify or restore cpu control state after JNI call
2099   __ restore_cpu_control_state_after_jni(rscratch1);
2100 
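For context, native_func is an ordinary JNI entry point, so the c_rarg0 loaded above arrives as the JNIEnv* parameter. A minimal sketch of such a native implementation (names are illustrative, not from this change):

  #include <jni.h>

  // Hypothetical counterpart of:  package demo;  class Counter { native int add(int delta); }
  // The receiver is passed as a JNI handle ('self'); primitives arrive by value.
  extern "C" JNIEXPORT jint JNICALL
  Java_demo_Counter_add(JNIEnv* env, jobject self, jint delta) {
    (void)env; (void)self;
    return delta + 1;   // placeholder body
  }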
2101   // Unpack native results.

2171   __ bind(after_transition);
2172 
2173   Label reguard;
2174   Label reguard_done;
2175   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2176   __ jcc(Assembler::equal, reguard);
2177   __ bind(reguard_done);
2178 
2179   // native result if any is live
2180 
2181   // Unlock
2182   Label slow_path_unlock;
2183   Label unlock_done;
2184   if (method->is_synchronized()) {
2185 
2186     Label fast_done;
2187 
2188     // Get locked oop from the handle we passed to jni
2189     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2190 
2191     if (!UseHeavyMonitors) {
2192       Label not_recur;
2193       // Simple recursive lock?
2194       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
2195       __ jcc(Assembler::notEqual, not_recur);
2196       __ dec_held_monitor_count();
2197       __ jmpb(fast_done);
2198       __ bind(not_recur);
2199     }
2200 
2201     // Must save rax if it is live now because cmpxchg must use it
2202     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2203       save_native_result(masm, ret_type, stack_slots);
2204     }
2205 
2206     if (!UseHeavyMonitors) {
2207       // get address of the stack lock
2208       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2209       //  get old displaced header
2210       __ movptr(old_hdr, Address(rax, 0));
2211 
2212       // Atomic swap old header if oop still contains the stack lock
2213       __ lock();
2214       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2215       __ jcc(Assembler::notEqual, slow_path_unlock);
2216       __ dec_held_monitor_count();
2217     } else {
2218       __ jmp(slow_path_unlock);
2219     }
2220 
2221     // slow path re-enters here
2222     __ bind(unlock_done);
2223     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2224       restore_native_result(masm, ret_type, stack_slots);
2225     }
2226 
2227     __ bind(fast_done);
2228   }
2229   {
2230     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2231     save_native_result(masm, ret_type, stack_slots);
2232     __ mov_metadata(c_rarg1, method());
2233     __ call_VM_leaf(
2234          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2235          r15_thread, c_rarg1);

2270 
2271   // forward the exception
2272   __ bind(exception_pending);
2273 
2274   // and forward the exception
2275   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2276 
2277   // Slow path locking & unlocking
2278   if (method->is_synchronized()) {
2279 
2280     // BEGIN Slow path lock
2281     __ bind(slow_path_lock);
2282 
2283     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2284     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2285 
2286     // protect the args we've loaded
2287     save_args(masm, total_c_args, c_arg, out_regs);
2288 
2289     __ mov(c_rarg0, obj_reg);
2290     __ mov(c_rarg1, lock_reg);
2291     __ mov(c_rarg2, r15_thread);
2292 
2293     // Not a leaf but we have last_Java_frame setup as we want
2294     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2295     restore_args(masm, total_c_args, c_arg, out_regs);
2296 
2297 #ifdef ASSERT
2298     { Label L;
2299     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2300     __ jcc(Assembler::equal, L);
2301     __ stop("no pending exception allowed on exit from monitorenter");
2302     __ bind(L);
2303     }
2304 #endif
2305     __ jmp(lock_done);
2306 
2307     // END Slow path lock
2308 
2309     // BEGIN Slow path unlock
2310     __ bind(slow_path_unlock);
2311 
2312     // If we haven't already saved the native result we must save it now as xmm registers
2313     // are still exposed.
2314     __ vzeroupper();
2315     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2316       save_native_result(masm, ret_type, stack_slots);
2317     }
2318 
2319     __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2320 
2321     __ mov(c_rarg0, obj_reg);
2322     __ mov(c_rarg2, r15_thread);
2323     __ mov(r12, rsp); // remember sp
2324     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2325     __ andptr(rsp, -16); // align stack as required by ABI
2326 
2327     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2328     // NOTE that obj_reg == rbx currently
2329     __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2330     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2331 
2332     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2333     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2334     __ mov(rsp, r12); // restore sp
2335     __ reinit_heapbase();
2336 #ifdef ASSERT
2337     {
2338       Label L;
2339       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2340       __ jcc(Assembler::equal, L);
2341       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2342       __ bind(L);

2363   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2364   __ andptr(rsp, -16); // align stack as required by ABI
2365   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2366   __ mov(rsp, r12); // restore sp
2367   __ reinit_heapbase();
2368   restore_native_result(masm, ret_type, stack_slots);
2369   // and continue
2370   __ jmp(reguard_done);
2371 
2372 
2373 
2374   __ flush();
2375 
2376   nmethod *nm = nmethod::new_native_nmethod(method,
2377                                             compile_id,
2378                                             masm->code(),
2379                                             vep_offset,
2380                                             frame_complete,
2381                                             stack_slots / VMRegImpl::slots_per_word,
2382                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2383                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2384                                             oop_maps);
2385 
2386   return nm;
2387 }
2388 
2389 // This function returns the adjustment size (in number of words) to a c2i adapter
2390 // activation for use during deoptimization.
2391 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2392   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2393 }
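A quick worked example with hypothetical numbers: on a 64-bit VM Interpreter::stackElementWords is 1, so a callee taking 2 parameters but declaring 5 locals needs (5 - 2) * 1 == 3 extra words in the reconstructed interpreter frame; the parameters themselves already live in the caller's outgoing-argument area.

  // e.g. (hypothetical values)
  // Deoptimization::last_frame_adjust(/*callee_parameters=*/2, /*callee_locals=*/5) == 3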
2394 
2395 
2396 uint SharedRuntime::out_preserve_stack_slots() {
2397   return 0;
2398 }
2399 
2400 
2401 // Number of stack slots between incoming argument block and the start of
2402 // a new frame.  The PROLOG must add this many slots to the stack.  The
2403 // EPILOG must remove this many slots.  amd64 needs two slots for

1613     if (method->is_continuation_enter_intrinsic()) {
1614       assert(interpreted_entry_offset != -1, "Must be set");
1615       assert(exception_offset != -1,         "Must be set");
1616     } else {
1617       assert(interpreted_entry_offset == -1, "Must be unset");
1618       assert(exception_offset == -1,         "Must be unset");
1619     }
1620     assert(frame_complete != -1,    "Must be set");
1621     assert(stack_slots != -1,       "Must be set");
1622     assert(vep_offset != -1,        "Must be set");
1623 #endif
1624 
1625     __ flush();
1626     nmethod* nm = nmethod::new_native_nmethod(method,
1627                                               compile_id,
1628                                               masm->code(),
1629                                               vep_offset,
1630                                               frame_complete,
1631                                               stack_slots,
1632                                               in_ByteSize(-1),

1633                                               oop_maps,
1634                                               exception_offset);
1635     if (method->is_continuation_enter_intrinsic()) {
1636       ContinuationEntry::set_enter_code(nm, interpreted_entry_offset);
1637     } else if (method->is_continuation_yield_intrinsic()) {
1638       _cont_doYield_stub = nm;
1639     }
1640     return nm;
1641   }
1642 
1643   if (method->is_method_handle_intrinsic()) {
1644     vmIntrinsics::ID iid = method->intrinsic_id();
1645     intptr_t start = (intptr_t)__ pc();
1646     int vep_offset = ((intptr_t)__ pc()) - start;
1647     gen_special_dispatch(masm,
1648                          method,
1649                          in_sig_bt,
1650                          in_regs);
1651     int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1652     __ flush();
1653     int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1654     return nmethod::new_native_nmethod(method,
1655                                        compile_id,
1656                                        masm->code(),
1657                                        vep_offset,
1658                                        frame_complete,
1659                                        stack_slots / VMRegImpl::slots_per_word,
1660                                        in_ByteSize(-1),

1661                                        (OopMapSet*)NULL);
1662   }
1663   address native_func = method->native_function();
1664   assert(native_func != NULL, "must have function");
1665 
1666   // An OopMap for lock (and class if static)
1667   OopMapSet *oop_maps = new OopMapSet();
1668   intptr_t start = (intptr_t)__ pc();
1669 
1670   // We have received a description of where all the java args are located
1671   // on entry to the wrapper. We need to convert these args to where
1672   // the jni function will expect them. To figure out where they go
1673   // we convert the java signature to a C signature by inserting
1674   // the hidden arguments as arg[0] and possibly arg[1] (static method)
1675 
1676   const int total_in_args = method->size_of_parameters();
1677   int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
1678 
1679   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1680   VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);

1696   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1697 
1698   // Compute framesize for the wrapper.  We need to handlize all oops in
1699   // incoming registers
1700 
1701   // Calculate the total number of stack slots we will need.
1702 
1703   // First count the abi requirement plus all of the outgoing args
1704   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1705 
1706   // Now the space for the inbound oop handle area
1707   int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
1708 
1709   int oop_handle_offset = stack_slots;
1710   stack_slots += total_save_slots;
1711 
1712   // Now any space we need for handlizing a klass if static method
1713 
1714   int klass_slot_offset = 0;
1715   int klass_offset = -1;

1716   bool is_static = false;
1717 
1718   if (method->is_static()) {
1719     klass_slot_offset = stack_slots;
1720     stack_slots += VMRegImpl::slots_per_word;
1721     klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1722     is_static = true;
1723   }
1724 
1725   // Plus a lock if needed
1726 
1727   if (method->is_synchronized()) {

1728     stack_slots += VMRegImpl::slots_per_word;
1729   }
1730 
1731   // Now a place (+2) to save return values or temp during shuffling
1732   // + 4 for return address (which we own) and saved rbp
1733   stack_slots += 6;
1734 
1735   // Ok The space we have allocated will look like:
1736   //
1737   //
1738   // FP-> |                     |
1739   //      |---------------------|
1740   //      | 2 slots for moves   |
1741   //      |---------------------|


1742   //      | klass (if static)   |
1743   //      |---------------------| <- klass_slot_offset
1744   //      | oopHandle area      |
1745   //      |---------------------| <- oop_handle_offset (6 java arg registers)
1746   //      | outbound memory     |
1747   //      | based arguments     |
1748   //      |                     |
1749   //      |---------------------|
1750   //      |                     |
1751   // SP-> | out_preserved_slots |
1752   //
1753   //
1754 
1755 
1756   // Now compute the actual number of stack slots we need, rounding up to keep
1757   // the stack properly aligned.
1758   stack_slots = align_up(stack_slots, StackAlignmentInSlots);
1759 
1760   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
1761 

1998     restore_args(masm, total_c_args, c_arg, out_regs);
1999   }
2000 
2001   // RedefineClasses() tracing support for obsolete method entry
2002   if (log_is_enabled(Trace, redefine, class, obsolete)) {
2003     // protect the args we've loaded
2004     save_args(masm, total_c_args, c_arg, out_regs);
2005     __ mov_metadata(c_rarg1, method());
2006     __ call_VM_leaf(
2007       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2008       r15_thread, c_rarg1);
2009     restore_args(masm, total_c_args, c_arg, out_regs);
2010   }
2011 
2012   // Lock a synchronized method
2013 
2014   // Register definitions used by locking and unlocking
2015 
2016   const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
2017   const Register obj_reg  = rbx;  // Will contain the oop
2018   const Register tmp      = r13;

2019 
2020   Label slow_path_lock;
2021   Label lock_done;
2022 
2023   if (method->is_synchronized()) {




2024     // Get the handle (the 2nd argument)
2025     __ mov(oop_handle_reg, c_rarg1);
2026 




2027     // Load the oop from the handle
2028     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2029 
2030     if (!UseHeavyMonitors) {
2031       // Load object header
2032       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2033       __ fast_lock_impl(obj_reg, swap_reg, r15_thread, tmp, rscratch1, slow_path_lock);





























2034     } else {
2035       __ jmp(slow_path_lock);
2036     }

2037     __ inc_held_monitor_count();
2038 
2039     // Slow path will re-enter here
2040     __ bind(lock_done);
2041   }
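The fast path above replaces the old displaced-header/BasicLock scheme with the lightweight-locking entry point. A conceptual, heavily hedged sketch of what that fast path attempts (this is not the MacroAssembler::fast_lock_impl contract; the lock-stack size, mark-word bit patterns and helper names below are assumptions for illustration):

  #include <atomic>
  #include <cstdint>

  struct Obj { std::atomic<uintptr_t> mark; };

  constexpr uintptr_t kLockBits = 0x3;   // low two bits of the mark word (assumed)
  constexpr uintptr_t kUnlocked = 0x1;   // "neutral" pattern (assumed)
  constexpr uintptr_t kLocked   = 0x0;   // "fast locked" pattern (assumed)

  struct LockStack {                     // per-thread, replaces on-frame BasicLocks
    Obj* elems[8];
    int  top = 0;
  };

  // Returns false to mean "take the slow path" (contended, inflated, or stack full).
  inline bool try_fast_lock(Obj* o, LockStack& ls) {
    if (ls.top == 8) return false;                        // lock stack full
    uintptr_t mark = o->mark.load(std::memory_order_relaxed);
    if ((mark & kLockBits) != kUnlocked) return false;    // already locked / has monitor
    uintptr_t locked = (mark & ~kLockBits) | kLocked;
    if (!o->mark.compare_exchange_strong(mark, locked)) return false;  // lost the race
    ls.elems[ls.top++] = o;                               // record ownership per thread
    return true;
  }

This is consistent with the rest of the diff, which drops lock_reg/lock_slot_offset and passes the thread instead of a BasicLock address to the slow-path runtime calls.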
2042 
2043   // Finally just about ready to make the JNI call
2044 
2045   // get JNIEnv* which is first argument to native
2046   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2047 
2048   // Now set thread in native
2049   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2050 
2051   __ call(RuntimeAddress(native_func));
2052 
2053   // Verify or restore cpu control state after JNI call
2054   __ restore_cpu_control_state_after_jni(rscratch1);
2055 
2056   // Unpack native results.

2126   __ bind(after_transition);
2127 
2128   Label reguard;
2129   Label reguard_done;
2130   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2131   __ jcc(Assembler::equal, reguard);
2132   __ bind(reguard_done);
2133 
2134   // native result if any is live
2135 
2136   // Unlock
2137   Label slow_path_unlock;
2138   Label unlock_done;
2139   if (method->is_synchronized()) {
2140 
2141     Label fast_done;
2142 
2143     // Get locked oop from the handle we passed to jni
2144     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2145 










2146     // Must save rax if it is live now because cmpxchg must use it
2147     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2148       save_native_result(masm, ret_type, stack_slots);
2149     }
2150 
2151     if (!UseHeavyMonitors) {
2152       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2153       __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
2154       __ fast_unlock_impl(obj_reg, swap_reg, tmp, slow_path_unlock);






2155       __ dec_held_monitor_count();
2156     } else {
2157       __ jmp(slow_path_unlock);
2158     }
2159 
2160     // slow path re-enters here
2161     __ bind(unlock_done);
2162     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2163       restore_native_result(masm, ret_type, stack_slots);
2164     }
2165 
2166     __ bind(fast_done);
2167   }
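The unlock fast path mirrors the locking sketch above: the andptr clears the lock bits to form the value the mark word should hold while fast-locked, and fast_unlock_impl is expected to restore the unlocked pattern or bail out to slow_path_unlock. A conceptual counterpart, again illustrative only and reusing the assumed types from the locking sketch:

  // Not the fast_unlock_impl contract; conceptual inverse of try_fast_lock above.
  inline bool try_fast_unlock(Obj* o, LockStack& ls) {
    uintptr_t mark = o->mark.load(std::memory_order_relaxed);
    if ((mark & kLockBits) != kLocked) return false;       // inflated -> slow path
    uintptr_t unlocked = (mark & ~kLockBits) | kUnlocked;
    if (!o->mark.compare_exchange_strong(mark, unlocked)) return false;
    --ls.top;                                               // pop the owning entry
    return true;
  }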
2168   {
2169     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2170     save_native_result(masm, ret_type, stack_slots);
2171     __ mov_metadata(c_rarg1, method());
2172     __ call_VM_leaf(
2173          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2174          r15_thread, c_rarg1);

2209 
2210   // forward the exception
2211   __ bind(exception_pending);
2212 
2213   // and forward the exception
2214   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2215 
2216   // Slow path locking & unlocking
2217   if (method->is_synchronized()) {
2218 
2219     // BEGIN Slow path lock
2220     __ bind(slow_path_lock);
2221 
2222     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2223     // args are (oop obj, JavaThread* thread)
2224 
2225     // protect the args we've loaded
2226     save_args(masm, total_c_args, c_arg, out_regs);
2227 
2228     __ mov(c_rarg0, obj_reg);
2229     __ mov(c_rarg1, r15_thread);

2230 
2231     // Not a leaf but we have last_Java_frame setup as we want
2232     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 2);
2233     restore_args(masm, total_c_args, c_arg, out_regs);
2234 
2235 #ifdef ASSERT
2236     { Label L;
2237     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2238     __ jcc(Assembler::equal, L);
2239     __ stop("no pending exception allowed on exit from monitorenter");
2240     __ bind(L);
2241     }
2242 #endif
2243     __ jmp(lock_done);
2244 
2245     // END Slow path lock
2246 
2247     // BEGIN Slow path unlock
2248     __ bind(slow_path_unlock);
2249 
2250     // If we haven't already saved the native result we must save it now as xmm registers
2251     // are still exposed.
2252     __ vzeroupper();
2253     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2254       save_native_result(masm, ret_type, stack_slots);
2255     }
2256 


2257     __ mov(c_rarg0, obj_reg);
2258     __ mov(c_rarg1, r15_thread);
2259     __ mov(r12, rsp); // remember sp
2260     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2261     __ andptr(rsp, -16); // align stack as required by ABI
2262 
2263     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2264     // NOTE that obj_reg == rbx currently
2265     __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2266     __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2267 
2268     // args are (oop obj, JavaThread* thread)
2269     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2270     __ mov(rsp, r12); // restore sp
2271     __ reinit_heapbase();
2272 #ifdef ASSERT
2273     {
2274       Label L;
2275       __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2276       __ jcc(Assembler::equal, L);
2277       __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2278       __ bind(L);

2299   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2300   __ andptr(rsp, -16); // align stack as required by ABI
2301   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2302   __ mov(rsp, r12); // restore sp
2303   __ reinit_heapbase();
2304   restore_native_result(masm, ret_type, stack_slots);
2305   // and continue
2306   __ jmp(reguard_done);
2307 
2308 
2309 
2310   __ flush();
2311 
2312   nmethod *nm = nmethod::new_native_nmethod(method,
2313                                             compile_id,
2314                                             masm->code(),
2315                                             vep_offset,
2316                                             frame_complete,
2317                                             stack_slots / VMRegImpl::slots_per_word,
2318                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),

2319                                             oop_maps);
2320 
2321   return nm;
2322 }
2323 
2324 // This function returns the adjustment size (in number of words) to a c2i adapter
2325 // activation for use during deoptimization.
2326 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2327   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2328 }
2329 
2330 
2331 uint SharedRuntime::out_preserve_stack_slots() {
2332   return 0;
2333 }
2334 
2335 
2336 // Number of stack slots between incoming argument block and the start of
2337 // a new frame.  The PROLOG must add this many slots to the stack.  The
2338 // EPILOG must remove this many slots.  amd64 needs two slots for