src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Old version of the file (before the change):

 147     flags_off, flagsH_off,
 148     // The frame sender code expects that rbp will be in the "natural" place and
 149     // will override any oopMap setting for it. We must therefore force the layout
 150     // so that it agrees with the frame sender code.
 151     rbp_off, rbpH_off,        // copy of rbp we will restore
 152     return_off, returnH_off,  // slot for return address
 153     reg_save_size             // size in compiler stack slots
 154   };
 155 
 156  public:
 157   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors);
 158   static void restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors = false);
 159 
 160   // Offsets into the register save area
 161   // Used by deoptimization when it is managing result register
 162   // values on its own
 163 
 164   static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
 165   static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
 166   static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }

 167   static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
 168   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
 169 
 170   // During deoptimization only the result registers need to be restored,
 171   // all the other values have already been extracted.
 172   static void restore_result_registers(MacroAssembler* masm);
 173 };
 174 
 175 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
 176   int off = 0;
 177   int num_xmm_regs = XMMRegister::available_xmm_registers();
 178 #if COMPILER2_OR_JVMCI
 179   if (save_wide_vectors && UseAVX == 0) {
 180     save_wide_vectors = false; // vectors larger than 16 byte long are supported only with AVX
 181   }
 182   assert(!save_wide_vectors || MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 183 #else
 184   save_wide_vectors = false; // vectors are generated only by C2 and JVMCI
 185 #endif
 186 

1329   __ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
1330   __ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
1331   __ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
1332   __ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);
1333 
1334   __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
1335   __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
1336 }
1337 
1338 //---------------------------- continuation_enter_cleanup ---------------------------
1339 //
1340 // Arguments:
1341 //   rsp: pointer to the ContinuationEntry
1342 //
1343 // Results:
1344 //   rsp: pointer to the spilled rbp in the entry frame
1345 //
1346 // Kills:
1347 //   rbx
1348 //
1349 void static continuation_enter_cleanup(MacroAssembler* masm) {
1350 #ifdef ASSERT
1351   Label L_good_sp;
1352   __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
1353   __ jcc(Assembler::equal, L_good_sp);
1354   __ stop("Incorrect rsp at continuation_enter_cleanup");
1355   __ bind(L_good_sp);
1356 #endif
1357 
1358   __ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
1359   __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);
1360   __ movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()));
1361   __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx);
1362 
1363   __ movptr(rbx, Address(rsp, ContinuationEntry::parent_offset()));
1364   __ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx);
1365   __ addptr(rsp, checked_cast<int32_t>(ContinuationEntry::size()));
1366 }
1367 
1368 static void gen_continuation_enter(MacroAssembler* masm,
1369                                    const VMRegPair* regs,

1482   address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
1483   if (stub == nullptr) {
1484     fatal("CodeCache is full at gen_continuation_enter");
1485   }
1486 
1487   // The call needs to be resolved. There's a special case for this in
1488   // SharedRuntime::find_callee_info_helper() which calls
1489   // LinkResolver::resolve_continuation_enter() which resolves the call to
1490   // Continuation.enter(Continuation c, boolean isContinue).
1491   __ call(resolve);
1492 
1493   oop_maps->add_gc_map(__ pc() - start, map);
1494   __ post_call_nop();
1495 
1496   __ jmpb(L_exit);
1497 
1498   // --- Thawing path
1499 
1500   __ bind(L_thaw);
1501 

1502   __ call(RuntimeAddress(StubRoutines::cont_thaw()));
1503 
1504   ContinuationEntry::_return_pc_offset = __ pc() - start;
1505   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1506   __ post_call_nop();
1507 
1508   // --- Normal exit (resolve/thawing)
1509 
1510   __ bind(L_exit);
1511 
1512   continuation_enter_cleanup(masm);
1513   __ pop(rbp);
1514   __ ret(0);
1515 
1516   // --- Exception handling path
1517 
1518   exception_offset = __ pc() - start;
1519 
1520   continuation_enter_cleanup(masm);
1521   __ pop(rbp);

1584   continuation_enter_cleanup(masm);
1585   __ pop(rbp);
1586   __ ret(0);
1587 
1588   __ bind(L_pinned);
1589 
1590   // Pinned, return to caller
1591 
1592   // handle pending exception thrown by freeze
1593   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
1594   Label ok;
1595   __ jcc(Assembler::equal, ok);
1596   __ leave();
1597   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1598   __ bind(ok);
1599 
1600   __ leave();
1601   __ ret(0);
1602 }
1603 




1604 static void gen_special_dispatch(MacroAssembler* masm,
1605                                  const methodHandle& method,
1606                                  const BasicType* sig_bt,
1607                                  const VMRegPair* regs) {
1608   verify_oop_args(masm, method, sig_bt, regs);
1609   vmIntrinsics::ID iid = method->intrinsic_id();
1610 
1611   // Now write the args into the outgoing interpreter space
1612   bool     has_receiver   = false;
1613   Register receiver_reg   = noreg;
1614   int      member_arg_pos = -1;
1615   Register member_reg     = noreg;
1616   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1617   if (ref_kind != 0) {
1618     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1619     member_reg = rbx;  // known to be free at this point
1620     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1621   } else if (iid == vmIntrinsics::_invokeBasic) {
1622     has_receiver = true;
1623   } else if (iid == vmIntrinsics::_linkToNative) {

2155 
2156       // Test if the oopMark is an obvious stack pointer, i.e.,
2157       //  1) (mark & 3) == 0, and
2158       //  2) rsp <= mark < mark + os::pagesize()
2159       // These 3 tests can be done by evaluating the following
2160       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2161       // assuming both stack pointer and pagesize have their
2162       // least significant 2 bits clear.
2163       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2164 
2165       __ subptr(swap_reg, rsp);
2166       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2167 
2168       // Save the test result, for recursive case, the result is zero
2169       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2170       __ jcc(Assembler::notEqual, slow_path_lock);
2171     } else {
2172       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2173       __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2174     }


2175     __ bind(count_mon);
2176     __ inc_held_monitor_count();
2177 
2178     // Slow path will re-enter here
2179     __ bind(lock_done);
2180   }
2181 
2182   // Finally just about ready to make the JNI call
2183 
2184   // get JNIEnv* which is first argument to native
2185   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2186 
2187   // Now set thread in native
2188   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2189 
2190   __ call(RuntimeAddress(native_func));
2191 
2192   // Verify or restore cpu control state after JNI call
2193   __ restore_cpu_control_state_after_jni(rscratch1);
2194 

2270   __ jcc(Assembler::equal, reguard);
2271   __ bind(reguard_done);
2272 
2273   // native result if any is live
2274 
2275   // Unlock
2276   Label slow_path_unlock;
2277   Label unlock_done;
2278   if (method->is_synchronized()) {
2279 
2280     Label fast_done;
2281 
2282     // Get locked oop from the handle we passed to jni
2283     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2284 
2285     if (LockingMode == LM_LEGACY) {
2286       Label not_recur;
2287       // Simple recursive lock?
2288       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
2289       __ jcc(Assembler::notEqual, not_recur);
2290       __ dec_held_monitor_count();
2291       __ jmpb(fast_done);
2292       __ bind(not_recur);
2293     }
2294 
2295     // Must save rax if it is live now because cmpxchg must use it
2296     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2297       save_native_result(masm, ret_type, stack_slots);
2298     }
2299 
2300     if (LockingMode == LM_MONITOR) {
2301       __ jmp(slow_path_unlock);
2302     } else if (LockingMode == LM_LEGACY) {
2303       // get address of the stack lock
2304       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2305       //  get old displaced header
2306       __ movptr(old_hdr, Address(rax, 0));
2307 
2308       // Atomic swap old header if oop still contains the stack lock
2309       __ lock();
2310       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2311       __ jcc(Assembler::notEqual, slow_path_unlock);
2312       __ dec_held_monitor_count();
2313     } else {
2314       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2315       __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
2316       __ dec_held_monitor_count();
2317     }
2318 
2319     // slow path re-enters here
2320     __ bind(unlock_done);
2321     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2322       restore_native_result(masm, ret_type, stack_slots);
2323     }
2324 
2325     __ bind(fast_done);
2326   }
2327   {
2328     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2329     save_native_result(masm, ret_type, stack_slots);
2330     __ mov_metadata(c_rarg1, method());
2331     __ call_VM_leaf(
2332          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2333          r15_thread, c_rarg1);
2334     restore_native_result(masm, ret_type, stack_slots);
2335   }
2336 

2372   // and forward the exception
2373   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2374 
2375   // Slow path locking & unlocking
2376   if (method->is_synchronized()) {
2377 
2378     // BEGIN Slow path lock
2379     __ bind(slow_path_lock);
2380 
2381     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2382     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2383 
2384     // protect the args we've loaded
2385     save_args(masm, total_c_args, c_arg, out_regs);
2386 
2387     __ mov(c_rarg0, obj_reg);
2388     __ mov(c_rarg1, lock_reg);
2389     __ mov(c_rarg2, r15_thread);
2390 
2391     // Not a leaf but we have last_Java_frame setup as we want


2392     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);

2393     restore_args(masm, total_c_args, c_arg, out_regs);
2394 
2395 #ifdef ASSERT
2396     { Label L;
2397     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2398     __ jcc(Assembler::equal, L);
2399     __ stop("no pending exception allowed on exit from monitorenter");
2400     __ bind(L);
2401     }
2402 #endif
2403     __ jmp(lock_done);
2404 
2405     // END Slow path lock
2406 
2407     // BEGIN Slow path unlock
2408     __ bind(slow_path_unlock);
2409 
2410     // If we haven't already saved the native result we must save it now as xmm registers
2411     // are still exposed.
2412     __ vzeroupper();

2487 // this function returns the adjust size (in number of words) to a c2i adapter
2488 // activation for use during deoptimization
2489 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2490   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2491 }
2492 
2493 
2494 uint SharedRuntime::out_preserve_stack_slots() {
2495   return 0;
2496 }
2497 
2498 
2499 // Number of stack slots between incoming argument block and the start of
2500 // a new frame.  The PROLOG must add this many slots to the stack.  The
2501 // EPILOG must remove this many slots.  amd64 needs two slots for
2502 // return address.
2503 uint SharedRuntime::in_preserve_stack_slots() {
2504   return 4 + 2 * VerifyStackAtCalls;
2505 }
2506 




2507 //------------------------------generate_deopt_blob----------------------------
2508 void SharedRuntime::generate_deopt_blob() {
2509   // Allocate space for the code
2510   ResourceMark rm;
2511   // Setup code generation tools
2512   int pad = 0;
2513   if (UseAVX > 2) {
2514     pad += 1024;
2515   }
2516 #if INCLUDE_JVMCI
2517   if (EnableJVMCI) {
2518     pad += 512; // Increase the buffer size when compiling for JVMCI
2519   }
2520 #endif
2521   CodeBuffer buffer("deopt_blob", 2560+pad, 1024);
2522   MacroAssembler* masm = new MacroAssembler(&buffer);
2523   int frame_size_in_words;
2524   OopMap* map = nullptr;
2525   OopMapSet *oop_maps = new OopMapSet();
2526 

New version of the file (after the change):

 147     flags_off, flagsH_off,
 148     // The frame sender code expects that rbp will be in the "natural" place and
 149     // will override any oopMap setting for it. We must therefore force the layout
 150     // so that it agrees with the frame sender code.
 151     rbp_off, rbpH_off,        // copy of rbp we will restore
 152     return_off, returnH_off,  // slot for return address
 153     reg_save_size             // size in compiler stack slots
 154   };
 155 
 156  public:
 157   static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors);
 158   static void restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors = false);
 159 
 160   // Offsets into the register save area
 161   // Used by deoptimization when it is managing result register
 162   // values on its own
 163 
 164   static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
 165   static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
 166   static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
 167   static int r15_offset_in_bytes(void)    { return BytesPerInt * r15_off; }
 168   static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
 169   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
 170 
 171   // During deoptimization only the result registers need to be restored,
 172   // all the other values have already been extracted.
 173   static void restore_result_registers(MacroAssembler* masm);
 174 };
 175 
 176 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
 177   int off = 0;
 178   int num_xmm_regs = XMMRegister::available_xmm_registers();
 179 #if COMPILER2_OR_JVMCI
 180   if (save_wide_vectors && UseAVX == 0) {
 181     save_wide_vectors = false; // vectors larger than 16 byte long are supported only with AVX
 182   }
 183   assert(!save_wide_vectors || MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 184 #else
 185   save_wide_vectors = false; // vectors are generated only by C2 and JVMCI
 186 #endif
 187 

1330   __ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
1331   __ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
1332   __ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
1333   __ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);
1334 
1335   __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
1336   __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
1337 }
1338 
1339 //---------------------------- continuation_enter_cleanup ---------------------------
1340 //
1341 // Arguments:
1342 //   rsp: pointer to the ContinuationEntry
1343 //
1344 // Results:
1345 //   rsp: pointer to the spilled rbp in the entry frame
1346 //
1347 // Kills:
1348 //   rbx
1349 //
1350 static void continuation_enter_cleanup(MacroAssembler* masm) {
1351 #ifdef ASSERT
1352   Label L_good_sp;
1353   __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
1354   __ jcc(Assembler::equal, L_good_sp);
1355   __ stop("Incorrect rsp at continuation_enter_cleanup");
1356   __ bind(L_good_sp);
1357 #endif
1358 
1359   __ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
1360   __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);
1361   __ movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()));
1362   __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx);
1363 
1364   __ movptr(rbx, Address(rsp, ContinuationEntry::parent_offset()));
1365   __ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx);
1366   __ addptr(rsp, checked_cast<int32_t>(ContinuationEntry::size()));
1367 }
1368 
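The cleanup above is the mirror image of the spill sequence in gen_continuation_enter shown earlier (the movptr/movq block at lines 1330-1336): entering a continuation saves the thread's cont_fastpath and held_monitor_count into the new ContinuationEntry and clears them on the thread, while continuation_enter_cleanup restores both and unlinks the entry. A minimal C++ sketch of that pairing, using illustrative struct and function names rather than the real ContinuationEntry/JavaThread layouts:

  #include <cstdint>

  // Hedged sketch only: names and field layout are illustrative, not HotSpot's.
  struct EntrySketch {
    EntrySketch* parent;                    // previous ContinuationEntry
    void*        parent_cont_fastpath;      // spilled JavaThread::cont_fastpath
    int64_t      parent_held_monitor_count; // spilled JavaThread::held_monitor_count
  };

  struct ThreadSketch {
    EntrySketch* cont_entry;
    void*        cont_fastpath;
    int64_t      held_monitor_count;
  };

  // Mirrors the enter-side spill: copy the thread fields into the entry, then zero them.
  void enter_spill(ThreadSketch* t, EntrySketch* e) {
    e->parent_cont_fastpath      = t->cont_fastpath;
    e->parent_held_monitor_count = t->held_monitor_count;
    t->cont_fastpath             = nullptr;
    t->held_monitor_count        = 0;
  }

  // Mirrors continuation_enter_cleanup: restore the spilled values and unlink the entry.
  // (The stub additionally pops ContinuationEntry::size() bytes off rsp, which has no
  // direct C++ equivalent here.)
  void enter_cleanup(ThreadSketch* t, EntrySketch* e) {
    t->cont_fastpath      = e->parent_cont_fastpath;
    t->held_monitor_count = e->parent_held_monitor_count;
    t->cont_entry         = e->parent;
  }
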
1369 static void gen_continuation_enter(MacroAssembler* masm,
1370                                    const VMRegPair* regs,

1483   address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
1484   if (stub == nullptr) {
1485     fatal("CodeCache is full at gen_continuation_enter");
1486   }
1487 
1488   // The call needs to be resolved. There's a special case for this in
1489   // SharedRuntime::find_callee_info_helper() which calls
1490   // LinkResolver::resolve_continuation_enter() which resolves the call to
1491   // Continuation.enter(Continuation c, boolean isContinue).
1492   __ call(resolve);
1493 
1494   oop_maps->add_gc_map(__ pc() - start, map);
1495   __ post_call_nop();
1496 
1497   __ jmpb(L_exit);
1498 
1499   // --- Thawing path
1500 
1501   __ bind(L_thaw);
1502 
1503   ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1504   __ call(RuntimeAddress(StubRoutines::cont_thaw()));
1505 
1506   ContinuationEntry::_return_pc_offset = __ pc() - start;
1507   oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1508   __ post_call_nop();
1509 
1510   // --- Normal exit (resolve/thawing)
1511 
1512   __ bind(L_exit);
1513 
1514   continuation_enter_cleanup(masm);
1515   __ pop(rbp);
1516   __ ret(0);
1517 
1518   // --- Exception handling path
1519 
1520   exception_offset = __ pc() - start;
1521 
1522   continuation_enter_cleanup(masm);
1523   __ pop(rbp);

1586   continuation_enter_cleanup(masm);
1587   __ pop(rbp);
1588   __ ret(0);
1589 
1590   __ bind(L_pinned);
1591 
1592   // Pinned, return to caller
1593 
1594   // handle pending exception thrown by freeze
1595   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
1596   Label ok;
1597   __ jcc(Assembler::equal, ok);
1598   __ leave();
1599   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1600   __ bind(ok);
1601 
1602   __ leave();
1603   __ ret(0);
1604 }
1605 
1606 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1607   ::continuation_enter_cleanup(masm);
1608 }
1609 
1610 static void gen_special_dispatch(MacroAssembler* masm,
1611                                  const methodHandle& method,
1612                                  const BasicType* sig_bt,
1613                                  const VMRegPair* regs) {
1614   verify_oop_args(masm, method, sig_bt, regs);
1615   vmIntrinsics::ID iid = method->intrinsic_id();
1616 
1617   // Now write the args into the outgoing interpreter space
1618   bool     has_receiver   = false;
1619   Register receiver_reg   = noreg;
1620   int      member_arg_pos = -1;
1621   Register member_reg     = noreg;
1622   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1623   if (ref_kind != 0) {
1624     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1625     member_reg = rbx;  // known to be free at this point
1626     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1627   } else if (iid == vmIntrinsics::_invokeBasic) {
1628     has_receiver = true;
1629   } else if (iid == vmIntrinsics::_linkToNative) {

2161 
2162       // Test if the oopMark is an obvious stack pointer, i.e.,
2163       //  1) (mark & 3) == 0, and
2164       //  2) rsp <= mark < mark + os::pagesize()
2165       // These 3 tests can be done by evaluating the following
2166       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2167       // assuming both stack pointer and pagesize have their
2168       // least significant 2 bits clear.
2169       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2170 
2171       __ subptr(swap_reg, rsp);
2172       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2173 
2174       // Save the test result, for recursive case, the result is zero
2175       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2176       __ jcc(Assembler::notEqual, slow_path_lock);
2177     } else {
2178       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2179       __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2180     }
2181     __ jmp (lock_done);
2182 
2183     __ bind(count_mon);
2184     __ inc_held_monitor_count();
2185 
2186     // Slow path will re-enter here
2187     __ bind(lock_done);
2188   }
2189 
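The subptr/andptr pair above (lines 2171-2172) folds the three stack-lock checks from the comment into a single mask test. A standalone sketch of why the trick works, in plain C++ with an illustrative PAGE_SIZE constant standing in for os::vm_page_size() and assuming, as the comment does, that rsp and the page size have their low two bits clear:

  #include <cstdint>
  #include <cassert>

  constexpr uintptr_t PAGE_SIZE = 4096;   // stand-in for os::vm_page_size()

  // Returns true iff 'mark' looks like a 4-byte-aligned address within one
  // page above 'rsp' -- the "obvious stack pointer" case in the stub.
  bool looks_like_stack_lock(uintptr_t mark, uintptr_t rsp) {
    // The three conditions written out separately:
    bool separate = ((mark & 3) == 0) && (rsp <= mark) && (mark < rsp + PAGE_SIZE);

    // The combined form the stub emits: one subtract, one AND.
    // 3 - PAGE_SIZE, viewed as an unsigned word, has every bit set except
    // bits 2..11, so the AND is zero exactly when (mark - rsp) is 4-aligned
    // and smaller than PAGE_SIZE (a borrow from mark < rsp sets high bits).
    bool combined = (((mark - rsp) & (3 - PAGE_SIZE)) == 0);

    assert(separate == combined);  // holds when rsp is 4-aligned and rsp + PAGE_SIZE does not wrap
    return combined;
  }
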
2190   // Finally just about ready to make the JNI call
2191 
2192   // get JNIEnv* which is first argument to native
2193   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2194 
2195   // Now set thread in native
2196   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2197 
2198   __ call(RuntimeAddress(native_func));
2199 
2200   // Verify or restore cpu control state after JNI call
2201   __ restore_cpu_control_state_after_jni(rscratch1);
2202 

2278   __ jcc(Assembler::equal, reguard);
2279   __ bind(reguard_done);
2280 
2281   // native result if any is live
2282 
2283   // Unlock
2284   Label slow_path_unlock;
2285   Label unlock_done;
2286   if (method->is_synchronized()) {
2287 
2288     Label fast_done;
2289 
2290     // Get locked oop from the handle we passed to jni
2291     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2292 
2293     if (LockingMode == LM_LEGACY) {
2294       Label not_recur;
2295       // Simple recursive lock?
2296       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
2297       __ jcc(Assembler::notEqual, not_recur);

2298       __ jmpb(fast_done);
2299       __ bind(not_recur);
2300     }
2301 
2302     // Must save rax if it is live now because cmpxchg must use it
2303     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2304       save_native_result(masm, ret_type, stack_slots);
2305     }
2306 
2307     if (LockingMode == LM_MONITOR) {
2308       __ jmp(slow_path_unlock);
2309     } else if (LockingMode == LM_LEGACY) {
2310       // get address of the stack lock
2311       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2312       //  get old displaced header
2313       __ movptr(old_hdr, Address(rax, 0));
2314 
2315       // Atomic swap old header if oop still contains the stack lock
2316       __ lock();
2317       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2318       __ jcc(Assembler::notEqual, slow_path_unlock);
2319       __ dec_held_monitor_count();
2320     } else {
2321       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2322       __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);

2323     }
2324 
2325     // slow path re-enters here
2326     __ bind(unlock_done);
2327     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2328       restore_native_result(masm, ret_type, stack_slots);
2329     }
2330 
2331     __ bind(fast_done);
2332   }
2333   {
2334     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2335     save_native_result(masm, ret_type, stack_slots);
2336     __ mov_metadata(c_rarg1, method());
2337     __ call_VM_leaf(
2338          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2339          r15_thread, c_rarg1);
2340     restore_native_result(masm, ret_type, stack_slots);
2341   }
2342 

2378   // and forward the exception
2379   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2380 
2381   // Slow path locking & unlocking
2382   if (method->is_synchronized()) {
2383 
2384     // BEGIN Slow path lock
2385     __ bind(slow_path_lock);
2386 
2387     // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2388     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2389 
2390     // protect the args we've loaded
2391     save_args(masm, total_c_args, c_arg, out_regs);
2392 
2393     __ mov(c_rarg0, obj_reg);
2394     __ mov(c_rarg1, lock_reg);
2395     __ mov(c_rarg2, r15_thread);
2396 
2397     // Not a leaf but we have last_Java_frame setup as we want
2398     // Force freeze slow path on ObjectMonitor::enter() for now which will fail with freeze_pinned_native.
2399     __ push_cont_fastpath();
2400     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2401     __ pop_cont_fastpath();
2402     restore_args(masm, total_c_args, c_arg, out_regs);
2403 
2404 #ifdef ASSERT
2405     { Label L;
2406     __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2407     __ jcc(Assembler::equal, L);
2408     __ stop("no pending exception allowed on exit from monitorenter");
2409     __ bind(L);
2410     }
2411 #endif
2412     __ jmp(lock_done);
2413 
2414     // END Slow path lock
2415 
2416     // BEGIN Slow path unlock
2417     __ bind(slow_path_unlock);
2418 
2419     // If we haven't already saved the native result we must save it now as xmm registers
2420     // are still exposed.
2421     __ vzeroupper();

2496 // this function returns the adjust size (in number of words) to a c2i adapter
2497 // activation for use during deoptimization
2498 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2499   return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2500 }
2501 
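A quick worked instance of the formula above, with illustrative numbers (assuming Interpreter::stackElementWords == 1, as on x86_64 where each interpreter stack element is one machine word):

  last_frame_adjust(3, 7) = (7 - 3) * 1 = 4 words

i.e. the deoptimized interpreter activation needs room for the four locals that are not already covered by incoming parameters.
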
2502 
2503 uint SharedRuntime::out_preserve_stack_slots() {
2504   return 0;
2505 }
2506 
2507 
2508 // Number of stack slots between incoming argument block and the start of
2509 // a new frame.  The PROLOG must add this many slots to the stack.  The
2510 // EPILOG must remove this many slots.  amd64 needs two slots for
2511 // return address.
2512 uint SharedRuntime::in_preserve_stack_slots() {
2513   return 4 + 2 * VerifyStackAtCalls;
2514 }
2515 
2516 VMReg SharedRuntime::thread_register() {
2517   return r15_thread->as_VMReg();
2518 }
2519 
2520 //------------------------------generate_deopt_blob----------------------------
2521 void SharedRuntime::generate_deopt_blob() {
2522   // Allocate space for the code
2523   ResourceMark rm;
2524   // Setup code generation tools
2525   int pad = 0;
2526   if (UseAVX > 2) {
2527     pad += 1024;
2528   }
2529 #if INCLUDE_JVMCI
2530   if (EnableJVMCI) {
2531     pad += 512; // Increase the buffer size when compiling for JVMCI
2532   }
2533 #endif
2534   CodeBuffer buffer("deopt_blob", 2560+pad, 1024);
2535   MacroAssembler* masm = new MacroAssembler(&buffer);
2536   int frame_size_in_words;
2537   OopMap* map = nullptr;
2538   OopMapSet *oop_maps = new OopMapSet();
2539 