154 flags_off, flagsH_off,
155 // The frame sender code expects that rbp will be in the "natural" place and
156 // will override any oopMap setting for it. We must therefore force the layout
157 // so that it agrees with the frame sender code.
158 rbp_off, rbpH_off, // copy of rbp we will restore
159 return_off, returnH_off, // slot for return address
160 reg_save_size // size in compiler stack slots
161 };
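// Note: each 64-bit register appears to be saved in a pair of 32-bit compiler stack slots (the
// *_off / *H_off entries above); the *_offset_in_bytes() accessors below turn a slot index into a
// byte offset by multiplying with BytesPerInt.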
162
163 public:
164 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors);
165 static void restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors = false);
166
167 // Offsets into the register save area
168 // Used by deoptimization when it is managing result register
169 // values on its own
170
171 static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
172 static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
173 static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
174 static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
175 static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
176
177 // During deoptimization only the result registers need to be restored,
178 // all the other values have already been extracted.
179 static void restore_result_registers(MacroAssembler* masm);
180 };
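// Rough usage sketch, inferred from the comments above: the deoptimization and uncommon-trap
// blobs call save_live_registers() to spill state and obtain an OopMap for the safepoint, reach
// individual saved values via the *_offset_in_bytes() accessors, and later call
// restore_result_registers() or restore_live_registers() before resuming.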
181
182 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
183 int off = 0;
184 int num_xmm_regs = XMMRegister::available_xmm_registers();
185 #if COMPILER2_OR_JVMCI
186 if (save_wide_vectors && UseAVX == 0) {
187 save_wide_vectors = false; // vectors larger than 16 bytes are supported only with AVX
188 }
189 assert(!save_wide_vectors || MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
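// (64 bytes is the width of a single ZMM register under AVX-512.)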
190 #else
191 save_wide_vectors = false; // vectors are generated only by C2 and JVMCI
192 #endif
193
1403 __ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
1404 __ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
1405 __ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
1406 __ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);
1407
1408 __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
1409 __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
1410 }
1411
1412 //---------------------------- continuation_enter_cleanup ---------------------------
1413 //
1414 // Arguments:
1415 // rsp: pointer to the ContinuationEntry
1416 //
1417 // Results:
1418 // rsp: pointer to the spilled rbp in the entry frame
1419 //
1420 // Kills:
1421 // rbx
1422 //
1423 static void continuation_enter_cleanup(MacroAssembler* masm) {
1424 #ifdef ASSERT
1425 Label L_good_sp;
1426 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
1427 __ jcc(Assembler::equal, L_good_sp);
1428 __ stop("Incorrect rsp at continuation_enter_cleanup");
1429 __ bind(L_good_sp);
1430 #endif
1431 __ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
1432 __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);
1433
1434 if (CheckJNICalls) {
1435 // Check if this is a virtual thread continuation
1436 Label L_skip_vthread_code;
1437 __ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
1438 __ jcc(Assembler::equal, L_skip_vthread_code);
1439
1440 // If the held monitor count is > 0 and this vthread is terminating then
1441 // it failed to release a JNI monitor. So we issue the same log message
1442 // that JavaThread::exit does.
1443 __ cmpptr(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
1593 address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
1594 if (stub == nullptr) {
1595 fatal("CodeCache is full at gen_continuation_enter");
1596 }
1597
1598 // The call needs to be resolved. There's a special case for this in
1599 // SharedRuntime::find_callee_info_helper() which calls
1600 // LinkResolver::resolve_continuation_enter() which resolves the call to
1601 // Continuation.enter(Continuation c, boolean isContinue).
1602 __ call(resolve);
1603
1604 oop_maps->add_gc_map(__ pc() - start, map);
1605 __ post_call_nop();
1606
1607 __ jmpb(L_exit);
1608
1609 // --- Thawing path
1610
1611 __ bind(L_thaw);
1612
1613 __ call(RuntimeAddress(StubRoutines::cont_thaw()));
1614
1615 ContinuationEntry::_return_pc_offset = __ pc() - start;
1616 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1617 __ post_call_nop();
1618
1619 // --- Normal exit (resolve/thawing)
1620
1621 __ bind(L_exit);
1622
1623 continuation_enter_cleanup(masm);
1624 __ pop(rbp);
1625 __ ret(0);
1626
1627 // --- Exception handling path
1628
1629 exception_offset = __ pc() - start;
1630
1631 continuation_enter_cleanup(masm);
1632 __ pop(rbp);
1633
1634 __ movptr(c_rarg0, r15_thread);
1635 __ movptr(c_rarg1, Address(rsp, 0)); // return address
1636
1637 // rax still holds the original exception oop, save it before the call
1638 __ push(rax);
1639
1640 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
1641 __ movptr(rbx, rax);
1642
1695 continuation_enter_cleanup(masm);
1696 __ pop(rbp);
1697 __ ret(0);
1698
1699 __ bind(L_pinned);
1700
1701 // Pinned, return to caller
1702
1703 // handle pending exception thrown by freeze
1704 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
1705 Label ok;
1706 __ jcc(Assembler::equal, ok);
1707 __ leave();
1708 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1709 __ bind(ok);
1710
1711 __ leave();
1712 __ ret(0);
1713 }
1714
1715 static void gen_special_dispatch(MacroAssembler* masm,
1716 const methodHandle& method,
1717 const BasicType* sig_bt,
1718 const VMRegPair* regs) {
1719 verify_oop_args(masm, method, sig_bt, regs);
1720 vmIntrinsics::ID iid = method->intrinsic_id();
1721
1722 // Now write the args into the outgoing interpreter space
1723 bool has_receiver = false;
1724 Register receiver_reg = noreg;
1725 int member_arg_pos = -1;
1726 Register member_reg = noreg;
1727 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1728 if (ref_kind != 0) {
1729 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1730 member_reg = rbx; // known to be free at this point
1731 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1732 } else if (iid == vmIntrinsics::_invokeBasic) {
1733 has_receiver = true;
1734 } else if (iid == vmIntrinsics::_linkToNative) {
2163 // load oop into a register
2164 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2165
2166 // Now handlize the static class mirror; it's known to be non-null.
2167 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2168 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2169
2170 // Now get the handle
2171 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2172 // store the klass handle as second argument
2173 __ movptr(c_rarg1, oop_handle_reg);
2174 // and protect the arg if we must spill
2175 c_arg--;
2176 }
2177
2178 // Change state to native (we save the return address in the thread, since it might not
2179 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2180 // points into the right code segment. It does not have to be the correct return pc.
2181 // We use the same pc/oopMap repeatedly when we call out
2182
2183 intptr_t the_pc = (intptr_t) __ pc();
2184 oop_maps->add_gc_map(the_pc - start, map);
2185
2186 __ set_last_Java_frame(rsp, noreg, (address)the_pc, rscratch1);
2187
2188
2189 // We have all of the arguments set up at this point. We must not touch any of the
2190 // argument registers at this point (if we were to save/restore them here, there are no oop maps describing them).
2191
2192 if (DTraceMethodProbes) {
2193 // protect the args we've loaded
2194 save_args(masm, total_c_args, c_arg, out_regs);
2195 __ mov_metadata(c_rarg1, method());
2196 __ call_VM_leaf(
2197 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2198 r15_thread, c_rarg1);
2199 restore_args(masm, total_c_args, c_arg, out_regs);
2200 }
2201
2202 // RedefineClasses() tracing support for obsolete method entry
2203 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2204 // protect the args we've loaded
2205 save_args(masm, total_c_args, c_arg, out_regs);
2206 __ mov_metadata(c_rarg1, method());
2207 __ call_VM_leaf(
2254 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2255 __ jcc(Assembler::equal, count_mon);
2256
2257 // Hmm should this move to the slow path code area???
2258
2259 // Test if the oopMark is an obvious stack pointer, i.e.,
2260 // 1) (mark & 3) == 0, and
2261 // 2) rsp <= mark < rsp + os::vm_page_size()
2262 // These 3 tests can be done by evaluating the following
2263 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2264 // assuming both stack pointer and pagesize have their
2265 // least significant 2 bits clear.
2266 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
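// For illustration, assuming a 4K page: 3 - 4096 == -4093 == ...F003, so the AND below yields
// zero exactly when 0 <= (mark - rsp) < 4096 and the low two bits of (mark - rsp) are clear,
// i.e. the mark is an aligned address at most one page above rsp.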
2267
2268 __ subptr(swap_reg, rsp);
2269 __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2270
2271 // Save the test result; for the recursive case, the result is zero
2272 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2273 __ jcc(Assembler::notEqual, slow_path_lock);
2274 } else {
2275 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2276 __ lightweight_lock(lock_reg, obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2277 }
2278 __ bind(count_mon);
2279 __ inc_held_monitor_count();
2280
2281 // Slow path will re-enter here
2282 __ bind(lock_done);
2283 }
2284
2285 // Finally just about ready to make the JNI call
2286
2287 // get JNIEnv* which is first argument to native
2288 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2289
2290 // Now set thread in native
2291 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2292
2293 __ call(RuntimeAddress(native_func));
2294
2295 // Verify or restore cpu control state after JNI call
2296 __ restore_cpu_control_state_after_jni(rscratch1);
2297
2298 // Unpack native results.
2299 switch (ret_type) {
2350 // by hand.
2351 //
2352 __ vzeroupper();
2353 save_native_result(masm, ret_type, stack_slots);
2354 __ mov(c_rarg0, r15_thread);
2355 __ mov(r12, rsp); // remember sp
2356 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2357 __ andptr(rsp, -16); // align stack as required by ABI
2358 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2359 __ mov(rsp, r12); // restore sp
2360 __ reinit_heapbase();
2361 // Restore any method result value
2362 restore_native_result(masm, ret_type, stack_slots);
2363 __ bind(Continue);
2364 }
2365
2366 // change thread state
2367 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2368 __ bind(after_transition);
2369
2370 Label reguard;
2371 Label reguard_done;
2372 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2373 __ jcc(Assembler::equal, reguard);
2374 __ bind(reguard_done);
2375
2376 // native result if any is live
2377
2378 // Unlock
2379 Label slow_path_unlock;
2380 Label unlock_done;
2381 if (method->is_synchronized()) {
2382
2383 Label fast_done;
2384
2385 // Get locked oop from the handle we passed to jni
2386 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2387
2388 if (LockingMode == LM_LEGACY) {
2389 Label not_recur;
2399 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2400 save_native_result(masm, ret_type, stack_slots);
2401 }
2402
2403 if (LockingMode == LM_MONITOR) {
2404 __ jmp(slow_path_unlock);
2405 } else if (LockingMode == LM_LEGACY) {
2406 // get address of the stack lock
2407 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2408 // get old displaced header
2409 __ movptr(old_hdr, Address(rax, 0));
2410
2411 // Atomic swap old header if oop still contains the stack lock
2412 __ lock();
2413 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2414 __ jcc(Assembler::notEqual, slow_path_unlock);
2415 __ dec_held_monitor_count();
2416 } else {
2417 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2418 __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
2419 __ dec_held_monitor_count();
2420 }
2421
2422 // slow path re-enters here
2423 __ bind(unlock_done);
2424 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2425 restore_native_result(masm, ret_type, stack_slots);
2426 }
2427
2428 __ bind(fast_done);
2429 }
2430 if (DTraceMethodProbes) {
2431 save_native_result(masm, ret_type, stack_slots);
2432 __ mov_metadata(c_rarg1, method());
2433 __ call_VM_leaf(
2434 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2435 r15_thread, c_rarg1);
2436 restore_native_result(masm, ret_type, stack_slots);
2437 }
2438
2439 __ reset_last_Java_frame(false);
2473
2474 // and forward the exception
2475 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2476
2477 // Slow path locking & unlocking
2478 if (method->is_synchronized()) {
2479
2480 // BEGIN Slow path lock
2481 __ bind(slow_path_lock);
2482
2483 // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM.
2484 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2485
2486 // protect the args we've loaded
2487 save_args(masm, total_c_args, c_arg, out_regs);
2488
2489 __ mov(c_rarg0, obj_reg);
2490 __ mov(c_rarg1, lock_reg);
2491 __ mov(c_rarg2, r15_thread);
2492
2493 // Not a leaf but we have last_Java_frame setup as we want
2494 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2495 restore_args(masm, total_c_args, c_arg, out_regs);
2496
2497 #ifdef ASSERT
2498 { Label L;
2499 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2500 __ jcc(Assembler::equal, L);
2501 __ stop("no pending exception allowed on exit from monitorenter");
2502 __ bind(L);
2503 }
2504 #endif
2505 __ jmp(lock_done);
2506
2507 // END Slow path lock
2508
2509 // BEGIN Slow path unlock
2510 __ bind(slow_path_unlock);
2511
2512 // If we haven't already saved the native result we must save it now as xmm registers
2513 // are still exposed.
2514 __ vzeroupper();
2589 // This function returns the adjustment (in number of words) applied to a c2i adapter
2590 // activation, for use during deoptimization.
2591 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2592 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2593 }
2594
2595
2596 uint SharedRuntime::out_preserve_stack_slots() {
2597 return 0;
2598 }
2599
2600
2601 // Number of stack slots between incoming argument block and the start of
2602 // a new frame. The PROLOG must add this many slots to the stack. The
2603 // EPILOG must remove this many slots. amd64 needs two slots for
2604 // return address.
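// The four preserved slots presumably cover the return address and the saved rbp (two 32-bit
// slots each); VerifyStackAtCalls appears to account for the extra canary word pushed for stack
// verification.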
2605 uint SharedRuntime::in_preserve_stack_slots() {
2606 return 4 + 2 * VerifyStackAtCalls;
2607 }
2608
2609 //------------------------------generate_deopt_blob----------------------------
2610 void SharedRuntime::generate_deopt_blob() {
2611 // Allocate space for the code
2612 ResourceMark rm;
2613 // Setup code generation tools
2614 int pad = 0;
2615 if (UseAVX > 2) {
2616 pad += 1024;
2617 }
2618 if (UseAPX) {
2619 pad += 1024;
2620 }
2621 #if INCLUDE_JVMCI
2622 if (EnableJVMCI) {
2623 pad += 512; // Increase the buffer size when compiling for JVMCI
2624 }
2625 #endif
2626 const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
2627 CodeBuffer buffer(name, 2560+pad, 1024);
2628 MacroAssembler* masm = new MacroAssembler(&buffer);
154 flags_off, flagsH_off,
155 // The frame sender code expects that rbp will be in the "natural" place and
156 // will override any oopMap setting for it. We must therefore force the layout
157 // so that it agrees with the frame sender code.
158 rbp_off, rbpH_off, // copy of rbp we will restore
159 return_off, returnH_off, // slot for return address
160 reg_save_size // size in compiler stack slots
161 };
162
163 public:
164 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors);
165 static void restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors = false);
166
167 // Offsets into the register save area
168 // Used by deoptimization when it is managing result register
169 // values on its own
170
171 static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
172 static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
173 static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
174 static int r15_offset_in_bytes(void) { return BytesPerInt * r15_off; }
175 static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
176 static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
177
178 // During deoptimization only the result registers need to be restored,
179 // all the other values have already been extracted.
180 static void restore_result_registers(MacroAssembler* masm);
181 };
182
183 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
184 int off = 0;
185 int num_xmm_regs = XMMRegister::available_xmm_registers();
186 #if COMPILER2_OR_JVMCI
187 if (save_wide_vectors && UseAVX == 0) {
188 save_wide_vectors = false; // vectors larger than 16 bytes are supported only with AVX
189 }
190 assert(!save_wide_vectors || MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
191 #else
192 save_wide_vectors = false; // vectors are generated only by C2 and JVMCI
193 #endif
194
1404 __ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
1405 __ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
1406 __ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
1407 __ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);
1408
1409 __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
1410 __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
1411 }
1412
1413 //---------------------------- continuation_enter_cleanup ---------------------------
1414 //
1415 // Arguments:
1416 // rsp: pointer to the ContinuationEntry
1417 //
1418 // Results:
1419 // rsp: pointer to the spilled rbp in the entry frame
1420 //
1421 // Kills:
1422 // rbx
1423 //
1424 static void continuation_enter_cleanup(MacroAssembler* masm) {
1425 #ifdef ASSERT
1426 Label L_good_sp;
1427 __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
1428 __ jcc(Assembler::equal, L_good_sp);
1429 __ stop("Incorrect rsp at continuation_enter_cleanup");
1430 __ bind(L_good_sp);
1431 #endif
1432 __ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
1433 __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);
1434
1435 if (CheckJNICalls) {
1436 // Check if this is a virtual thread continuation
1437 Label L_skip_vthread_code;
1438 __ cmpl(Address(rsp, ContinuationEntry::flags_offset()), 0);
1439 __ jcc(Assembler::equal, L_skip_vthread_code);
1440
1441 // If the held monitor count is > 0 and this vthread is terminating then
1442 // it failed to release a JNI monitor. So we issue the same log message
1443 // that JavaThread::exit does.
1444 __ cmpptr(Address(r15_thread, JavaThread::jni_monitor_count_offset()), 0);
1594 address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
1595 if (stub == nullptr) {
1596 fatal("CodeCache is full at gen_continuation_enter");
1597 }
1598
1599 // The call needs to be resolved. There's a special case for this in
1600 // SharedRuntime::find_callee_info_helper() which calls
1601 // LinkResolver::resolve_continuation_enter() which resolves the call to
1602 // Continuation.enter(Continuation c, boolean isContinue).
1603 __ call(resolve);
1604
1605 oop_maps->add_gc_map(__ pc() - start, map);
1606 __ post_call_nop();
1607
1608 __ jmpb(L_exit);
1609
1610 // --- Thawing path
1611
1612 __ bind(L_thaw);
1613
1614 ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
1615 __ call(RuntimeAddress(StubRoutines::cont_thaw()));
1616
1617 ContinuationEntry::_return_pc_offset = __ pc() - start;
1618 oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
1619 __ post_call_nop();
1620
1621 // --- Normal exit (resolve/thawing)
1622
1623 __ bind(L_exit);
1624 ContinuationEntry::_cleanup_offset = __ pc() - start;
1625 continuation_enter_cleanup(masm);
1626 __ pop(rbp);
1627 __ ret(0);
1628
1629 // --- Exception handling path
1630
1631 exception_offset = __ pc() - start;
1632
1633 continuation_enter_cleanup(masm);
1634 __ pop(rbp);
1635
1636 __ movptr(c_rarg0, r15_thread);
1637 __ movptr(c_rarg1, Address(rsp, 0)); // return address
1638
1639 // rax still holds the original exception oop, save it before the call
1640 __ push(rax);
1641
1642 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), 2);
1643 __ movptr(rbx, rax);
1644
1697 continuation_enter_cleanup(masm);
1698 __ pop(rbp);
1699 __ ret(0);
1700
1701 __ bind(L_pinned);
1702
1703 // Pinned, return to caller
1704
1705 // handle pending exception thrown by freeze
1706 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
1707 Label ok;
1708 __ jcc(Assembler::equal, ok);
1709 __ leave();
1710 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1711 __ bind(ok);
1712
1713 __ leave();
1714 __ ret(0);
1715 }
1716
1717 void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
1718 ::continuation_enter_cleanup(masm);
1719 }
1720
1721 static void gen_special_dispatch(MacroAssembler* masm,
1722 const methodHandle& method,
1723 const BasicType* sig_bt,
1724 const VMRegPair* regs) {
1725 verify_oop_args(masm, method, sig_bt, regs);
1726 vmIntrinsics::ID iid = method->intrinsic_id();
1727
1728 // Now write the args into the outgoing interpreter space
1729 bool has_receiver = false;
1730 Register receiver_reg = noreg;
1731 int member_arg_pos = -1;
1732 Register member_reg = noreg;
1733 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1734 if (ref_kind != 0) {
1735 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1736 member_reg = rbx; // known to be free at this point
1737 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1738 } else if (iid == vmIntrinsics::_invokeBasic) {
1739 has_receiver = true;
1740 } else if (iid == vmIntrinsics::_linkToNative) {
2169 // load oop into a register
2170 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2171
2172 // Now handlize the static class mirror; it's known to be non-null.
2173 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2174 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2175
2176 // Now get the handle
2177 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2178 // store the klass handle as second argument
2179 __ movptr(c_rarg1, oop_handle_reg);
2180 // and protect the arg if we must spill
2181 c_arg--;
2182 }
2183
2184 // Change state to native (we save the return address in the thread, since it might not
2185 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2186 // points into the right code segment. It does not have to be the correct return pc.
2187 // We use the same pc/oopMap repeatedly when we call out
2188
2189 Label native_return;
2190 if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
2191 // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
2192 __ set_last_Java_frame(rsp, noreg, native_return, rscratch1);
2193 } else {
2194 intptr_t the_pc = (intptr_t) __ pc();
2195 oop_maps->add_gc_map(the_pc - start, map);
2196
2197 __ set_last_Java_frame(rsp, noreg, __ pc(), rscratch1);
2198 }
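// Note: in the Object.wait0 case the oop map is registered later, at the native_return label
// bound after the thread-state transition, so the recorded last Java pc and the gc-map pc
// coincide with the resume point used on preemption.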
2199
2200 // We have all of the arguments set up at this point. We must not touch any of the
2201 // argument registers at this point (if we were to save/restore them here, there are no oop maps describing them).
2202
2203 if (DTraceMethodProbes) {
2204 // protect the args we've loaded
2205 save_args(masm, total_c_args, c_arg, out_regs);
2206 __ mov_metadata(c_rarg1, method());
2207 __ call_VM_leaf(
2208 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2209 r15_thread, c_rarg1);
2210 restore_args(masm, total_c_args, c_arg, out_regs);
2211 }
2212
2213 // RedefineClasses() tracing support for obsolete method entry
2214 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2215 // protect the args we've loaded
2216 save_args(masm, total_c_args, c_arg, out_regs);
2217 __ mov_metadata(c_rarg1, method());
2218 __ call_VM_leaf(
2265 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2266 __ jcc(Assembler::equal, count_mon);
2267
2268 // Hmm should this move to the slow path code area???
2269
2270 // Test if the oopMark is an obvious stack pointer, i.e.,
2271 // 1) (mark & 3) == 0, and
2272 // 2) rsp <= mark < rsp + os::vm_page_size()
2273 // These 3 tests can be done by evaluating the following
2274 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2275 // assuming both stack pointer and pagesize have their
2276 // least significant 2 bits clear.
2277 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2278
2279 __ subptr(swap_reg, rsp);
2280 __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2281
2282 // Save the test result, for recursive case, the result is zero
2283 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2284 __ jcc(Assembler::notEqual, slow_path_lock);
2285
2286 __ bind(count_mon);
2287 __ inc_held_monitor_count();
2288 } else {
2289 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2290 __ lightweight_lock(lock_reg, obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2291 }
2292
2293 // Slow path will re-enter here
2294 __ bind(lock_done);
2295 }
2296
2297 // Finally just about ready to make the JNI call
2298
2299 // get JNIEnv* which is first argument to native
2300 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2301
2302 // Now set thread in native
2303 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2304
2305 __ call(RuntimeAddress(native_func));
2306
2307 // Verify or restore cpu control state after JNI call
2308 __ restore_cpu_control_state_after_jni(rscratch1);
2309
2310 // Unpack native results.
2311 switch (ret_type) {
2362 // by hand.
2363 //
2364 __ vzeroupper();
2365 save_native_result(masm, ret_type, stack_slots);
2366 __ mov(c_rarg0, r15_thread);
2367 __ mov(r12, rsp); // remember sp
2368 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2369 __ andptr(rsp, -16); // align stack as required by ABI
2370 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2371 __ mov(rsp, r12); // restore sp
2372 __ reinit_heapbase();
2373 // Restore any method result value
2374 restore_native_result(masm, ret_type, stack_slots);
2375 __ bind(Continue);
2376 }
2377
2378 // change thread state
2379 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2380 __ bind(after_transition);
2381
2382 if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
2383 // Check preemption for Object.wait()
2384 __ movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
2385 __ cmpptr(rscratch1, NULL_WORD);
2386 __ jccb(Assembler::equal, native_return);
2387 __ movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
2388 __ jmp(rscratch1);
2389 __ bind(native_return);
2390
2391 intptr_t the_pc = (intptr_t) __ pc();
2392 oop_maps->add_gc_map(the_pc - start, map);
2393 }
2394
2395
2396 Label reguard;
2397 Label reguard_done;
2398 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2399 __ jcc(Assembler::equal, reguard);
2400 __ bind(reguard_done);
2401
2402 // native result if any is live
2403
2404 // Unlock
2405 Label slow_path_unlock;
2406 Label unlock_done;
2407 if (method->is_synchronized()) {
2408
2409 Label fast_done;
2410
2411 // Get locked oop from the handle we passed to jni
2412 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2413
2414 if (LockingMode == LM_LEGACY) {
2415 Label not_recur;
2425 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2426 save_native_result(masm, ret_type, stack_slots);
2427 }
2428
2429 if (LockingMode == LM_MONITOR) {
2430 __ jmp(slow_path_unlock);
2431 } else if (LockingMode == LM_LEGACY) {
2432 // get address of the stack lock
2433 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2434 // get old displaced header
2435 __ movptr(old_hdr, Address(rax, 0));
2436
2437 // Atomic swap old header if oop still contains the stack lock
2438 __ lock();
2439 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2440 __ jcc(Assembler::notEqual, slow_path_unlock);
2441 __ dec_held_monitor_count();
2442 } else {
2443 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2444 __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
2445 }
2446
2447 // slow path re-enters here
2448 __ bind(unlock_done);
2449 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2450 restore_native_result(masm, ret_type, stack_slots);
2451 }
2452
2453 __ bind(fast_done);
2454 }
2455 if (DTraceMethodProbes) {
2456 save_native_result(masm, ret_type, stack_slots);
2457 __ mov_metadata(c_rarg1, method());
2458 __ call_VM_leaf(
2459 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2460 r15_thread, c_rarg1);
2461 restore_native_result(masm, ret_type, stack_slots);
2462 }
2463
2464 __ reset_last_Java_frame(false);
2498
2499 // and forward the exception
2500 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2501
2502 // Slow path locking & unlocking
2503 if (method->is_synchronized()) {
2504
2505 // BEGIN Slow path lock
2506 __ bind(slow_path_lock);
2507
2508 // We have last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM.
2509 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2510
2511 // protect the args we've loaded
2512 save_args(masm, total_c_args, c_arg, out_regs);
2513
2514 __ mov(c_rarg0, obj_reg);
2515 __ mov(c_rarg1, lock_reg);
2516 __ mov(c_rarg2, r15_thread);
2517
2518 // Not a leaf but we have last_Java_frame setup as we want.
2519 // We don't want to unmount in case of contention since that would complicate preserving
2520 // the arguments that had already been marshalled into the native convention. So we force
2521 // the freeze slow path to find this native wrapper frame (see recurse_freeze_native_frame())
2522 // and pin the vthread. Otherwise the fast path won't find it since we don't walk the stack.
2523 __ push_cont_fastpath();
2524 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2525 __ pop_cont_fastpath();
2526 restore_args(masm, total_c_args, c_arg, out_regs);
2527
2528 #ifdef ASSERT
2529 { Label L;
2530 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2531 __ jcc(Assembler::equal, L);
2532 __ stop("no pending exception allowed on exit from monitorenter");
2533 __ bind(L);
2534 }
2535 #endif
2536 __ jmp(lock_done);
2537
2538 // END Slow path lock
2539
2540 // BEGIN Slow path unlock
2541 __ bind(slow_path_unlock);
2542
2543 // If we haven't already saved the native result we must save it now as xmm registers
2544 // are still exposed.
2545 __ vzeroupper();
2620 // This function returns the adjustment (in number of words) applied to a c2i adapter
2621 // activation, for use during deoptimization.
2622 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2623 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2624 }
2625
2626
2627 uint SharedRuntime::out_preserve_stack_slots() {
2628 return 0;
2629 }
2630
2631
2632 // Number of stack slots between incoming argument block and the start of
2633 // a new frame. The PROLOG must add this many slots to the stack. The
2634 // EPILOG must remove this many slots. amd64 needs two slots for
2635 // return address.
2636 uint SharedRuntime::in_preserve_stack_slots() {
2637 return 4 + 2 * VerifyStackAtCalls;
2638 }
2639
2640 VMReg SharedRuntime::thread_register() {
2641 return r15_thread->as_VMReg();
2642 }
2643
2644 //------------------------------generate_deopt_blob----------------------------
2645 void SharedRuntime::generate_deopt_blob() {
2646 // Allocate space for the code
2647 ResourceMark rm;
2648 // Setup code generation tools
2649 int pad = 0;
2650 if (UseAVX > 2) {
2651 pad += 1024;
2652 }
2653 if (UseAPX) {
2654 pad += 1024;
2655 }
2656 #if INCLUDE_JVMCI
2657 if (EnableJVMCI) {
2658 pad += 512; // Increase the buffer size when compiling for JVMCI
2659 }
2660 #endif
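// The extra padding is presumably needed because wider register files (AVX-512 ZMM state, APX
// extended GPRs) and JVMCI support make the register save/restore sequences emitted into this
// blob longer.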
2661 const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
2662 CodeBuffer buffer(name, 2560+pad, 1024);
2663 MacroAssembler* masm = new MacroAssembler(&buffer);