    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors);
  static void restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors = false);

  // Offsets into the register save area.
  // Used by deoptimization when it is managing result register
  // values on its own.

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int r15_offset_in_bytes(void)    { return BytesPerInt * r15_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};
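
// A minimal usage sketch (illustrative, not a call site from this file):
// save_live_registers() both emits the spill code and returns the OopMap
// describing where each register was saved, so the map can be attached to
// the pc of a following call. "some_runtime_entry" is a hypothetical callee.
//
//   int frame_size_in_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, false);
//   __ call(RuntimeAddress(some_runtime_entry));
//   oop_maps->add_gc_map(__ pc() - start, map);
//   RegisterSaver::restore_live_registers(masm);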

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
  int off = 0;
  int num_xmm_regs = XMMRegister::available_xmm_registers();
#if COMPILER2_OR_JVMCI
  if (save_wide_vectors && UseAVX == 0) {
    save_wide_vectors = false; // vectors longer than 16 bytes are supported only with AVX
  }
  assert(!save_wide_vectors || MaxVectorSize <= 64, "only vectors up to 64 bytes long are supported");
#else
  save_wide_vectors = false; // vectors are generated only by C2 and JVMCI
#endif

// ...

  __ movptr(rax, Address(r15_thread, JavaThread::cont_fastpath_offset()));
  __ movptr(Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()), rax);
  __ movq(rax, Address(r15_thread, JavaThread::held_monitor_count_offset()));
  __ movq(Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()), rax);

  __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
  __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), 0);
}

//---------------------------- continuation_enter_cleanup ---------------------------
//
// Arguments:
//   rsp: pointer to the ContinuationEntry
//
// Results:
//   rsp: pointer to the spilled rbp in the entry frame
//
// Kills:
//   rbx
//
static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifdef ASSERT
  Label L_good_sp;
  __ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
  __ jcc(Assembler::equal, L_good_sp);
  __ stop("Incorrect rsp at continuation_enter_cleanup");
  __ bind(L_good_sp);
#endif

  __ movptr(rbx, Address(rsp, ContinuationEntry::parent_cont_fastpath_offset()));
  __ movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rbx);
  __ movq(rbx, Address(rsp, ContinuationEntry::parent_held_monitor_count_offset()));
  __ movq(Address(r15_thread, JavaThread::held_monitor_count_offset()), rbx);

  __ movptr(rbx, Address(rsp, ContinuationEntry::parent_offset()));
  __ movptr(Address(r15_thread, JavaThread::cont_entry_offset()), rbx);
  __ addptr(rsp, checked_cast<int32_t>(ContinuationEntry::size()));
}
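
// For orientation, a sketch of the stack this cleanup assumes (grounded in
// the offsets used above; the field order inside the entry is illustrative):
//
//   rsp ->  +--------------------------------------+
//           | ContinuationEntry                    |
//           |   parent                             |
//           |   parent_cont_fastpath               |
//           |   parent_held_monitor_count          |
//           +--------------------------------------+  <- rsp after the addptr
//           | spilled rbp of the entry frame       |
//           +--------------------------------------+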

static void gen_continuation_enter(MacroAssembler* masm,
                                   const VMRegPair* regs,
// ...
  address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
  if (stub == nullptr) {
    fatal("CodeCache is full at gen_continuation_enter");
  }

  // The call needs to be resolved. There's a special case for this in
  // SharedRuntime::find_callee_info_helper(), which calls
  // LinkResolver::resolve_continuation_enter(), which resolves the call to
  // Continuation.enter(Continuation c, boolean isContinue).
  __ call(resolve);

  oop_maps->add_gc_map(__ pc() - start, map);
  __ post_call_nop();

  __ jmpb(L_exit);

  // --- Thawing path

  __ bind(L_thaw);

  ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
  __ call(RuntimeAddress(StubRoutines::cont_thaw()));

  ContinuationEntry::_return_pc_offset = __ pc() - start;
  oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
  __ post_call_nop();

  // --- Normal exit (resolve/thawing)

  __ bind(L_exit);

  continuation_enter_cleanup(masm);
  __ pop(rbp);
  __ ret(0);

  // --- Exception handling path

  exception_offset = __ pc() - start;

  continuation_enter_cleanup(masm);
  __ pop(rbp);

// ...
  continuation_enter_cleanup(masm);
  __ pop(rbp);
  __ ret(0);

  __ bind(L_pinned);

  // Pinned, return to caller

  // Handle a pending exception thrown by freeze
  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
  Label ok;
  __ jcc(Assembler::equal, ok);
  __ leave();
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  __ bind(ok);

  __ leave();
  __ ret(0);
}

void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 const methodHandle& method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool has_receiver = false;
  Register receiver_reg = noreg;
  int member_arg_pos = -1;
  Register member_reg = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = rbx;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else if (iid == vmIntrinsics::_linkToNative) {
// ...

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) rsp <= mark < rsp + os::pagesize()
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - rsp) & (3 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their
      // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
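      //
      // A worked example of the bit trick, assuming a 4096-byte page (the
      // concrete numbers below are illustrative, not from this file):
      // 3 - 4096 = -4093 = 0x...fffff003, so the andptr below keeps the low
      // two bits (the alignment test) and every bit at or above the page
      // size (the range test). mark - rsp = 0x28 gives 0x28 & 0x...f003 == 0
      // (aligned and within a page); 0x1028 gives 0x1000 != 0 (more than a
      // page away); 0x2a gives 0x2 != 0 (misaligned); a negative difference
      // (mark below rsp) sets the high bits and also fails.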

      __ subptr(swap_reg, rsp);
      __ andptr(swap_reg, 3 - (int)os::vm_page_size());

      // Save the test result; for the recursive case the result is zero
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
      __ jcc(Assembler::notEqual, slow_path_lock);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
      __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
    }
    __ jmp(lock_done);

    __ bind(count_mon);
    __ inc_held_monitor_count();

    // Slow path will re-enter here
    __ bind(lock_done);
  }

  // Finally just about ready to make the JNI call

  // Get JNIEnv*, which is the first argument to the native call
  __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));

  // Now set thread in native
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(native_func));

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni(rscratch1);

// ...
  __ jcc(Assembler::equal, reguard);
  __ bind(reguard_done);

  // The native result, if any, is live here

  // Unlock
  Label slow_path_unlock;
  Label unlock_done;
  if (method->is_synchronized()) {

    Label fast_done;

    // Get the locked oop from the handle we passed to JNI
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    if (LockingMode == LM_LEGACY) {
      Label not_recur;
      // Simple recursive lock?
      __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
      __ jcc(Assembler::notEqual, not_recur);
      __ jmpb(fast_done);
      __ bind(not_recur);
    }

    // Must save rax if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (LockingMode == LM_MONITOR) {
      __ jmp(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // Get the address of the stack lock
      __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
      // Get the old displaced header
      __ movptr(old_hdr, Address(rax, 0));

      // Atomically swap the old header back if the oop still contains the stack lock
      __ lock();
      __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::notEqual, slow_path_unlock);
      __ dec_held_monitor_count();
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
      __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
    }

    // Slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(fast_done);
  }
  {
    SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         r15_thread, c_rarg1);
    restore_native_result(masm, ret_type, stack_slots);
  }

// ...

  // and forward the exception
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    // BEGIN Slow path lock
    __ bind(slow_path_lock);

    // last_Java_frame is already set up. No exceptions can occur, so do a
    // vanilla call rather than call_VM.
    // Args are (oop obj, BasicLock* lock, JavaThread* thread)

    // Protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);

    __ mov(c_rarg0, obj_reg);
    __ mov(c_rarg1, lock_reg);
    __ mov(c_rarg2, r15_thread);

    // Not a leaf, but we have last_Java_frame set up as we want.
    // Force the freeze slow path on ObjectMonitor::enter() for now; it will
    // fail with freeze_pinned_native.
    __ push_cont_fastpath();
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
    __ pop_cont_fastpath();
    restore_args(masm, total_c_args, c_arg, out_regs);

#ifdef ASSERT
    { Label L;
      __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ jmp(lock_done);

    // END Slow path lock

    // BEGIN Slow path unlock
    __ bind(slow_path_unlock);

    // If we haven't already saved the native result, we must save it now, as
    // the xmm registers are still exposed.
    __ vzeroupper();

// ...
// This function returns the adjustment (in number of words) that a c2i
// adapter activation needs during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}
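
// For example (illustrative numbers): deoptimizing into a callee with
// 2 parameters and 5 locals yields (5 - 2) * Interpreter::stackElementWords
// extra words, since the parameters already occupy stack elements in the
// caller's outgoing area and only the non-parameter locals enlarge the
// interpreter activation.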


uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}


// Number of stack slots between the incoming argument block and the start of
// a new frame. The PROLOG must add this many slots to the stack. The
// EPILOG must remove this many slots. amd64 needs two slots for
// the return address.
uint SharedRuntime::in_preserve_stack_slots() {
  return 4 + 2 * VerifyStackAtCalls;
}
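
// Stack slots are VMRegImpl::stack_slot_size (4-byte) units, so the 8-byte
// return address accounts for two of the four slots; the remaining two
// presumably cover the saved rbp.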

VMReg SharedRuntime::thread_register() {
  return r15_thread->as_VMReg();
}
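
// HotSpot on x86_64 dedicates r15 to the current JavaThread* (hence the
// r15_thread uses throughout this file), so this is a fixed mapping rather
// than a per-frame lookup.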

//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Set up code generation tools
  int pad = 0;
  if (UseAVX > 2) {
    pad += 1024;
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    pad += 512; // Increase the buffer size when compiling for JVMCI
  }
#endif
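  // Worst case with both pads applied: 2560 + 1024 + 512 = 4096 bytes of buffer space.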
  CodeBuffer buffer("deopt_blob", 2560 + pad, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_in_words;
  OopMap* map = nullptr;
  OopMapSet* oop_maps = new OopMapSet();
