/*
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ... (intervening code elided) ...

  Label slow_path_lock;
  Label lock_done;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");

    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ mov(oop_handle_reg, c_rarg1);

    // Get address of the box
    __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

    // Load the oop from the handle
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

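    // Dispatch on LockingMode: LM_MONITOR always goes straight to the
    // runtime slow path, LM_LEGACY uses a stack-allocated BasicLock with a
    // displaced mark word, and LM_LIGHTWEIGHT operates on the mark word
    // directly (see lightweight_lock below).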
    if (LockingMode == LM_MONITOR) {
      __ jmp(slow_path_lock);
    } else if (LockingMode == LM_LEGACY) {
      if (UseBiasedLocking) {
        __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
      }

      // Load immediate 1 into swap_reg %rax
      __ movl(swap_reg, 1);

      // Load (object->mark() | 1) into swap_reg %rax
      __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

      // Save (object->mark() | 1) into BasicLock's displaced header
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

      // src -> dest iff dest == rax else rax <- dest
      __ lock();
      __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::equal, lock_done);

      // Hmm, should this move to the slow path code area?

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) rsp <= mark < mark + os::pagesize()
      // These three tests (the mask test in 1 plus the two comparisons
      // in 2) can be done by evaluating the following expression:
      //   ((mark - rsp) & (3 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their
      // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg

      __ subptr(swap_reg, rsp);
      __ andptr(swap_reg, 3 - os::vm_page_size());

      // Save the test result; for the recursive case the result is zero
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
      __ jcc(Assembler::notEqual, slow_path_lock);
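
      // Worked example of the mask trick above (illustrative values, not
      // from the source): with a 4 KiB page, 3 - os::vm_page_size() is
      // 3 - 4096 == ...FFFFF003 in two's complement. If the mark is a
      // BasicLock on this stack, say mark == rsp + 0x40, then
      // (mark - rsp) & ...FFFFF003 == 0x40 & ...FFFFF003 == 0: the low two
      // bits (test 1) and every bit at or above the page size (test 2) are
      // clear, so this is a recursive stack lock and zero is stored as the
      // displaced header. Any other mark yields a nonzero result and takes
      // slow_path_lock.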
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
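      // Lightweight locking: CAS the locked bits into the mark word and
      // record the oop on the thread's lock stack; on any failure
      // (contention, an inflated monitor, lock-stack overflow) it branches
      // to slow_path_lock.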
      __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
    }

    // Slow path will re-enter here
    __ bind(lock_done);
  }

  // Finally just about ready to make the JNI call

  // get JNIEnv*, which is the first argument to the native method
  if (!is_critical_native) {
    __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));

    // Now set thread in native
    __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
  }
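
  // From here until the state is flipped back, the thread is
  // _thread_in_native: the VM can reach and pass a safepoint during the
  // native call without needing to stop this thread.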
  __ call(RuntimeAddress(native_func));

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // ... (intervening code elided) ...

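  // If the stack guard pages were disabled while in native (e.g. a stack
  // overflow hit the yellow zone there), call the reguard stub before
  // returning to Java.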
  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
  __ jcc(Assembler::equal, reguard);
  __ bind(reguard_done);

  // The native result, if any, is live here

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get the locked oop from the handle we passed to JNI
    __ movptr(obj_reg, Address(oop_handle_reg, 0));

    Label done;

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, old_hdr, done);
    }

    if (LockingMode == LM_LEGACY) {
      // Simple recursive lock? The fast path above stored a zero displaced
      // header for a recursive enter, so an all-zero box means there is
      // nothing to unlock.
      __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, done);
    }

    // Must save rax if it is live now, because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    if (LockingMode == LM_MONITOR) {
      __ jmp(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // get address of the stack lock
      __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
      // get old displaced header
      __ movptr(old_hdr, Address(rax, 0));

      // Atomic swap old header if oop still contains the stack lock
      __ lock();
      __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::notEqual, slow_path_unlock);
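
      // The cmpxchg above uses rax (the BasicLock address loaded just
      // before) as the expected value: the displaced header is written back
      // only if the object is still stack-locked by this frame; otherwise
      // the mark has changed (e.g. the lock was inflated) and we take the
      // slow path.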
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
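      // Lightweight unlocking: pop the oop from the thread's lock stack and
      // CAS the mark word back to its unlocked state; any mismatch (e.g.
      // the lock was inflated while held) goes to slow_path_unlock.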
      __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
    }

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);

  }
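
  // DTrace method-exit probe. SkipIfEqual emits a test of DTraceMethodProbes
  // and jumps over the block below when the flag is false, so the probe
  // costs nothing unless DTrace probes are enabled.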
  {
    SkipIfEqual skip(masm, &DTraceMethodProbes, false);
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
         r15_thread, c_rarg1);
    restore_native_result(masm, ret_type, stack_slots);
  }
