Old version:

Label slow_path_lock;
Label lock_done;

if (method->is_synchronized()) {
  assert(!is_critical_native, "unhandled");


  const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

  // Get the handle (the 2nd argument)
  __ mov(oop_handle_reg, c_rarg1);

  // Get address of the box

  __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

  // Load the oop from the handle
  __ movptr(obj_reg, Address(oop_handle_reg, 0));

  if (UseBiasedLocking) {
    __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
  }

  // Load immediate 1 into swap_reg %rax
  __ movl(swap_reg, 1);

  // Load (object->mark() | 1) into swap_reg %rax
  __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

  // Save (object->mark() | 1) into BasicLock's displaced header
  __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

  // src -> dest iff dest == rax else rax <- dest
  __ lock();
  __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  __ jcc(Assembler::equal, lock_done);

  // Hmm should this move to the slow path code area???

  // Test if the oopMark is an obvious stack pointer, i.e.,
  //  1) (mark & 3) == 0, and
  //  2) rsp <= mark < mark + os::pagesize()
  // These 3 tests can be done by evaluating the following
  // expression: ((mark - rsp) & (3 - os::vm_page_size())),
  // assuming both stack pointer and pagesize have their
  // least significant 2 bits clear.
  // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg

  __ subptr(swap_reg, rsp);
  __ andptr(swap_reg, 3 - os::vm_page_size());

  // Save the test result, for recursive case, the result is zero
  __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
  __ jcc(Assembler::notEqual, slow_path_lock);

  // Slow path will re-enter here

  __ bind(lock_done);
}

// Finally just about ready to make the JNI call

// get JNIEnv* which is first argument to native
if (!is_critical_native) {
  __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));

  // Now set thread in native
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
}

__ call(RuntimeAddress(native_func));

// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();

// ... (intervening code omitted from this excerpt) ...

__ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
__ jcc(Assembler::equal, reguard);
__ bind(reguard_done);

// native result if any is live

// Unlock
Label unlock_done;
Label slow_path_unlock;
if (method->is_synchronized()) {

  // Get locked oop from the handle we passed to jni
  __ movptr(obj_reg, Address(oop_handle_reg, 0));

  Label done;

  if (UseBiasedLocking) {
    __ biased_locking_exit(obj_reg, old_hdr, done);
  }

  // Simple recursive lock?
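  // A zero displaced header (stored by the recursive case of the locking
  // fast path above) means this was a recursive enter, so there is nothing
  // to swap back.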

  __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Must save rax if it is live now because cmpxchg must use it
  if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
    save_native_result(masm, ret_type, stack_slots);
  }


  // get address of the stack lock
  __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
  // get old displaced header
  __ movptr(old_hdr, Address(rax, 0));

  // Atomic swap old header if oop still contains the stack lock
  __ lock();
  __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  __ jcc(Assembler::notEqual, slow_path_unlock);

  // slow path re-enters here
  __ bind(unlock_done);
  if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
    restore_native_result(masm, ret_type, stack_slots);
  }

  __ bind(done);

}
{
  SkipIfEqual skip(masm, &DTraceMethodProbes, false);
  save_native_result(masm, ret_type, stack_slots);
  __ mov_metadata(c_rarg1, method());
  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
    r15_thread, c_rarg1);
  restore_native_result(masm, ret_type, stack_slots);
}

New version (with LockingMode handling):

Label slow_path_lock;
Label lock_done;

if (method->is_synchronized()) {
  assert(!is_critical_native, "unhandled");


  const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

  // Get the handle (the 2nd argument)
  __ mov(oop_handle_reg, c_rarg1);

  // Get address of the box

  __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

  // Load the oop from the handle
  __ movptr(obj_reg, Address(oop_handle_reg, 0));

  if (LockingMode == LM_MONITOR) {
    __ jmp(slow_path_lock);
  } else if (LockingMode == LM_LEGACY) {
    if (UseBiasedLocking) {
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
    }

    // Load immediate 1 into swap_reg %rax
    __ movl(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax
    __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);

    // src -> dest iff dest == rax else rax <- dest
    __ lock();
    __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::equal, lock_done);

    // Hmm should this move to the slow path code area???

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg

    __ subptr(swap_reg, rsp);
    __ andptr(swap_reg, 3 - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
    __ jcc(Assembler::notEqual, slow_path_lock);
  } else {
    assert(LockingMode == LM_LIGHTWEIGHT, "must be");
    // Load object header
    __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ fast_lock_impl(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
  }

  // Slow path will re-enter here

  __ bind(lock_done);
}

// Finally just about ready to make the JNI call

// get JNIEnv* which is first argument to native
if (!is_critical_native) {
  __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));

  // Now set thread in native
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
}

__ call(RuntimeAddress(native_func));

// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// ... (intervening code omitted from this excerpt) ...

__ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
__ jcc(Assembler::equal, reguard);
__ bind(reguard_done);

// native result if any is live

// Unlock
Label unlock_done;
Label slow_path_unlock;
if (method->is_synchronized()) {

  // Get locked oop from the handle we passed to jni
  __ movptr(obj_reg, Address(oop_handle_reg, 0));

  Label done;

  if (UseBiasedLocking) {
    __ biased_locking_exit(obj_reg, old_hdr, done);
  }

  if (LockingMode == LM_LEGACY) {
    // Simple recursive lock?

    __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, done);
  }

  // Must save rax if it is live now because cmpxchg must use it
  if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
    save_native_result(masm, ret_type, stack_slots);
  }

  if (LockingMode == LM_MONITOR) {
    __ jmp(slow_path_unlock);
  } else if (LockingMode == LM_LEGACY) {
    // get address of the stack lock
    __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
    // get old displaced header
    __ movptr(old_hdr, Address(rax, 0));

    // Atomic swap old header if oop still contains the stack lock
    __ lock();
    __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ jcc(Assembler::notEqual, slow_path_unlock);
  } else {
    assert(LockingMode == LM_LIGHTWEIGHT, "must be");
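    // Load the mark word and clear its low lock bits
    // (markWord::lock_mask_in_place == 0x3) before attempting the fast unlock.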
    __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
    __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
  }

  // slow path re-enters here
  __ bind(unlock_done);
  if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
    restore_native_result(masm, ret_type, stack_slots);
  }

  __ bind(done);

}
{
  SkipIfEqual skip(masm, &DTraceMethodProbes, false);
  save_native_result(masm, ret_type, stack_slots);
  __ mov_metadata(c_rarg1, method());
  __ call_VM_leaf(
    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
    r15_thread, c_rarg1);
  restore_native_result(masm, ret_type, stack_slots);
}

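For orientation, the following is a minimal plain C++ sketch of the LM_LEGACY stack-locking fast path that the stub above emits, including the page-distance recursion test. The names (Object, StackLock, fast_enter, fast_exit, PAGE_SIZE) are illustrative stand-ins rather than HotSpot types, the mark-word encoding is reduced to "low bits 01 = unlocked, aligned pointer = stack-locked", and std::atomic::compare_exchange_strong stands in for the lock-prefixed cmpxchg.

#include <atomic>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t PAGE_SIZE = 4096;      // stand-in for os::vm_page_size()

struct StackLock {                         // stand-in for BasicLock
  uintptr_t displaced_header;
};

struct Object {                            // stand-in for an oop's mark word
  std::atomic<uintptr_t> mark{1};          // 1 == unlocked mark (low bits 01)
};

// Returns true when the lock is acquired on the fast path (including the
// recursive case); false means "take the slow path".
bool fast_enter(Object* obj, StackLock* box, uintptr_t rsp) {
  uintptr_t swap = obj->mark.load() | 1;   // (object->mark() | 1)
  box->displaced_header = swap;            // save displaced header in the box
  // cmpxchg: install the box address iff the mark still equals 'swap';
  // on failure 'swap' receives the current mark, like %rax after cmpxchg.
  if (obj->mark.compare_exchange_strong(swap, reinterpret_cast<uintptr_t>(box)))
    return true;                           // lock_done
  // Recursion test: (mark - rsp) & (3 - PAGE_SIZE) is zero iff the mark is a
  // 4-byte aligned address in [rsp, rsp + PAGE_SIZE), i.e. a stack lock that
  // this thread already owns.
  uintptr_t test = (swap - rsp) & (3 - PAGE_SIZE);
  box->displaced_header = test;            // zero marks a recursive enter
  return test == 0;                        // non-zero -> slow_path_lock
}

bool fast_exit(Object* obj, StackLock* box) {
  if (box->displaced_header == 0)          // recursive enter: nothing to undo
    return true;
  // Swap the displaced header back iff the mark still points at our box.
  uintptr_t expected = reinterpret_cast<uintptr_t>(box);
  return obj->mark.compare_exchange_strong(expected, box->displaced_header);
}

int main() {
  Object obj;
  StackLock box{};
  uintptr_t rsp = reinterpret_cast<uintptr_t>(&box);  // pretend stack pointer
  bool locked   = fast_enter(&obj, &box, rsp);
  bool unlocked = locked && fast_exit(&obj, &box);
  std::printf("fast path: enter=%d exit=%d\n", locked, unlocked);
  return 0;
}

In the real stub the corresponding failure branches go to slow_path_lock / slow_path_unlock, which call into the runtime; that code lies outside the excerpt above.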