src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Old version:

2055 
2056   Label slow_path_lock;
2057   Label lock_done;
2058 
2059   if (method->is_synchronized()) {
2060     assert(!is_critical_native, "unhandled");
2061 
2062 
2063     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2064 
2065     // Get the handle (the 2nd argument)
2066     __ mov(oop_handle_reg, c_rarg1);
2067 
2068     // Get address of the box
2069 
2070     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2071 
2072     // Load the oop from the handle
2073     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2074 
2075     if (UseBiasedLocking) {
2076       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
2077     }
2078 
2079     // Load immediate 1 into swap_reg %rax
2080     __ movl(swap_reg, 1);
2081 
2082     // Load (object->mark() | 1) into swap_reg %rax
2083     __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2084 
2085     // Save (object->mark() | 1) into BasicLock's displaced header
2086     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2087 
2088     // src -> dest iff dest == rax else rax <- dest
2089     __ lock();
2090     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2091     __ jcc(Assembler::equal, lock_done);
2092 
2093     // Hmm should this move to the slow path code area???
2094 
2095     // Test if the oopMark is an obvious stack pointer, i.e.,
2096     //  1) (mark & 3) == 0, and
2097     //  2) rsp <= mark < rsp + os::pagesize()
2098     // These 3 tests can be done by evaluating the following
2099     // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2100     // assuming both stack pointer and pagesize have their
2101     // least significant 2 bits clear.
2102     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2103 
2104     __ subptr(swap_reg, rsp);
2105     __ andptr(swap_reg, 3 - os::vm_page_size());
2106 
2107     // Save the test result, for recursive case, the result is zero
2108     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2109     __ jcc(Assembler::notEqual, slow_path_lock);
2110 
2111     // Slow path will re-enter here
2112 
2113     __ bind(lock_done);
2114   }
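
For reference, the stack-locking fast path above does two things: the cmpxchg tries to install a pointer to the on-stack BasicLock box into the object's mark word, and on failure the subtract-and-mask trick decides whether this is a recursive lock, i.e. whether the mark already points into the current thread's stack just above rsp. A minimal C++ sketch of that logic, assuming a page size with its low two bits clear; BoxSketch and try_stack_lock are illustrative names, not HotSpot API:

    #include <atomic>
    #include <cstdint>

    struct BoxSketch { uintptr_t displaced_header; };  // stand-in for BasicLock

    bool try_stack_lock(std::atomic<uintptr_t>* mark_addr, BoxSketch* box,
                        uintptr_t rsp, uintptr_t page_size) {
      uintptr_t unlocked = mark_addr->load() | 1;      // object->mark() | 1
      box->displaced_header = unlocked;                // save displaced header
      uintptr_t expected = unlocked;
      if (mark_addr->compare_exchange_strong(
              expected, reinterpret_cast<uintptr_t>(box))) {
        return true;                                   // CAS won: stack-locked
      }
      // CAS failed; `expected` now holds the current mark, like %rax after
      // cmpxchg. ((mark - rsp) & (3 - page_size)) == 0 checks in one
      // expression that (mark & 3) == 0 and rsp <= mark < rsp + page_size,
      // provided rsp and page_size have their low two bits clear -- i.e. the
      // mark is a pointer into our own frame, so this is a recursive lock.
      uintptr_t test = (expected - rsp) & (3 - page_size);
      box->displaced_header = test;                    // zero flags recursion
      return test == 0;                                // non-zero -> slow path
    }

Saving the zero test result as the displaced header is what the recursive check in the unlock path further down keys on.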
2115 
2116   // Finally just about ready to make the JNI call
2117 
2118   // get JNIEnv* which is first argument to native
2119   if (!is_critical_native) {
2120     __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2121 
2122     // Now set thread in native
2123     __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2124   }
2125 
2126   __ call(RuntimeAddress(native_func));
2127 
2128   // Verify or restore cpu control state after JNI call
2129   __ restore_cpu_control_state_after_jni();
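
In C terms, the sequence above passes the address of the thread-local JNI environment as the first native argument, publishes the _thread_in_native state so the VM can treat this frame as walkable during the call, and then calls out; the matching transition back to Java state sits in the lines elided below. A hedged sketch with simplified stand-in types (ThreadSketch is illustrative, not the real JavaThread):

    #include <cstdint>

    enum ThreadState { _thread_in_vm, _thread_in_native };

    struct ThreadSketch {             // stand-in for JavaThread (illustrative)
      void*       jni_environment;    // JNIEnv* area handed to native code
      ThreadState thread_state;
    };

    void* call_out(ThreadSketch* t, void* (*native_func)(void* env)) {
      void* env = &t->jni_environment;      // lea c_rarg0, [r15 + jni_env_offset]
      t->thread_state = _thread_in_native;  // movl [r15 + state_offset], ...
      return native_func(env);              // __ call(RuntimeAddress(native_func))
    }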

2214   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2215   __ jcc(Assembler::equal, reguard);
2216   __ bind(reguard_done);
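
The reguard check above covers the case where the stack guard pages were disabled (typically by a stack overflow handled while in native code): if the saved guard state says so, the wrapper branches out of line to re-enable them before proceeding. Roughly, with an illustrative stand-in struct:

    // Illustrative stand-ins; the real constants live in StackOverflow /
    // JavaThread as referenced in the diff.
    enum GuardState { guard_enabled, stack_guard_yellow_reserved_disabled };
    struct GuardSketch { GuardState stack_guard_state; };

    bool needs_reguard(const GuardSketch* t) {
      // cmpl [r15 + stack_guard_state_offset], yellow_reserved_disabled
      return t->stack_guard_state == stack_guard_yellow_reserved_disabled;
    }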
2217 
2218   // native result, if any, is live
2219 
2220   // Unlock
2221   Label unlock_done;
2222   Label slow_path_unlock;
2223   if (method->is_synchronized()) {
2224 
2225     // Get locked oop from the handle we passed to jni
2226     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2227 
2228     Label done;
2229 
2230     if (UseBiasedLocking) {
2231       __ biased_locking_exit(obj_reg, old_hdr, done);
2232     }
2233 
2234     // Simple recursive lock?
2235 
2236     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2237     __ jcc(Assembler::equal, done);
2238 
2239     // Must save rax if it is live now because cmpxchg must use it
2240     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2241       save_native_result(masm, ret_type, stack_slots);
2242     }
2243 
2244 
2245     // get address of the stack lock
2246     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2247     //  get old displaced header
2248     __ movptr(old_hdr, Address(rax, 0));
2249 
2250     // Atomic swap old header if oop still contains the stack lock
2251     __ lock();
2252     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2253     __ jcc(Assembler::notEqual, slow_path_unlock);
2254 
2255     // slow path re-enters here
2256     __ bind(unlock_done);
2257     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2258       restore_native_result(masm, ret_type, stack_slots);
2259     }
2260 
2261     __ bind(done);
2262 
2263   }
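
The unlock path above mirrors the locking sketch: a zero displaced header marks a recursive enter, so there is nothing to restore; otherwise the cmpxchg swaps the saved header back into the mark word, provided the mark still points at our stack box. If it does not, the lock was inflated in the meantime and the slow path must run. A minimal sketch (BoxSketch again stands in for BasicLock):

    #include <atomic>
    #include <cstdint>

    struct BoxSketch { uintptr_t displaced_header; };  // stand-in for BasicLock

    bool try_stack_unlock(std::atomic<uintptr_t>* mark_addr, BoxSketch* box) {
      uintptr_t displaced = box->displaced_header;
      if (displaced == 0) {
        return true;                   // recursive case: nothing to restore
      }
      uintptr_t expected = reinterpret_cast<uintptr_t>(box);
      // Swap the saved header back only if the mark still points at our box;
      // otherwise the lock was inflated meanwhile -> slow_path_unlock.
      return mark_addr->compare_exchange_strong(expected, displaced);
    }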
2264   {
2265     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2266     save_native_result(masm, ret_type, stack_slots);
2267     __ mov_metadata(c_rarg1, method());
2268     __ call_VM_leaf(
2269          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2270          r15_thread, c_rarg1);
2271     restore_native_result(masm, ret_type, stack_slots);
2272   }
2273

New version:

2055 
2056   Label slow_path_lock;
2057   Label lock_done;
2058 
2059   if (method->is_synchronized()) {
2060     assert(!is_critical_native, "unhandled");
2061 
2062 
2063     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2064 
2065     // Get the handle (the 2nd argument)
2066     __ mov(oop_handle_reg, c_rarg1);
2067 
2068     // Get address of the box
2069 
2070     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2071 
2072     // Load the oop from the handle
2073     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2074 
2075     if (UseFastLocking) {
2076       // Load object header
2077       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2078       __ fast_lock_impl(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2079     } else {
2080       if (UseBiasedLocking) {
2081         __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
2082       }
2083 
2084       // Load immediate 1 into swap_reg %rax
2085       __ movl(swap_reg, 1);
2086 
2087       // Load (object->mark() | 1) into swap_reg %rax
2088       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2089 
2090       // Save (object->mark() | 1) into BasicLock's displaced header
2091       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2092 
2093       // src -> dest iff dest == rax else rax <- dest
2094       __ lock();
2095       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2096       __ jcc(Assembler::equal, lock_done);
2097 
2098       // Hmm should this move to the slow path code area???
2099 
2100       // Test if the oopMark is an obvious stack pointer, i.e.,
2101       //  1) (mark & 3) == 0, and
2102       //  2) rsp <= mark < mark + os::pagesize()
2103       // These 3 tests can be done by evaluating the following
2104       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2105       // assuming both stack pointer and pagesize have their
2106       // least significant 2 bits clear.
2107       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2108 
2109       __ subptr(swap_reg, rsp);
2110       __ andptr(swap_reg, 3 - os::vm_page_size());
2111 
2112       // Save the test result, for recursive case, the result is zero
2113       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2114       __ jcc(Assembler::notEqual, slow_path_lock);
2115     }
2116 
2117     // Slow path will re-enter here
2118 
2119     __ bind(lock_done);
2120   }
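
The UseFastLocking branch is the substance of this change: instead of writing a displaced header into an on-stack box, fast_lock_impl (its body is not part of this hunk) is expected to lock by flipping the mark word's lock bits in place and recording the oop on a per-thread structure, which is presumably why r15_thread is now passed in. A speculative sketch under those assumptions; the bit patterns, LockStackSketch, and try_fast_lock are guesses for illustration, not the real implementation:

    #include <atomic>
    #include <cstdint>

    constexpr uintptr_t lock_mask    = 0b11;  // cf. markWord::lock_mask_in_place
    constexpr uintptr_t unlocked_val = 0b01;  // assumed "unlocked" bit pattern

    struct LockStackSketch {    // guessed per-thread lock stack, fixed size
      void* elems[8];
      int   top = 0;
    };

    bool try_fast_lock(std::atomic<uintptr_t>* mark_addr, void* obj,
                       LockStackSketch* ls) {
      uintptr_t mark = mark_addr->load();
      if ((mark & lock_mask) != unlocked_val) {
        return false;                          // already locked or inflated
      }
      uintptr_t locked = mark & ~lock_mask;    // clear lock bits in place
      if (!mark_addr->compare_exchange_strong(mark, locked)) {
        return false;                          // contended -> slow_path_lock
      }
      ls->elems[ls->top++] = obj;              // remember ownership per thread
      return true;
    }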
2121 
2122   // Finally just about ready to make the JNI call
2123 
2124   // get JNIEnv* which is first argument to native
2125   if (!is_critical_native) {
2126     __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2127 
2128     // Now set thread in native
2129     __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2130   }
2131 
2132   __ call(RuntimeAddress(native_func));
2133 
2134   // Verify or restore cpu control state after JNI call
2135   __ restore_cpu_control_state_after_jni();

2220   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2221   __ jcc(Assembler::equal, reguard);
2222   __ bind(reguard_done);
2223 
2224   // native result, if any, is live
2225 
2226   // Unlock
2227   Label unlock_done;
2228   Label slow_path_unlock;
2229   if (method->is_synchronized()) {
2230 
2231     // Get locked oop from the handle we passed to jni
2232     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2233 
2234     Label done;
2235 
2236     if (UseBiasedLocking) {
2237       __ biased_locking_exit(obj_reg, old_hdr, done);
2238     }
2239 
2240     if (!UseFastLocking) {
2241       // Simple recursive lock?
2242 
2243       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2244       __ jcc(Assembler::equal, done);
2245     }
2246 
2247     // Must save rax if it is live now because cmpxchg must use it
2248     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2249       save_native_result(masm, ret_type, stack_slots);
2250     }
2251 
2252     if (UseFastLocking) {
2253       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2254       __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
2255       __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
2256     } else {
2257       // get address of the stack lock
2258       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2259       //  get old displaced header
2260       __ movptr(old_hdr, Address(rax, 0));
2261 
2262       // Atomic swap old header if oop still contains the stack lock
2263       __ lock();
2264       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2265       __ jcc(Assembler::notEqual, slow_path_unlock);
2266     }
2267 
2268     // slow path re-enters here
2269     __ bind(unlock_done);
2270     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2271       restore_native_result(masm, ret_type, stack_slots);
2272     }
2273 
2274     __ bind(done);
2275 
2276   }
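
Correspondingly, the new unlock path masks the lock bits out of the loaded mark (the andptr with ~markWord::lock_mask_in_place) and lets fast_unlock_impl restore the unlocked pattern, falling back to slow_path_unlock when the object has been inflated. A speculative counterpart to the locking sketch, with the same caveats (the real code presumably also pops the per-thread lock stack, which this sketch omits):

    #include <atomic>
    #include <cstdint>

    bool try_fast_unlock(std::atomic<uintptr_t>* mark_addr) {
      constexpr uintptr_t lock_mask = 0b11;    // cf. markWord::lock_mask_in_place
      uintptr_t mark     = mark_addr->load();
      uintptr_t expected = mark & ~lock_mask;  // andptr(swap_reg, ~lock_mask...)
      uintptr_t unlocked = expected | 0b01;    // restore assumed unlocked bits
      // Succeeds only while the object is still fast-locked; failure means
      // the lock got inflated in the meantime -> slow_path_unlock.
      return mark_addr->compare_exchange_strong(expected, unlocked);
    }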
2277   {
2278     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2279     save_native_result(masm, ret_type, stack_slots);
2280     __ mov_metadata(c_rarg1, method());
2281     __ call_VM_leaf(
2282          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2283          r15_thread, c_rarg1);
2284     restore_native_result(masm, ret_type, stack_slots);
2285   }
2286 