
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Old version:

2128 
2129   Label slow_path_lock;
2130   Label lock_done;
2131 
2132   if (method->is_synchronized()) {
2133     Label count_mon;
2134 
2135     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2136 
2137     // Get the handle (the 2nd argument)
2138     __ mov(oop_handle_reg, c_rarg1);
2139 
2140     // Get address of the box
2141 
2142     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2143 
2144     // Load the oop from the handle
2145     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2146 
2147     if (!UseHeavyMonitors) {
2148 
2149       // Load immediate 1 into swap_reg %rax
2150       __ movl(swap_reg, 1);
2151 
2152       // Load (object->mark() | 1) into swap_reg %rax
2153       __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2154 
2155       // Save (object->mark() | 1) into BasicLock's displaced header
2156       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2157 
2158       // src -> dest iff dest == rax else rax <- dest
2159       __ lock();
2160       __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2161       __ jcc(Assembler::equal, count_mon);
2162 
2163       // Hmm should this move to the slow path code area???
2164 
2165       // Test if the oopMark is an obvious stack pointer, i.e.,
2166       //  1) (mark & 3) == 0, and
2167       //  2) rsp <= mark < rsp + os::pagesize()
2168       // These 3 tests can be done by evaluating the following
2169       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2170       // assuming both stack pointer and pagesize have their
2171       // least significant 2 bits clear.
2172       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2173 
2174       __ subptr(swap_reg, rsp);
2175       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2176 
2177       // Save the test result, for recursive case, the result is zero
2178       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2179       __ jcc(Assembler::notEqual, slow_path_lock);
2180     } else {
2181       __ jmp(slow_path_lock);
2182     }
2183     __ bind(count_mon);
2184     __ inc_held_monitor_count();
2185 
2186     // Slow path will re-enter here
2187     __ bind(lock_done);
2188   }
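
For reference, the fast path above is classic stack locking: CAS a pointer to the on-stack BasicLock (the "box") into the object's mark word, and on CAS failure check whether the mark already points into our own stack frame, i.e. a recursive enter. A minimal C++ model of that logic, with hypothetical names (the real mark-word encoding lives in markWord.hpp):

    #include <atomic>
    #include <cstdint>

    // Sketch only: models the stack-locking fast path emitted above.
    constexpr uintptr_t PAGE_SIZE = 4096;   // assumed os::vm_page_size()

    struct BasicLockModel {
      uintptr_t displaced_header;           // the "box" slot on the stack
    };

    bool try_stack_lock(std::atomic<uintptr_t>& mark_word,
                        BasicLockModel* box, uintptr_t rsp) {
      // swap_reg = object->mark() | 1 (set the "unlocked" bit)
      uintptr_t expected = mark_word.load(std::memory_order_relaxed) | 1;
      box->displaced_header = expected;
      // lock cmpxchg: install the box address iff the mark is unlocked
      if (mark_word.compare_exchange_strong(
              expected, reinterpret_cast<uintptr_t>(box))) {
        return true;                        // locked; fall into count_mon
      }
      // 'expected' now holds the current mark. Recursive iff the mark is
      // a stack pointer into this frame: (mark & 3) == 0 and
      // rsp <= mark < rsp + PAGE_SIZE, all tested in one expression.
      uintptr_t test = (expected - rsp) & (3 - PAGE_SIZE);
      box->displaced_header = test;         // zero records a recursive enter
      return test == 0;                     // non-zero -> slow_path_lock
    }

Both successful outcomes (initial and recursive) fall through to count_mon and bump the thread's held-monitor count; everything else goes to slow_path_lock.
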
2189 
2190   // Finally just about ready to make the JNI call
2191 
2192   // get JNIEnv* which is first argument to native
2193   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2194 
2195   // Now set thread in native
2196   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2197 
2198   __ call(RuntimeAddress(native_func));
2199 
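
The store of _thread_in_native publishes the state transition before control leaves the VM, so a safepoint can proceed without waiting for this thread. The shape of that handoff in plain C++ (illustrative state values; the real ones are in globalDefinitions.hpp):

    #include <atomic>

    enum StateModel { in_Java = 8, in_native = 4 };  // assumed values

    template <typename NativeFn>
    void call_out(std::atomic<int>& thread_state, NativeFn native_func) {
      // movl(Address(r15_thread, thread_state_offset()), _thread_in_native)
      thread_state.store(in_native, std::memory_order_release);
      native_func();
      // The return transition (_thread_in_native_trans plus a safepoint
      // poll) happens in code outside this excerpt.
    }
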
(unchanged lines 2200-2272 omitted)
2273   __ bind(after_transition);
2274 
2275   Label reguard;
2276   Label reguard_done;
2277   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2278   __ jcc(Assembler::equal, reguard);
2279   __ bind(reguard_done);
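
The reguard check: if a stack overflow handled during the native call disabled the yellow/reserved guard zone, branch out of line to re-arm it before touching more stack. Equivalent control flow, with hypothetical names:

    // Sketch of the reguard check above; names are illustrative.
    enum GuardModel { guard_enabled, yellow_reserved_disabled };

    void maybe_reguard(GuardModel& guard_state) {
      if (guard_state == yellow_reserved_disabled) {  // cmpl + jcc(equal)
        // out-of-line 'reguard' stub: stands in for re-protecting the
        // guard pages and jumping back to reguard_done
        guard_state = guard_enabled;
      }
      // reguard_done: the native result, if any, is still live here
    }
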
2280 
2281   // native result if any is live
2282 
2283   // Unlock
2284   Label slow_path_unlock;
2285   Label unlock_done;
2286   if (method->is_synchronized()) {
2287 
2288     Label fast_done;
2289 
2290     // Get locked oop from the handle we passed to jni
2291     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2292 
2293     if (!UseHeavyMonitors) {
2294       Label not_recur;
2295       // Simple recursive lock?
2296       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
2297       __ jcc(Assembler::notEqual, not_recur);
2298       __ dec_held_monitor_count();
2299       __ jmpb(fast_done);
2300       __ bind(not_recur);
2301     }
2302 
2303     // Must save rax if it is live now because cmpxchg must use it
2304     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2305       save_native_result(masm, ret_type, stack_slots);
2306     }
2307 
2308     if (!UseHeavyMonitors) {
2309       // get address of the stack lock
2310       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2311       //  get old displaced header
2312       __ movptr(old_hdr, Address(rax, 0));
2313 
2314       // Atomic swap old header if oop still contains the stack lock
2315       __ lock();
2316       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2317       __ jcc(Assembler::notEqual, slow_path_unlock);
2318       __ dec_held_monitor_count();
2319     } else {
2320       __ jmp(slow_path_unlock);
2321     }
2322 
2323     // slow path re-enters here
2324     __ bind(unlock_done);
2325     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2326       restore_native_result(masm, ret_type, stack_slots);
2327     }
2328 
2329     __ bind(fast_done);
2330   }
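
Unlock mirrors the lock: a zero displaced-header slot means a recursive exit (just decrement the count), otherwise CAS the saved header back into the mark word, bailing to the runtime if the monitor was inflated in the meantime. A companion sketch to the locking model above:

    #include <atomic>
    #include <cstdint>

    struct BasicLockModel {
      uintptr_t displaced_header;
    };

    bool try_stack_unlock(std::atomic<uintptr_t>& mark_word,
                          BasicLockModel* box, int& held_monitors) {
      if (box->displaced_header == 0) {     // recursive enter recorded
        --held_monitors;
        return true;                        // jmpb(fast_done)
      }
      // swap the old header back iff the mark still points at our box
      uintptr_t expected = reinterpret_cast<uintptr_t>(box);
      if (mark_word.compare_exchange_strong(expected,
                                            box->displaced_header)) {
        --held_monitors;
        return true;
      }
      return false;                         // inflated -> slow_path_unlock
    }
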
2331   {
2332     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2333     save_native_result(masm, ret_type, stack_slots);
2334     __ mov_metadata(c_rarg1, method());
2335     __ call_VM_leaf(
2336          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2337          r15_thread, c_rarg1);
New version:
2128 
2129   Label slow_path_lock;
2130   Label lock_done;
2131 
2132   if (method->is_synchronized()) {
2133     Label count_mon;
2134 
2135     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2136 
2137     // Get the handle (the 2nd argument)
2138     __ mov(oop_handle_reg, c_rarg1);
2139 
2140     // Get address of the box
2141 
2142     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2143 
2144     // Load the oop from the handle
2145     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2146 
2147     if (!UseHeavyMonitors) {
2148       if (UseFastLocking) {
2149         // Load object header
2150         __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2151         __ fast_lock_impl(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2152       } else {
2153         // Load immediate 1 into swap_reg %rax
2154         __ movl(swap_reg, 1);
2155 
2156         // Load (object->mark() | 1) into swap_reg %rax
2157         __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2158 
2159         // Save (object->mark() | 1) into BasicLock's displaced header
2160         __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2161 
2162         // src -> dest iff dest == rax else rax <- dest
2163         __ lock();
2164         __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2165         __ jcc(Assembler::equal, count_mon);
2166 
2167         // Hmm should this move to the slow path code area???
2168 
2169         // Test if the oopMark is an obvious stack pointer, i.e.,
2170         //  1) (mark & 3) == 0, and
2171         //  2) rsp <= mark < rsp + os::pagesize()
2172         // These 3 tests can be done by evaluating the following
2173         // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2174         // assuming both stack pointer and pagesize have their
2175         // least significant 2 bits clear.
2176         // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2177 
2178         __ subptr(swap_reg, rsp);
2179         __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2180 
2181         // Save the test result, for recursive case, the result is zero
2182         __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2183         __ jcc(Assembler::notEqual, slow_path_lock);
2184       }
2185     } else {
2186       __ jmp(slow_path_lock);
2187     }
2188     __ bind(count_mon);
2189     __ inc_held_monitor_count();
2190 
2191     // Slow path will re-enter here
2192     __ bind(lock_done);
2193   }
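
The UseFastLocking branch is the substance of this change: fast_lock_impl does not write a displaced header into a stack box at all. Judging from the mark-word load here and the lock-bit masking on the unlock side, it CASes the mark's low lock bits from "unlocked" to "locked" and records ownership on a small per-thread lock stack; contention, inflation, and recursion all bail to slow_path_lock. A rough model of that scheme (the bit encoding and stack capacity are assumptions; the real code is MacroAssembler::fast_lock_impl):

    #include <atomic>
    #include <cstdint>

    constexpr uintptr_t lock_mask    = 0b11;  // assumed markWord lock bits
    constexpr uintptr_t unlocked_bit = 0b01;

    struct LockStackModel {
      uintptr_t oops[8];                      // capacity is illustrative
      int top = 0;
    };

    bool fast_lock_model(std::atomic<uintptr_t>& mark_word, uintptr_t oop,
                         LockStackModel& ls) {
      if (ls.top == 8) return false;          // full -> slow_path_lock
      uintptr_t mark = mark_word.load(std::memory_order_relaxed);
      // Build the only acceptable current value: unlocked, uninflated.
      uintptr_t expected = (mark & ~lock_mask) | unlocked_bit;
      if (!mark_word.compare_exchange_strong(expected,
                                             expected & ~lock_mask)) {
        return false;                         // locked/inflated -> slow path
      }
      ls.oops[ls.top++] = oop;                // ownership lives here now
      return true;
    }
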
2194 
2195   // Finally just about ready to make the JNI call
2196 
2197   // get JNIEnv* which is first argument to native
2198   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2199 
2200   // Now set thread in native
2201   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2202 
2203   __ call(RuntimeAddress(native_func));
2204 
(unchanged lines 2205-2277 omitted)
2278   __ bind(after_transition);
2279 
2280   Label reguard;
2281   Label reguard_done;
2282   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
2283   __ jcc(Assembler::equal, reguard);
2284   __ bind(reguard_done);
2285 
2286   // native result if any is live
2287 
2288   // Unlock
2289   Label slow_path_unlock;
2290   Label unlock_done;
2291   if (method->is_synchronized()) {
2292 
2293     Label fast_done;
2294 
2295     // Get locked oop from the handle we passed to jni
2296     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2297 
2298     if (!UseHeavyMonitors && !UseFastLocking) {
2299       Label not_recur;
2300       // Simple recursive lock?
2301       __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
2302       __ jcc(Assembler::notEqual, not_recur);
2303       __ dec_held_monitor_count();
2304       __ jmpb(fast_done);
2305       __ bind(not_recur);
2306     }
2307 
2308     // Must save rax if it is live now because cmpxchg must use it
2309     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2310       save_native_result(masm, ret_type, stack_slots);
2311     }
2312 
2313     if (!UseHeavyMonitors) {
2314       if (UseFastLocking) {
2315         __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2316         __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
2317         __ fast_unlock_impl(obj_reg, swap_reg, lock_reg, slow_path_unlock);
2318       } else {
2319         // get address of the stack lock
2320         __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2321         //  get old displaced header
2322         __ movptr(old_hdr, Address(rax, 0));
2323 
2324         // Atomic swap old header if oop still contains the stack lock
2325         __ lock();
2326         __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2327         __ jcc(Assembler::notEqual, slow_path_unlock);
2328       }
2329       __ dec_held_monitor_count();
2330     } else {
2331       __ jmp(slow_path_unlock);
2332     }
2333 
2334     // slow path re-enters here
2335     __ bind(unlock_done);
2336     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2337       restore_native_result(masm, ret_type, stack_slots);
2338     }
2339 
2340     __ bind(fast_done);
2341   }
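
On the unlock side the mark is reloaded and the lock bits are masked off (the andptr with ~markWord::lock_mask_in_place) before fast_unlock_impl flips the bits back to "unlocked" and pops the lock stack. Note that the recursive displaced-header shortcut at the top of the unlock sequence is now guarded by !UseFastLocking, since the new scheme has no displaced-header slot to test. Companion model, same assumed encoding:

    #include <atomic>
    #include <cstdint>

    constexpr uintptr_t lock_mask    = 0b11;
    constexpr uintptr_t unlocked_bit = 0b01;

    struct LockStackModel {
      uintptr_t oops[8];
      int top = 0;
    };

    bool fast_unlock_model(std::atomic<uintptr_t>& mark_word, uintptr_t oop,
                           LockStackModel& ls) {
      // We must be the most recent fast-lock owner of this oop.
      if (ls.top == 0 || ls.oops[ls.top - 1] != oop) {
        return false;                         // slow_path_unlock
      }
      uintptr_t expected =
          mark_word.load(std::memory_order_relaxed) & ~lock_mask;
      if (!mark_word.compare_exchange_strong(expected,
                                             expected | unlocked_bit)) {
        return false;                         // inflated -> slow_path_unlock
      }
      --ls.top;                               // pop our entry
      return true;
    }
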
2342   {
2343     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2344     save_native_result(masm, ret_type, stack_slots);
2345     __ mov_metadata(c_rarg1, method());
2346     __ call_VM_leaf(
2347          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2348          r15_thread, c_rarg1);
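
Finally, SkipIfEqual jumps past the whole probe block (result save, metadata load, leaf call) when the DTraceMethodProbes flag is clear. The same shape in plain C++ (names illustrative):

    using ProbeFn = void (*)(void* thread, void* method);

    void method_exit_probe(bool dtrace_method_probes, ProbeFn probe,
                           void* thread, void* method) {
      if (!dtrace_method_probes) return;  // SkipIfEqual: flag == 0, skip
      probe(thread, method);              // call_VM_leaf(dtrace_method_exit)
    }
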