< prev index next >

src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Print this page

   1 /*
   2  * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *

2168 
2169       // Hmm should this move to the slow path code area???
2170 
2171       // Test if the oopMark is an obvious stack pointer, i.e.,
2172       //  1) (mark & 3) == 0, and
2173       //  2) rsp <= mark < rsp + os::vm_page_size()
2174       // These 3 tests can be done by evaluating the following
2175       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2176       // assuming both stack pointer and pagesize have their
2177       // least significant 2 bits clear.
2178       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2179 
2180       __ subptr(swap_reg, rsp);
2181       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2182 
2183       // Save the test result, for recursive case, the result is zero
2184       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2185       __ jcc(Assembler::notEqual, slow_path_lock);
2186     } else {
2187       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2188       // Load object header
2189       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2190       __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2191     }
2192     __ bind(count_mon);
2193     __ inc_held_monitor_count();
2194 
2195     // Slow path will re-enter here
2196     __ bind(lock_done);
2197   }
2198 
2199   // Finally just about ready to make the JNI call
2200 
2201   // get JNIEnv* which is first argument to native
2202   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2203 
2204   // Now set thread in native
2205   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2206 
2207   __ call(RuntimeAddress(native_func));
2208 
2209   // Verify or restore cpu control state after JNI call

2312     // Must save rax if it is live now because cmpxchg must use it
2313     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2314       save_native_result(masm, ret_type, stack_slots);
2315     }
2316 
2317     if (LockingMode == LM_MONITOR) {
2318       __ jmp(slow_path_unlock);
2319     } else if (LockingMode == LM_LEGACY) {
2320       // get address of the stack lock
2321       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2322       //  get old displaced header
2323       __ movptr(old_hdr, Address(rax, 0));
2324 
2325       // Atomic swap old header if oop still contains the stack lock
2326       __ lock();
2327       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2328       __ jcc(Assembler::notEqual, slow_path_unlock);
2329       __ dec_held_monitor_count();
2330     } else {
2331       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2332       __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2333       __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
2334       __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
2335       __ dec_held_monitor_count();
2336     }
2337 
2338     // slow path re-enters here
2339     __ bind(unlock_done);
2340     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2341       restore_native_result(masm, ret_type, stack_slots);
2342     }
2343 
2344     __ bind(fast_done);
2345   }
2346   {
2347     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2348     save_native_result(masm, ret_type, stack_slots);
2349     __ mov_metadata(c_rarg1, method());
2350     __ call_VM_leaf(
2351          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2352          r15_thread, c_rarg1);
2353     restore_native_result(masm, ret_type, stack_slots);
2354   }

   1 /*
   2  * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *

2168 
2169       // Hmm should this move to the slow path code area???
2170 
2171       // Test if the oopMark is an obvious stack pointer, i.e.,
2172       //  1) (mark & 3) == 0, and
2173       //  2) rsp <= mark < rsp + os::vm_page_size()
2174       // These 3 tests can be done by evaluating the following
2175       // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2176       // assuming both stack pointer and pagesize have their
2177       // least significant 2 bits clear.
2178       // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
2179 
2180       __ subptr(swap_reg, rsp);
2181       __ andptr(swap_reg, 3 - (int)os::vm_page_size());
2182 
2183       // Save the test result, for recursive case, the result is zero
2184       __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2185       __ jcc(Assembler::notEqual, slow_path_lock);
2186     } else {
2187       assert(LockingMode == LM_LIGHTWEIGHT, "must be");


2188       __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
2189     }
2190     __ bind(count_mon);
2191     __ inc_held_monitor_count();
2192 
2193     // Slow path will re-enter here
2194     __ bind(lock_done);
2195   }
2196 
2197   // Finally just about ready to make the JNI call
2198 
2199   // get JNIEnv* which is first argument to native
2200   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2201 
2202   // Now set thread in native
2203   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2204 
2205   __ call(RuntimeAddress(native_func));
2206 
2207   // Verify or restore cpu control state after JNI call

2310     // Must save rax if it is live now because cmpxchg must use it
2311     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2312       save_native_result(masm, ret_type, stack_slots);
2313     }
2314 
2315     if (LockingMode == LM_MONITOR) {
2316       __ jmp(slow_path_unlock);
2317     } else if (LockingMode == LM_LEGACY) {
2318       // get address of the stack lock
2319       __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2320       //  get old displaced header
2321       __ movptr(old_hdr, Address(rax, 0));
2322 
2323       // Atomic swap old header if oop still contains the stack lock
2324       __ lock();
2325       __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2326       __ jcc(Assembler::notEqual, slow_path_unlock);
2327       __ dec_held_monitor_count();
2328     } else {
2329       assert(LockingMode == LM_LIGHTWEIGHT, "must be");
2330       __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);


2331       __ dec_held_monitor_count();
2332     }
2333 
2334     // slow path re-enters here
2335     __ bind(unlock_done);
2336     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2337       restore_native_result(masm, ret_type, stack_slots);
2338     }
2339 
2340     __ bind(fast_done);
2341   }
2342   {
2343     SkipIfEqual skip(masm, &DTraceMethodProbes, false, rscratch1);
2344     save_native_result(masm, ret_type, stack_slots);
2345     __ mov_metadata(c_rarg1, method());
2346     __ call_VM_leaf(
2347          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2348          r15_thread, c_rarg1);
2349     restore_native_result(masm, ret_type, stack_slots);
2350   }
< prev index next >