#include "code/debugInfoRec.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
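// (Assuming the usual 16-byte StackAlignmentInBytes and 4-byte VM stack
// slots on x86_32, this works out to 4 slots.)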

class RegisterSaver {
  // Capture info about frame layout
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
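// For example, DEF_XMM_OFFS(0) expands to
//   xmm0_off = xmm_off + (0)*16/BytesPerInt, xmm0H_off
// i.e. each 16-byte XMM register gets a named low slot every 4 ints from
// xmm_off, with the H_off enumerator naming the adjacent slot. (xmm_off is
// presumably defined further down in this enum, outside this excerpt.)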
  enum layout {
    fpu_state_off = 0,
    fpu_state_end = fpu_state_off+FPUStateSizeInWords,
    st0_off, st0H_off,
    st1_off, st1H_off,

// ...

      __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      __ jcc(Assembler::equal, count_mon);

      // Test if the oopMark is an obvious stack pointer, i.e.,
      //  1) (mark & 3) == 0, and
      //  2) rsp <= mark < rsp + os::vm_page_size()
      // These 3 tests can be done by evaluating the following
      // expression: ((mark - rsp) & (3 - os::vm_page_size())),
      // assuming both the stack pointer and the page size have their
      // least significant 2 bits clear.
      // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
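      //
      // For example, assuming a 4 KiB page (os::vm_page_size() == 4096):
      // 3 - 4096 == -4093 == 0xfffff003 as a 32-bit mask, which keeps the low
      // 2 bits of (mark - rsp) plus every bit from the page-size bit upward.
      // The AND below is therefore zero exactly when mark is 4-byte aligned
      // and 0 <= mark - rsp < 4096, i.e. when mark points into the current
      // stack page at or above rsp.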

      __ subptr(swap_reg, rsp);
      __ andptr(swap_reg, 3 - (int)os::vm_page_size());

      // Save the test result; for the recursive case the result is zero
      __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
      __ jcc(Assembler::notEqual, slow_path_lock);
    } else {
      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
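      // Note: lock_reg is passed twice, as both the BasicLock address and
      // the tmp register; lightweight_lock clobbers it, which is why the
      // slow path below reloads it before calling into the runtime.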
      __ lightweight_lock(lock_reg, obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
    }
    __ bind(count_mon);
    __ inc_held_monitor_count();

    // Slow path will re-enter here
    __ bind(lock_done);
  }


  // Finally just about ready to make the JNI call

  // Get JNIEnv*, which is the first argument to the native method
  __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
  __ movptr(Address(rsp, 0), rdx);

  // Now transition the thread state to _thread_in_native
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(RuntimeAddress(native_func));
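
  // In rough C terms, the three instructions above amount to:
  //   JNIEnv* env = &thread->_jni_environment;      // first outgoing argument
  //   thread->_thread_state = _thread_in_native;
  //   (*native_func)(env, /* remaining args already stored on the stack */);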

// ...

      // Pop st0, store it out as a double, and reload it into an XMM register:
      // the native ABI returns floating-point results in st0, but the Java
      // caller expects them in xmm0 (with UseSSE >= 2)
      __ fstp_d(Address(rbp, -8));
      __ movdbl(xmm0, Address(rbp, -8));
    }
  }

  // Return

  __ leave();
  __ ret(0);
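  // (leave is shorthand for "mov rsp, rbp; pop rbp", tearing down the frame
  // set up in the prologue; ret(0) then returns without popping any argument
  // bytes off the caller's stack.)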

  // Unexpected paths are out of line and go here

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    // BEGIN Slow path lock

    __ bind(slow_path_lock);

    if (LockingMode == LM_LIGHTWEIGHT) {
      // Reload lock_reg with the address of the BasicLock slot; it was
      // clobbered by lightweight_lock above.
      __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
    }

    // last_Java_frame is already set up and no exception can be pending,
    // so make a vanilla C call rather than going through call_VM.
    // args are (oop obj, BasicLock* lock, JavaThread* thread)
    __ push(thread);
    __ push(lock_reg);
    __ push(obj_reg);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
    __ addptr(rsp, 3*wordSize);
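
    // complete_monitor_locking_C follows the C calling convention: the three
    // arguments were pushed right to left, and the caller pops them again
    // (the addptr above).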

#ifdef ASSERT
    { Label L;
      __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ jmp(lock_done);

    // END Slow path lock
