2792 bgt(flag, inflated);
2793 bne(flag, slow_path);
2794
2795 // Not inflated.
2796
2797 // Try to lock. Transition lock bits 0b01 => 0b00
2798 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a lea");
2799 atomically_flip_locked_state(/* is_unlock */ false, obj, mark, slow_path, MacroAssembler::MemBarAcq);
2800
2801 bind(push);
2802 // After successful lock, push object on lock-stack.
2803 stdx(obj, R16_thread, top);
2804 addi(top, top, oopSize);
2805 stw(top, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
2806 b(locked);
2807 }
2808
2809 { // Handle inflated monitor.
2810 bind(inflated);
2811
2812 // mark contains the tagged ObjectMonitor*.
2813 const Register tagged_monitor = mark;
2814 const uintptr_t monitor_tag = markWord::monitor_value;
2815 const Register owner_addr = tmp2;
2816
2817 // Compute owner address.
2818 addi(owner_addr, tagged_monitor, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag);
2819
2820 // CAS owner (null => current thread).
2821 cmpxchgd(/*flag=*/flag,
2822 /*current_value=*/t,
2823 /*compare_value=*/(intptr_t)0,
2824 /*exchange_value=*/R16_thread,
2825 /*where=*/owner_addr,
2826 MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
2827 MacroAssembler::cmpxchgx_hint_acquire_lock());
2828 beq(flag, locked);
2829
2830 // Check if recursive.
2831 cmpd(flag, t, R16_thread);
2832 bne(flag, slow_path);
2833
2834 // Recursive.
2835 ld(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr);
2836 addi(tmp1, tmp1, 1);
2837 std(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr);
2838 }
2839
2840 bind(locked);
2841 inc_held_monitor_count(tmp1);
2842
2843 #ifdef ASSERT
2844 // Check that locked label is reached with flag == EQ.
2845 Label flag_correct;
2846 beq(flag, flag_correct);
2847 stop("Fast Lock Flag != EQ");
2848 #endif
2849 bind(slow_path);
2850 #ifdef ASSERT
2851 // Check that slow_path label is reached with flag == NE.
2852 bne(flag, flag_correct);
2853 stop("Fast Lock Flag != NE");
2854 bind(flag_correct);
2855 #endif
2856 // C2 uses the value of flag (NE vs EQ) to determine the continuation.
2857 }
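
For orientation, the lock stub above does the following: if the mark word is not inflated, it CASes the lock bits from unlocked (0b01) to locked (0b00) with acquire semantics and pushes the object on the per-thread lock-stack; if the mark word carries a tagged ObjectMonitor*, it CASes the owner field from null to the current thread and treats an already-owning thread as a recursive lock. A minimal self-contained C++ model of that decision tree is sketched below. All names here (FakeObject, FakeThread, fast_lock_model, ...) are invented for illustration; this is not HotSpot code, and it ignores lightweight recursion, lock-stack overflow, inflation races and GC.

#include <atomic>
#include <cstdint>

constexpr uintptr_t kLockBits = 0x3;
constexpr uintptr_t kUnlocked = 0x1;   // 0b01
constexpr uintptr_t kLocked   = 0x0;   // 0b00
constexpr uintptr_t kMonitor  = 0x2;   // 0b10, "inflated"

struct FakeMonitor {
  std::atomic<void*> owner{nullptr};
  intptr_t recursions = 0;
};

struct FakeObject {
  std::atomic<uintptr_t> header{kUnlocked};  // low two bits model the mark-word lock bits
  FakeMonitor* monitor = nullptr;            // set once the header says "inflated" (0b10)
};

struct FakeThread {
  FakeObject* lock_stack[8] = {};
  int lock_stack_top = 0;
};

// Returns true on fast-path success; false means "take the slow path".
bool fast_lock_model(FakeObject* obj, FakeThread* self) {
  uintptr_t mark = obj->header.load(std::memory_order_relaxed);
  uintptr_t bits = mark & kLockBits;
  if (bits == kMonitor) {                               // inflated: lock the monitor
    FakeMonitor* m = obj->monitor;
    void* expected = nullptr;
    if (m->owner.compare_exchange_strong(expected, self, std::memory_order_acq_rel)) {
      return true;                                      // we are now the owner
    }
    if (expected != self) return false;                 // owned by another thread: slow path
    m->recursions++;                                    // recursive re-lock by the owner
    return true;
  }
  if (bits != kUnlocked) return false;                  // already locked or marked: slow path
  uintptr_t locked = (mark & ~kLockBits) | kLocked;     // lock bits 0b01 -> 0b00
  if (!obj->header.compare_exchange_strong(mark, locked,
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed)) {
    return false;                                       // lost the race: slow path
  }
  self->lock_stack[self->lock_stack_top++] = obj;       // push the object on the lock-stack
  return true;
}
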
2931 #ifdef ASSERT
2932 andi_(t, mark, markWord::monitor_value);
2933 bne(CCR0, inflated);
2934 stop("Fast Unlock not monitor");
2935 #endif
2936
2937 bind(inflated);
2938
2939 #ifdef ASSERT
2940 Label check_done;
2941 subi(top, top, oopSize);
2942 cmplwi(CCR0, top, in_bytes(JavaThread::lock_stack_base_offset()));
2943 blt(CCR0, check_done);
2944 ldx(t, R16_thread, top);
2945 cmpd(flag, obj, t);
2946 bne(flag, inflated);
2947 stop("Fast Unlock lock on stack");
2948 bind(check_done);
2949 #endif
2950
2951 // mark contains the tagged ObjectMonitor*.
2952 const Register monitor = mark;
2953 const uintptr_t monitor_tag = markWord::monitor_value;
2954
2955 // Untag the monitor.
2956 subi(monitor, mark, monitor_tag);
2957
2958 const Register recursions = tmp2;
2959 Label not_recursive;
2960
2961 // Check if recursive.
2962 ld(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
2963 addic_(recursions, recursions, -1);
2964 blt(CCR0, not_recursive);
2965
2966 // Recursive unlock.
2967 std(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
2968 crorc(CCR0, Assembler::equal, CCR0, Assembler::equal);
2969 b(unlocked);
2970
2971 bind(not_recursive);
2972
2973 Label release_;
2974 const Register t2 = tmp2;
2975
2976 // Check if the entry lists are empty.
2977 ld(t, in_bytes(ObjectMonitor::EntryList_offset()), monitor);
2978 ld(t2, in_bytes(ObjectMonitor::cxq_offset()), monitor);
2979 orr(t, t, t2);
2980 cmpdi(flag, t, 0);
2981 beq(flag, release_);
2982
2983 // The owner may be anonymous and we removed the last obj entry in
2984 // the lock-stack. This loses the information about the owner.
2985 // Write the thread to the owner field so the runtime knows the owner.
2986 std(R16_thread, in_bytes(ObjectMonitor::owner_offset()), monitor);
2987 b(slow_path);
2988
2989 bind(release_);
2990 // Set owner to null.
2991 release();
2992 // t contains 0
2993 std(t, in_bytes(ObjectMonitor::owner_offset()), monitor);
2994 }
2995
2996 bind(unlocked);
2997 dec_held_monitor_count(t);
2998
2999 #ifdef ASSERT
3000 // Check that unlocked label is reached with flag == EQ.
3001 Label flag_correct;
3002 beq(flag, flag_correct);
3003 stop("Fast Lock Flag != EQ");
3004 #endif
3005 bind(slow_path);
3006 #ifdef ASSERT
3007 // Check that slow_path label is reached with flag == NE.
3008 bne(flag, flag_correct);
3009 stop("Fast Lock Flag != NE");
3010 bind(flag_correct);
3011 #endif
3012 // C2 uses the value of flag (NE vs EQ) to determine the continuation.
3013 }
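
The inflated unlock path above mirrors the usual ObjectMonitor exit protocol: a recursive exit just decrements the recursion counter; a final exit first checks EntryList and cxq, and if either is non-empty it writes the current thread into the owner field (the owner may still be anonymous at that point) and defers to the runtime so a successor can be woken; only when both lists are empty does it clear the owner with a release store. A rough self-contained C++ model follows. The names are invented, this is not HotSpot code, and the successor/wakeup protocol and races with concurrently enqueueing threads are omitted.

#include <atomic>
#include <cstdint>

struct FakeExitMonitor {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> entry_list{nullptr};   // models ObjectMonitor::EntryList
  std::atomic<void*> cxq{nullptr};          // models ObjectMonitor::cxq
  intptr_t recursions = 0;
};

// Returns true if the fast path fully released the monitor; false means "slow path".
bool fast_unlock_model(FakeExitMonitor* m, void* self) {
  if (m->recursions > 0) {                  // recursive exit: just count down
    m->recursions--;
    return true;
  }
  bool waiters = m->entry_list.load(std::memory_order_relaxed) != nullptr ||
                 m->cxq.load(std::memory_order_relaxed) != nullptr;
  if (waiters) {
    // Make the owner explicit (it may have been anonymous) and let the
    // runtime pick and wake a successor.
    m->owner.store(self, std::memory_order_relaxed);
    return false;
  }
  m->owner.store(nullptr, std::memory_order_release);   // uncontended release
  return true;
}
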
|
2792 bgt(flag, inflated);
2793 bne(flag, slow_path);
2794
2795 // Not inflated.
2796
2797 // Try to lock. Transition lock bits 0b01 => 0b00
2798 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a lea");
2799 atomically_flip_locked_state(/* is_unlock */ false, obj, mark, slow_path, MacroAssembler::MemBarAcq);
2800
2801 bind(push);
2802 // After successful lock, push object on lock-stack.
2803 stdx(obj, R16_thread, top);
2804 addi(top, top, oopSize);
2805 stw(top, in_bytes(JavaThread::lock_stack_top_offset()), R16_thread);
2806 b(locked);
2807 }
2808
2809 { // Handle inflated monitor.
2810 bind(inflated);
2811
2812 if (!UseObjectMonitorTable) {
2813 // mark contains the tagged ObjectMonitor*.
2814 const Register tagged_monitor = mark;
2815 const uintptr_t monitor_tag = markWord::monitor_value;
2816 const Register owner_addr = tmp2;
2817
2818 // Compute owner address.
2819 addi(owner_addr, tagged_monitor, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag);
2820
2821 // CAS owner (null => current thread).
2822 cmpxchgd(/*flag=*/flag,
2823 /*current_value=*/t,
2824 /*compare_value=*/(intptr_t)0,
2825 /*exchange_value=*/R16_thread,
2826 /*where=*/owner_addr,
2827 MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
2828 MacroAssembler::cmpxchgx_hint_acquire_lock());
2829 beq(flag, locked);
2830
2831 // Check if recursive.
2832 cmpd(flag, t, R16_thread);
2833 bne(flag, slow_path);
2834
2835 // Recursive.
2836 ld(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr);
2837 addi(tmp1, tmp1, 1);
2838 std(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr);
2839 } else {
2840 // OMCache lookup not supported yet. Take the slowpath.
2841 // Set flag to NE
2842 crxor(flag, Assembler::equal, flag, Assembler::equal);
2843 b(slow_path);
2844 }
2845 }
2846
2847 bind(locked);
2848 inc_held_monitor_count(tmp1);
2849
2850 #ifdef ASSERT
2851 // Check that locked label is reached with flag == EQ.
2852 Label flag_correct;
2853 beq(flag, flag_correct);
2854 stop("Fast Lock Flag != EQ");
2855 #endif
2856 bind(slow_path);
2857 #ifdef ASSERT
2858 // Check that slow_path label is reached with flag == NE.
2859 bne(flag, flag_correct);
2860 stop("Fast Lock Flag != NE");
2861 bind(flag_correct);
2862 #endif
2863 // C2 uses the value of flag (NE vs EQ) to determine the continuation.
2864 }
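
The functional difference between the two versions is the UseObjectMonitorTable branch: with the table enabled, the ObjectMonitor* is looked up in a side table rather than read from the mark word, and since that OMCache lookup is not implemented in this stub yet, the code simply forces the flag to NE and takes the slow path. That flag manipulation is done with CR-bit logic rather than a compare: crxor of the EQ bit with itself always clears it (flag reads as NE), while crorc of the EQ bit with its own complement, used in the recursive-unlock path of the unlock stub, always sets it (flag reads as EQ). The boolean identities behind those two instructions, as a trivial C++ check (illustration only):

#include <cassert>

int main() {
  for (bool eq : {false, true}) {
    assert((eq ^ eq) == 0);    // crxor(bit, bit) : result is always 0 -> flag reads as NE
    assert((eq | !eq) == 1);   // crorc(bit, bit) : bit OR NOT bit is always 1 -> flag reads as EQ
  }
  return 0;
}
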
2938 #ifdef ASSERT
2939 andi_(t, mark, markWord::monitor_value);
2940 bne(CCR0, inflated);
2941 stop("Fast Unlock not monitor");
2942 #endif
2943
2944 bind(inflated);
2945
2946 #ifdef ASSERT
2947 Label check_done;
2948 subi(top, top, oopSize);
2949 cmplwi(CCR0, top, in_bytes(JavaThread::lock_stack_base_offset()));
2950 blt(CCR0, check_done);
2951 ldx(t, R16_thread, top);
2952 cmpd(flag, obj, t);
2953 bne(flag, inflated);
2954 stop("Fast Unlock lock on stack");
2955 bind(check_done);
2956 #endif
2957
2958 if (!UseObjectMonitorTable) {
2959 // mark contains the tagged ObjectMonitor*.
2960 const Register monitor = mark;
2961 const uintptr_t monitor_tag = markWord::monitor_value;
2962
2963 // Untag the monitor.
2964 subi(monitor, mark, monitor_tag);
2965
2966 const Register recursions = tmp2;
2967 Label not_recursive;
2968
2969 // Check if recursive.
2970 ld(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
2971 addic_(recursions, recursions, -1);
2972 blt(CCR0, not_recursive);
2973
2974 // Recursive unlock.
2975 std(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor);
2976 crorc(CCR0, Assembler::equal, CCR0, Assembler::equal);
2977 b(unlocked);
2978
2979 bind(not_recursive);
2980
2981 Label release_;
2982 const Register t2 = tmp2;
2983
2984 // Check if the entry lists are empty.
2985 ld(t, in_bytes(ObjectMonitor::EntryList_offset()), monitor);
2986 ld(t2, in_bytes(ObjectMonitor::cxq_offset()), monitor);
2987 orr(t, t, t2);
2988 cmpdi(flag, t, 0);
2989 beq(flag, release_);
2990
2991 // The owner may be anonymous and we removed the last obj entry in
2992 // the lock-stack. This loses the information about the owner.
2993 // Write the thread to the owner field so the runtime knows the owner.
2994 std(R16_thread, in_bytes(ObjectMonitor::owner_offset()), monitor);
2995 b(slow_path);
2996
2997 bind(release_);
2998 // Set owner to null.
2999 release();
3000 // t contains 0
3001 std(t, in_bytes(ObjectMonitor::owner_offset()), monitor);
3002 } else {
3003 // OMCache lookup not supported yet. Take the slowpath.
3004 // Set flag to NE
3005 crxor(flag, Assembler::equal, flag, Assembler::equal);
3006 b(slow_path);
3007 }
3008 }
3009
3010 bind(unlocked);
3011 dec_held_monitor_count(t);
3012
3013 #ifdef ASSERT
3014 // Check that unlocked label is reached with flag == EQ.
3015 Label flag_correct;
3016 beq(flag, flag_correct);
3017 stop("Fast Lock Flag != EQ");
3018 #endif
3019 bind(slow_path);
3020 #ifdef ASSERT
3021 // Check that slow_path label is reached with flag == NE.
3022 bne(flag, flag_correct);
3023 stop("Fast Lock Flag != NE");
3024 bind(flag_correct);
3025 #endif
3026 // C2 uses the value of flag (NE vs EQ) to determine the continuation.
3027 }