/*
 * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahLock.hpp"

#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.inline.hpp"

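// Slow path for ShenandoahLock, taken when the fast-path acquisition in
// lock() (see shenandoahLock.hpp) fails. Java threads may block for a
// pending safepoint while they wait; other threads (e.g. VM and GC worker
// threads) must not, so they always take the non-blocking path.
//
// Typical use goes through the RAII helpers declared in shenandoahLock.hpp,
// for example (sketch, assuming the ShenandoahHeapLocker typedef there):
//   ShenandoahHeapLocker locker(heap->lock());
//   ... critical section ...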
void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
  Thread* thread = Thread::current();
  if (allow_block_for_safepoint && thread->is_Java_thread()) {
    contended_lock_internal<true>(JavaThread::cast(thread));
  } else {
    contended_lock_internal<false>(nullptr);
  }
}

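// ALLOW_BLOCK is a compile-time parameter: for non-Java threads the
// safepoint-blocking branch below is constant-false and compiles away,
// keeping the spin loop tight.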
template<bool ALLOW_BLOCK>
void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
  assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
  // Spin this much, but only on multi-processor systems.
  int ctr = os::is_MP() ? 0xFF : 0;
  int yields = 0;
  // Test-and-test-and-set (TTAS): re-check the lock state with a plain load
  // first, to avoid the more expensive CAS while the lock is still held by
  // another thread.
  while (Atomic::load(&_state) == locked ||
         Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
    if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
      // Lightly contended, spin a little if no safepoint is pending.
      SpinPause();
      ctr--;
    } else if (ALLOW_BLOCK) {
      ThreadBlockInVM block(java_thread);
      if (SafepointSynchronize::is_synchronizing()) {
        // If a safepoint is pending, we want to block and allow the safepoint to proceed.
        // Normally, TBIVM above would block us in its destructor.
        //
        // But that blocking only happens when TBIVM knows the thread poll is armed.
        // There is a window between announcing a safepoint and arming the thread poll
        // during which repeatedly re-entering TBIVM is counter-productive.
        // Under high contention, we may end up going in circles thousands of times.
        // To avoid that, we wait here until the local poll is armed, and then proceed
        // to the TBIVM exit for blocking. We do not SpinPause, but yield, to let
        // the VM thread arm the poll sooner.
        while (SafepointSynchronize::is_synchronizing() &&
               !SafepointMechanism::local_poll_armed(java_thread)) {
          yield_or_sleep(yields);
        }
      } else {
        yield_or_sleep(yields);
      }
    } else {
      yield_or_sleep(yields);
    }
  }
}

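// The yields counter is owned by the caller, so the yield/sleep policy state
// persists across iterations of the contended-lock loop above.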
void ShenandoahLock::yield_or_sleep(int& yields) {
  // Simple yield-sleep policy: do one 100us sleep after every N yields.
  // Tested with different values of N; N = 3 gave the best performance.
  if (yields < 3) {
    os::naked_yield();
    yields++;
  } else {
    os::naked_short_nanosleep(100000);
    yields = 0;
  }
}

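// ShenandoahSimpleLock is a thin wrapper around the underlying platform lock
// (see shenandoahLock.hpp), which is not usable until VM mutex initialization
// has completed; hence the assert below.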
ShenandoahSimpleLock::ShenandoahSimpleLock() {
  assert(os::mutex_init_done(), "Too early!");
}

void ShenandoahSimpleLock::lock() {
  _lock.lock();
}

void ShenandoahSimpleLock::unlock() {
  _lock.unlock();
}

ShenandoahReentrantLock::ShenandoahReentrantLock() :
  ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
  assert(os::mutex_init_done(), "Too early!");
}

ShenandoahReentrantLock::~ShenandoahReentrantLock() {
  assert(_count == 0, "Unbalanced lock/unlock");
}

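// Reentrancy protocol: the first lock() by a thread acquires the underlying
// lock and records the thread as owner; nested lock() calls by the owner only
// bump _count. The relaxed load of _owner is safe because a thread can observe
// itself as owner only if it stored _owner itself while holding the lock.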
void ShenandoahReentrantLock::lock() {
  Thread* const thread = Thread::current();
  Thread* const owner = Atomic::load(&_owner);

  if (owner != thread) {
    ShenandoahSimpleLock::lock();
    Atomic::store(&_owner, thread);
  }

  _count++;
}

void ShenandoahReentrantLock::unlock() {
  assert(owned_by_self(), "Invalid owner");
  assert(_count > 0, "Invalid count");

  _count--;

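  // Dropping the last recursion level: clear the owner before releasing the
  // underlying lock, so a contending thread never observes a stale owner
  // after it acquires the lock.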
  if (_count == 0) {
    Atomic::store(&_owner, (Thread*)nullptr);
    ShenandoahSimpleLock::unlock();
  }
}

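// Racy by design for non-owners: a thread may see another thread or nullptr
// in _owner, but it can only ever see itself if it set _owner while holding
// the lock, so the answer is always accurate for the calling thread.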
bool ShenandoahReentrantLock::owned_by_self() const {
  Thread* const thread = Thread::current();
  Thread* const owner = Atomic::load(&_owner);
  return owner == thread;
}