/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_MUTEX_HPP
#define SHARE_RUNTIME_MUTEX_HPP

#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/semaphore.hpp"

#if defined(LINUX) || defined(AIX) || defined(BSD)
# include "mutex_posix.hpp"
#else
# include OS_HEADER(mutex)
#endif


// A Mutex/Monitor is a simple wrapper around a native lock plus condition
// variable that adds lock ownership tracking and lock ranking for deadlock
// detection, and coordinates with the safepoint protocol.

// Locking is non-recursive: if you try to lock a mutex you already own then you
// will get an assertion failure in a debug build (which should suffice to expose
// usage bugs). If you call try_lock on a mutex you already own it will return false.
// The underlying PlatformMutex may support recursive locking but this is not exposed
// and we account for that possibility in try_lock.
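
// For illustration only -- a hedged sketch, not part of this header; the lock
// name is hypothetical:
//
//   Mutex example_lock(Mutex::safepoint, "Example_lock");
//   example_lock.lock();                    // first acquisition succeeds
//   bool again = example_lock.try_lock();   // returns false: already owned by self
//   // example_lock.lock();                 // a second lock() would assert in a debug build
//   example_lock.unlock();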

// A thread is not allowed to safepoint while holding a mutex whose rank
// is nosafepoint or lower.

// The Mutex class used to explicitly guarantee fence(); lock(); acquire(); semantics with
// a hand-crafted implementation. That may or may not be a desirable contract for a Mutex,
// but it is nevertheless something that older HotSpot code may rely on for correctness.
// Newer code is encouraged not to rely on this feature, but it is not generally safe to
// remove the fences until every usage of Mutex has been evaluated, case by case, as to
// whether it actually relies on this stronger contract.

// Having a fence does not have any significant impact on performance, as this is an internal VM
// mutex and is generally not in hot code paths.

class Mutex : public CHeapObj<mtSynchronizer> {

  friend class VMStructs;
 public:
  // Special low level locks are given names and ranges to avoid overlap; a usage
  // sketch follows the rank operators below.
  enum class Rank {
    event,
    service        = event          + 6,
    stackwatermark = service        + 3,
    tty            = stackwatermark + 3,
    oopstorage     = tty            + 3,
    nosafepoint    = oopstorage     + 6,
    safepoint      = nosafepoint    + 20
  };

  // We would like C++20 "using enum" directives here; until then, re-expose the
  // enumerators as class-scope constants.
  static const Rank event          = Rank::event;
  static const Rank service        = Rank::service;
  static const Rank stackwatermark = Rank::stackwatermark;
  static const Rank tty            = Rank::tty;
  static const Rank oopstorage     = Rank::oopstorage;
  static const Rank nosafepoint    = Rank::nosafepoint;
  static const Rank safepoint      = Rank::safepoint;

  static void assert_no_overlap(Rank orig, Rank adjusted, int adjust);

  friend Rank operator-(Rank base, int adjust) {
    Rank result = static_cast<Rank>(static_cast<int>(base) - adjust);
    DEBUG_ONLY(assert_no_overlap(base, result, adjust));
    return result;
  }

  friend constexpr bool operator<(Rank lhs, Rank rhs) {
    return static_cast<int>(lhs) < static_cast<int>(rhs);
  }

  friend constexpr bool operator>(Rank lhs, Rank rhs)  { return rhs < lhs; }
  friend constexpr bool operator<=(Rank lhs, Rank rhs) { return !(lhs > rhs); }
  friend constexpr bool operator>=(Rank lhs, Rank rhs) { return !(lhs < rhs); }
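
  // Illustrative sketch (not part of this header; the lock name is hypothetical):
  // a rank is typically passed to the constructor either as one of the named
  // values above or adjusted downward by a small amount, and in debug builds the
  // adjustment is checked (assert_no_overlap) so that it does not collide with
  // another named rank range.
  //
  //   Mutex* example_lock = new Mutex(Mutex::nosafepoint - 2, "Example_lock");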

 private:
  // The _owner field is only set by the current thread, either to itself after it has acquired
  // the low-level _lock, or to null before it has released the _lock. Accesses by any thread other
  // than the lock owner are inherently racy.
  Thread* volatile _owner;
  void raw_set_owner(Thread* new_owner) { AtomicAccess::store(&_owner, new_owner); }

 protected:                        // Monitor-Mutex metadata
  PlatformMonitor _lock;           // Native monitor implementation
  const char* _name;               // Name of mutex/monitor

  // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
#ifndef PRODUCT
  bool _allow_vm_block;
#endif
  static Mutex** _mutex_array;
  static int _num_mutex;

#ifdef ASSERT
  Rank _rank;                      // rank (to avoid/detect potential deadlocks)
  Mutex* _next;                    // Used by a Thread to link up owned locks
  Thread* _last_owner;             // the last thread to own the lock
  bool _skip_rank_check;           // read only by owner when doing rank checks

  static Mutex* get_least_ranked_lock(Mutex* locks);
  Mutex* get_least_ranked_lock_besides_this(Mutex* locks);
  bool skip_rank_check() {
    assert(owned_by_self(), "only the owner should call this");
    return _skip_rank_check;
  }

 public:
  Rank rank() const       { return _rank; }
  const char* rank_name() const;
  Mutex* next() const     { return _next; }
#endif // ASSERT

 protected:
  void set_owner_implementation(Thread* owner)    NOT_DEBUG({ raw_set_owner(owner);});
  void check_block_state       (Thread* thread)   NOT_DEBUG_RETURN;
  void check_safepoint_state   (Thread* thread)   NOT_DEBUG_RETURN;
  void check_no_safepoint_state(Thread* thread)   NOT_DEBUG_RETURN;
  void check_rank              (Thread* thread)   NOT_DEBUG_RETURN;
  void assert_owner            (Thread* expected) NOT_DEBUG_RETURN;

 public:
  static const bool _allow_vm_block_flag = true;

  // Locks can be acquired with or without a safepoint check. NonJavaThreads do not follow
  // the safepoint protocol when acquiring locks.

  // Each lock can be acquired by only JavaThreads, only NonJavaThreads, or shared between
  // Java and NonJavaThreads. When a lock is initialized with rank > nosafepoint, every
  // acquisition by a JavaThread is verified to be made with a safepoint check. Conversely,
  // when a lock is initialized with rank <= nosafepoint, every acquisition by a JavaThread
  // is verified to be made without a safepoint check. (See the illustrative sketch below.)

  // TODO: Locks that are shared between JavaThreads and NonJavaThreads
  // should never encounter a safepoint check while they are held, or else a
  // deadlock can occur. We should check this by noting which
  // locks are shared, and walk held locks during safepoint checking.
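
  // Illustrative sketch (not part of this header; the lock names are hypothetical):
  //
  //   Mutex checked_lock(Mutex::safepoint, "Checked_lock");
  //   Mutex unchecked_lock(Mutex::nosafepoint, "Unchecked_lock");
  //
  //   checked_lock.lock();                            // JavaThread: performs a safepoint check
  //   checked_lock.unlock();
  //
  //   unchecked_lock.lock_without_safepoint_check();  // no safepoint check is made
  //   unchecked_lock.unlock();
  //
  // In practice most code acquires these locks through the MutexLocker family of
  // stack objects (see mutexLocker.hpp) rather than calling lock()/unlock() directly.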

  enum class SafepointCheckFlag {
    _safepoint_check_flag,
    _no_safepoint_check_flag
  };
  // Bring the enumerator names into class scope.
  static const SafepointCheckFlag _safepoint_check_flag =
      SafepointCheckFlag::_safepoint_check_flag;
  static const SafepointCheckFlag _no_safepoint_check_flag =
      SafepointCheckFlag::_no_safepoint_check_flag;

 public:
  Mutex(Rank rank, const char *name, bool allow_vm_block);

  Mutex(Rank rank, const char *name) :
    Mutex(rank, name, rank <= nosafepoint) {}

  ~Mutex();

  void lock();                     // prints out a warning if the VM thread blocks
  void lock(Thread *thread);       // overload taking the current thread
  void unlock();
  bool is_locked() const           { return owner() != nullptr; }

  bool try_lock();                 // Like lock(), but non-blocking: returns false instead of
                                   // blocking if the lock is unavailable or already owned by self.
 private:
  void lock_contended(Thread *thread); // contended slow-path
  bool try_lock_inner(bool do_rank_checks);
 public:

  void release_for_safepoint();

  // Lock without safepoint check. Should ONLY be used by safepoint code and other code
  // that is guaranteed not to block while running inside the VM.
  void lock_without_safepoint_check();
  void lock_without_safepoint_check(Thread* self);
  // A thread should not call this if failure to acquire ownership will block its progress
  bool try_lock_without_rank_check();

  // Current owner - note: not MT-safe. Can only be used reliably to check whether
  // the currently running thread owns the lock.
  Thread* owner() const            { return AtomicAccess::load(&_owner); }
  void set_owner(Thread* owner)    { set_owner_implementation(owner); }
  bool owned_by_self() const;

  const char *name() const         { return _name; }

  static void add_mutex(Mutex* var);

  void print_on_error(outputStream* st) const;
#ifndef PRODUCT
  void print_on(outputStream* st) const;
  void print() const;
#endif

  // Print all mutexes/monitors that are currently owned by a thread; called
  // by fatal error handler.
  static void print_owned_locks_on_error(outputStream* st);
  static void print_lock_ranks(outputStream* st);
};

class Monitor : public Mutex {
 public:
  Monitor(Rank rank, const char *name, bool allow_vm_block) :
    Mutex(rank, name, allow_vm_block) {}

  Monitor(Rank rank, const char *name) :
    Mutex(rank, name) {}
  // default destructor

  // Wait until the monitor is notified (or the wait times out).
  // By default a safepoint check is made, and the default wait time of zero
  // means wait forever. Returns true if the wait timed out, false otherwise.
  // (A usage sketch follows this class.)
  bool wait(uint64_t timeout = 0);
  bool wait_without_safepoint_check(uint64_t timeout = 0);
  void notify();
  void notify_all();
};
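
// Illustrative sketch (not part of this header): a typical wait/notify handshake
// on a Monitor. The monitor name and the condition helpers are hypothetical.
//
//   Monitor example_monitor(Mutex::safepoint, "Example_lock");
//
//   // Waiting side:
//   example_monitor.lock();
//   while (!condition_is_ready()) {      // hypothetical predicate
//     example_monitor.wait();            // safepoint check; waits until notified
//   }
//   example_monitor.unlock();
//
//   // Notifying side:
//   example_monitor.lock();
//   make_condition_ready();              // hypothetical state change
//   example_monitor.notify_all();
//   example_monitor.unlock();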


class PaddedMutex : public Mutex {
  enum {
    CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Mutex),
    PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
  };
  char _padding[PADDING_LEN];
 public:
  PaddedMutex(Rank rank, const char *name, bool allow_vm_block) : Mutex(rank, name, allow_vm_block) {}
  PaddedMutex(Rank rank, const char *name) : Mutex(rank, name) {}
};

class PaddedMonitor : public Monitor {
  enum {
    CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Monitor),
    PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
  };
  char _padding[PADDING_LEN];
 public:
  PaddedMonitor(Rank rank, const char *name, bool allow_vm_block) : Monitor(rank, name, allow_vm_block) {}
  PaddedMonitor(Rank rank, const char *name) : Monitor(rank, name) {}
};

// RecursiveMutex is a minimal implementation and has none of the safety and rank checks
// that Mutex has. There are also no checks that the recursive lock is not held when going
// to Java or to JNI, as there are for other JVM mutexes. It should be used only for cases
// where the alternatives with all the nice safety features don't work.
// Waiting to acquire a RecursiveMutex participates in the safepoint protocol if the current
// thread is a JavaThread (i.e. the waiting JavaThread is set to the blocked state).
// A usage sketch follows the class.
class RecursiveMutex : public CHeapObj<mtThread> {
  Semaphore _sem;
  Thread*   _owner;
  int       _recursions;

  NONCOPYABLE(RecursiveMutex);
 public:
  RecursiveMutex();
  void lock(Thread* current);
  void unlock(Thread* current);
  // For use in asserts
  bool holds_lock(Thread* current) { return _owner == current; }
};
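
// Illustrative sketch (not part of this header): unlike Mutex, a RecursiveMutex
// tolerates nested acquisition by the owning thread.
//
//   RecursiveMutex example_lock;
//   Thread* current = Thread::current();
//   example_lock.lock(current);
//   example_lock.lock(current);      // nested acquire by the owner is allowed
//   assert(example_lock.holds_lock(current), "for use in asserts");
//   example_lock.unlock(current);
//   example_lock.unlock(current);    // released after the outermost unlock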

#endif // SHARE_RUNTIME_MUTEX_HPP