1 /*
2 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_RUNTIME_MUTEX_HPP
26 #define SHARE_RUNTIME_MUTEX_HPP
27
28 #include "memory/allocation.hpp"
29 #include "runtime/atomicAccess.hpp"
30 #include "runtime/semaphore.hpp"
31
32 #if defined(LINUX) || defined(AIX) || defined(BSD)
33 # include "mutex_posix.hpp"
34 #else
35 # include OS_HEADER(mutex)
36 #endif
37
38
39 // A Mutex/Monitor is a simple wrapper around a native lock plus condition
40 // variable that supports lock ownership tracking, lock ranking for deadlock
41 // detection and coordinates with the safepoint protocol.
42
43 // Locking is non-recursive: if you try to lock a mutex you already own then you
44 // will get an assertion failure in a debug build (which should suffice to expose
45 // usage bugs). If you call try_lock on a mutex you already own it will return false.
46 // The underlying PlatformMutex may support recursive locking but this is not exposed
47 // and we account for that possibility in try_lock.
48
49 // A thread is not allowed to safepoint while holding a mutex whose rank
50 // is nosafepoint or lower.
51
52 // The Mutex class used to explicitly guarantee fence(); lock(); acquire(); semantics with
53 // a hand crafted implementation. That may or may not be a desirable contract for a Mutex,
54 // but is nevertheless something that older HotSpot code may or may not rely on for correctness.
// Newer code is encouraged not to rely on this feature, but it is not generally safe to
56 // remove the fences, until all usages of Mutex have been evaluated on a case-by-case basis, whether
57 // they actually rely on this stronger contract, or not.
58
// Having a fence does not have any significant impact on performance, as this is an internal VM
60 // mutex and is generally not in hot code paths.
61
class Mutex : public CHeapObj<mtSynchronizer> {

 friend class VMStructs;
 public:
  // Special low level locks are given names and ranges to avoid overlap.
  enum class Rank {
    event,
    service = event + 6,
    stackwatermark = service + 3,
    tty = stackwatermark + 3,
    oopstorage = tty + 3,
    nosafepoint = oopstorage + 6,
    safepoint = nosafepoint + 20
  };

  // Would use C++20 "using enum" directives here; until then, mirror the
  // enumerator names as class-scope constants so callers can write
  // Mutex::safepoint etc.
  static const Rank event = Rank::event;
  static const Rank service = Rank::service;
  static const Rank stackwatermark = Rank::stackwatermark;
  static const Rank tty = Rank::tty;
  static const Rank oopstorage = Rank::oopstorage;
  static const Rank nosafepoint = Rank::nosafepoint;
  static const Rank safepoint = Rank::safepoint;

  // Debug check that adjusting 'orig' by 'adjust' produced a rank still
  // within the same named range (i.e. the ranges above did not overlap).
  static void assert_no_overlap(Rank orig, Rank adjusted, int adjust);

  // Allow "rank - n" to derive a slightly lower rank within the same named range.
  friend Rank operator-(Rank base, int adjust) {
    Rank result = static_cast<Rank>(static_cast<int>(base) - adjust);
    DEBUG_ONLY(assert_no_overlap(base, result, adjust));
    return result;
  }

  // Ranks are ordered by their underlying integer value.
  friend constexpr bool operator<(Rank lhs, Rank rhs) {
    return static_cast<int>(lhs) < static_cast<int>(rhs);
  }

  friend constexpr bool operator>(Rank lhs, Rank rhs) { return rhs < lhs; }
  friend constexpr bool operator<=(Rank lhs, Rank rhs) { return !(lhs > rhs); }
  friend constexpr bool operator>=(Rank lhs, Rank rhs) { return !(lhs < rhs); }

 private:
  // The _owner field is only set by the current thread, either to itself after it has acquired
  // the low-level _lock, or to null before it has released the _lock. Accesses by any thread other
  // than the lock owner are inherently racy.
  Thread* volatile _owner;
  // Plain atomic store of the owner field; no ordering implied beyond the store itself.
  void raw_set_owner(Thread* new_owner) { AtomicAccess::store(&_owner, new_owner); }

 protected: // Monitor-Mutex metadata
  PlatformMonitor _lock; // Native monitor implementation
  const char* _name; // Name of mutex/monitor
  int _id; // ID for named mutexes

  // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
#ifndef PRODUCT
  bool _allow_vm_block;
#endif
  // Global table of all registered mutexes (see add_mutex()); used for
  // diagnostic printing.
  static Mutex** _mutex_array;
  static int _num_mutex;

#ifdef ASSERT
  Rank _rank; // rank (to avoid/detect potential deadlocks)
  Mutex* _next; // Used by a Thread to link up owned locks
  Thread* _last_owner; // the last thread to own the lock
  bool _skip_rank_check; // read only by owner when doing rank checks

  static Mutex* get_least_ranked_lock(Mutex* locks);
  Mutex* get_least_ranked_lock_besides_this(Mutex* locks);
  bool skip_rank_check() {
    assert(owned_by_self(), "only the owner should call this");
    return _skip_rank_check;
  }

 public:
  Rank rank() const { return _rank; }
  const char* rank_name() const;
  Mutex* next() const { return _next; }
#endif // ASSERT

 protected:
  void set_owner_implementation(Thread* owner) NOT_DEBUG({ raw_set_owner(owner);});
  void check_block_state (Thread* thread) NOT_DEBUG_RETURN;
  void check_safepoint_state (Thread* thread) NOT_DEBUG_RETURN;
  void check_no_safepoint_state(Thread* thread) NOT_DEBUG_RETURN;
  void check_rank (Thread* thread) NOT_DEBUG_RETURN;
  void assert_owner (Thread* expected) NOT_DEBUG_RETURN;

 public:
  static const bool _allow_vm_block_flag = true;

  // Locks can be acquired with or without a safepoint check. NonJavaThreads do not follow
  // the safepoint protocol when acquiring locks.

  // Each lock can be acquired by only JavaThreads, only NonJavaThreads, or shared between
  // Java and NonJavaThreads. When the lock is initialized with rank > nosafepoint,
  // that means that whenever the lock is acquired by a JavaThread, it will verify that
  // it is done with a safepoint check. In corollary, when the lock is initialized with
  // rank <= nosafepoint, that means that whenever the lock is acquired by a JavaThread
  // it will verify that it is done without a safepoint check.

  // TODO: Locks that are shared between JavaThreads and NonJavaThreads
  // should never encounter a safepoint check while they are held, or else a
  // deadlock can occur. We should check this by noting which
  // locks are shared, and walk held locks during safepoint checking.

  enum class SafepointCheckFlag {
    _safepoint_check_flag,
    _no_safepoint_check_flag
  };
  // Bring the enumerator names into class scope.
  static const SafepointCheckFlag _safepoint_check_flag =
    SafepointCheckFlag::_safepoint_check_flag;
  static const SafepointCheckFlag _no_safepoint_check_flag =
    SafepointCheckFlag::_no_safepoint_check_flag;

 public:
  Mutex(Rank rank, const char *name, bool allow_vm_block);

  // Convenience constructor: locks ranked above nosafepoint default to
  // allow_vm_block == false, all others to true.
  Mutex(Rank rank, const char *name) :
    Mutex(rank, name, rank > nosafepoint ? false : true) {}

  ~Mutex();

  void lock(); // prints out warning if VM thread blocks
  void lock(Thread *thread); // overloaded with current thread
  void unlock();
  bool is_locked() const { return owner() != nullptr; }

  bool try_lock(); // Like lock(), but non-blocking: returns false instead of waiting,
                   // and returns false if this thread already owns the lock.
 private:
  void lock_contended(Thread *thread); // contended slow-path
  bool try_lock_inner(bool do_rank_checks);
 public:

  // Drop the lock so the thread can park for a safepoint; safepoint code only.
  void release_for_safepoint();

  // Lock without safepoint check. Should ONLY be used by safepoint code and other code
  // that is guaranteed not to block while running inside the VM.
  void lock_without_safepoint_check();
  void lock_without_safepoint_check(Thread* self);
  // A thread should not call this if failure to acquire ownership will block its progress
  bool try_lock_without_rank_check();

  // Current owner - note not MT-safe. Can only be used to guarantee that
  // the current running thread owns the lock
  Thread* owner() const { return AtomicAccess::load(&_owner); }
  void set_owner(Thread* owner) { set_owner_implementation(owner); }
  bool owned_by_self() const;

  const char *name() const { return _name; }

  int id() const { return _id; }
  // void set_id(int id) { _id = id; }

  // Register a mutex in the global _mutex_array table.
  static void add_mutex(Mutex* var);

  void print_on_error(outputStream* st) const;
#ifndef PRODUCT
  void print_on(outputStream* st) const;
  void print() const;
#endif

  // Print all mutexes/monitors that are currently owned by a thread; called
  // by fatal error handler.
  static void print_owned_locks_on_error(outputStream* st);
  static void print_lock_ranks(outputStream* st);

  static int num_mutex() { return _num_mutex; }
};
230
// A Monitor is a Mutex that additionally supports condition-variable style
// wait/notify operations.
class Monitor : public Mutex {
 public:
  Monitor(Rank rank, const char *name, bool allow_vm_block) :
    Mutex(rank, name, allow_vm_block) {}

  // Inherits the allow_vm_block default from the two-argument Mutex constructor.
  Monitor(Rank rank, const char *name) :
    Mutex(rank, name) {}
  // default destructor

  // Wait until monitor is notified (or times out).
  // Defaults are to make safepoint checks, wait time is forever (i.e.,
  // zero). Returns true if wait times out; otherwise returns false.
  bool wait(uint64_t timeout = 0);
  bool wait_without_safepoint_check(uint64_t timeout = 0);
  void notify();
  void notify_all();
};
248
249
250 class PaddedMutex : public Mutex {
251 enum {
252 CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Mutex),
253 PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
254 };
255 char _padding[PADDING_LEN];
256 public:
257 PaddedMutex(Rank rank, const char *name, bool allow_vm_block) : Mutex(rank, name, allow_vm_block) {};
258 PaddedMutex(Rank rank, const char *name) : Mutex(rank, name) {};
259 };
260
261 class PaddedMonitor : public Monitor {
262 enum {
263 CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Monitor),
264 PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
265 };
266 char _padding[PADDING_LEN];
267 public:
268 PaddedMonitor(Rank rank, const char *name, bool allow_vm_block) : Monitor(rank, name, allow_vm_block) {};
269 PaddedMonitor(Rank rank, const char *name) : Monitor(rank, name) {};
270 };
271
// RecursiveMutex is a minimal implementation, and has no safety and rank checks that Mutex has.
// There are also no checks that the recursive lock is not held when going to Java or to JNI, like
// other JVM mutexes have. This should be used only for cases where the alternatives with all the
// nice safety features don't work.
// Waiting on the RecursiveMutex participates in the safepoint protocol if the current thread is a Java thread,
// (i.e. waiting sets JavaThread to blocked)
class RecursiveMutex : public CHeapObj<mtThread> {
  Semaphore _sem;       // underlying synchronization primitive
  Thread* _owner;       // current owner, or null when unlocked
  int _recursions;      // nesting depth of the owner's lock() calls

  NONCOPYABLE(RecursiveMutex);
 public:
  RecursiveMutex();
  // Acquires the lock for 'current'; may be called again by the owner,
  // which increments the recursion count instead of self-deadlocking.
  void lock(Thread* current);
  // Releases one level of recursion; the lock is freed when the count
  // returns to its base level.
  void unlock(Thread* current);
  // For use in asserts
  bool holds_lock(Thread* current) { return _owner == current; }
};
291
292 #endif // SHARE_RUNTIME_MUTEX_HPP