/*
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_MUTEX_HPP
#define SHARE_RUNTIME_MUTEX_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/semaphore.hpp"

#if defined(LINUX) || defined(AIX) || defined(BSD)
# include "mutex_posix.hpp"
#else
# include OS_HEADER(mutex)
#endif


// A Mutex/Monitor is a simple wrapper around a native lock plus condition
// variable that supports lock ownership tracking, lock ranking for deadlock
// detection, and coordination with the safepoint protocol.

// Locking is non-recursive: if you try to lock a mutex you already own then you
// will get an assertion failure in a debug build (which should suffice to expose
// usage bugs). If you call try_lock on a mutex you already own it will return false.
// The underlying PlatformMutex may support recursive locking, but this is not exposed,
// and we account for that possibility in try_lock.

// A thread is not allowed to safepoint while holding a mutex whose rank
// is nosafepoint or lower.
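//
// A minimal usage sketch (illustrative only; the lock name "Example_lock" is
// hypothetical). In practice most VM code takes these locks through the
// MutexLocker/MonitorLocker RAII helpers declared in mutexLocker.hpp rather
// than calling lock()/unlock() directly:
//
//   Mutex* example_lock = new Mutex(Mutex::nosafepoint, "Example_lock");
//   example_lock->lock_without_safepoint_check();  // rank <= nosafepoint: acquire without a safepoint check
//   assert(example_lock->owned_by_self(), "we hold it now");
//   // ... touch the data guarded by example_lock ...
//   example_lock->unlock();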

class Mutex : public CHeapObj<mtSynchronizer> {

  friend class VMStructs;
 public:
  // Special low-level locks are given named ranks; the gaps between the
  // values keep adjacent rank ranges from overlapping.
  enum class Rank {
       event,
       service        = event          +   6,
       stackwatermark = service        +   3,
       tty            = stackwatermark +   3,
       oopstorage     = tty            +   3,
       nosafepoint    = oopstorage     +   6,
       safepoint      = nosafepoint    +  20
  };

  // We would like C++20 "using enum" directives here; until then, bring the
  // enumerators into class scope explicitly.
  static const Rank event          = Rank::event;
  static const Rank service        = Rank::service;
  static const Rank stackwatermark = Rank::stackwatermark;
  static const Rank tty            = Rank::tty;
  static const Rank oopstorage     = Rank::oopstorage;
  static const Rank nosafepoint    = Rank::nosafepoint;
  static const Rank safepoint      = Rank::safepoint;

  static void assert_no_overlap(Rank orig, Rank adjusted, int adjust);

  friend Rank operator-(Rank base, int adjust) {
    Rank result = static_cast<Rank>(static_cast<int>(base) - adjust);
    DEBUG_ONLY(assert_no_overlap(base, result, adjust));
    return result;
  }

  friend constexpr bool operator<(Rank lhs, Rank rhs) {
    return static_cast<int>(lhs) < static_cast<int>(rhs);
  }

  friend constexpr bool operator>(Rank lhs, Rank rhs)  { return rhs < lhs; }
  friend constexpr bool operator<=(Rank lhs, Rank rhs) { return !(lhs > rhs); }
  friend constexpr bool operator>=(Rank lhs, Rank rhs) { return !(lhs < rhs); }
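
  // Example of how the rank arithmetic above is typically used (illustrative
  // sketch; the lock name "Example_lock" is hypothetical). A lock can be
  // created a few rank units below a named rank, and in debug builds
  // operator- asserts that the adjusted value does not cross into an
  // adjacent named range:
  //
  //   Mutex* example = new Mutex(Mutex::nosafepoint - 2, "Example_lock");
  //   assert(example->rank() < Mutex::nosafepoint, "ranked below nosafepoint");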

 private:
  // The _owner field is only set by the current thread, either to itself after it has acquired
  // the low-level _lock, or to null before it has released the _lock. Accesses by any thread other
  // than the lock owner are inherently racy.
  Thread* volatile _owner;
  void raw_set_owner(Thread* new_owner) { Atomic::store(&_owner, new_owner); }

 protected:                              // Monitor-Mutex metadata
  PlatformMonitor _lock;                 // Native monitor implementation
  const char* _name;                     // Name of mutex/monitor
  int _id;                               // ID for named mutexes

  // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
#ifndef PRODUCT
  bool    _allow_vm_block;
#endif
  static Mutex** _mutex_array;
  static int _num_mutex;

#ifdef ASSERT
  Rank    _rank;                 // rank (to avoid/detect potential deadlocks)
  Mutex*  _next;                 // Used by a Thread to link up owned locks
  Thread* _last_owner;           // the last thread to own the lock
  bool _skip_rank_check;         // read only by owner when doing rank checks

  static Mutex* get_least_ranked_lock(Mutex* locks);
  Mutex* get_least_ranked_lock_besides_this(Mutex* locks);
  bool skip_rank_check() {
    assert(owned_by_self(), "only the owner should call this");
    return _skip_rank_check;
  }

 public:
  Rank   rank() const          { return _rank; }
  const char*  rank_name() const;
  Mutex* next()  const         { return _next; }
#endif // ASSERT

 protected:
  void set_owner_implementation(Thread* owner)                        NOT_DEBUG({ raw_set_owner(owner);});
  void check_block_state       (Thread* thread)                       NOT_DEBUG_RETURN;
  void check_safepoint_state   (Thread* thread)                       NOT_DEBUG_RETURN;
  void check_no_safepoint_state(Thread* thread)                       NOT_DEBUG_RETURN;
  void check_rank              (Thread* thread)                       NOT_DEBUG_RETURN;
  void assert_owner            (Thread* expected)                     NOT_DEBUG_RETURN;

 public:
  static const bool _allow_vm_block_flag        = true;

  // Locks can be acquired with or without a safepoint check. NonJavaThreads do not follow
  // the safepoint protocol when acquiring locks.

  // Each lock can be acquired by only JavaThreads, only NonJavaThreads, or shared between
  // Java and NonJavaThreads. When the lock is initialized with rank > nosafepoint,
  // every acquisition by a JavaThread is verified to be done with a safepoint check.
  // Conversely, when the lock is initialized with rank <= nosafepoint, every acquisition
  // by a JavaThread is verified to be done without a safepoint check.

  // TODO: Locks that are shared between JavaThreads and NonJavaThreads
  // should never encounter a safepoint check while they are held, or else a
  // deadlock can occur. We should check this by noting which
  // locks are shared, and walk held locks during safepoint checking.

  enum class SafepointCheckFlag {
    _safepoint_check_flag,
    _no_safepoint_check_flag
  };
  // Bring the enumerator names into class scope.
  static const SafepointCheckFlag _safepoint_check_flag =
    SafepointCheckFlag::_safepoint_check_flag;
  static const SafepointCheckFlag _no_safepoint_check_flag =
    SafepointCheckFlag::_no_safepoint_check_flag;
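
  // How these flags are typically consumed (illustrative sketch): the RAII
  // helpers in mutexLocker.hpp accept a SafepointCheckFlag so the caller
  // states explicitly which acquisition mode is intended. "Example_lock" is
  // a hypothetical lock:
  //
  //   {
  //     MutexLocker ml(Example_lock, Mutex::_no_safepoint_check_flag);
  //     // critical section; Example_lock must have rank <= nosafepoint
  //   }   // unlocked when ml goes out of scope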

 public:
  Mutex(Rank rank, const char *name, bool allow_vm_block);

  Mutex(Rank rank, const char *name) :
    Mutex(rank, name, rank > nosafepoint ? false : true) {}

  ~Mutex();

  void lock(); // prints out warning if VM thread blocks
  void lock(Thread *thread); // overloaded with current thread
  void unlock();
  bool is_locked() const                     { return owner() != nullptr; }

  bool try_lock(); // Like lock(), but non-blocking: returns false if the lock cannot be acquired immediately.
 private:
  void lock_contended(Thread *thread); // contended slow-path
  bool try_lock_inner(bool do_rank_checks);
 public:

  void release_for_safepoint();

  // Lock without safepoint check. Should ONLY be used by safepoint code and other code
  // that is guaranteed not to block while running inside the VM.
  void lock_without_safepoint_check();
  void lock_without_safepoint_check(Thread* self);
  // A thread should not call this if failure to acquire ownership will block its progress.
  bool try_lock_without_rank_check();
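
  // Illustrative sketch of a non-blocking acquisition (hypothetical lock name;
  // try_lock is useful where the caller can fall back to other work instead
  // of blocking):
  //
  //   if (Example_lock->try_lock()) {
  //     // ... do the optional work guarded by Example_lock ...
  //     Example_lock->unlock();
  //   } else {
  //     // lock is busy (or already owned by this thread); skip the optional work
  //   }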

  // Current owner - note not MT-safe. Can only be used to guarantee that
  // the current running thread owns the lock.
  Thread* owner() const         { return Atomic::load(&_owner); }
  void set_owner(Thread* owner) { set_owner_implementation(owner); }
  bool owned_by_self() const;

  const char *name() const                  { return _name; }

  int      id() const { return _id; }
//  void set_id(int id) { _id = id; }

  static void  add_mutex(Mutex* var);

  void print_on_error(outputStream* st) const;
  #ifndef PRODUCT
    void print_on(outputStream* st) const;
    void print() const;
  #endif

  // Print all mutexes/monitors that are currently owned by a thread; called
  // by fatal error handler.
  static void print_owned_locks_on_error(outputStream* st);
  static void print_lock_ranks(outputStream* st);

  static int num_mutex() { return _num_mutex; }
};

class Monitor : public Mutex {
 public:
  Monitor(Rank rank, const char *name, bool allow_vm_block)  :
    Mutex(rank, name, allow_vm_block) {}

  Monitor(Rank rank, const char *name) :
    Mutex(rank, name) {}
  // default destructor

  // Wait until the monitor is notified (or the wait times out).
  // The default is to make a safepoint check; the default timeout of zero
  // means wait forever. Returns true if the wait timed out; otherwise returns false.
  bool wait(uint64_t timeout = 0);
  bool wait_without_safepoint_check(uint64_t timeout = 0);
  void notify();
  void notify_all();
};
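
// Illustrative wait/notify sketch (the monitor name and condition_is_ready()
// are hypothetical; real code often uses the MonitorLocker RAII helper from
// mutexLocker.hpp instead of explicit lock()/unlock()):
//
//   Example_monitor->lock();
//   while (!condition_is_ready()) {
//     Example_monitor->wait();        // releases the lock while waiting
//   }
//   // ... consume the state protected by Example_monitor ...
//   Example_monitor->unlock();
//
//   // The producing side, while holding the same monitor:
//   //   Example_monitor->notify_all();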


class PaddedMutex : public Mutex {
  enum {
    CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Mutex),
    PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
  };
  char _padding[PADDING_LEN];
public:
  PaddedMutex(Rank rank, const char *name, bool allow_vm_block) : Mutex(rank, name, allow_vm_block) {};
  PaddedMutex(Rank rank, const char *name) : Mutex(rank, name) {};
};
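
// The padding arithmetic above pads each lock out to DEFAULT_PADDING_SIZE so
// that a heavily contended lock does not share a cache line with unrelated
// data. A worked example under assumed sizes (both numbers are illustrative,
// not guaranteed): if DEFAULT_PADDING_SIZE is 128 and sizeof(Mutex) is 96,
// then CACHE_LINE_PADDING = 128 - 96 = 32 and PADDING_LEN = 32, so the padded
// object fills a full padding unit. If sizeof(Mutex) already meets or exceeds
// DEFAULT_PADDING_SIZE, PADDING_LEN falls back to 1 so the array stays valid.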

class PaddedMonitor : public Monitor {
  enum {
    CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Monitor),
    PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
  };
  char _padding[PADDING_LEN];
 public:
  PaddedMonitor(Rank rank, const char *name, bool allow_vm_block) : Monitor(rank, name, allow_vm_block) {};
  PaddedMonitor(Rank rank, const char *name) : Monitor(rank, name) {};
};

// RecursiveMutex is a minimal implementation; it has none of the safety and rank checks
// that Mutex has. There are also no checks that the recursive lock is not held when going
// to Java or to JNI, like other JVM mutexes have. It should be used only for cases where
// the alternatives with all the nice safety features don't work.
// Waiting on a RecursiveMutex participates in the safepoint protocol if the current thread
// is a JavaThread (i.e., waiting sets the JavaThread to blocked).
class RecursiveMutex : public CHeapObj<mtThread> {
  Semaphore  _sem;
  Thread*    _owner;
  int        _recursions;

  NONCOPYABLE(RecursiveMutex);
 public:
  RecursiveMutex();
  void lock(Thread* current);
  void unlock(Thread* current);
  // For use in asserts
  bool holds_lock(Thread* current) { return _owner == current; }
};
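
// Illustrative sketch of recursive acquisition (the names below are hypothetical):
//
//   static RecursiveMutex example_recursive_lock;
//
//   void outer(Thread* current) {
//     example_recursive_lock.lock(current);
//     inner(current);                          // re-enters the same lock
//     example_recursive_lock.unlock(current);
//   }
//
//   void inner(Thread* current) {
//     example_recursive_lock.lock(current);    // recursion count goes to 2
//     assert(example_recursive_lock.holds_lock(current), "must be owner");
//     example_recursive_lock.unlock(current);
//   }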

#endif // SHARE_RUNTIME_MUTEX_HPP