/*
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_MUTEX_HPP
#define SHARE_RUNTIME_MUTEX_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/semaphore.hpp"

#if defined(LINUX) || defined(AIX) || defined(BSD)
# include "mutex_posix.hpp"
#else
# include OS_HEADER(mutex)
#endif


// A Mutex/Monitor is a simple wrapper around a native lock plus condition
// variable. It supports lock ownership tracking and lock ranking for deadlock
// detection, and coordinates with the safepoint protocol.

// Locking is non-recursive: if you try to lock a mutex you already own, you
// will get an assertion failure in a debug build (which should suffice to expose
// usage bugs). If you call try_lock on a mutex you already own, it will return false.
// The underlying PlatformMutex may support recursive locking, but this is not exposed,
// and we account for that possibility in try_lock.

// A thread is not allowed to safepoint while holding a mutex whose rank
// is nosafepoint or lower.

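// Illustrative usage sketch only (the lock name is hypothetical). A lock is
// typically allocated once and then bracketed around a critical section:
//
//   static Mutex* Example_lock = new Mutex(Mutex::nosafepoint, "Example_lock");
//   ...
//   Example_lock->lock_without_safepoint_check();
//   // ... critical section; must not safepoint while this lock is held
//   Example_lock->unlock();
//
// Most call sites use the RAII MutexLocker helpers (see mutexLocker.hpp)
// rather than calling lock()/unlock() directly.
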
class Mutex : public CHeapObj<mtSynchronizer> {

  friend class VMStructs;
 public:
  // Special low-level locks are given names, and their rank ranges are spaced to avoid overlap.
  enum class Rank {
       event,
       service        = event          +   6,
       stackwatermark = service        +   3,
       tty            = stackwatermark +   3,
       oopstorage     = tty            +   3,
       nosafepoint    = oopstorage     +   6,
       safepoint      = nosafepoint    +  20
  };

  // Bring the enumerators into class scope; we would prefer a C++20 "using enum" directive.
  static const Rank event          = Rank::event;
  static const Rank service        = Rank::service;
  static const Rank stackwatermark = Rank::stackwatermark;
  static const Rank tty            = Rank::tty;
  static const Rank oopstorage     = Rank::oopstorage;
  static const Rank nosafepoint    = Rank::nosafepoint;
  static const Rank safepoint      = Rank::safepoint;

  static void assert_no_overlap(Rank orig, Rank adjusted, int adjust);

  friend Rank operator-(Rank base, int adjust) {
    Rank result = static_cast<Rank>(static_cast<int>(base) - adjust);
    DEBUG_ONLY(assert_no_overlap(base, result, adjust));
    return result;
  }

  friend constexpr bool operator<(Rank lhs, Rank rhs) {
    return static_cast<int>(lhs) < static_cast<int>(rhs);
  }

  friend constexpr bool operator>(Rank lhs, Rank rhs)  { return rhs < lhs; }
  friend constexpr bool operator<=(Rank lhs, Rank rhs) { return !(lhs > rhs); }
  friend constexpr bool operator>=(Rank lhs, Rank rhs) { return !(lhs < rhs); }

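  // Illustrative sketch only (the lock names are hypothetical): specific ranks
  // are usually derived from the named base ranks with operator-, and compared
  // with the operators above, e.g.
  //
  //   Mutex* inner = new Mutex(Mutex::nosafepoint - 2, "Inner_lock");
  //   Mutex* outer = new Mutex(Mutex::nosafepoint - 1, "Outer_lock");
  //   static_assert(Mutex::tty < Mutex::nosafepoint, "rank range ordering");
  //
  // In debug builds assert_no_overlap() checks that an adjusted rank does not
  // fall into the range reserved for the next lower named rank.
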
 private:
  // The _owner field is only set by the current thread, either to itself after it has acquired
  // the low-level _lock, or to null before it has released the _lock. Accesses by any thread other
  // than the lock owner are inherently racy.
  Thread* volatile _owner;
  void raw_set_owner(Thread* new_owner) { Atomic::store(&_owner, new_owner); }

 protected:                              // Monitor-Mutex metadata
  PlatformMonitor _lock;                 // Native monitor implementation
  const char* _name;                     // Name of mutex/monitor

  // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
#ifndef PRODUCT
  bool    _allow_vm_block;
#endif
  static Mutex** _mutex_array;
  static int _num_mutex;

#ifdef ASSERT
  Rank    _rank;                 // rank (to avoid/detect potential deadlocks)
  Mutex*  _next;                 // Used by a Thread to link up owned locks
  Thread* _last_owner;           // the last thread to own the lock
  bool _skip_rank_check;         // read only by owner when doing rank checks

  static Mutex* get_least_ranked_lock(Mutex* locks);
  Mutex* get_least_ranked_lock_besides_this(Mutex* locks);
  bool skip_rank_check() {
    assert(owned_by_self(), "only the owner should call this");
    return _skip_rank_check;
  }

 public:
  Rank   rank() const          { return _rank; }
  const char*  rank_name() const;
  Mutex* next()  const         { return _next; }
#endif // ASSERT

 protected:
  void set_owner_implementation(Thread* owner)                        NOT_DEBUG({ raw_set_owner(owner);});
  void check_block_state       (Thread* thread)                       NOT_DEBUG_RETURN;
  void check_safepoint_state   (Thread* thread)                       NOT_DEBUG_RETURN;
  void check_no_safepoint_state(Thread* thread)                       NOT_DEBUG_RETURN;
  void check_rank              (Thread* thread)                       NOT_DEBUG_RETURN;
  void assert_owner            (Thread* expected)                     NOT_DEBUG_RETURN;

 public:
  static const bool _allow_vm_block_flag        = true;

  // Locks can be acquired with or without a safepoint check. NonJavaThreads do not follow
  // the safepoint protocol when acquiring locks.

  // Each lock can be acquired by only JavaThreads, by only NonJavaThreads, or shared between
  // Java and NonJavaThreads. If a lock is initialized with rank > nosafepoint, then whenever
  // it is acquired by a JavaThread we verify that the acquisition is done with a safepoint
  // check. Conversely, if a lock is initialized with rank <= nosafepoint, then whenever it is
  // acquired by a JavaThread we verify that the acquisition is done without a safepoint check
  // (see the illustrative sketch below the flag definitions).

  // TODO: Locks that are shared between JavaThreads and NonJavaThreads
  // should never encounter a safepoint check while they are held, or else a
  // deadlock can occur. We should check this by noting which
  // locks are shared, and walking held locks during safepoint checking.

  enum class SafepointCheckFlag {
    _safepoint_check_flag,
    _no_safepoint_check_flag
  };
  // Bring the enumerator names into class scope.
  static const SafepointCheckFlag _safepoint_check_flag =
    SafepointCheckFlag::_safepoint_check_flag;
  static const SafepointCheckFlag _no_safepoint_check_flag =
    SafepointCheckFlag::_no_safepoint_check_flag;

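  // Illustrative sketch only (the lock names are hypothetical) of how rank
  // selects the locking flavor used by JavaThreads:
  //
  //   Mutex* checked   = new Mutex(Mutex::safepoint,   "Checked_lock");
  //   Mutex* unchecked = new Mutex(Mutex::nosafepoint, "Unchecked_lock");
  //   ...
  //   checked->lock();                            // acquires with a safepoint check
  //   unchecked->lock_without_safepoint_check();  // must not safepoint check
  //
  // Acquiring a lock with the wrong flavor for its rank fails verification in
  // debug builds (cf. check_safepoint_state()/check_no_safepoint_state() above).
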
 public:
  Mutex(Rank rank, const char *name, bool allow_vm_block);

  Mutex(Rank rank, const char *name) :
    Mutex(rank, name, rank <= nosafepoint) {}

  ~Mutex();

  void lock(); // prints out warning if VM thread blocks
  void lock(Thread *thread); // overloaded with current thread
  void unlock();
  bool is_locked() const                     { return owner() != nullptr; }

  bool try_lock(); // Like lock(), but non-blocking: returns false if the lock cannot be acquired immediately.
 private:
  void lock_contended(Thread *thread); // contended slow-path
  bool try_lock_inner(bool do_rank_checks);
 public:

  void release_for_safepoint();

  // Lock without safepoint check. Should ONLY be used by safepoint code and other code
  // that is guaranteed not to block while running inside the VM.
  void lock_without_safepoint_check();
  void lock_without_safepoint_check(Thread* self);
  // A thread should not call this if failure to acquire ownership will block its progress.
  bool try_lock_without_rank_check();

  // Current owner. Note: this is not MT-safe; it can only be used reliably to
  // check whether the currently running thread owns the lock.
  Thread* owner() const         { return Atomic::load(&_owner); }
  void set_owner(Thread* owner) { set_owner_implementation(owner); }
  bool owned_by_self() const;

  const char *name() const                  { return _name; }

  static void  add_mutex(Mutex* var);

  void print_on_error(outputStream* st) const;
  #ifndef PRODUCT
    void print_on(outputStream* st) const;
    void print() const;
  #endif

  // Print all mutexes/monitors that are currently owned by a thread; called
  // by fatal error handler.
  static void print_owned_locks_on_error(outputStream* st);
  static void print_lock_ranks(outputStream* st);
};

class Monitor : public Mutex {
 public:
  Monitor(Rank rank, const char *name, bool allow_vm_block)  :
    Mutex(rank, name, allow_vm_block) {}

  Monitor(Rank rank, const char *name) :
    Mutex(rank, name) {}
  // default destructor

  // Wait until the monitor is notified (or the wait times out). The default
  // timeout of zero means wait forever. wait() makes a safepoint check;
  // wait_without_safepoint_check() does not.
  // Returns true if the wait timed out; otherwise returns false.
  bool wait(uint64_t timeout = 0);
  bool wait_without_safepoint_check(uint64_t timeout = 0);
  void notify();
  void notify_all();
};
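
// Illustrative sketch only (the names are hypothetical) of the usual
// wait/notify pattern on a Monitor. The condition is re-checked in a loop
// because wait() can return without the condition being satisfied:
//
//   // waiter, with Example_monitor held:
//   while (!condition_ready) {
//     Example_monitor->wait();   // releases the lock while waiting
//   }
//
//   // notifier, with Example_monitor held:
//   condition_ready = true;
//   Example_monitor->notify_all();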


class PaddedMutex : public Mutex {
  enum {
    CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Mutex),
    PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
  };
  char _padding[PADDING_LEN];
 public:
  PaddedMutex(Rank rank, const char *name, bool allow_vm_block) : Mutex(rank, name, allow_vm_block) {};
  PaddedMutex(Rank rank, const char *name) : Mutex(rank, name) {};
};

class PaddedMonitor : public Monitor {
  enum {
    CACHE_LINE_PADDING = (int)DEFAULT_PADDING_SIZE - (int)sizeof(Monitor),
    PADDING_LEN = CACHE_LINE_PADDING > 0 ? CACHE_LINE_PADDING : 1
  };
  char _padding[PADDING_LEN];
 public:
  PaddedMonitor(Rank rank, const char *name, bool allow_vm_block) : Monitor(rank, name, allow_vm_block) {};
  PaddedMonitor(Rank rank, const char *name) : Monitor(rank, name) {};
};
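
// Illustrative arithmetic only (the sizes are hypothetical): if
// DEFAULT_PADDING_SIZE is 128 and sizeof(Mutex) is 96, CACHE_LINE_PADDING is
// 32 and the padded lock occupies 128 bytes, so adjacent locks do not share a
// cache line. If the lock is already at least DEFAULT_PADDING_SIZE bytes,
// a single padding byte is used so the array is never zero-length.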

// RecursiveMutex is a minimal implementation and has none of the safety and rank checks
// that Mutex has. There are also no checks that the recursive lock is not held when going
// to Java or to JNI, like other JVM mutexes have. It should be used only for cases where
// the alternatives with all the nice safety features don't work.
// Waiting on a RecursiveMutex participates in the safepoint protocol if the current thread
// is a JavaThread (i.e., waiting sets the JavaThread state to blocked).
class RecursiveMutex : public CHeapObj<mtThread> {
  Semaphore  _sem;
  Thread*    _owner;
  int        _recursions;

  NONCOPYABLE(RecursiveMutex);
 public:
  RecursiveMutex();
  void lock(Thread* current);
  void unlock(Thread* current);
  // For use in asserts
  bool holds_lock(Thread* current) { return _owner == current; }
};
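
// Illustrative usage sketch only (the variable names are hypothetical): the
// owning thread may re-acquire the lock, and must unlock once per lock call.
//
//   RecursiveMutex* rm = new RecursiveMutex();
//   rm->lock(current);
//   rm->lock(current);     // re-entry by the owner is permitted
//   assert(rm->holds_lock(current), "sanity");
//   rm->unlock(current);
//   rm->unlock(current);   // released on the final unlock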

#endif // SHARE_RUNTIME_MUTEX_HPP