/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compiler_globals.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/vmError.hpp"

// Mutexes used in the VM (see comment in mutexLocker.hpp):

Mutex*   NMethodState_lock            = nullptr;
Monitor* SystemDictionary_lock        = nullptr;
Mutex*   InvokeMethodTypeTable_lock   = nullptr;
Monitor* InvokeMethodIntrinsicTable_lock = nullptr;
Mutex*   SharedDictionary_lock        = nullptr;
Monitor* ClassInitError_lock          = nullptr;
Mutex*   Module_lock                  = nullptr;
Mutex*   CompiledIC_lock              = nullptr;
Mutex*   VMStatistic_lock             = nullptr;
Mutex*   JmethodIdCreation_lock       = nullptr;
Mutex*   JfieldIdCreation_lock        = nullptr;
Monitor* JNICritical_lock             = nullptr;
Mutex*   JvmtiThreadState_lock        = nullptr;
Monitor* EscapeBarrier_lock           = nullptr;
Monitor* JvmtiVTMSTransition_lock     = nullptr;
Mutex*   JvmtiVThreadSuspend_lock     = nullptr;
Monitor* Heap_lock                    = nullptr;
#if INCLUDE_PARALLELGC
Mutex*   PSOldGenExpand_lock          = nullptr;
#endif
Mutex*   AdapterHandlerLibrary_lock   = nullptr;
Mutex*   SignatureHandlerLibrary_lock = nullptr;
Mutex*   VtableStubs_lock             = nullptr;
Mutex*   SymbolArena_lock             = nullptr;
Monitor* StringDedup_lock             = nullptr;
Mutex*   StringDedupIntern_lock       = nullptr;
Monitor* CodeCache_lock               = nullptr;
Mutex*   TouchedMethodLog_lock        = nullptr;
Mutex*   RetData_lock                 = nullptr;
Monitor* VMOperation_lock             = nullptr;
Monitor* ThreadsLockThrottle_lock     = nullptr;
Monitor* Threads_lock                 = nullptr;
Mutex*   NonJavaThreadsList_lock      = nullptr;
Mutex*   NonJavaThreadsListSync_lock  = nullptr;
Monitor* STS_lock                     = nullptr;
Mutex*   MonitoringSupport_lock       = nullptr;
Monitor* ConcurrentGCBreakpoints_lock = nullptr;
Mutex*   Compile_lock                 = nullptr;
Monitor* CompileTaskWait_lock         = nullptr;
Monitor* MethodCompileQueue_lock      = nullptr;
Monitor* MethodCompileQueueC1_lock    = nullptr;
Monitor* MethodCompileQueueC2_lock    = nullptr;
Monitor* MethodCompileQueueSC1_lock   = nullptr;
Monitor* MethodCompileQueueSC2_lock   = nullptr;
Monitor* CompileThread_lock           = nullptr;
Monitor* Compilation_lock             = nullptr;
Mutex*   CompileStatistics_lock       = nullptr;
Mutex*   DirectivesStack_lock         = nullptr;
Monitor* AOTHeapLoading_lock          = nullptr;
Monitor* Terminator_lock              = nullptr;
Monitor* InitCompleted_lock           = nullptr;
Monitor* BeforeExit_lock              = nullptr;
Monitor* Notify_lock                  = nullptr;
Mutex*   ExceptionCache_lock          = nullptr;
Mutex*   TrainingData_lock            = nullptr;
Monitor* TrainingReplayQueue_lock     = nullptr;
#ifndef PRODUCT
Mutex*   FullGCALot_lock              = nullptr;
#endif

Mutex*   tty_lock                     = nullptr;

Mutex*   RawMonitor_lock              = nullptr;
Mutex*   PerfDataMemAlloc_lock        = nullptr;
Mutex*   PerfDataManager_lock         = nullptr;

#if INCLUDE_G1GC
Monitor* G1CGC_lock                   = nullptr;
Mutex*   G1FreeList_lock              = nullptr;
Mutex*   G1MarkStackChunkList_lock    = nullptr;
Mutex*   G1MarkStackFreeList_lock     = nullptr;
Monitor* G1OldGCCount_lock            = nullptr;
Mutex*   G1OldSets_lock               = nullptr;
Mutex*   G1ReviseYoungLength_lock     = nullptr;
Monitor* G1RootRegionScan_lock        = nullptr;
Mutex*   G1RareEvent_lock             = nullptr;
Mutex*   G1Uncommit_lock              = nullptr;
#endif

Mutex*   Management_lock              = nullptr;
Monitor* MonitorDeflation_lock        = nullptr;
Monitor* Service_lock                 = nullptr;
Monitor* Notification_lock            = nullptr;
Monitor* PeriodicTask_lock            = nullptr;
Monitor* RedefineClasses_lock         = nullptr;
Mutex*   Verify_lock                  = nullptr;

#if INCLUDE_JFR
Mutex*   JfrStacktrace_lock           = nullptr;
Monitor* JfrMsg_lock                  = nullptr;
Mutex*   JfrBuffer_lock               = nullptr;
#endif

Mutex*   CodeHeapStateAnalytics_lock  = nullptr;

Mutex*   ExternalsRecorder_lock       = nullptr;

Mutex*   AOTCodeCStrings_lock         = nullptr;

Monitor* ContinuationRelativize_lock  = nullptr;

Mutex*   Metaspace_lock               = nullptr;
Monitor* MetaspaceCritical_lock       = nullptr;
Mutex*   ClassLoaderDataGraph_lock    = nullptr;
Monitor* ThreadsSMRDelete_lock        = nullptr;
Mutex*   ThreadIdTableCreate_lock     = nullptr;
Mutex*   SharedDecoder_lock           = nullptr;
Mutex*   DCmdFactory_lock             = nullptr;
Mutex*   NMTQuery_lock                = nullptr;
Mutex*   NMTCompilationCostHistory_lock = nullptr;
Mutex*   NmtVirtualMemory_lock          = nullptr;

#if INCLUDE_CDS
#if INCLUDE_JVMTI
Mutex*   CDSClassFileStream_lock      = nullptr;
#endif
Mutex*   DumpTimeTable_lock           = nullptr;
Mutex*   CDSLambda_lock               = nullptr;
Mutex*   DumpRegion_lock              = nullptr;
Mutex*   ClassListFile_lock           = nullptr;
Mutex*   UnregisteredClassesTable_lock = nullptr;
Mutex*   LambdaFormInvokers_lock      = nullptr;
Mutex*   ScratchObjects_lock          = nullptr;
Mutex*   ArchivedObjectTables_lock    = nullptr;
Mutex*   FinalImageRecipes_lock       = nullptr;
#endif // INCLUDE_CDS
Mutex*   Bootclasspath_lock           = nullptr;

#if INCLUDE_JVMCI
Monitor* JVMCI_lock                   = nullptr;
Monitor* JVMCIRuntime_lock            = nullptr;
#endif

// Only one RecursiveMutex
RecursiveMutex* MultiArray_lock       = nullptr;

#ifdef ASSERT
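// Debug-only assertion helpers. The weaker form accepts lock ownership, being at
// a safepoint, or running before the VM is fully initialized; the strong form
// requires that the current thread actually owns the lock.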
void assert_locked_or_safepoint(const Mutex* lock) {
  if (DebuggingContext::is_enabled() || VMError::is_error_reported()) return;
  // check if this thread owns the lock (common case)
  assert(lock != nullptr, "Need non-null lock");
  if (lock->owned_by_self()) return;
  if (SafepointSynchronize::is_at_safepoint()) return;
  if (!Universe::is_fully_initialized()) return;
  fatal("must own lock %s", lock->name());
}

// a stronger assertion than the above
void assert_lock_strong(const Mutex* lock) {
  if (DebuggingContext::is_enabled() || VMError::is_error_reported()) return;
  assert(lock != nullptr, "Need non-null lock");
  if (lock->owned_by_self()) return;
  fatal("must own lock %s", lock->name());
}
#endif

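// Each lock is created exactly once: MUTEX_DEF reserves suitably aligned static
// storage for it, constructs it there with placement new (so it is never
// destroyed), and registers it via Mutex::add_mutex(). The assert catches a lock
// that is initialized twice.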
#define MUTEX_STORAGE_NAME(name) name##_storage
#define MUTEX_STORAGE(name, type) alignas(type) static uint8_t MUTEX_STORAGE_NAME(name)[sizeof(type)]
#define MUTEX_DEF(name, type, pri, ...) {                                                       \
  assert(name == nullptr, "Mutex/Monitor initialized twice");                                   \
  MUTEX_STORAGE(name, type);                                                                    \
  name = ::new(static_cast<void*>(MUTEX_STORAGE_NAME(name))) type((pri), #name, ##__VA_ARGS__); \
  Mutex::add_mutex(name);                                                                       \
}
#define MUTEX_DEFN(name, type, pri, ...) MUTEX_DEF(name, type, Mutex::pri, ##__VA_ARGS__)

// Define a lock ranked relative to an already-defined lock: the new lock ranks
// just below held_lock, so it can be acquired while held_lock is held. In
// non-ASSERT builds the relative rank is not used and the lock simply gets the
// default safepoint rank.
#ifdef ASSERT
#define MUTEX_DEFL(name, type, held_lock, ...) MUTEX_DEF(name, type, (held_lock)->rank() - 1, ##__VA_ARGS__)
#else
#define MUTEX_DEFL(name, type, held_lock, ...) MUTEX_DEFN(name, type, safepoint, ##__VA_ARGS__)
#endif
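// For illustration, MUTEX_DEFN(tty_lock, PaddedMutex, tty) expands to roughly:
//
//   alignas(PaddedMutex) static uint8_t tty_lock_storage[sizeof(PaddedMutex)];
//   tty_lock = ::new(static_cast<void*>(tty_lock_storage)) PaddedMutex(Mutex::tty, "tty_lock");
//   Mutex::add_mutex(tty_lock);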

// Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
void mutex_init() {
  MUTEX_DEFN(tty_lock                        , PaddedMutex  , tty);      // allow to lock in VM

  MUTEX_DEFN(STS_lock                        , PaddedMonitor, nosafepoint);

#if INCLUDE_G1GC
  if (UseG1GC) {
    MUTEX_DEFN(G1CGC_lock                    , PaddedMonitor, nosafepoint);
    MUTEX_DEFN(G1FreeList_lock               , PaddedMutex  , service-1);
    MUTEX_DEFN(G1MarkStackChunkList_lock     , PaddedMutex  , nosafepoint);
    MUTEX_DEFN(G1MarkStackFreeList_lock      , PaddedMutex  , nosafepoint);
    MUTEX_DEFN(G1OldSets_lock                , PaddedMutex  , nosafepoint);
    MUTEX_DEFN(G1RootRegionScan_lock         , PaddedMonitor, nosafepoint-1);
    MUTEX_DEFN(G1Uncommit_lock               , PaddedMutex  , service-2);
  }
#endif

  MUTEX_DEFN(MonitoringSupport_lock          , PaddedMutex  , service-1);        // used for serviceability monitoring support

  MUTEX_DEFN(StringDedup_lock                , PaddedMonitor, nosafepoint);
  MUTEX_DEFN(StringDedupIntern_lock          , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(RawMonitor_lock                 , PaddedMutex  , nosafepoint-1);

  MUTEX_DEFN(Metaspace_lock                  , PaddedMutex  , nosafepoint-3);
  MUTEX_DEFN(MetaspaceCritical_lock          , PaddedMonitor, nosafepoint-1);

  MUTEX_DEFN(MonitorDeflation_lock           , PaddedMonitor, nosafepoint);      // used for monitor deflation thread operations
  MUTEX_DEFN(Service_lock                    , PaddedMonitor, service);          // used for service thread operations
  MUTEX_DEFN(Notification_lock               , PaddedMonitor, service);          // used for notification thread operations

  MUTEX_DEFN(JmethodIdCreation_lock          , PaddedMutex  , nosafepoint-1);    // used for creating jmethodIDs; can also lock HandshakeState_lock
  MUTEX_DEFN(InvokeMethodTypeTable_lock      , PaddedMutex  , safepoint);
  MUTEX_DEFN(InvokeMethodIntrinsicTable_lock , PaddedMonitor, safepoint);
  MUTEX_DEFN(AdapterHandlerLibrary_lock      , PaddedMutex  , safepoint);
  MUTEX_DEFN(SharedDictionary_lock           , PaddedMutex  , safepoint);
  MUTEX_DEFN(VMStatistic_lock                , PaddedMutex  , safepoint);
  MUTEX_DEFN(SignatureHandlerLibrary_lock    , PaddedMutex  , safepoint);
  MUTEX_DEFN(SymbolArena_lock                , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(ExceptionCache_lock             , PaddedMutex  , safepoint);
#ifndef PRODUCT
  MUTEX_DEFN(FullGCALot_lock                 , PaddedMutex  , safepoint); // a lock to make FullGCALot MT safe
#endif
  MUTEX_DEFN(BeforeExit_lock                 , PaddedMonitor, safepoint);

  MUTEX_DEFN(NonJavaThreadsList_lock         , PaddedMutex  , nosafepoint-1);
  MUTEX_DEFN(NonJavaThreadsListSync_lock     , PaddedMutex  , nosafepoint);

  MUTEX_DEFN(RetData_lock                    , PaddedMutex  , safepoint);
  MUTEX_DEFN(Terminator_lock                 , PaddedMonitor, safepoint, true);
  MUTEX_DEFN(InitCompleted_lock              , PaddedMonitor, nosafepoint);
  MUTEX_DEFN(Notify_lock                     , PaddedMonitor, safepoint, true);

  MUTEX_DEFN(JfieldIdCreation_lock           , PaddedMutex  , safepoint);

  MUTEX_DEFN(CompiledIC_lock                 , PaddedMutex  , nosafepoint);  // locks VtableStubs_lock
  MUTEX_DEFN(MethodCompileQueue_lock         , PaddedMonitor, safepoint);
  if (UseGlobalCompileQueueLock) {
    MethodCompileQueueC1_lock  = MethodCompileQueue_lock;
    MethodCompileQueueC2_lock  = MethodCompileQueue_lock;
    MethodCompileQueueSC1_lock = MethodCompileQueue_lock;
    MethodCompileQueueSC2_lock = MethodCompileQueue_lock;
  } else {
    MUTEX_DEFN(MethodCompileQueueC1_lock     , PaddedMonitor, safepoint);
    MUTEX_DEFN(MethodCompileQueueC2_lock     , PaddedMonitor, safepoint);
    MUTEX_DEFN(MethodCompileQueueSC1_lock    , PaddedMonitor, safepoint);
    MUTEX_DEFN(MethodCompileQueueSC2_lock    , PaddedMonitor, safepoint);
  }
  MUTEX_DEFN(TrainingData_lock               , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(TrainingReplayQueue_lock        , PaddedMonitor, safepoint);
  MUTEX_DEFN(CompileStatistics_lock          , PaddedMutex  , safepoint);
  MUTEX_DEFN(DirectivesStack_lock            , PaddedMutex  , nosafepoint);

  MUTEX_DEFN(JvmtiVTMSTransition_lock        , PaddedMonitor, safepoint);   // used for Virtual Thread Mount State transition management
  MUTEX_DEFN(JvmtiVThreadSuspend_lock        , PaddedMutex  , nosafepoint-1);
  MUTEX_DEFN(EscapeBarrier_lock              , PaddedMonitor, nosafepoint); // Used to synchronize object reallocation/relocking triggered by JVMTI
  MUTEX_DEFN(Management_lock                 , PaddedMutex  , safepoint);   // used for JVM management

  MUTEX_DEFN(ConcurrentGCBreakpoints_lock    , PaddedMonitor, safepoint, true);
  MUTEX_DEFN(TouchedMethodLog_lock           , PaddedMutex  , safepoint);

  MUTEX_DEFN(CompileThread_lock              , PaddedMonitor, safepoint);
  MUTEX_DEFN(PeriodicTask_lock               , PaddedMonitor, safepoint, true);
  MUTEX_DEFN(RedefineClasses_lock            , PaddedMonitor, safepoint);
  MUTEX_DEFN(Verify_lock                     , PaddedMutex  , safepoint);
  MUTEX_DEFN(ClassLoaderDataGraph_lock       , PaddedMutex  , safepoint);

  MUTEX_DEFN(Compilation_lock                , PaddedMonitor, nosafepoint);

#if INCLUDE_JFR
  MUTEX_DEFN(JfrBuffer_lock                  , PaddedMutex  , event);
  MUTEX_DEFN(JfrMsg_lock                     , PaddedMonitor, event);
  MUTEX_DEFN(JfrStacktrace_lock              , PaddedMutex  , event);
#endif

  MUTEX_DEFN(ContinuationRelativize_lock     , PaddedMonitor, nosafepoint-3);
  MUTEX_DEFN(CodeHeapStateAnalytics_lock     , PaddedMutex  , safepoint);
  MUTEX_DEFN(ThreadsSMRDelete_lock           , PaddedMonitor, service-2); // Holds ConcurrentHashTableResize_lock
  MUTEX_DEFN(ThreadIdTableCreate_lock        , PaddedMutex  , safepoint);
  MUTEX_DEFN(DCmdFactory_lock                , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(NMTQuery_lock                   , PaddedMutex  , safepoint);
  MUTEX_DEFN(NMTCompilationCostHistory_lock  , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(NmtVirtualMemory_lock           , PaddedMutex  , service-4); // Must be lower than G1Mapper_lock used from G1RegionsSmallerThanCommitSizeMapper::commit_regions
#if INCLUDE_CDS
#if INCLUDE_JVMTI
  MUTEX_DEFN(CDSClassFileStream_lock         , PaddedMutex  , safepoint);
#endif
  MUTEX_DEFN(DumpTimeTable_lock              , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(CDSLambda_lock                  , PaddedMutex  , nosafepoint);
  MUTEX_DEFL(DumpRegion_lock                 , PaddedMutex  , DumpTimeTable_lock);
  MUTEX_DEFN(ClassListFile_lock              , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(UnregisteredClassesTable_lock   , PaddedMutex  , nosafepoint-1);
  MUTEX_DEFN(LambdaFormInvokers_lock         , PaddedMutex  , safepoint);
  MUTEX_DEFL(ScratchObjects_lock             , PaddedMutex  , DumpTimeTable_lock);
  MUTEX_DEFN(ArchivedObjectTables_lock       , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(FinalImageRecipes_lock          , PaddedMutex  , nosafepoint);
#endif // INCLUDE_CDS
  MUTEX_DEFN(Bootclasspath_lock              , PaddedMutex  , nosafepoint);

#if INCLUDE_JVMCI
  // JVMCIRuntime::_lock must be acquired before JVMCI_lock to avoid deadlock
  MUTEX_DEFN(JVMCIRuntime_lock               , PaddedMonitor, safepoint, true);
#endif

  MUTEX_DEFN(ThreadsLockThrottle_lock        , PaddedMonitor, safepoint);

  // These locks have relative rankings, and inherit safepoint checking attributes from that rank.
  MUTEX_DEFL(VtableStubs_lock               , PaddedMutex  , CompiledIC_lock);  // Also holds DumpTimeTable_lock
  MUTEX_DEFL(CodeCache_lock                 , PaddedMonitor, VtableStubs_lock);
  MUTEX_DEFL(NMethodState_lock              , PaddedMutex  , CodeCache_lock);

  // tty_lock is held when printing an nmethod and its relocations, which use this lock.
  MUTEX_DEFL(ExternalsRecorder_lock         , PaddedMutex  , tty_lock);

  MUTEX_DEFL(AOTCodeCStrings_lock           , PaddedMutex  , tty_lock);

  MUTEX_DEFL(Threads_lock                   , PaddedMonitor, CompileThread_lock, true);
  MUTEX_DEFL(Compile_lock                   , PaddedMutex  , MethodCompileQueue_lock);
  MUTEX_DEFL(Module_lock                    , PaddedMutex  , AdapterHandlerLibrary_lock);
  MUTEX_DEFL(AOTHeapLoading_lock            , PaddedMonitor, Module_lock);
  MUTEX_DEFL(JNICritical_lock               , PaddedMonitor, AOTHeapLoading_lock); // used for JNI critical regions
  MUTEX_DEFL(Heap_lock                      , PaddedMonitor, JNICritical_lock);

  MUTEX_DEFL(PerfDataMemAlloc_lock          , PaddedMutex  , Heap_lock);
  MUTEX_DEFL(PerfDataManager_lock           , PaddedMutex  , Heap_lock);
  MUTEX_DEFL(VMOperation_lock               , PaddedMonitor, Heap_lock, true);
  MUTEX_DEFL(ClassInitError_lock            , PaddedMonitor, Threads_lock);

#if INCLUDE_G1GC
  if (UseG1GC) {
    MUTEX_DEFL(G1OldGCCount_lock            , PaddedMonitor, Threads_lock, true);
    MUTEX_DEFL(G1RareEvent_lock             , PaddedMutex  , Threads_lock, true);
    MUTEX_DEFL(G1ReviseYoungLength_lock     , PaddedMutex  , Threads_lock, true);
  }
#endif

  MUTEX_DEFL(CompileTaskWait_lock           , PaddedMonitor, MethodCompileQueue_lock);

#if INCLUDE_PARALLELGC
  if (UseParallelGC) {
    MUTEX_DEFL(PSOldGenExpand_lock          , PaddedMutex  , Heap_lock, true);
  }
#endif
  MUTEX_DEFL(SystemDictionary_lock          , PaddedMonitor, Module_lock);
#if INCLUDE_JVMCI
  // JVMCIRuntime_lock must be acquired before JVMCI_lock to avoid deadlock
  MUTEX_DEFL(JVMCI_lock                     , PaddedMonitor, JVMCIRuntime_lock);
#endif
  MUTEX_DEFL(JvmtiThreadState_lock          , PaddedMutex  , JvmtiVTMSTransition_lock);   // Used by JvmtiThreadState/JvmtiEventController
  MUTEX_DEFL(SharedDecoder_lock             , PaddedMutex  , NmtVirtualMemory_lock); // Must be lower than NmtVirtualMemory_lock due to MemTracker::print_containing_region

  // Allocate RecursiveMutex
  MultiArray_lock = new RecursiveMutex();
}

#undef MUTEX_DEFL
#undef MUTEX_DEFN
#undef MUTEX_DEF
#undef MUTEX_STORAGE
#undef MUTEX_STORAGE_NAME

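// Bookkeeping for ProfileVMLocks: _names maps perf-counter slots to lock names,
// _is_unique records whether a slot's name belongs to exactly one lock, and
// _num_names is the number of slots handed out so far by name2id().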
static const int MAX_NAMES = 200;
static const char* _names[MAX_NAMES] = { nullptr };
static bool _is_unique[MAX_NAMES] = { false };
static volatile int _num_names = 0;

static bool _mutex_init_done = false;

PerfCounter** MutexLockerImpl::_perf_lock_count     = nullptr;
PerfCounter** MutexLockerImpl::_perf_lock_wait_time = nullptr;
PerfCounter** MutexLockerImpl::_perf_lock_hold_time = nullptr;

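// When ProfileVMLocks is enabled, _before times how long it takes to acquire the
// lock and _after is started once the lock is held (the matching stop happens
// when the lock is released). Callers normally go through the MutexLocker
// wrapper declared in mutexLocker.hpp, e.g.:
//
//   { MutexLocker ml(Heap_lock); /* ... touch heap state ... */ }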
MutexLockerImpl::MutexLockerImpl(Mutex* mutex, Mutex::SafepointCheckFlag flag) :
  _mutex(mutex),
  _prof(ProfileVMLocks && Thread::current_or_null() != nullptr && Thread::current()->profile_vm_locks()) {

  bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
  if (_mutex != nullptr) {
    if (_prof) { _before.start(); } // before

    if (no_safepoint_check) {
      _mutex->lock_without_safepoint_check();
    } else {
      _mutex->lock();
    }

    if (_prof) { _before.stop(); _after.start(); } // after
  }
}

MutexLockerImpl::MutexLockerImpl(Thread* thread, Mutex* mutex, Mutex::SafepointCheckFlag flag) :
  _mutex(mutex), _prof(thread->profile_vm_locks()) {

  if (_prof) { _before.start(); } // before

  bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
  if (_mutex != nullptr) {
    if (no_safepoint_check) {
      _mutex->lock_without_safepoint_check(thread);
    } else {
      _mutex->lock(thread);
    }
  }

  if (_prof) { _before.stop(); _after.start(); } // after
}

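// Called once perf data is available: allocates one Count/BeforeTime/AfterTime
// counter triple per registered lock name, plus slot 0 for unnamed/other locks.
// Slots with no registered name get a generated "UnnamedMutex#<i>" name.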
void MutexLockerImpl::init_counters() {
  if (ProfileVMLocks && UsePerfData) {
    ResourceMark rm;
    EXCEPTION_MARK;
    _perf_lock_count     = NEW_C_HEAP_ARRAY(PerfCounter*, MAX_NAMES + 1, mtInternal);
    _perf_lock_wait_time = NEW_C_HEAP_ARRAY(PerfCounter*, MAX_NAMES + 1, mtInternal);
    _perf_lock_hold_time = NEW_C_HEAP_ARRAY(PerfCounter*, MAX_NAMES + 1, mtInternal);

    NEWPERFEVENTCOUNTER(_perf_lock_count[0],     SUN_RT, PerfDataManager::counter_name("Other", "Count"));
    NEWPERFEVENTCOUNTER(_perf_lock_wait_time[0], SUN_RT, PerfDataManager::counter_name("Other", "BeforeTime"));
    NEWPERFEVENTCOUNTER(_perf_lock_hold_time[0], SUN_RT, PerfDataManager::counter_name("Other", "AfterTime"));
    for (int i = 0; i < MAX_NAMES; i++) {
      ResourceMark rm;
      const char* counter_name = _names[i];
      if (counter_name == nullptr) {
        stringStream ss;
        ss.print("UnnamedMutex#%d", i);
        counter_name = ss.as_string();
        _names[i] = os::strdup(counter_name, mtInternal); // replace default nullptr
      }
      NEWPERFEVENTCOUNTER(_perf_lock_count[i + 1],     SUN_RT, PerfDataManager::counter_name(counter_name, "Count"));
      NEWPERFEVENTCOUNTER(_perf_lock_wait_time[i + 1], SUN_RT, PerfDataManager::counter_name(counter_name, "BeforeTime"));
      NEWPERFEVENTCOUNTER(_perf_lock_hold_time[i + 1], SUN_RT, PerfDataManager::counter_name(counter_name, "AfterTime"));
    }
    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("MutexLockerImpl::init_counters() failed unexpectedly");
    }
  }
  _mutex_init_done = true;
}

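// Map a lock name to its perf-counter slot. During mutex_init() this runs
// single-threaded, so names are appended directly; afterwards a slot is claimed
// by atomically advancing _num_names. Returns -1 when ProfileVMLocks or
// UsePerfData is off, or when no slots remain.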
int MutexLockerImpl::name2id(const char* name) {
  if (ProfileVMLocks && UsePerfData) {
    // There is no concurrency or duplication in mutex_init().
    if (!_mutex_init_done) {
      int new_id = AtomicAccess::load(&_num_names);
      precond(new_id < MAX_NAMES);
      AtomicAccess::inc(&_num_names);
      _names[new_id] = os::strdup(name, mtInternal);
      _is_unique[new_id] = true;
      return new_id;
    }
    int limit = AtomicAccess::load(&_num_names); // Cache the shared value, which can be updated concurrently
    for (int i = Mutex::num_mutex(); i < limit; i++) {
      if (strcmp(_names[i], name) == 0) {
        _is_unique[i] = false;
        return i;
      }
    }
    if (limit < MAX_NAMES) {
      int old_limit = limit;
      const char* name_dup = os::strdup(name, mtInternal);
      int new_id; // Get new id for this name
      do {
        new_id = limit++;
        if (new_id == MAX_NAMES) break;
      } while (AtomicAccess::cmpxchg(&_num_names, new_id, limit) != new_id);
      for (int i = old_limit; i < new_id; i++) {
        if (strcmp(_names[i], name) == 0) { // Other thread put it there
          _is_unique[i] = false;
          return i; // The slot claimed for new_id is wasted to keep the code simple: _num_names only ever grows
        }
      }
      if (new_id < MAX_NAMES) {
        _names[new_id] = name_dup;
        _is_unique[new_id] = true;
        return new_id;
      }
    }
    log_debug(init)("Unnamed: %s", name); // no slots left
  }
  return -1;
}

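// Print one per-lock line; names that are not unique to a single lock are flagged with "M".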
void MutexLockerImpl::print_counter_on(outputStream* st, const char* name, bool is_unique, int idx) {
  jlong count = _perf_lock_count[idx]->get_value();
  if (count > 0) {
    st->print_cr("  %3d: %s%40s = " JLONG_FORMAT_W(5) "us (" JLONG_FORMAT_W(5) "us) / " JLONG_FORMAT_W(9) " events",
                 idx, (is_unique ? " " : "M"), name,
                 Management::ticks_to_us(_perf_lock_hold_time[idx]->get_value()),
                 Management::ticks_to_us(_perf_lock_wait_time[idx]->get_value()),
                 count);
  }
}

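// Sum one counter array over slot 0 (unnamed locks) plus every named slot allocated so far.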
static jlong accumulate_lock_counters(PerfCounter** lock_counters) {
  jlong acc = 0;
  for (int i = 0; i < _num_names + 1; i++) { // 0 slot is reserved for unnamed locks
    if (lock_counters[i] == nullptr) {
      break;
    }
    acc += lock_counters[i]->get_value();
  }
  return acc;
}

void MutexLockerImpl::print_counters_on(outputStream* st) {
  if (ProfileVMLocks && UsePerfData) {
    jlong total_count     = accumulate_lock_counters(_perf_lock_count);
    jlong total_wait_time = accumulate_lock_counters(_perf_lock_wait_time);
    jlong total_hold_time = accumulate_lock_counters(_perf_lock_hold_time);

    st->print_cr("MutexLocker: Total: %d named locks (%d unique names); hold = " JLONG_FORMAT "us (wait = " JLONG_FORMAT "us) / " JLONG_FORMAT " events for thread \"main\"",
                 Mutex::num_mutex(), _num_names,
                 Management::ticks_to_us(total_hold_time),
                 Management::ticks_to_us(total_wait_time),
                 total_count);
    for (int i = 0; i < _num_names; i++) {
      print_counter_on(st, _names[i], _is_unique[i], i+1);
    }
    print_counter_on(st, "Unnamed / Other", false /*is_unique*/, 0);
  } else {
    st->print_cr("MutexLocker: no info (%s is disabled)", (UsePerfData ? "ProfileVMLocks" : "UsePerfData"));
  }
}

void MutexLockerImpl::post_initialize() {
  // Print mutex ranks if requested.
  LogTarget(Info, vmmutex) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    Mutex::print_lock_ranks(&ls);
  }
}

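// GCMutexLocker only acquires the mutex when the VM is not at a safepoint; at a
// safepoint no locking is needed, so it just records that nothing was locked.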
GCMutexLocker::GCMutexLocker(Mutex* mutex) {
  if (SafepointSynchronize::is_at_safepoint()) {
    _locked = false;
  } else {
    _mutex = mutex;
    _locked = true;
    _mutex->lock();
  }
}