/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compiler_globals.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/vmError.hpp"

// Mutexes used in the VM (see comment in mutexLocker.hpp):

Mutex*   NMethodState_lock            = nullptr;
Mutex*   NMethodEntryBarrier_lock     = nullptr;
Monitor* SystemDictionary_lock        = nullptr;
Mutex*   InvokeMethodTypeTable_lock   = nullptr;
Monitor* InvokeMethodIntrinsicTable_lock = nullptr;
Mutex*   SharedDictionary_lock        = nullptr;
Monitor* ClassInitError_lock          = nullptr;
Mutex*   Module_lock                  = nullptr;
Mutex*   CompiledIC_lock              = nullptr;
Mutex*   VMStatistic_lock             = nullptr;
Mutex*   JmethodIdCreation_lock       = nullptr;
Mutex*   JfieldIdCreation_lock        = nullptr;
Monitor* JNICritical_lock             = nullptr;
Mutex*   JvmtiThreadState_lock        = nullptr;
Monitor* EscapeBarrier_lock           = nullptr;
Monitor* JvmtiVTMSTransition_lock     = nullptr;
Mutex*   JvmtiVThreadSuspend_lock     = nullptr;
Monitor* Heap_lock                    = nullptr;
#if INCLUDE_PARALLELGC
Mutex*   PSOldGenExpand_lock          = nullptr;
#endif
Mutex*   AdapterHandlerLibrary_lock   = nullptr;
Mutex*   SignatureHandlerLibrary_lock = nullptr;
Mutex*   VtableStubs_lock             = nullptr;
Mutex*   SymbolArena_lock             = nullptr;
Monitor* StringDedup_lock             = nullptr;
Mutex*   StringDedupIntern_lock       = nullptr;
Monitor* CodeCache_lock               = nullptr;
Mutex*   TouchedMethodLog_lock        = nullptr;
Mutex*   RetData_lock                 = nullptr;
Monitor* VMOperation_lock             = nullptr;
Monitor* ThreadsLockThrottle_lock     = nullptr;
Monitor* Threads_lock                 = nullptr;
Mutex*   NonJavaThreadsList_lock      = nullptr;
Mutex*   NonJavaThreadsListSync_lock  = nullptr;
Monitor* STS_lock                     = nullptr;
Mutex*   MonitoringSupport_lock       = nullptr;
Monitor* ConcurrentGCBreakpoints_lock = nullptr;
Mutex*   Compile_lock                 = nullptr;
Monitor* CompileTaskWait_lock         = nullptr;
Monitor* MethodCompileQueue_lock      = nullptr;
Monitor* MethodCompileQueueC1_lock    = nullptr;
Monitor* MethodCompileQueueC2_lock    = nullptr;
Monitor* MethodCompileQueueSC1_lock   = nullptr;
Monitor* MethodCompileQueueSC2_lock   = nullptr;
Monitor* CompileThread_lock           = nullptr;
Monitor* Compilation_lock             = nullptr;
Mutex*   CompileStatistics_lock       = nullptr;
Mutex*   DirectivesStack_lock         = nullptr;
Monitor* Terminator_lock              = nullptr;
Monitor* InitCompleted_lock           = nullptr;
Monitor* BeforeExit_lock              = nullptr;
Monitor* Notify_lock                  = nullptr;
Mutex*   ExceptionCache_lock          = nullptr;
Mutex*   TrainingData_lock            = nullptr;
Monitor* TrainingReplayQueue_lock     = nullptr;
#ifndef PRODUCT
Mutex*   FullGCALot_lock              = nullptr;
#endif

Mutex*   tty_lock                     = nullptr;

Mutex*   RawMonitor_lock              = nullptr;
Mutex*   PerfDataMemAlloc_lock        = nullptr;
Mutex*   PerfDataManager_lock         = nullptr;

#if INCLUDE_G1GC
Monitor* G1CGC_lock                   = nullptr;
Mutex*   G1DetachedRefinementStats_lock = nullptr;
Mutex*   G1FreeList_lock              = nullptr;
Mutex*   G1MarkStackChunkList_lock    = nullptr;
Mutex*   G1MarkStackFreeList_lock     = nullptr;
Monitor* G1OldGCCount_lock            = nullptr;
Mutex*   G1OldSets_lock               = nullptr;
Mutex*   G1Uncommit_lock              = nullptr;
Monitor* G1RootRegionScan_lock        = nullptr;
Mutex*   G1RareEvent_lock             = nullptr;
#endif

Mutex*   Management_lock              = nullptr;
Monitor* MonitorDeflation_lock        = nullptr;
Monitor* Service_lock                 = nullptr;
Monitor* Notification_lock            = nullptr;
Monitor* PeriodicTask_lock            = nullptr;
Monitor* RedefineClasses_lock         = nullptr;
Mutex*   Verify_lock                  = nullptr;

#if INCLUDE_JFR
Mutex*   JfrStacktrace_lock           = nullptr;
Monitor* JfrMsg_lock                  = nullptr;
Mutex*   JfrBuffer_lock               = nullptr;
#endif

Mutex*   CodeHeapStateAnalytics_lock  = nullptr;

Mutex*   ExternalsRecorder_lock       = nullptr;

Mutex*   AOTCodeCStrings_lock         = nullptr;

Monitor* ContinuationRelativize_lock  = nullptr;

Mutex*   Metaspace_lock               = nullptr;
Monitor* MetaspaceCritical_lock       = nullptr;
Mutex*   ClassLoaderDataGraph_lock    = nullptr;
Monitor* ThreadsSMRDelete_lock        = nullptr;
Mutex*   ThreadIdTableCreate_lock     = nullptr;
Mutex*   SharedDecoder_lock           = nullptr;
Mutex*   DCmdFactory_lock             = nullptr;
Mutex*   NMTQuery_lock                = nullptr;
Mutex*   NMTCompilationCostHistory_lock = nullptr;
Mutex*   NmtVirtualMemory_lock          = nullptr;

#if INCLUDE_CDS
#if INCLUDE_JVMTI
Mutex*   CDSClassFileStream_lock      = nullptr;
#endif
Mutex*   DumpTimeTable_lock           = nullptr;
Mutex*   CDSLambda_lock               = nullptr;
Mutex*   DumpRegion_lock              = nullptr;
Mutex*   ClassListFile_lock           = nullptr;
Mutex*   UnregisteredClassesTable_lock = nullptr;
Mutex*   LambdaFormInvokers_lock      = nullptr;
Mutex*   ScratchObjects_lock          = nullptr;
Mutex*   ArchivedObjectTables_lock    = nullptr;
Mutex*   FinalImageRecipes_lock       = nullptr;
#endif // INCLUDE_CDS
Mutex*   Bootclasspath_lock           = nullptr;

#if INCLUDE_JVMCI
Monitor* JVMCI_lock                   = nullptr;
Monitor* JVMCIRuntime_lock            = nullptr;
#endif

// Only one RecursiveMutex
RecursiveMutex* MultiArray_lock       = nullptr;

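// Debug-only checks used throughout the VM: the weaker form accepts lock
// ownership, being at a safepoint, or running before the VM is fully
// initialized; the strong form requires that the current thread actually
// owns the lock.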
#ifdef ASSERT
void assert_locked_or_safepoint(const Mutex* lock) {
  if (DebuggingContext::is_enabled() || VMError::is_error_reported()) return;
  // check if this thread owns the lock (common case)
  assert(lock != nullptr, "Need non-null lock");
  if (lock->owned_by_self()) return;
  if (SafepointSynchronize::is_at_safepoint()) return;
  if (!Universe::is_fully_initialized()) return;
  fatal("must own lock %s", lock->name());
}

// a stronger assertion than the above
void assert_lock_strong(const Mutex* lock) {
  if (DebuggingContext::is_enabled() || VMError::is_error_reported()) return;
  assert(lock != nullptr, "Need non-null lock");
  if (lock->owned_by_self()) return;
  fatal("must own lock %s", lock->name());
}
#endif

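// Each lock defined below is backed by suitably aligned static storage and is
// constructed in place with placement new, so the Mutex/Monitor objects live
// for the whole lifetime of the VM. Every lock is also registered via
// Mutex::add_mutex() so it can be enumerated later (e.g. for rank printing).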
#define MUTEX_STORAGE_NAME(name) name##_storage
#define MUTEX_STORAGE(name, type) alignas(type) static uint8_t MUTEX_STORAGE_NAME(name)[sizeof(type)]
#define MUTEX_DEF(name, type, pri, ...) {                                                       \
  assert(name == nullptr, "Mutex/Monitor initialized twice");                                   \
  MUTEX_STORAGE(name, type);                                                                    \
  name = ::new(static_cast<void*>(MUTEX_STORAGE_NAME(name))) type((pri), #name, ##__VA_ARGS__); \
  Mutex::add_mutex(name);                                                                       \
}
#define MUTEX_DEFN(name, type, pri, ...) MUTEX_DEF(name, type, Mutex::pri, ##__VA_ARGS__)

// Specify a lock rank relative to another lock that may be held while this
// one is acquired.
#ifdef ASSERT
#define MUTEX_DEFL(name, type, held_lock, ...) MUTEX_DEF(name, type, (held_lock)->rank() - 1, ##__VA_ARGS__)
#else
#define MUTEX_DEFL(name, type, held_lock, ...) MUTEX_DEFN(name, type, safepoint, ##__VA_ARGS__)
#endif
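// In debug builds the relative rank (one below the lock that may be held)
// lets the lock-ranking checks verify that locks are always acquired in a
// consistent order; in product builds MUTEX_DEFL falls back to the regular
// safepoint rank.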

// Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
void mutex_init() {
  MUTEX_DEFN(tty_lock                        , PaddedMutex  , tty);      // allow locking in VM

  MUTEX_DEFN(NMethodEntryBarrier_lock        , PaddedMutex  , service-1);

  MUTEX_DEFN(STS_lock                        , PaddedMonitor, nosafepoint);

#if INCLUDE_G1GC
  if (UseG1GC) {
    MUTEX_DEFN(G1CGC_lock                    , PaddedMonitor, nosafepoint);
    MUTEX_DEFN(G1DetachedRefinementStats_lock, PaddedMutex  , nosafepoint-2);
    MUTEX_DEFN(G1FreeList_lock               , PaddedMutex  , service-1);
    MUTEX_DEFN(G1MarkStackChunkList_lock     , PaddedMutex  , nosafepoint);
    MUTEX_DEFN(G1MarkStackFreeList_lock      , PaddedMutex  , nosafepoint);
    MUTEX_DEFN(G1OldSets_lock                , PaddedMutex  , nosafepoint);
    MUTEX_DEFN(G1RootRegionScan_lock         , PaddedMonitor, nosafepoint-1);
    MUTEX_DEFN(G1Uncommit_lock               , PaddedMutex  , service-2);
  }
#endif

  MUTEX_DEFN(MonitoringSupport_lock          , PaddedMutex  , service-1);        // used for serviceability monitoring support

  MUTEX_DEFN(StringDedup_lock                , PaddedMonitor, nosafepoint);
  MUTEX_DEFN(StringDedupIntern_lock          , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(RawMonitor_lock                 , PaddedMutex  , nosafepoint-1);

  MUTEX_DEFN(Metaspace_lock                  , PaddedMutex  , nosafepoint-3);
  MUTEX_DEFN(MetaspaceCritical_lock          , PaddedMonitor, nosafepoint-1);

  MUTEX_DEFN(MonitorDeflation_lock           , PaddedMonitor, nosafepoint);      // used for monitor deflation thread operations
  MUTEX_DEFN(Service_lock                    , PaddedMonitor, service);          // used for service thread operations
  MUTEX_DEFN(Notification_lock               , PaddedMonitor, service);          // used for notification thread operations

  MUTEX_DEFN(JmethodIdCreation_lock          , PaddedMutex  , nosafepoint-1);    // used for creating jmethodIDs; can also lock HandshakeState_lock
  MUTEX_DEFN(InvokeMethodTypeTable_lock      , PaddedMutex  , safepoint);
  MUTEX_DEFN(InvokeMethodIntrinsicTable_lock , PaddedMonitor, safepoint);
  MUTEX_DEFN(AdapterHandlerLibrary_lock      , PaddedMutex  , safepoint);
  MUTEX_DEFN(SharedDictionary_lock           , PaddedMutex  , safepoint);
  MUTEX_DEFN(VMStatistic_lock                , PaddedMutex  , safepoint);
  MUTEX_DEFN(SignatureHandlerLibrary_lock    , PaddedMutex  , safepoint);
  MUTEX_DEFN(SymbolArena_lock                , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(ExceptionCache_lock             , PaddedMutex  , safepoint);
#ifndef PRODUCT
  MUTEX_DEFN(FullGCALot_lock                 , PaddedMutex  , safepoint); // a lock to make FullGCALot MT safe
#endif
  MUTEX_DEFN(BeforeExit_lock                 , PaddedMonitor, safepoint);

  MUTEX_DEFN(NonJavaThreadsList_lock         , PaddedMutex  , nosafepoint-1);
  MUTEX_DEFN(NonJavaThreadsListSync_lock     , PaddedMutex  , nosafepoint);

  MUTEX_DEFN(RetData_lock                    , PaddedMutex  , safepoint);
  MUTEX_DEFN(Terminator_lock                 , PaddedMonitor, safepoint, true);
  MUTEX_DEFN(InitCompleted_lock              , PaddedMonitor, nosafepoint);
  MUTEX_DEFN(Notify_lock                     , PaddedMonitor, safepoint, true);

  MUTEX_DEFN(JfieldIdCreation_lock           , PaddedMutex  , safepoint);

  MUTEX_DEFN(CompiledIC_lock                 , PaddedMutex  , nosafepoint);  // locks VtableStubs_lock
  MUTEX_DEFN(MethodCompileQueue_lock         , PaddedMonitor, safepoint);
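  // When UseGlobalCompileQueueLock is enabled, the per-queue lock pointers
  // below all alias MethodCompileQueue_lock, so every compiler queue shares
  // a single lock; otherwise each queue gets its own monitor.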
  if (UseGlobalCompileQueueLock) {
    MethodCompileQueueC1_lock  = MethodCompileQueue_lock;
    MethodCompileQueueC2_lock  = MethodCompileQueue_lock;
    MethodCompileQueueSC1_lock = MethodCompileQueue_lock;
    MethodCompileQueueSC2_lock = MethodCompileQueue_lock;
  } else {
    MUTEX_DEFN(MethodCompileQueueC1_lock     , PaddedMonitor, safepoint);
    MUTEX_DEFN(MethodCompileQueueC2_lock     , PaddedMonitor, safepoint);
    MUTEX_DEFN(MethodCompileQueueSC1_lock    , PaddedMonitor, safepoint);
    MUTEX_DEFN(MethodCompileQueueSC2_lock    , PaddedMonitor, safepoint);
  }
  MUTEX_DEFN(TrainingData_lock               , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(TrainingReplayQueue_lock        , PaddedMonitor, safepoint);
  MUTEX_DEFN(CompileStatistics_lock          , PaddedMutex  , safepoint);
  MUTEX_DEFN(DirectivesStack_lock            , PaddedMutex  , nosafepoint);

  MUTEX_DEFN(JvmtiVTMSTransition_lock        , PaddedMonitor, safepoint);   // used for Virtual Thread Mount State transition management
  MUTEX_DEFN(JvmtiVThreadSuspend_lock        , PaddedMutex  , nosafepoint-1);
  MUTEX_DEFN(EscapeBarrier_lock              , PaddedMonitor, nosafepoint); // Used to synchronize object reallocation/relocking triggered by JVMTI
  MUTEX_DEFN(Management_lock                 , PaddedMutex  , safepoint);   // used for JVM management

  MUTEX_DEFN(ConcurrentGCBreakpoints_lock    , PaddedMonitor, safepoint, true);
  MUTEX_DEFN(TouchedMethodLog_lock           , PaddedMutex  , safepoint);

  MUTEX_DEFN(CompileThread_lock              , PaddedMonitor, safepoint);
  MUTEX_DEFN(PeriodicTask_lock               , PaddedMonitor, safepoint, true);
  MUTEX_DEFN(RedefineClasses_lock            , PaddedMonitor, safepoint);
  MUTEX_DEFN(Verify_lock                     , PaddedMutex  , safepoint);
  MUTEX_DEFN(ClassLoaderDataGraph_lock       , PaddedMutex  , safepoint);

  MUTEX_DEFN(Compilation_lock                , PaddedMonitor, nosafepoint);

#if INCLUDE_JFR
  MUTEX_DEFN(JfrBuffer_lock                  , PaddedMutex  , event);
  MUTEX_DEFN(JfrMsg_lock                     , PaddedMonitor, event);
  MUTEX_DEFN(JfrStacktrace_lock              , PaddedMutex  , event);
#endif

  MUTEX_DEFN(ContinuationRelativize_lock     , PaddedMonitor, nosafepoint-3);
  MUTEX_DEFN(CodeHeapStateAnalytics_lock     , PaddedMutex  , safepoint);
  MUTEX_DEFN(ThreadsSMRDelete_lock           , PaddedMonitor, service-2); // Holds ConcurrentHashTableResize_lock
  MUTEX_DEFN(ThreadIdTableCreate_lock        , PaddedMutex  , safepoint);
  MUTEX_DEFN(DCmdFactory_lock                , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(NMTQuery_lock                   , PaddedMutex  , safepoint);
  MUTEX_DEFN(NMTCompilationCostHistory_lock  , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(NmtVirtualMemory_lock           , PaddedMutex  , service-4); // Must be lower than G1Mapper_lock used from G1RegionsSmallerThanCommitSizeMapper::commit_regions
#if INCLUDE_CDS
#if INCLUDE_JVMTI
  MUTEX_DEFN(CDSClassFileStream_lock         , PaddedMutex  , safepoint);
#endif
  MUTEX_DEFN(DumpTimeTable_lock              , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(CDSLambda_lock                  , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(DumpRegion_lock                 , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(ClassListFile_lock              , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(UnregisteredClassesTable_lock   , PaddedMutex  , nosafepoint-1);
  MUTEX_DEFN(LambdaFormInvokers_lock         , PaddedMutex  , safepoint);
  MUTEX_DEFN(ScratchObjects_lock             , PaddedMutex  , nosafepoint-1); // Holds DumpTimeTable_lock
  MUTEX_DEFN(ArchivedObjectTables_lock       , PaddedMutex  , nosafepoint);
  MUTEX_DEFN(FinalImageRecipes_lock          , PaddedMutex  , nosafepoint);
#endif // INCLUDE_CDS
  MUTEX_DEFN(Bootclasspath_lock              , PaddedMutex  , nosafepoint);

#if INCLUDE_JVMCI
  // JVMCIRuntime::_lock must be acquired before JVMCI_lock to avoid deadlock
  MUTEX_DEFN(JVMCIRuntime_lock               , PaddedMonitor, safepoint, true);
#endif

  MUTEX_DEFN(ThreadsLockThrottle_lock        , PaddedMonitor, safepoint);

  // These locks have relative rankings, and inherit safepoint checking attributes from that rank.
  MUTEX_DEFL(VtableStubs_lock               , PaddedMutex  , CompiledIC_lock);  // Also holds DumpTimeTable_lock
  MUTEX_DEFL(CodeCache_lock                 , PaddedMonitor, VtableStubs_lock);
  MUTEX_DEFL(NMethodState_lock              , PaddedMutex  , CodeCache_lock);

  // tty_lock is held when printing an nmethod and its relocations, which use this lock.
  MUTEX_DEFL(ExternalsRecorder_lock         , PaddedMutex  , tty_lock);

  MUTEX_DEFL(AOTCodeCStrings_lock           , PaddedMutex  , tty_lock);

  MUTEX_DEFL(Threads_lock                   , PaddedMonitor, CompileThread_lock, true);
  MUTEX_DEFL(Compile_lock                   , PaddedMutex  , MethodCompileQueue_lock);
  MUTEX_DEFL(JNICritical_lock               , PaddedMonitor, AdapterHandlerLibrary_lock); // used for JNI critical regions
  MUTEX_DEFL(Heap_lock                      , PaddedMonitor, JNICritical_lock);

  MUTEX_DEFL(PerfDataMemAlloc_lock          , PaddedMutex  , Heap_lock);
  MUTEX_DEFL(PerfDataManager_lock           , PaddedMutex  , Heap_lock);
  MUTEX_DEFL(VMOperation_lock               , PaddedMonitor, Heap_lock, true);
  MUTEX_DEFL(ClassInitError_lock            , PaddedMonitor, Threads_lock);

#if INCLUDE_G1GC
  if (UseG1GC) {
    MUTEX_DEFL(G1OldGCCount_lock             , PaddedMonitor, Threads_lock, true);
    MUTEX_DEFL(G1RareEvent_lock              , PaddedMutex  , Threads_lock, true);
  }
#endif

  MUTEX_DEFL(CompileTaskWait_lock           , PaddedMonitor, MethodCompileQueue_lock);

#if INCLUDE_PARALLELGC
  if (UseParallelGC) {
    MUTEX_DEFL(PSOldGenExpand_lock          , PaddedMutex  , Heap_lock, true);
  }
#endif
  MUTEX_DEFL(Module_lock                    , PaddedMutex  , ClassLoaderDataGraph_lock);
  MUTEX_DEFL(SystemDictionary_lock          , PaddedMonitor, Module_lock);
#if INCLUDE_JVMCI
  // JVMCIRuntime_lock must be acquired before JVMCI_lock to avoid deadlock
  MUTEX_DEFL(JVMCI_lock                     , PaddedMonitor, JVMCIRuntime_lock);
#endif
  MUTEX_DEFL(JvmtiThreadState_lock          , PaddedMutex  , JvmtiVTMSTransition_lock);   // Used by JvmtiThreadState/JvmtiEventController
  MUTEX_DEFL(SharedDecoder_lock             , PaddedMutex  , NmtVirtualMemory_lock); // Must be lower than NmtVirtualMemory_lock due to MemTracker::print_containing_region

  // Allocate RecursiveMutex
  MultiArray_lock = new RecursiveMutex();
}
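
// A sketch of how the locks defined above are typically used elsewhere in
// the VM: the RAII wrappers declared in mutexLocker.hpp acquire the lock in
// their constructor and release it in their destructor, e.g.
//
//   {
//     MutexLocker ml(Compile_lock);
//     // ... code that must hold Compile_lock ...
//   } // released when 'ml' goes out of scope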

#undef MUTEX_DEFL
#undef MUTEX_DEFN
#undef MUTEX_DEF
#undef MUTEX_STORAGE
#undef MUTEX_STORAGE_NAME

static const int MAX_NAMES = 200;
static const char* _names[MAX_NAMES] = { nullptr };
static bool _is_unique[MAX_NAMES] = { false };
static volatile int _num_names = 0;

static bool _mutex_init_done = false;

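// Per-lock-name performance counters, allocated lazily in init_counters()
// when both ProfileVMLocks and UsePerfData are enabled. Index 0 is reserved
// for unnamed/"Other" locks; a lock name with id 'i' uses index 'i + 1'.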
PerfCounter** MutexLockerImpl::_perf_lock_count     = nullptr;
PerfCounter** MutexLockerImpl::_perf_lock_wait_time = nullptr;
PerfCounter** MutexLockerImpl::_perf_lock_hold_time = nullptr;

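// When lock profiling is enabled, '_before' times how long it takes to
// acquire the lock and '_after' is started once the lock is held. These
// correspond to the per-lock "BeforeTime" and "AfterTime" counters created in
// init_counters(); the '_after' interval ends when the locker is destroyed
// (the destructor is not part of this file).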
MutexLockerImpl::MutexLockerImpl(Mutex* mutex, Mutex::SafepointCheckFlag flag) :
  _mutex(mutex),
  _prof(ProfileVMLocks && Thread::current_or_null() != nullptr && Thread::current()->profile_vm_locks()) {

  bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
  if (_mutex != nullptr) {
    if (_prof) { _before.start(); } // before

    if (no_safepoint_check) {
      _mutex->lock_without_safepoint_check();
    } else {
      _mutex->lock();
    }

    if (_prof) { _before.stop(); _after.start(); } // after
  }
}

MutexLockerImpl::MutexLockerImpl(Thread* thread, Mutex* mutex, Mutex::SafepointCheckFlag flag) :
  _mutex(mutex), _prof(thread->profile_vm_locks()) {

  if (_prof) { _before.start(); } // before

  bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
  if (_mutex != nullptr) {
    if (no_safepoint_check) {
      _mutex->lock_without_safepoint_check(thread);
    } else {
      _mutex->lock(thread);
    }
  }

  if (_prof) { _before.stop(); _after.start(); } // after
}

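// Create the per-lock jvmstat counters (in the sun.rt namespace). Slot 0
// aggregates the "Other"/unnamed locks; name slots that are still empty get
// a synthetic "UnnamedMutex#<id>" name so their counters can be created.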
void MutexLockerImpl::init_counters() {
  if (ProfileVMLocks && UsePerfData) {
    ResourceMark rm;
    EXCEPTION_MARK;
    _perf_lock_count     = NEW_C_HEAP_ARRAY(PerfCounter*, MAX_NAMES + 1, mtInternal);
    _perf_lock_wait_time = NEW_C_HEAP_ARRAY(PerfCounter*, MAX_NAMES + 1, mtInternal);
    _perf_lock_hold_time = NEW_C_HEAP_ARRAY(PerfCounter*, MAX_NAMES + 1, mtInternal);

    NEWPERFEVENTCOUNTER(_perf_lock_count[0],     SUN_RT, PerfDataManager::counter_name("Other", "Count"));
    NEWPERFEVENTCOUNTER(_perf_lock_wait_time[0], SUN_RT, PerfDataManager::counter_name("Other", "BeforeTime"));
    NEWPERFEVENTCOUNTER(_perf_lock_hold_time[0], SUN_RT, PerfDataManager::counter_name("Other", "AfterTime"));
    for (int i = 0; i < MAX_NAMES; i++) {
      ResourceMark rm;
      const char* counter_name = _names[i];
      if (counter_name == nullptr) {
        stringStream ss;
        ss.print("UnnamedMutex#%d", i);
        counter_name = ss.as_string();
        _names[i] = os::strdup(counter_name, mtInternal); // replace default nullptr
      }
      NEWPERFEVENTCOUNTER(_perf_lock_count[i + 1],     SUN_RT, PerfDataManager::counter_name(counter_name, "Count"));
      NEWPERFEVENTCOUNTER(_perf_lock_wait_time[i + 1], SUN_RT, PerfDataManager::counter_name(counter_name, "BeforeTime"));
      NEWPERFEVENTCOUNTER(_perf_lock_hold_time[i + 1], SUN_RT, PerfDataManager::counter_name(counter_name, "AfterTime"));
    }
    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("MutexLockerImpl::init_counters() failed unexpectedly");
    }
  }
  _mutex_init_done = true;
}

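// Map a lock/counter name to its slot in the counter arrays. During
// mutex_init() this runs single-threaded and each name simply claims the
// next slot; afterwards names are looked up first and new slots are claimed
// with a CAS on _num_names. Returns -1 if profiling is disabled or no slots
// are left.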
int MutexLockerImpl::name2id(const char* name) {
  if (ProfileVMLocks && UsePerfData) {
    // There is no concurrency or duplication in mutex_init().
    if (!_mutex_init_done) {
      int new_id = Atomic::load(&_num_names);
      precond(new_id < MAX_NAMES);
      Atomic::inc(&_num_names);
      _names[new_id] = os::strdup(name, mtInternal);
      _is_unique[new_id] = true;
      return new_id;
    }
    int limit = Atomic::load(&_num_names); // Cache the static value, which can be updated concurrently
    for (int i = Mutex::num_mutex(); i < limit; i++) {
      if (strcmp(_names[i], name) == 0) {
        _is_unique[i] = false;
        return i;
      }
    }
    if (limit < MAX_NAMES) {
      int old_limit = limit;
      const char* name_dup = os::strdup(name, mtInternal);
      int new_id; // Get new id for this name
      do {
        new_id = limit++;
        if (new_id == MAX_NAMES) break;
      } while (Atomic::cmpxchg(&_num_names, new_id, limit) != new_id);
      for (int i = old_limit; i < new_id; i++) {
        if (strcmp(_names[i], name) == 0) { // Other thread put it there
          _is_unique[i] = false;
          return i; // Wasted new_id slot to simplify code: _num_names is only incremented
        }
      }
      if (new_id < MAX_NAMES) {
        _names[new_id] = name_dup;
        _is_unique[new_id] = true;
        return new_id;
      }
    }
    log_debug(init)("Unnamed: %s", name); // no slots left
  }
  return -1;
}

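// Print one per-lock line: the hold time, the acquisition ("before") time in
// parentheses, and the number of events. Names shared by more than one lock
// are flagged with an 'M'.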
void MutexLockerImpl::print_counter_on(outputStream* st, const char* name, bool is_unique, int idx) {
  jlong count = _perf_lock_count[idx]->get_value();
  if (count > 0) {
    st->print_cr("  %3d: %s%40s = " JLONG_FORMAT_W(5) "us (" JLONG_FORMAT_W(5) "us) / " JLONG_FORMAT_W(9) " events",
                 idx, (is_unique ? " " : "M"), name,
                 Management::ticks_to_us(_perf_lock_hold_time[idx]->get_value()),
                 Management::ticks_to_us(_perf_lock_wait_time[idx]->get_value()),
                 count);
  }
}

static jlong accumulate_lock_counters(PerfCounter** lock_counters) {
  jlong acc = 0;
  for (int i = 0; i < _num_names + 1; i++) { // 0 slot is reserved for unnamed locks
    if (lock_counters[i] == nullptr) {
      break;
    }
    acc += lock_counters[i]->get_value();
  }
  return acc;
}

void MutexLockerImpl::print_counters_on(outputStream* st) {
  if (ProfileVMLocks && UsePerfData) {
    jlong total_count     = accumulate_lock_counters(_perf_lock_count);
    jlong total_wait_time = accumulate_lock_counters(_perf_lock_wait_time);
    jlong total_hold_time = accumulate_lock_counters(_perf_lock_hold_time);

    st->print_cr("MutexLocker: Total: %d named locks (%d unique names); hold = " JLONG_FORMAT "us (wait = " JLONG_FORMAT "us) / " JLONG_FORMAT " events for thread \"main\"",
                 Mutex::num_mutex(), _num_names,
                 Management::ticks_to_us(total_hold_time),
                 Management::ticks_to_us(total_wait_time),
                 total_count);
    for (int i = 0; i < _num_names; i++) {
      print_counter_on(st, _names[i], _is_unique[i], i+1);
    }
    print_counter_on(st, "Unnamed / Other", false /*is_unique*/, 0);
  } else {
    st->print_cr("MutexLocker: no info (%s is disabled)", (UsePerfData ? "ProfileVMLocks" : "UsePerfData"));
  }
}

void MutexLockerImpl::post_initialize() {
  // Print mutex ranks if requested.
  LogTarget(Info, vmmutex) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    Mutex::print_lock_ranks(&ls);
  }
}

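// GCMutexLocker only acquires the mutex when the VM is not already at a
// safepoint; at a safepoint the exclusion the lock would provide is already
// guaranteed by the safepoint itself.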
GCMutexLocker::GCMutexLocker(Mutex* mutex) {
  if (SafepointSynchronize::is_at_safepoint()) {
    _locked = false;
  } else {
    _mutex = mutex;
    _locked = true;
    _mutex->lock();
  }
}