/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = nullptr;
PerfVariable* ThreadService::_live_threads_count = nullptr;
PerfVariable* ThreadService::_peak_threads_count = nullptr;
PerfVariable* ThreadService::_daemon_threads_count = nullptr;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

volatile jlong ThreadService::_exited_allocated_bytes = 0;

ThreadDumpResult* ThreadService::_threaddump_list = nullptr;

static const int INITIAL_ARRAY_SIZE = 10;

// OopStorage for thread stack sampling
static OopStorage* _thread_service_storage = nullptr;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set; in that
  // case, they are allocated on the C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it

  // Initialize the OopStorage used for thread stack sampling and walking
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
                                                         mtServiceability);
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

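// Decrement only the atomically maintained counters here; the matching perf
// counters are updated later in remove_thread() while holding the Threads_lock.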
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Include hidden thread allocations in exited_allocated_bytes
  ThreadService::incr_exited_allocated_bytes(thread->cooked_allocated_bytes());

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // We did not get here via JavaThread::exit() so current_thread_exiting()
    // was not called, e.g., JavaThread::cleanup_failed_attach_current_thread().
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
      "thread count mismatch %d : %d",
      (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
    "thread count mismatch %d : %d",
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
    (_live_threads_count->get_value() == 0 && count == 0 &&
    _daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
    (int)_live_threads_count->get_value(), count,
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
    (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, daemon %d,%d",
    (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != nullptr, "should be non-null");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = nullptr;
  if (wait_obj != nullptr) {
    // thread is doing an Object.wait() call
    obj = wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != nullptr) {
      // thread is trying to enter() an ObjectMonitor.
      obj = enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

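// Keep the Method* metadata referenced by in-progress thread dumps alive,
// e.g. across class unloading and redefinition.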
void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != nullptr; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == nullptr) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = nullptr;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != nullptr; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == nullptr) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump the stack traces of the threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != nullptr; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == nullptr) {
      // No stack trace
      result_obj->obj_at_put(i, nullptr);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != nullptr) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != nullptr) {
    stat->reset_time_stat();
  }
}

bool ThreadService::is_virtual_or_carrier_thread(JavaThread* jt) {
  oop threadObj = jt->threadObj();
  if (threadObj != nullptr && threadObj->is_a(vmClasses::BaseVirtualThread_klass())) {
    // a virtual thread backed by JavaThread
    return true;
  }
  if (jt->is_vthread_mounted()) {
    // carrier thread
    return true;
  }
  return false;
}

// Find deadlocks involving raw monitors, object monitors and, if concurrent_locks
// is true, concurrent locks.
// We skip virtual thread carriers under the assumption that the current scheduler,
// ForkJoinPool, doesn't hold any locks while mounting a virtual thread, so any owned
// monitor (or j.u.c. lock, for that matter) on that JavaThread must be owned by the
// virtual thread, and we don't support deadlock detection for virtual threads.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = nullptr;
  JvmtiRawMonitor* waitingToLockRawMonitor = nullptr;
  oop waitingToLockBlocker = nullptr;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != nullptr; jt = jti.next()) {
    if (!is_virtual_or_carrier_thread(jt)) {
      jt->set_depth_first_number(-1);
    }
  }

  DeadlockCycle* deadlocks = nullptr;
  DeadlockCycle* last = nullptr;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != nullptr; jt = jti.next()) {
    if (is_virtual_or_carrier_thread(jt)) {
      // skip virtual and carrier threads
      continue;
    }
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != nullptr ||
           waitingToLockRawMonitor != nullptr ||
           waitingToLockBlocker != nullptr) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != nullptr) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != nullptr && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          currentThread = JavaThread::cast(owner);
        }
      } else if (waitingToLockMonitor != nullptr) {
        if (waitingToLockMonitor->has_owner()) {
          currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != nullptr ? java_lang_Thread::thread(threadObj) : nullptr;
          } else {
            currentThread = nullptr;
          }
        }
      }

      if (currentThread == nullptr || is_virtual_or_carrier_thread(currentThread)) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        // add this cycle to the deadlocks list
        if (deadlocks == nullptr) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(nullptr), _last(nullptr), _next(nullptr), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(nullptr), _last(nullptr), _next(nullptr), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != nullptr) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

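// Append a snapshot to the result's singly-linked list. A _num_threads of zero
// means the dump was not pre-sized, so no upper bound is enforced in that case.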
void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == nullptr) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != nullptr; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = nullptr;
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != nullptr, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  if (_locked_monitors != nullptr) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != nullptr ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }
}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
public:
  InflatedMonitorsClosure(ThreadStackTrace* st) {
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    oop object = mid->object();
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
      _stack_trace->add_jni_locked_monitor(object);
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = nullptr;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != nullptr) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsView* monitors, bool full) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread,
                        RegisterMap::UpdateMap::include,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    ResourceMark rm(VMThread::vm_thread());
    // If full, we want to print both vthread and carrier frames
    vframe* start_vf = !full && _thread->is_vthread_mounted()
      ? _thread->carrier_last_java_vframe(&reg_map)
      : _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (!full && f->is_vthread_entry()) {
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // that are not found in the stack, e.g. JNI locked monitors:
    InflatedMonitorsClosure imc(this);
    monitors->visit(&imc, _thread);
  }
}

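// Returns true if the given object is locked by one of the frames already
// recorded in this stack trace; used to identify JNI-locked monitors that do
// not appear in any Java frame.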
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != nullptr, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != nullptr, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != nullptr;)  {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != nullptr) {
      // See the comments in ThreadConcurrentLocks for how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != nullptr) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == nullptr) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != nullptr; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return nullptr;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != nullptr ? tcl->owned_locks() : nullptr);
  if (locks == nullptr || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = nullptr;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(_thread_service_storage);
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(_thread_service_storage, o));
}

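// Per-thread contention statistics. The pending-reset flags record reset
// requests made by other threads so they can be applied lazily.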
ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }

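// Record the thread's status, contention statistics and current blocker
// (contended monitor, park blocker or mounted virtual thread) for this snapshot.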
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be null.
  _thread_status = threadObj == nullptr ? JavaThreadStatus::NEW
                                     : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = nullptr;
  oop blocker_object_owner = nullptr;

  if (thread->is_vthread_mounted() && thread->vthread() != threadObj) { // ThreadSnapshot only captures platform threads
    _thread_status = JavaThreadStatus::IN_OBJECT_WAIT;
    oop vthread = thread->vthread();
    assert(vthread != nullptr, "");
    blocker_object = vthread;
    blocker_object_owner = vthread;
  } else if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == nullptr) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == nullptr && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != nullptr && owner->is_attaching_via_jni())) {
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or may be in the middle of being released to
        // another thread), so report this thread as RUNNABLE.
        // Also, when the owner thread is still attaching, the java thread is
        // not completely initialized; for example, the thread name and id may
        // not be set yet, so hide the attaching thread.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = nullptr;
      } else if (owner != nullptr) {
        blocker_object_owner = owner->threadObj();
      }
    }
  } else if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != nullptr && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != nullptr) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != nullptr) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const           { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const     { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);
  _threadObj.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
                                             ObjectMonitorsView* monitors, bool full) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth, monitors, full);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != nullptr) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _threads = new (mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = nullptr;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}

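// Print this deadlock cycle: each thread in the cycle, what it is waiting for
// and who holds it, followed by the Java stack traces of the threads involved.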
void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->name());
    const char* owner_desc = ",\n  which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != nullptr) {
      st->print("  waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be null as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != nullptr) {
        if (owner->is_Java_thread()) {
          currentThread = JavaThread::cast(owner);
          st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
        } else {
          st->print_cr(",\n  which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != nullptr) {
      st->print("  waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                 obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n  in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
      if (currentThread == nullptr) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                  p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != nullptr, "AbstractOwnableSynchronizer owning thread is unexpectedly null");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads,
                                             bool include_bound_virtual_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Skip JavaThreads in the process of exiting
    // and also skip VM internal JavaThreads.
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == nullptr   ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj())   ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    // skip instances of BoundVirtualThread
    if (!include_bound_virtual_threads && jt->threadObj()->is_a(vmClasses::BoundVirtualThread_klass())) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}