/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

ThreadDumpResult* ThreadService::_threaddump_list = NULL;

static const int INITIAL_ARRAY_SIZE = 10;

// OopStorage for thread stack sampling
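// (strong roots: OopHandles allocated from this storage keep their referents
// alive until explicitly released by the owning snapshot/frame objects)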
static OopStorage* _thread_service_storage = NULL;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on the C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it

  // Initialize the OopStorage used for thread stack sampling and walking
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
                                                         mtServiceability);
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

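// Atomic counts are decremented exactly once per exiting thread: either from
// current_thread_exiting() on the normal JavaThread::exit() path, or from
// remove_thread() when that callback was skipped.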
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
      "thread count mismatch %d : %d",
      (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
    "thread count mismatch %d : %d",
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
    (_live_threads_count->get_value() == 0 && count == 0 &&
    _daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
    (int)_live_threads_count->get_value(), count,
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
    (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, daemon %d,%d",
    (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump the stack traces of the threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
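// The search is a depth-first walk of the waits-for graph: each JavaThread is
// given a depth-first number (dfn), and we follow the chain of "waiting to
// lock a monitor/synchronizer owned by" edges from each unvisited thread. If
// the chain reaches a thread whose dfn was assigned during the current walk
// (dfn >= thisDfn) and that thread is not just the previous node, the chain
// closes a cycle and is recorded as a DeadlockCycle.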
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          currentThread = JavaThread::cast(owner);
        }
      } else if (waitingToLockMonitor != NULL) {
        address currentOwner = (address)waitingToLockMonitor->owner();
        if (currentOwner != NULL) {
          currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                    currentOwner);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

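// A StackFrameInfo pins the method's holder class, the continuation scope name
// (if any), and the objects of any locked monitors with OopHandles allocated
// in _thread_service_storage, so they stay reachable across GC until the frame
// info is destroyed.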
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = NULL;
  _cont_scope_name = OopHandle(_thread_service_storage, (jvf->continuation() != NULL) ? jdk_internal_vm_ContinuationScope::name(jdk_internal_vm_Continuation::scope(jvf->continuation())) : (oop)NULL);
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  if (_locked_monitors != NULL) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
  _cont_scope_name.release(_thread_service_storage);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
public:
  InflatedMonitorsClosure(ThreadStackTrace* st) {
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    oop object = mid->object();
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
      _stack_trace->add_jni_locked_monitor(object);
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHashtable* table, bool full) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread, true, false);

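    // If a virtual thread is mounted (there is a continuation in the vthread
    // scope) and a full dump was not requested, start the walk from the
    // carrier's last Java frame; otherwise start from the thread's last Java
    // frame.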
    vframe* start_vf = !full && _thread->last_continuation(java_lang_VirtualThread::vthread_scope()) != NULL
      ? _thread->vthread_carrier_last_java_vframe(&reg_map)
      : _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // that are not found in the stack, e.g. JNI locked monitors:
    InflatedMonitorsClosure imc(this);
    if (table != nullptr) {
      // Get the ObjectMonitors locked by the target thread, if any;
      // this does not include any whose owner is set to a stack lock
      // address in the target thread:
      ObjectMonitorsHashtable::PtrList* list = table->get_entry(_thread);
      if (list != nullptr) {
        ObjectSynchronizer::monitors_iterate(&imc, list, _thread);
      }
    } else {
      ObjectSynchronizer::monitors_iterate(&imc, _thread);
    }
  }
}


bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    Handle contScopeNameH(THREAD, frame->cont_scope_name().resolve());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), contScopeNameH, CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;)  {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(_thread_service_storage);
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(_thread_service_storage, o));
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }

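// Capture a consistent view of the target thread's state: contention and wait
// statistics, the thread status, and, if the thread is blocked, waiting or
// parked, the blocker object and its owner. Oops are stored as OopHandles in
// _thread_service_storage so the snapshot remains valid across GC.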
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be NULL.
  _thread_status = threadObj == NULL ? JavaThreadStatus::NEW
                                     : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = NULL;
  oop blocker_object_owner = NULL;

  if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or may be in the process of being released to
        // another thread), so report this thread as RUNNABLE.
        // When the owner thread is still attaching, the Java thread is not
        // completely initialized; for example, its name and id may not be
        // set yet, so hide the attaching thread.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = NULL;
      } else if (owner != NULL) {
        blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != NULL && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != NULL) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != NULL) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const           { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const     { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);
  _threadObj.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
                                             ObjectMonitorsHashtable* table, bool full) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth, table, full);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}

void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->name());
    const char* owner_desc = ",\n  which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != NULL) {
      st->print("  waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != NULL) {
        if (owner->is_Java_thread()) {
          currentThread = JavaThread::cast(owner);
          st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
        } else {
          st->print_cr(",\n  which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != NULL) {
      st->print("  waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                 obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n  in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                (address)waitingToLockMonitor->owner());
      if (currentThread == NULL) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                  p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Skip JavaThreads in the process of exiting and VM internal JavaThreads.
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == NULL   ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj())   ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}