/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

volatile jlong ThreadService::_exited_allocated_bytes = 0;

ThreadDumpResult* ThreadService::_threaddump_list = NULL;

static const int INITIAL_ARRAY_SIZE = 10;

// OopStorage for thread stack sampling
static OopStorage* _thread_service_storage = NULL;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set; in that
  // case, they will be allocated on the C heap.
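  // As a usage sketch: assuming JAVA_THREADS expands to the conventional
  // "java.threads" namespace, the counters created below can be inspected
  // with "jcmd <pid> PerfCounter.print" as java.threads.started,
  // java.threads.live, java.threads.livePeak and java.threads.daemon.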

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it

  // Initialize the OopStorage used by ThreadService for thread snapshots and stack sampling
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
                                                         mtServiceability);
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

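// Atomically decrement the live (and, if daemon, the daemon) thread counts.
// Called from current_thread_exiting() when a thread exits normally, or from
// remove_thread() when the exiting path was skipped (see the is_exiting()
// check there).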
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Include hidden thread allocations in exited_allocated_bytes
  ThreadService::incr_exited_allocated_bytes(thread->cooked_allocated_bytes());

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
      "thread count mismatch %d : %d",
      (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
    "thread count mismatch %d : %d",
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
    (_live_threads_count->get_value() == 0 && count == 0 &&
    _daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
    (int)_live_threads_count->get_value(), count,
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
    (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, daemon %d,%d",
    (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump the stack traces of the threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement objects
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
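// The search below is a depth-first walk of the waits-for relation: each
// JavaThread is tagged with a depth-first number and the chain of owners is
// followed from whatever the thread is blocked on (ObjectMonitor, JVM TI raw
// monitor, or park blocker). Reaching a thread that was already numbered in
// the current walk (and is not just the immediate predecessor) closes a
// cycle, which is recorded as a DeadlockCycle.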
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
        }
      } else if (waitingToLockMonitor != NULL) {
        address currentOwner = (address)waitingToLockMonitor->owner();
        if (currentOwner != NULL) {
          currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                    currentOwner);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            cycle->set_deadlock(true);

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

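// Append a snapshot to the singly-linked list owned by this result. When a
// thread count was supplied to the constructor, the list may not grow beyond
// _num_threads entries.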
void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

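// Capture the method and bci of a Java frame. Strong OopHandles backed by
// _thread_service_storage keep the method's class holder (and, optionally,
// the objects of any locked monitors) alive until this StackFrameInfo is
// released.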
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = NULL;
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  if (_locked_monitors != NULL) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
public:
  InflatedMonitorsClosure(ThreadStackTrace* st) {
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    oop object = mid->object();
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
      _stack_trace->add_jni_locked_monitor(object);
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}

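// Walk the target thread's vframes at a safepoint, recording up to maxDepth
// Java frames (a negative maxDepth means the entire stack). If requested,
// also collect monitors that are owned by the thread but do not appear in
// any collected frame, i.e. JNI-locked monitors.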
void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate the inflated monitors and find monitors locked by this thread
    // that were not found on the stack
    InflatedMonitorsClosure imc(this);
    ObjectSynchronizer::monitors_iterate(&imc, _thread);
  }
}


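// Returns true if the given object is recorded as a locked monitor in one of
// the collected stack frames. Used by InflatedMonitorsClosure to distinguish
// JNI-locked monitors from monitors locked by Java frames.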
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement objects
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


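// Free the per-thread lock lists unless _retain_map_on_free is set, in which
// case ownership of the ThreadConcurrentLocks nodes has been handed off to
// the code that requested the dump.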
ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;)  {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all of its owned AbstractOwnableSynchronizer objects
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(_thread_service_storage);
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(_thread_service_storage, o));
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }

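// Capture the state of the given thread: its contention/wait/sleep
// statistics, thread status, and, when it is blocked, waiting or parked, the
// blocker object and that object's owner. Oops are stored in OopHandles so
// the snapshot remains usable after the safepoint operation that created it.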
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be NULL.
  _thread_status = threadObj == NULL ? JavaThreadStatus::NEW
                                     : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = NULL;
  oop blocker_object_owner = NULL;

  if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or may be in the process of being released to
        // another thread), so report this thread as RUNNABLE.
        // When the owner thread is still attaching, its java.lang.Thread
        // object is not completely initialized; for example, the thread name
        // and id may not be set yet, so hide the attaching thread.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = NULL;
      } else if (owner != NULL) {
        blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != NULL && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != NULL) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != NULL) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const           { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const     { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);
  _threadObj.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _is_deadlock = false;
  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}

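// Print one deadlock cycle: for each thread, report what it is waiting to
// lock (a JVM TI raw monitor, an ObjectMonitor, or a j.u.c. ownable
// synchronizer) and which thread holds it, then print the stack traces of
// all threads in the cycle.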
void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    const char* owner_desc = ",\n  which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != NULL) {
      st->print("  waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be NULL as the raw monitor could be released at any time if held by a non-JavaThread
      if (owner != NULL) {
        if (owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
          st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
        } else {
          st->print_cr(",\n  which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != NULL) {
      st->print("  waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                 obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n  in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                (address)waitingToLockMonitor->owner());
      if (currentThread == NULL) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                  p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

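// Snapshot the set of live, externally visible JavaThreads as instanceHandles.
// JVM TI agent threads and threads still attaching via JNI are only included
// when the corresponding flags request them.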
ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Skip JavaThreads in the process of exiting and VM internal JavaThreads.
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == NULL   ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj())   ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}