/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

ThreadDumpResult* ThreadService::_threaddump_list = NULL;

static const int INITIAL_ARRAY_SIZE = 10;

// OopStorage for thread stack sampling
static OopStorage* _thread_service_storage = NULL;
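
// Create the perf counters used for java.lang.management thread monitoring
// and the OopStorage that keeps the oops referenced by thread snapshots
// (thread objects, blocker objects, locked monitors) alive across GC.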
void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it

  // Initialize OopStorage for thread stack sampling walking
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
                                                         mtServiceability);
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
           "thread count mismatch %d : %d",
           (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
         "thread count mismatch %d : %d",
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
         (_live_threads_count->get_value() == 0 && count == 0 &&
          _daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
         (int)_live_threads_count->get_value(), count,
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
         (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, daemon %d,%d",
         (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump stack trace of threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
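// Deadlock detection walks the thread dependency graph: each blocked thread
// points at the thread that owns the monitor, JVM TI raw monitor, or ownable
// synchronizer it is waiting for. A chain that loops back into the current
// depth-first pass (see depth_first_number) is recorded as a deadlock cycle.
// Only called at a safepoint, so monitor ownership cannot change underneath us.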
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
        }
      } else if (waitingToLockMonitor != NULL) {
        if (waitingToLockMonitor->has_owner()) {
          currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            cycle->set_deadlock(true);

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ?
                              java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = NULL;
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  if (_locked_monitors != NULL) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ?
             _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
public:
  InflatedMonitorsClosure(ThreadStackTrace* st) {
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    oop object = mid->object();
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
      _stack_trace->add_jni_locked_monitor(object);
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // not found in the stack
    InflatedMonitorsClosure imc(this);
    ObjectSynchronizer::monitors_iterate(&imc, _thread);
  }
}


bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;) {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr(" Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != NULL ?
                                     tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(_thread_service_storage);
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(_thread_service_storage, o));
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }
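
// Record the state of 'thread' into this snapshot: its status, contention
// statistics, and the object (and owning thread) it is blocked on, if any.
// The oops are stored in OopHandles backed by _thread_service_storage so
// they remain valid if a GC happens while the snapshot is alive.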
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be NULL.
  _thread_status = threadObj == NULL ? JavaThreadStatus::NEW
                                     : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = NULL;
  oop blocker_object_owner = NULL;

  if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or may be in the process of being released to
        // another thread), so report this thread as RUNNABLE.
        // When the owner thread is still attaching, the java.lang.Thread
        // object is not completely initialized (for example, the thread name
        // and id may not be set yet), so hide the attaching thread.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = NULL;
      } else if (owner != NULL) {
        blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != NULL && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != NULL) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != NULL) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const       { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);
  _threadObj.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _is_deadlock = false;
  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}
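
// Print this deadlock cycle: for each thread, show what it is waiting to
// lock (ObjectMonitor, JVM TI raw monitor, or ownable synchronizer) and
// which thread holds it, then print the Java stacks of all threads involved.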
void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    const char* owner_desc = ",\n which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != NULL) {
      st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != NULL) {
        if (owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
          st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
        } else {
          st->print_cr(",\n which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != NULL) {
      st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
      if (currentThread == NULL) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                     p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}
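
// Collect handles to the java.lang.Thread objects of all live, externally
// visible JavaThreads. JVM TI agent threads and threads still attaching via
// JNI are only included if the corresponding flag requests them.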
ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // skips JavaThreads in the process of exiting
    // and also skips VM internal JavaThreads
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == NULL ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj()) ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}