/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

ThreadDumpResult* ThreadService::_threaddump_list = NULL;

static const int INITIAL_ARRAY_SIZE = 10;

// OopStorage for thread stack sampling
static OopStorage* _thread_service_storage = NULL;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it

  // Initialize OopStorage for thread stack sampling walking
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
                                                         mtServiceability);
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
           "thread count mismatch %d : %d",
           (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
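  // The asserts below are sanity checks: after the decrements above, each perf
  // counter must still be at or above the sampled atomic count, and the live
  // and daemon counts may only reach zero together.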
  assert(_daemon_threads_count->get_value() >= daemon_count,
         "thread count mismatch %d : %d",
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
         (_live_threads_count->get_value() == 0 && count == 0 &&
          _daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
         (int)_live_threads_count->get_value(), count,
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
         (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, daemon %d,%d",
         (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump stack trace of threads specified in the given threads array.
// Returns StackTraceElement[][]; each element is the stack trace of a thread in
// the corresponding entry in the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
        }
      } else if (waitingToLockMonitor != NULL) {
        if (waitingToLockMonitor->has_owner()) {
          currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ?
                              java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = NULL;
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  if (_locked_monitors != NULL) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
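  // Print each object locked in this frame as "- locked <address> (a ClassName)".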
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
public:
  InflatedMonitorsClosure(ThreadStackTrace* st) {
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    oop object = mid->object();
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
      _stack_trace->add_jni_locked_monitor(object);
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // not found in the stack
    InflatedMonitorsClosure imc(this);
    ObjectSynchronizer::monitors_iterate(&imc, _thread);
  }
}


bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;) {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr(" Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
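  // A thread with no entry in the dump map owns no ownable synchronizers.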
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(_thread_service_storage);
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(_thread_service_storage, o));
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }

void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be NULL.
  _thread_status = threadObj == NULL ? JavaThreadStatus::NEW
                                     : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = NULL;
  oop blocker_object_owner = NULL;

  if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // ownership information of the monitor is not available
        // (may no longer be owned or releasing to some other thread)
        // make this thread in RUNNABLE state.
        // And when the owner thread is in attaching state, the java thread
        // is not completely initialized. For example, the thread name and id
        // may not be set yet, so hide the attaching thread.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = NULL;
      } else if (owner != NULL) {
        blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != NULL && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != NULL) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != NULL) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);
  _threadObj.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _is_deadlock = false;
  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}

void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    const char* owner_desc = ",\n which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != NULL) {
      st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != NULL) {
        if (owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
          st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
        } else {
          st->print_cr(",\n which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != NULL) {
      st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
      if (currentThread == NULL) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                     p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // skips JavaThreads in the process of exiting
    // and also skips VM internal JavaThreads
    // Threads in _thread_new or _thread_new_trans state are included.
    // i.e. threads have been started but not yet running.
    if (jt->threadObj() == NULL ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj()) ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}