/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

ThreadDumpResult* ThreadService::_threaddump_list = NULL;

static const int INITIAL_ARRAY_SIZE = 10;

// OopStorage for thread stack sampling
static OopStorage* _thread_service_storage = NULL;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on C heap.
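  // Note: with the JAVA_THREADS namespace, these counters surface to
  // jvmstat/PerfData consumers (e.g. "jcmd <pid> PerfCounter.print") as
  // java.threads.started, java.threads.live, java.threads.livePeak and
  // java.threads.daemon.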

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it

  // Initialize OopStorage for thread stack sampling walking
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
                                                         mtServiceability);
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
           "thread count mismatch %d : %d",
           (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
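  // Only ">=" relations hold below: another exiting thread may have already
  // decremented its atomic count in current_thread_exiting() while its
  // perf-counter decrement is still pending in remove_thread().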
  assert(_daemon_threads_count->get_value() >= daemon_count,
         "thread count mismatch %d : %d",
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
         "thread count mismatch %d : %d",
         (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
         (_live_threads_count->get_value() == 0 && count == 0 &&
          _daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
         (int)_live_threads_count->get_value(), count,
         (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
         (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
         "thread counts should reach 0 at the same time, daemon %d,%d",
         (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
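      // Note that object() may already return NULL here if the first stage
      // of async deflation has cleared it; callers must tolerate a NULL
      // result in the returned Handle.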
      obj = enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump stack trace of threads specified in the given threads array.
// Returns StackTraceElement[][]; each element is the stack trace of a thread in
// the corresponding entry in the given threads array.
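// Typically reached via JVM_DumpThreads, i.e. the VM side of
// java.lang.Thread.getStackTrace()/getAllStackTraces() for a set of threads.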
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
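//
// The search below is a depth-first walk of the thread "waits-for" graph:
// each JavaThread gets a depth-first number (DFN) when first visited, and
// from every not-yet-visited thread we repeatedly step to the owner of
// whatever it is blocked on (JVM TI raw monitor, ObjectMonitor, or an
// AbstractOwnableSynchronizer park blocker). A chain ends when there is no
// owner, when it reaches a thread numbered in an earlier walk (DFN less than
// thisDfn), or when it trivially loops back onto the previous thread. If it
// reaches a thread numbered during the current walk, the chain collected so
// far is recorded as a DeadlockCycle; an ObjectMonitor owner that cannot be
// found is recorded as well, since the blocked thread can never make
// progress.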
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
        }
      } else if (waitingToLockMonitor != NULL) {
        address currentOwner = (address)waitingToLockMonitor->owner();
        if (currentOwner != NULL) {
          currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                    currentOwner);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            cycle->set_deadlock(true);

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ?
                              java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}
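
// The snapshot classes below (StackFrameInfo, ThreadStackTrace,
// ThreadConcurrentLocks, ThreadSnapshot) keep every captured oop in an
// OopHandle allocated from _thread_service_storage, the strong OopStorage
// created in ThreadService::init(). That keeps the referenced objects alive
// and lets the GC update the pointers when a dump result outlives the
// safepoint; each destructor releases its handles back to that storage.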
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = NULL;
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  if (_locked_monitors != NULL) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
 private:
  ThreadStackTrace* _stack_trace;
 public:
  InflatedMonitorsClosure(ThreadStackTrace* st) {
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    oop object = mid->object();
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
      _stack_trace->add_jni_locked_monitor(object);
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = NULL;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // not found in the stack
    InflatedMonitorsClosure imc(this);
    ObjectSynchronizer::monitors_iterate(&imc, _thread);
  }
}


bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;) {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr(" Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(_thread_service_storage);
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(_thread_service_storage, o));
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}
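
// A ThreadSnapshot captures the state reported through the management API
// (ThreadInfo): thread status, contention/wait/sleep statistics, the object
// the thread is currently blocked on, and that object's owner. It is
// initialized against the ThreadsList held by the enclosing ThreadDumpResult.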
oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }

void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be NULL.
  _thread_status = threadObj == NULL ? JavaThreadStatus::NEW
                                     : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = NULL;
  oop blocker_object_owner = NULL;

  if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // ownership information of the monitor is not available
        // (may no longer be owned or releasing to some other thread)
        // make this thread in RUNNABLE state.
        // And when the owner thread is in attaching state, the java thread
        // is not completely initialized. For example, thread name and id
        // may not be set, so hide the attaching thread.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = NULL;
      } else if (owner != NULL) {
        blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != NULL && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != NULL) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != NULL) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);
  _threadObj.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _is_deadlock = false;
  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}
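
// Prints a report of the following shape (illustrative sketch; the addresses,
// class and thread names here are examples only):
//
//   Found one Java-level deadlock:
//   =============================
//   "Thread-1":
//     waiting to lock monitor 0x00007f08280028c0 (object 0x000000071a0ac100, a java.lang.Object),
//     which is held by "Thread-0"
//   "Thread-0":
//     waiting to lock monitor 0x00007f0828005a00 (object 0x000000071a0ac110, a java.lang.Object),
//     which is held by "Thread-1"
//
// followed by "Java stack information for the threads listed above".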
void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    const char* owner_desc = ",\n which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != NULL) {
      st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != NULL) {
        if (owner->is_Java_thread()) {
          currentThread = owner->as_Java_thread();
          st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
        } else {
          st->print_cr(",\n which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != NULL) {
      st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                (address)waitingToLockMonitor->owner());
      if (currentThread == NULL) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                     p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // skips JavaThreads in the process of exiting
    // and also skips VM internal JavaThreads
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == NULL ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj()) ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}