/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = nullptr;
PerfVariable* ThreadService::_live_threads_count = nullptr;
PerfVariable* ThreadService::_peak_threads_count = nullptr;
PerfVariable* ThreadService::_daemon_threads_count = nullptr;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

volatile jlong ThreadService::_exited_allocated_bytes = 0;

ThreadDumpResult* ThreadService::_threaddump_list = nullptr;

static const int INITIAL_ARRAY_SIZE = 10;

// OopStorage for thread stack sampling
static OopStorage* _thread_service_storage = nullptr;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it

  // Initialize the OopStorage used for thread stack sampling
  _thread_service_storage = OopStorageSet::create_strong("ThreadService OopStorage",
                                                         mtServiceability);
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

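// Decrement the lock-free atomic thread counts. Called from remove_thread()
// (with the Threads_lock held) and from current_thread_exiting(); the
// corresponding perf counters are only decremented in remove_thread().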
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Include hidden thread allocations in exited_allocated_bytes
  ThreadService::incr_exited_allocated_bytes(thread->cooked_allocated_bytes());

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // We did not get here via JavaThread::exit() so current_thread_exiting()
    // was not called, e.g., JavaThread::cleanup_failed_attach_current_thread().
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
      "thread count mismatch %d : %d",
      (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
    "thread count mismatch %d : %d",
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
    (_live_threads_count->get_value() == 0 && count == 0 &&
    _daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
    (int)_live_threads_count->get_value(), count,
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
    (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, daemon %d,%d",
    (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != nullptr, "should be non-null");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = nullptr;
  if (wait_obj != nullptr) {
    // thread is doing an Object.wait() call
    obj = wait_obj->object();
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != nullptr) {
      // thread is trying to enter() an ObjectMonitor.
      obj = enter_obj->object();
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != nullptr; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == nullptr) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = nullptr;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != nullptr; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == nullptr) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump stack traces of the threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != nullptr; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == nullptr) {
      // No stack trace
      result_obj->obj_at_put(i, nullptr);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != nullptr) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != nullptr) {
    stat->reset_time_stat();
  }
}

bool ThreadService::is_virtual_or_carrier_thread(JavaThread* jt) {
  oop threadObj = jt->threadObj();
  if (threadObj != nullptr && threadObj->is_a(vmClasses::BaseVirtualThread_klass())) {
    // a virtual thread backed by JavaThread
    return true;
  }
  if (jt->is_vthread_mounted()) {
    // carrier thread
    return true;
  }
  return false;
}

// Find deadlocks involving raw monitors, object monitors, and, if
// concurrent_locks is true, concurrent locks.
// We skip virtual thread carriers under the assumption that the current scheduler,
// ForkJoinPool, doesn't hold any locks while mounting a virtual thread. Thus any
// owned monitor (or j.u.c. lock, for that matter) on that JavaThread must be owned
// by the virtual thread, and we don't support deadlock detection for virtual threads.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = nullptr;
  JvmtiRawMonitor* waitingToLockRawMonitor = nullptr;
  oop waitingToLockBlocker = nullptr;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != nullptr; jt = jti.next()) {
    if (!is_virtual_or_carrier_thread(jt)) {
      jt->set_depth_first_number(-1);
    }
  }

  DeadlockCycle* deadlocks = nullptr;
  DeadlockCycle* last = nullptr;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != nullptr; jt = jti.next()) {
    if (is_virtual_or_carrier_thread(jt)) {
      // skip virtual and carrier threads
      continue;
    }
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != nullptr ||
           waitingToLockRawMonitor != nullptr ||
           waitingToLockBlocker != nullptr) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != nullptr) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != nullptr && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          currentThread = JavaThread::cast(owner);
        }
      } else if (waitingToLockMonitor != nullptr) {
        if (waitingToLockMonitor->has_owner()) {
          currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
          if (currentThread == nullptr) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            // add this cycle to the deadlocks list
            if (deadlocks == nullptr) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != nullptr ? java_lang_Thread::thread(threadObj) : nullptr;
          } else {
            currentThread = nullptr;
          }
        }
      }

      if (currentThread == nullptr || is_virtual_or_carrier_thread(currentThread)) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        // add this cycle to the deadlocks list
        if (deadlocks == nullptr) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(nullptr), _last(nullptr), _next(nullptr), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(nullptr), _last(nullptr), _next(nullptr), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != nullptr) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

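// Append the snapshot to the singly-linked list of snapshots held by this
// ThreadDumpResult; _last tracks the tail so appends are constant time.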
void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == nullptr) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != nullptr; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = OopHandle(_thread_service_storage, _method->method_holder()->klass_holder());
  _locked_monitors = nullptr;
  if (with_lock_info) {
    Thread* current_thread = Thread::current();
    ResourceMark rm(current_thread);
    HandleMark hm(current_thread);
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != nullptr, "This monitor must have an owning object");
        _locked_monitors->append(OopHandle(_thread_service_storage, monitor->owner()));
      }
    }
  }
}

StackFrameInfo::~StackFrameInfo() {
  if (_locked_monitors != nullptr) {
    for (int i = 0; i < _locked_monitors->length(); i++) {
      _locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _locked_monitors;
  }
  _class_holder.release(_thread_service_storage);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != nullptr ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i).resolve();
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }
}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
public:
  InflatedMonitorsClosure(ThreadStackTrace* st) {
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    oop object = mid->object();
    if (!_stack_trace->is_owned_monitor_on_stack(object)) {
      _stack_trace->add_jni_locked_monitor(object);
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  } else {
    _jni_locked_monitors = nullptr;
  }
}

void ThreadStackTrace::add_jni_locked_monitor(oop object) {
  _jni_locked_monitors->append(OopHandle(_thread_service_storage, object));
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != nullptr) {
    for (int i = 0; i < _jni_locked_monitors->length(); i++) {
      _jni_locked_monitors->at(i).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHashtable* table, bool full) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread,
                        RegisterMap::UpdateMap::include,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);

    // If full, we want to print both vthread and carrier frames
    vframe* start_vf = !full && _thread->is_vthread_mounted()
      ? _thread->carrier_last_java_vframe(&reg_map)
      : _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender()) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (!full && f->is_vthread_entry()) {
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // that are not found in the stack, e.g. JNI locked monitors:
    InflatedMonitorsClosure imc(this);
    if (table != nullptr) {
      // Get the ObjectMonitors locked by the target thread, if any;
      // this does not include any where the owner is set to a stack
      // lock address in the target thread:
      ObjectMonitorsHashtable::PtrList* list = table->get_entry(_thread);
      if (list != nullptr) {
        ObjectSynchronizer::monitors_iterate(&imc, list, _thread);
      }
    } else {
      ObjectSynchronizer::monitors_iterate(&imc, _thread);
    }
  }
}


bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != nullptr, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = vmClasses::StackTraceElement_klass();
  assert(ik != nullptr, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement object
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != nullptr;) {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != nullptr) {
      // See comments in ThreadConcurrentLocks to see how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != nullptr) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == nullptr) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

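// Linear search of the dump's map for the entry describing the locks owned by
// the given thread; returns null if no owned ownable synchronizer was recorded.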
ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != nullptr; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return nullptr;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<OopHandle>* locks = (tcl != nullptr ? tcl->owned_locks() : nullptr);
  if (locks == nullptr || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    oop obj = locks->at(i).resolve();
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = nullptr;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  for (int i = 0; i < _owned_locks->length(); i++) {
    _owned_locks->at(i).release(_thread_service_storage);
  }
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(OopHandle(_thread_service_storage, o));
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }

void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  oop threadObj = thread->threadObj();
  _threadObj = OopHandle(_thread_service_storage, threadObj);

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be null.
  _thread_status = threadObj == nullptr ? JavaThreadStatus::NEW
                                        : java_lang_Thread::get_thread_status(threadObj);

  _is_suspended = thread->is_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  Handle obj = ThreadService::get_current_contended_monitor(thread);

  oop blocker_object = nullptr;
  oop blocker_object_owner = nullptr;

  if (thread->is_vthread_mounted() && thread->vthread() != threadObj) { // ThreadSnapshot only captures platform threads
    _thread_status = JavaThreadStatus::IN_OBJECT_WAIT;
    oop vthread = thread->vthread();
    assert(vthread != nullptr, "");
    blocker_object = vthread;
    blocker_object_owner = vthread;
  } else if (_thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT ||
      _thread_status == JavaThreadStatus::IN_OBJECT_WAIT_TIMED) {

    if (obj() == nullptr) {
      // monitor no longer exists; thread is not blocked
      _thread_status = JavaThreadStatus::RUNNABLE;
    } else {
      blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == nullptr && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
          || (owner != nullptr && owner->is_attaching_via_jni())) {
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or may be being released to some other thread), so
        // report this thread as RUNNABLE. When the owner thread is still
        // attaching, the Java thread is not completely initialized; for
        // example, its name and id may not be set yet, so hide the attaching
        // thread as well.
        _thread_status = JavaThreadStatus::RUNNABLE;
        blocker_object = nullptr;
      } else if (owner != nullptr) {
        blocker_object_owner = owner->threadObj();
      }
    }
  } else if (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
    blocker_object = thread->current_park_blocker();
    if (blocker_object != nullptr && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
    }
  }

  if (blocker_object != nullptr) {
    _blocker_object = OopHandle(_thread_service_storage, blocker_object);
  }
  if (blocker_object_owner != nullptr) {
    _blocker_object_owner = OopHandle(_thread_service_storage, blocker_object_owner);
  }
}

oop ThreadSnapshot::blocker_object() const           { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const     { return _blocker_object_owner.resolve(); }

ThreadSnapshot::~ThreadSnapshot() {
  _blocker_object.release(_thread_service_storage);
  _blocker_object_owner.release(_thread_service_storage);
  _threadObj.release(_thread_service_storage);

  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
                                             ObjectMonitorsHashtable* table, bool full) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth, table, full);
}


void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != nullptr) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _threads = new (mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
  _next = nullptr;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}

void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->name());
    const char* owner_desc = ",\n  which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != nullptr) {
      st->print("  waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be null as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != nullptr) {
        if (owner->is_Java_thread()) {
          currentThread = JavaThread::cast(owner);
          st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
        } else {
          st->print_cr(",\n  which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != nullptr) {
      st->print("  waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n  in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor);
      if (currentThread == nullptr) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                     p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != nullptr, "AbstractOwnableSynchronizer owning thread is unexpectedly null");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads,
                                             bool include_bound_virtual_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Skip JavaThreads in the process of exiting
    // and also VM internal JavaThreads.
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == nullptr ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj()) ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    // skip instances of BoundVirtualThread
    if (!include_bound_virtual_threads && jt->threadObj()->is_a(vmClasses::BoundVirtualThread_klass())) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}