/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "logging/logConfiguration.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/symbol.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"
#include "utilities/ticks.hpp"

#define VM_OP_NAME_INITIALIZE(name) #name,

const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}

void VM_Operation::evaluate() {
  ResourceMark rm;
  LogTarget(Debug, vmoperation) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("begin ");
    print_on_error(&ls);
    ls.cr();
  }
  doit();
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("end ");
    print_on_error(&ls);
    ls.cr();
  }
}

// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
  st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
  st->print("%s", name());

  st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");

  if (calling_thread()) {
    st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
  }
}

void VM_ClearICs::doit() {
  if (_preserve_static_stubs) {
    CodeCache::cleanup_inline_caches_whitebox();
  } else {
    CodeCache::clear_inline_caches();
  }
}

void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}

void VM_RehashStringTable::doit() {
  StringTable::rehash_table();
}

void VM_RehashSymbolTable::doit() {
  SymbolTable::rehash_table();
}

VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id     = id;
  _reason = reason;
}


void VM_DeoptimizeFrame::doit() {
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}


#ifndef PRODUCT

void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // deoptimize all java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;
    int fnum = os::random() & 0x3;
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          for (StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}


void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}

#endif // !PRODUCT

bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}

void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}

void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}

VM_FindDeadlocks::~VM_FindDeadlocks() {
  if (_deadlocks != nullptr) {
    DeadlockCycle* cycle = _deadlocks;
    while (cycle != nullptr) {
      DeadlockCycle* d = cycle;
      cycle = cycle->next();
      delete d;
    }
  }
}

void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection and those are the
  // JavaThreads we need to be protected when we return info to the
  // originating thread.
  _setter.set();

  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = nullptr;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}

void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
// The JavaThread's owner key is either a JavaThread* or a stack lock
// address in the JavaThread so we use "void*".
//
class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
 private:
  static unsigned int ptr_hash(void* const& s1) {
    // 2654435761 = 2^32 * Phi (golden ratio)
    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
  }

 private:
  class ObjectMonitorLinkedList :
    public LinkedListImpl<ObjectMonitor*,
                          AnyObj::C_HEAP, mtThread,
                          AllocFailStrategy::RETURN_NULL> {};

  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  typedef ResourceHashtable<void*, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
                            &ObjectMonitorsDump::ptr_hash> PtrTable;
  PtrTable* _ptrs;
  size_t _key_count;
  size_t _om_count;

  void add_list(void* key, ObjectMonitorLinkedList* list) {
    _ptrs->put(key, list);
    _key_count++;
  }

  ObjectMonitorLinkedList* get_list(void* key) {
    ObjectMonitorLinkedList** listpp = _ptrs->get(key);
    return (listpp == nullptr) ? nullptr : *listpp;
  }

  void add(ObjectMonitor* monitor) {
    void* key = monitor->owner();

    ObjectMonitorLinkedList* list = get_list(key);
    if (list == nullptr) {
      // Create new list and add it to the hash table:
      list = new (mtThread) ObjectMonitorLinkedList;
      _ptrs->put(key, list);
      _key_count++;
    }

    assert(list->find(monitor) == nullptr, "Should not contain duplicates");
    list->add(monitor); // Add the ObjectMonitor to the list.
    _om_count++;
  }

 public:
  // ResourceHashtable is passed to various functions and populated in
  // different places so we allocate it using C_HEAP to make it immune
  // from any ResourceMarks that happen to be in the code paths.
  ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}

  ~ObjectMonitorsDump() {
    class CleanupObjectMonitorsDump: StackObj {
     public:
      bool do_entry(void*& key, ObjectMonitorLinkedList*& list) {
        list->clear(); // clear the LinkListNodes
        delete list;   // then delete the LinkedList
        return true;
      }
    } cleanup;

    _ptrs->unlink(&cleanup); // cleanup the LinkedLists
    delete _ptrs;            // then delete the hash table
  }

  // Implements MonitorClosure used to collect all owned monitors in the system
  void do_monitor(ObjectMonitor* monitor) override {
    assert(monitor->has_owner(), "Expects only owned monitors");

    if (monitor->is_owner_anonymous()) {
      // There's no need to collect anonymous owned monitors
      // because the caller of this code is only interested
      // in JNI owned monitors.
      return;
    }

    if (monitor->object_peek() == nullptr) {
      // JNI code doesn't necessarily keep the monitor object
      // alive. Filter out monitors with dead objects.
      return;
    }

    add(monitor);
  }

  // Implements the ObjectMonitorsView interface
  void visit(MonitorClosure* closure, JavaThread* thread) override {
    ObjectMonitorLinkedList* list = get_list(thread);
    LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
    while (!iter.is_empty()) {
      ObjectMonitor* monitor = *iter.next();
      closure->do_monitor(monitor);
    }
  }

  size_t key_count() { return _key_count; }
  size_t om_count() { return _om_count; }
};

void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump and those are the JavaThreads we need
  // to be protected when we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsDump object_monitors;
  if (_with_locked_monitors) {
    // Gather information about owned monitors.
    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);

    // If there are many object monitors in the system then the above iteration
    // can start to take time. Be friendly to following thread dumps by telling
    // the MonitorDeflationThread to deflate monitors.
    //
    // This is trying to be somewhat backwards compatible with the previous
    // implementation, which performed monitor deflation right here. We might
    // want to reconsider the need to trigger monitor deflation from the thread
    // dumping and instead maybe tweak the deflation heuristics.
    ObjectSynchronizer::request_deflate_idle_monitors();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // skip if the thread doesn't exist
        // Add a dummy snapshot
        _result->add_thread_snapshot();
        continue;
      }

      // Dump thread stack only if the thread is alive and not exiting
      // and not VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // add a null snapshot if skipped
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  }
}

void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsView* monitors) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
  snapshot->set_concurrent_locks(tcl);
}

volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;

int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true;  // global flag
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited);  // per-thread flag
    }
  }

  return num_active;
}

int VM_Exit::wait_for_threads_in_native_to_block() {
  // VM exits at safepoint. This function must be called at the final safepoint
  // to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();
  Monitor timer(Mutex::nosafepoint, "VM_ExitTimer_lock");

  // Compiler threads need longer wait because they can access VM data directly
  // while in native. If they are active and some structures being used are
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate VM when current thread is the only active thread, so
  // wait for user threads too. Numbers are in 10 milliseconds.
  int wait_time_per_attempt = 10;               // in milliseconds
  int max_wait_attempts_user_thread = UserThreadWaitAttemptsAtExit;
  int max_wait_attempts_compiler_thread = 1000; // at least 10 seconds

  int attempts = 0;
  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    if (num_active == 0) {
      return 0;
    } else if (attempts >= max_wait_attempts_compiler_thread) {
      return num_active;
    } else if (num_active_compiler_thread == 0 &&
               attempts >= max_wait_attempts_user_thread) {
      return num_active;
    }

    attempts++;

    MonitorLocker ml(&timer, Mutex::_no_safepoint_check_flag);
    ml.wait(wait_time_per_attempt);
  }
}

void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // The ObjectMonitor subsystem uses perf counters so do this before
  // we call exit_globals() so we don't run afoul of perfMemory_exit().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // cleanup globals resources before exiting. exit_globals() currently
  // cleans up outputStream resources and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for exit hook
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != nullptr) {
    // exit hook should exit.
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}


void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited &&
      Thread::current_or_null() != _shutdown_thread) {
    // _vm_exited is set at safepoint, and the Threads_lock is never released
    // so we will block here until the process dies.
    Threads_lock->lock();
    ShouldNotReachHere();
  }
}

void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}

#if INCLUDE_SERVICES
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}

void VM_PrintClassLayout::doit() {
  PrintClassLayout::print_class_layout(_out, _class_name);
}
#endif