/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "logging/logConfiguration.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/symbol.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"
#include "utilities/ticks.hpp"

// Expands each VM op name (from VM_OPS_DO) into a string literal for _names[].
#define VM_OP_NAME_INITIALIZE(name) #name,

const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

// Remember which thread requested this VM operation (reported by
// print_on_error() and used by operations such as VM_ZombieAll).
void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}

// Run the operation's doit(), bracketed by "begin"/"end" records on the
// vmoperation log tag at Debug level.
void VM_Operation::evaluate() {
  ResourceMark rm;
  LogTarget(Debug, vmoperation) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("begin ");
    print_on_error(&ls);
    ls.cr();
  }
  doit();
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("end ");
    print_on_error(&ls);
    ls.cr();
  }
}

// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
  st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
  st->print("%s", name());

  st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");

  if (calling_thread()) {
    st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
  }
}

// Clear inline caches in the code cache; the whitebox variant is the
// cleanup path used when static stubs must be preserved.
void VM_ClearICs::doit() {
  if (_preserve_static_stubs) {
    CodeCache::cleanup_inline_caches_whitebox();
  } else {
    CodeCache::clear_inline_caches();
  }
}

void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}

// Captures the target thread, the frame id and the (int-encoded)
// Deoptimization::DeoptReason for the deopt performed in doit().
VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id = id;
  _reason = reason;
}


void VM_DeoptimizeFrame::doit() {
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}


#ifndef PRODUCT

// Debug-only stress operation driven by the DeoptimizeALot / DeoptimizeRandom
// flags: deoptimizes either every Java thread or a random selection of
// threads and frames.
void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // deoptimize all java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;
    int fnum = os::random() & 0x3;
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          for (StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}


void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}

#endif // !PRODUCT

bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}

void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}

void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}

// Free the DeadlockCycle list produced by doit().
VM_FindDeadlocks::~VM_FindDeadlocks() {
  if (_deadlocks != nullptr) {
    DeadlockCycle* cycle = _deadlocks;
    while (cycle != nullptr) {
      DeadlockCycle* d = cycle;
      cycle = cycle->next();
      delete d;
    }
  }
}

void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection and those are the
  // JavaThreads we need to be protected when we return info to the
  // originating thread.
  _setter.set();

  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = nullptr;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}

void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
// The JavaThread's owner key is either a JavaThread* or a stack lock
// address in the JavaThread so we use "void*".
//
class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
 private:
  static unsigned int ptr_hash(void* const& s1) {
    // 2654435761 = 2^32 * Phi (golden ratio)
    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
  }

 private:
  class ObjectMonitorLinkedList :
    public LinkedListImpl<ObjectMonitor*,
                          AnyObj::C_HEAP, mtThread,
                          AllocFailStrategy::RETURN_NULL> {};

  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  typedef ResourceHashtable<void*, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
                            &ObjectMonitorsDump::ptr_hash> PtrTable;
  PtrTable* _ptrs;            // hash table keyed by monitor owner (void*)
  size_t _key_count;          // number of distinct owner keys added
  size_t _om_count;           // total number of ObjectMonitors collected

  void add_list(void* key, ObjectMonitorLinkedList* list) {
    _ptrs->put(key, list);
    _key_count++;
  }

  ObjectMonitorLinkedList* get_list(void* key) {
    ObjectMonitorLinkedList** listpp = _ptrs->get(key);
    return (listpp == nullptr) ? nullptr : *listpp;
  }

  // Add a monitor under its owner's key, creating the per-owner list on
  // first use.
  void add(ObjectMonitor* monitor) {
    void* key = monitor->owner();

    ObjectMonitorLinkedList* list = get_list(key);
    if (list == nullptr) {
      // Create new list and add it to the hash table:
      list = new (mtThread) ObjectMonitorLinkedList;
      // NOTE(review): this inlines the body of add_list() above — the two
      // should probably share one path; confirm before changing.
      _ptrs->put(key, list);
      _key_count++;
    }

    assert(list->find(monitor) == nullptr, "Should not contain duplicates");
    list->add(monitor);  // Add the ObjectMonitor to the list.
    _om_count++;
  }

 public:
  // ResourceHashtable is passed to various functions and populated in
  // different places so we allocate it using C_HEAP to make it immune
  // from any ResourceMarks that happen to be in the code paths.
  ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}

  ~ObjectMonitorsDump() {
    class CleanupObjectMonitorsDump: StackObj {
     public:
      bool do_entry(void*& key, ObjectMonitorLinkedList*& list) {
        list->clear(); // clear the LinkListNodes
        delete list;   // then delete the LinkedList
        return true;
      }
    } cleanup;

    _ptrs->unlink(&cleanup); // cleanup the LinkedLists
    delete _ptrs;            // then delete the hash table
  }

  // Implements MonitorClosure used to collect all owned monitors in the system
  void do_monitor(ObjectMonitor* monitor) override {
    assert(monitor->has_owner(), "Expects only owned monitors");

    if (monitor->is_owner_anonymous()) {
      // There's no need to collect anonymous owned monitors
      // because the caller of this code is only interested
      // in JNI owned monitors.
      return;
    }

    if (monitor->object_peek() == nullptr) {
      // JNI code doesn't necessarily keep the monitor object
      // alive. Filter out monitors with dead objects.
      return;
    }

    add(monitor);
  }

  // Implements the ObjectMonitorsView interface
  void visit(MonitorClosure* closure, JavaThread* thread) override {
    ObjectMonitorLinkedList* list = get_list(thread);
    LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
    while (!iter.is_empty()) {
      ObjectMonitor* monitor = *iter.next();
      closure->do_monitor(monitor);
    }
  }

  size_t key_count() { return _key_count; }
  size_t om_count() { return _om_count; }
};

void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump and those are the JavaThreads we need
  // to be protected when we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsDump object_monitors;
  if (_with_locked_monitors) {
    // Gather information about owned monitors.
    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);

    // If there are many object monitors in the system then the above iteration
    // can start to take time. Be friendly to following thread dumps by telling
    // the MonitorDeflationThread to deflate monitors.
    //
    // This is trying to be somewhat backwards compatible with the previous
    // implementation, which performed monitor deflation right here. We might
    // want to reconsider the need to trigger monitor deflation from the thread
    // dumping and instead maybe tweak the deflation heuristics.
    ObjectSynchronizer::request_deflate_idle_monitors();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // skip if the thread doesn't exist
        // Add a dummy snapshot
        _result->add_thread_snapshot();
        continue;
      }

      // Dump thread stack only if the thread is alive and not exiting
      // and not VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // add a null snapshot if skipped
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  }
}

// Record one thread's stack (to _max_depth) plus its concurrent locks into
// the result set.
void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsView* monitors) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
  snapshot->set_concurrent_locks(tcl);
}

volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;

// Mark the VM as exited (global flag plus per-thread flags) and return the
// number of other threads still in _thread_in_native state.
int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true; // global flag
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited); // per-thread flag
    }
  }

  return num_active;
}

int VM_Exit::wait_for_threads_in_native_to_block() {
  // VM exits at safepoint. This function must be called at the final safepoint
  // to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();
  Monitor timer(Mutex::nosafepoint, "VM_ExitTimer_lock");

  // Compiler threads need longer wait because they can access VM data directly
  // while in native. If they are active and some structures being used are
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate VM when current thread is the only active thread, so
  // wait for user threads too. Numbers are in 10 milliseconds.
  int wait_time_per_attempt = 10; // in milliseconds
  int max_wait_attempts_user_thread = UserThreadWaitAttemptsAtExit;
  int max_wait_attempts_compiler_thread = 1000; // at least 10 seconds

  int attempts = 0;
  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr!=thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    if (num_active == 0) {
      return 0;
    } else if (attempts >= max_wait_attempts_compiler_thread) {
      return num_active;
    } else if (num_active_compiler_thread == 0 &&
               attempts >= max_wait_attempts_user_thread) {
      return num_active;
    }

    attempts++;

    MonitorLocker ml(&timer, Mutex::_no_safepoint_check_flag);
    ml.wait(wait_time_per_attempt);
  }
}

void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // The ObjectMonitor subsystem uses perf counters so do this before
  // we call exit_globals() so we don't run afoul of perfMemory_exit().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // cleanup globals resources before exiting. exit_globals() currently
  // cleans up outputStream resources and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for exit hook
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != nullptr) {
    // exit hook should exit.
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}


void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited &&
      Thread::current_or_null() != _shutdown_thread) {
    // _vm_exited is set at safepoint, and the Threads_lock is never released
    // so we will block here until the process dies.
    Threads_lock->lock();
    ShouldNotReachHere();
  }
}

void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}

#if INCLUDE_SERVICES
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}
#endif