1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "cds/aotLinkedClassBulkLoader.hpp" 26 #include "cds/cds_globals.hpp" 27 #include "cds/cdsConfig.hpp" 28 #include "cds/classListWriter.hpp" 29 #include "cds/dynamicArchive.hpp" 30 #include "cds/methodProfiler.hpp" 31 #include "cds/metaspaceShared.hpp" 32 #include "classfile/classLoader.hpp" 33 #include "classfile/classLoaderDataGraph.hpp" 34 #include "classfile/javaClasses.hpp" 35 #include "classfile/stringTable.hpp" 36 #include "classfile/symbolTable.hpp" 37 #include "classfile/systemDictionary.hpp" 38 #include "code/codeCache.hpp" 39 #include "code/aotCodeCache.hpp" 40 #include "compiler/compilationMemoryStatistic.hpp" 41 #include "compiler/compilationPolicy.hpp" 42 #include "compiler/compileBroker.hpp" 43 #include "compiler/compilerOracle.hpp" 44 #include "gc/shared/collectedHeap.hpp" 45 #include "gc/shared/stringdedup/stringDedup.hpp" 46 #include "interpreter/bytecodeHistogram.hpp" 47 #include "interpreter/interpreterRuntime.hpp" 48 #include "jfr/jfrEvents.hpp" 49 #include "jfr/support/jfrThreadId.hpp" 50 #include "jvm.h" 51 #include "logging/log.hpp" 52 #include "logging/logStream.hpp" 53 #include "memory/metaspaceUtils.hpp" 54 #include "memory/oopFactory.hpp" 55 #include "memory/resourceArea.hpp" 56 #include "memory/universe.hpp" 57 #include "nmt/memMapPrinter.hpp" 58 #include "nmt/memTracker.hpp" 59 #include "oops/constantPool.hpp" 60 #include "oops/generateOopMap.hpp" 61 #include "oops/instanceKlass.hpp" 62 #include "oops/instanceOop.hpp" 63 #include "oops/klassVtable.hpp" 64 #include "oops/method.inline.hpp" 65 #include "oops/objArrayOop.hpp" 66 #include "oops/oop.inline.hpp" 67 #include "oops/symbol.hpp" 68 #include "oops/trainingData.hpp" 69 #include "prims/jvmtiAgentList.hpp" 70 #include "prims/jvmtiExport.hpp" 71 #include "prims/methodHandles.hpp" 72 #include "runtime/continuation.hpp" 73 #include "runtime/deoptimization.hpp" 74 #include "runtime/flags/flagSetting.hpp" 75 #include "runtime/globals_extension.hpp" 76 #include "runtime/handles.inline.hpp" 77 #include "runtime/init.hpp" 78 #include "runtime/interfaceSupport.inline.hpp" 79 #include "runtime/java.hpp" 80 #include "runtime/javaThread.hpp" 81 #include "runtime/sharedRuntime.hpp" 82 #include "runtime/stubRoutines.hpp" 83 #include "runtime/task.hpp" 84 #include "runtime/threads.hpp" 85 #include "runtime/timer.hpp" 86 #include "runtime/trimNativeHeap.hpp" 87 #include "runtime/vmOperations.hpp" 88 #include "runtime/vmThread.hpp" 89 #include "runtime/vm_version.hpp" 90 #include "sanitizers/leak.hpp" 91 #include "utilities/dtrace.hpp" 92 #include "utilities/events.hpp" 93 #include "utilities/globalDefinitions.hpp" 94 #include "utilities/macros.hpp" 95 #include "utilities/vmError.hpp" 96 #ifdef COMPILER1 97 #include "c1/c1_Compiler.hpp" 98 #include "c1/c1_Runtime1.hpp" 99 #endif 100 #ifdef COMPILER2 101 #include "code/compiledIC.hpp" 102 #include "opto/compile.hpp" 103 #include "opto/indexSet.hpp" 104 #include "opto/runtime.hpp" 105 #endif 106 #if INCLUDE_JFR 107 #include "jfr/jfr.hpp" 108 #endif 109 #if INCLUDE_JVMCI 110 #include "jvmci/jvmci.hpp" 111 #endif 112 113 GrowableArray<Method*>* collected_profiled_methods; 114 115 static int compare_methods(Method** a, Method** b) { 116 // compiled_invocation_count() returns int64_t, forcing the entire expression 117 // to be evaluated as int64_t. Overflow is not an issue. 118 int64_t diff = (((*b)->invocation_count() + (*b)->compiled_invocation_count()) 119 - ((*a)->invocation_count() + (*a)->compiled_invocation_count())); 120 return (diff < 0) ? -1 : (diff > 0) ? 1 : 0; 121 } 122 123 static void collect_profiled_methods(Method* m) { 124 Thread* thread = Thread::current(); 125 methodHandle mh(thread, m); 126 if ((m->method_data() != nullptr) && 127 (PrintMethodData || CompilerOracle::should_print(mh))) { 128 collected_profiled_methods->push(m); 129 } 130 } 131 132 static void print_method_profiling_data() { 133 if ((ProfileInterpreter COMPILER1_PRESENT(|| C1UpdateMethodData)) && 134 (PrintMethodData || CompilerOracle::should_print_methods())) { 135 ResourceMark rm; 136 collected_profiled_methods = new GrowableArray<Method*>(1024); 137 SystemDictionary::methods_do(collect_profiled_methods); 138 collected_profiled_methods->sort(&compare_methods); 139 140 int count = collected_profiled_methods->length(); 141 int total_size = 0; 142 if (count > 0) { 143 for (int index = 0; index < count; index++) { 144 Method* m = collected_profiled_methods->at(index); 145 146 // Instead of taking tty lock, we collect all lines into a string stream 147 // and then print them all at once. 148 ResourceMark rm2; 149 stringStream ss; 150 151 ss.print_cr("------------------------------------------------------------------------"); 152 m->print_invocation_count(&ss); 153 ss.print_cr(" mdo size: %d bytes", m->method_data()->size_in_bytes()); 154 ss.cr(); 155 // Dump data on parameters if any 156 if (m->method_data() != nullptr && m->method_data()->parameters_type_data() != nullptr) { 157 ss.fill_to(2); 158 m->method_data()->parameters_type_data()->print_data_on(&ss); 159 } 160 m->print_codes_on(&ss); 161 tty->print("%s", ss.as_string()); // print all at once 162 total_size += m->method_data()->size_in_bytes(); 163 } 164 tty->print_cr("------------------------------------------------------------------------"); 165 tty->print_cr("Total MDO size: %d bytes", total_size); 166 } 167 } 168 } 169 170 void perf_jvm_print_on(outputStream* st); 171 172 void log_vm_init_stats() { 173 LogStreamHandle(Info, init) log; 174 if (log.is_enabled()) { 175 SharedRuntime::print_counters_on(&log); 176 ClassLoader::print_counters(&log); 177 AOTLinkedClassBulkLoader::print_counters_on(&log); 178 log.cr(); 179 // FIXME: intermittent crashes 180 // if (CountBytecodesPerThread) { 181 // log.print_cr("Thread info:"); 182 // class PrintThreadInfo : public ThreadClosure { 183 // outputStream* _st; 184 // public: 185 // PrintThreadInfo(outputStream* st) : ThreadClosure(), _st(st) {} 186 // void do_thread(Thread* thread) { 187 // JavaThread* jt = JavaThread::cast(thread); 188 // if (jt->bc_counter_value() > 0) { 189 // _st->print_cr(" Thread " INTPTR_FORMAT ": %ld bytecodes executed (clinit: %ld)", 190 // p2i(jt), /*jt->name(),*/ jt->bc_counter_value(), jt->clinit_bc_counter_value()); 191 // } 192 // } 193 // }; 194 // PrintThreadInfo cl(&log); 195 // Threads::java_threads_do(&cl); 196 // } 197 // log.cr(); 198 log.print_cr("Deoptimization events: "); 199 Deoptimization::print_statistics_on(&log); 200 log.cr(); 201 202 log.print("Compilation statistics: "); 203 CompileBroker::print_statistics_on(&log); 204 log.cr(); 205 206 { 207 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 208 log.print("Code cache statistics: "); 209 CodeCache::print_nmethod_statistics_on(&log); 210 log.cr(); 211 } 212 213 if (AOTCodeCache::is_on_for_use()) { 214 log.print_cr("Startup Code Cache: "); 215 AOTCodeCache::print_statistics_on(&log); 216 log.cr(); 217 AOTCodeCache::print_timers_on(&log); 218 } 219 220 VMThread::print_counters_on(&log); 221 log.cr(); 222 MutexLockerImpl::print_counters_on(&log); 223 log.cr(); 224 log.print("Runtime events for thread \"main\""); 225 if (ProfileRuntimeCalls) { 226 log.print_cr(" (%d nested events):", ProfileVMCallContext::nested_runtime_calls_count()); 227 228 InterpreterRuntime::print_counters_on(&log); 229 #ifdef COMPILER1 230 Runtime1::print_counters_on(&log); 231 #endif 232 #ifdef COMPILER2 233 OptoRuntime::print_counters_on(&log); 234 Deoptimization::print_counters_on(&log); 235 #endif 236 } else { 237 log.print_cr(": no info (%s is disabled)", (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")); 238 } 239 log.cr(); 240 perf_jvm_print_on(&log); 241 log.cr(); 242 MethodHandles::print_counters_on(&log); 243 } 244 } 245 246 void print_bytecode_count() { 247 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 248 tty->print_cr("[BytecodeCounter::counter_value = %zu]", BytecodeCounter::counter_value()); 249 } 250 } 251 252 #ifndef PRODUCT 253 254 // Statistics printing (method invocation histogram) 255 256 GrowableArray<Method*>* collected_invoked_methods; 257 258 static void collect_invoked_methods(Method* m) { 259 if (m->invocation_count() + m->compiled_invocation_count() >= 1) { 260 collected_invoked_methods->push(m); 261 } 262 } 263 264 265 // Invocation count accumulators should be unsigned long to shift the 266 // overflow border. Longer-running workloads tend to create invocation 267 // counts which already overflow 32-bit counters for individual methods. 268 static void print_method_invocation_histogram() { 269 ResourceMark rm; 270 collected_invoked_methods = new GrowableArray<Method*>(1024); 271 SystemDictionary::methods_do(collect_invoked_methods); 272 collected_invoked_methods->sort(&compare_methods); 273 // 274 tty->cr(); 275 tty->print_cr("Histogram Over Method Invocation Counters (cutoff = %zd):", MethodHistogramCutoff); 276 tty->cr(); 277 tty->print_cr("____Count_(I+C)____Method________________________Module_________________"); 278 uint64_t total = 0, 279 int_total = 0, 280 comp_total = 0, 281 special_total= 0, 282 static_total = 0, 283 final_total = 0, 284 synch_total = 0, 285 native_total = 0, 286 access_total = 0; 287 for (int index = 0; index < collected_invoked_methods->length(); index++) { 288 // Counter values returned from getter methods are signed int. 289 // To shift the overflow border by a factor of two, we interpret 290 // them here as unsigned long. A counter can't be negative anyway. 291 Method* m = collected_invoked_methods->at(index); 292 uint64_t iic = (uint64_t)m->invocation_count(); 293 uint64_t cic = (uint64_t)m->compiled_invocation_count(); 294 if ((iic + cic) >= (uint64_t)MethodHistogramCutoff) m->print_invocation_count(tty); 295 int_total += iic; 296 comp_total += cic; 297 if (m->is_final()) final_total += iic + cic; 298 if (m->is_static()) static_total += iic + cic; 299 if (m->is_synchronized()) synch_total += iic + cic; 300 if (m->is_native()) native_total += iic + cic; 301 if (m->is_accessor()) access_total += iic + cic; 302 } 303 tty->cr(); 304 total = int_total + comp_total; 305 special_total = final_total + static_total +synch_total + native_total + access_total; 306 tty->print_cr("Invocations summary for %d methods:", collected_invoked_methods->length()); 307 double total_div = (double)total; 308 tty->print_cr("\t" UINT64_FORMAT_W(12) " (100%%) total", total); 309 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- interpreted", int_total, 100.0 * (double)int_total / total_div); 310 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- compiled", comp_total, 100.0 * (double)comp_total / total_div); 311 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- special methods (interpreted and compiled)", 312 special_total, 100.0 * (double)special_total/ total_div); 313 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- synchronized",synch_total, 100.0 * (double)synch_total / total_div); 314 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- final", final_total, 100.0 * (double)final_total / total_div); 315 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- static", static_total, 100.0 * (double)static_total / total_div); 316 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- native", native_total, 100.0 * (double)native_total / total_div); 317 tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- accessor", access_total, 100.0 * (double)access_total / total_div); 318 tty->cr(); 319 SharedRuntime::print_call_statistics_on(tty); 320 321 } 322 323 #else 324 325 static void print_method_invocation_histogram() {} 326 327 #endif // PRODUCT 328 329 330 // General statistics printing (profiling ...) 331 void print_statistics() { 332 #if INCLUDE_CDS 333 if (AOTReplayTraining && AOTPrintTrainingInfo) { 334 TrainingData::print_archived_training_data_on(tty); 335 } 336 #endif 337 if (CITime) { 338 CompileBroker::print_times(); 339 } 340 341 #ifdef COMPILER1 342 if ((PrintC1Statistics || LogVMOutput || LogCompilation) && UseCompiler) { 343 FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintC1Statistics); 344 Runtime1::print_statistics_on(tty); 345 SharedRuntime::print_statistics(); 346 } 347 #endif /* COMPILER1 */ 348 349 #ifdef COMPILER2 350 if ((PrintOptoStatistics || LogVMOutput || LogCompilation) && UseCompiler) { 351 FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintOptoStatistics); 352 Compile::print_statistics(); 353 Deoptimization::print_statistics(); 354 #ifndef COMPILER1 355 SharedRuntime::print_statistics(); 356 #endif //COMPILER1 357 } 358 359 if (PrintLockStatistics) { 360 OptoRuntime::print_named_counters(); 361 } 362 #ifdef ASSERT 363 if (CollectIndexSetStatistics) { 364 IndexSet::print_statistics(); 365 } 366 #endif // ASSERT 367 #else // COMPILER2 368 #if INCLUDE_JVMCI 369 #ifndef COMPILER1 370 if ((TraceDeoptimization || LogVMOutput || LogCompilation) && UseCompiler) { 371 FlagSetting fs(DisplayVMOutput, DisplayVMOutput && TraceDeoptimization); 372 Deoptimization::print_statistics(); 373 SharedRuntime::print_statistics(); 374 } 375 #endif // COMPILER1 376 #endif // INCLUDE_JVMCI 377 #endif // COMPILER2 378 379 if (PrintNMethodStatistics) { 380 nmethod::print_statistics(); 381 } 382 if (CountCompiledCalls) { 383 print_method_invocation_histogram(); 384 } 385 386 print_method_profiling_data(); 387 388 if (TimeOopMap) { 389 GenerateOopMap::print_time(); 390 } 391 if (PrintSymbolTableSizeHistogram) { 392 SymbolTable::print_histogram(); 393 } 394 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 395 BytecodeCounter::print(); 396 } 397 if (PrintBytecodePairHistogram) { 398 BytecodePairHistogram::print(); 399 } 400 401 if (PrintCodeCache) { 402 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 403 CodeCache::print(); 404 } 405 406 // CodeHeap State Analytics. 407 if (PrintCodeHeapAnalytics) { 408 CompileBroker::print_heapinfo(nullptr, "all", 4096); // details 409 } 410 411 LogStreamHandle(Debug, codecache, nmethod) log; 412 if (log.is_enabled()) { 413 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 414 CodeCache::print_nmethods_on(&log); 415 } 416 417 #ifndef PRODUCT 418 if (PrintCodeCache2) { 419 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 420 CodeCache::print_internals(); 421 } 422 #endif 423 424 if (VerifyOops && Verbose) { 425 tty->print_cr("+VerifyOops count: %d", StubRoutines::verify_oop_count()); 426 } 427 428 print_bytecode_count(); 429 430 if (PrintVMInfoAtExit) { 431 // Use an intermediate stream to prevent deadlocking on tty_lock 432 stringStream ss; 433 VMError::print_vm_info(&ss); 434 tty->print_raw(ss.base()); 435 } 436 437 if (PrintSystemDictionaryAtExit) { 438 ResourceMark rm; 439 MutexLocker mcld(ClassLoaderDataGraph_lock); 440 SystemDictionary::print(); 441 } 442 443 if (PrintClassLoaderDataGraphAtExit) { 444 ResourceMark rm; 445 MutexLocker mcld(ClassLoaderDataGraph_lock); 446 ClassLoaderDataGraph::print(); 447 } 448 449 // Native memory tracking data 450 if (PrintNMTStatistics) { 451 MemTracker::final_report(tty); 452 } 453 454 if (PrintMetaspaceStatisticsAtExit) { 455 MetaspaceUtils::print_basic_report(tty, 0); 456 } 457 458 if (PrintCompilerMemoryStatisticsAtExit) { 459 CompilationMemoryStatistic::print_final_report(tty); 460 } 461 462 ThreadsSMRSupport::log_statistics(); 463 464 log_vm_init_stats(); 465 466 if (log_is_enabled(Info, perf, class, link)) { 467 LogStreamHandle(Info, perf, class, link) log; 468 log.print_cr("At VM exit:"); 469 ClassLoader::print_counters(&log); 470 } 471 } 472 473 // Note: before_exit() can be executed only once, if more than one threads 474 // are trying to shutdown the VM at the same time, only one thread 475 // can run before_exit() and all other threads must wait. 476 void before_exit(JavaThread* thread, bool halt) { 477 #define BEFORE_EXIT_NOT_RUN 0 478 #define BEFORE_EXIT_RUNNING 1 479 #define BEFORE_EXIT_DONE 2 480 static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN; 481 482 Events::log(thread, "Before exit entered"); 483 484 // Note: don't use a Mutex to guard the entire before_exit(), as 485 // JVMTI post_thread_end_event and post_vm_death_event will run native code. 486 // A CAS or OSMutex would work just fine but then we need to manipulate 487 // thread state for Safepoint. Here we use Monitor wait() and notify_all() 488 // for synchronization. 489 { MonitorLocker ml(BeforeExit_lock); 490 switch (_before_exit_status) { 491 case BEFORE_EXIT_NOT_RUN: 492 _before_exit_status = BEFORE_EXIT_RUNNING; 493 break; 494 case BEFORE_EXIT_RUNNING: 495 while (_before_exit_status == BEFORE_EXIT_RUNNING) { 496 ml.wait(); 497 } 498 assert(_before_exit_status == BEFORE_EXIT_DONE, "invalid state"); 499 return; 500 case BEFORE_EXIT_DONE: 501 // need block to avoid SS compiler bug 502 { 503 return; 504 } 505 } 506 } 507 508 // At this point only one thread is executing this logic. Any other threads 509 // attempting to invoke before_exit() will wait above and return early once 510 // this thread finishes before_exit(). 511 512 // Do not add any additional shutdown logic between the above mutex logic and 513 // leak sanitizer logic below. Any additional shutdown code which performs some 514 // cleanup should be added after the leak sanitizer logic below. 515 516 #ifdef LEAK_SANITIZER 517 // If we are built with LSan, we need to perform leak checking. If we are 518 // terminating normally, not halting and no VM error, we perform a normal 519 // leak check which terminates if leaks are found. If we are not terminating 520 // normally, halting or VM error, we perform a recoverable leak check which 521 // prints leaks but will not terminate. 522 if (!halt && !VMError::is_error_reported()) { 523 LSAN_DO_LEAK_CHECK(); 524 } else { 525 // Ignore the return value. 526 static_cast<void>(LSAN_DO_RECOVERABLE_LEAK_CHECK()); 527 } 528 #endif 529 530 #if INCLUDE_CDS 531 MethodProfiler::process_method_hotness(); 532 // Dynamic CDS dumping must happen whilst we can still reliably 533 // run Java code. 534 DynamicArchive::dump_at_exit(thread); 535 assert(!thread->has_pending_exception(), "must be"); 536 #endif 537 538 // Actual shutdown logic begins here. 539 540 #if INCLUDE_JVMCI 541 if (EnableJVMCI) { 542 JVMCI::shutdown(thread); 543 } 544 #endif 545 546 #if INCLUDE_CDS 547 ClassListWriter::write_resolved_constants(); 548 ClassListWriter::write_reflection_data(); 549 ClassListWriter::write_loader_negative_lookup_cache(); 550 if (CDSConfig::is_dumping_preimage_static_archive()) { 551 // Creating the hotspot.cds.preimage file 552 MetaspaceShared::preload_and_dump(thread); 553 assert(!thread->has_pending_exception(), "must be"); 554 } 555 #endif 556 557 AOTCodeCache::close(); // Write final data and close archive 558 559 // Hang forever on exit if we're reporting an error. 560 if (ShowMessageBoxOnError && VMError::is_error_reported()) { 561 os::infinite_sleep(); 562 } 563 564 EventThreadEnd event; 565 if (event.should_commit()) { 566 event.set_thread(JFR_JVM_THREAD_ID(thread)); 567 event.commit(); 568 } 569 570 JFR_ONLY(Jfr::on_vm_shutdown(false, halt);) 571 572 // Stop the WatcherThread. We do this before disenrolling various 573 // PeriodicTasks to reduce the likelihood of races. 574 WatcherThread::stop(); 575 576 NativeHeapTrimmer::cleanup(); 577 578 // Stop concurrent GC threads 579 Universe::heap()->stop(); 580 581 // Print GC/heap related information. 582 Log(gc, exit) log; 583 if (log.is_info()) { 584 LogStream ls_info(log.info()); 585 Universe::print_on(&ls_info); 586 if (log.is_trace()) { 587 LogStream ls_trace(log.trace()); 588 MutexLocker mcld(ClassLoaderDataGraph_lock); 589 ClassLoaderDataGraph::print_on(&ls_trace); 590 } 591 } 592 593 if (PrintBytecodeHistogram) { 594 BytecodeHistogram::print(PrintBytecodeHistogramCutoff); 595 } 596 597 #ifdef LINUX 598 if (DumpPerfMapAtExit) { 599 CodeCache::write_perf_map(nullptr, tty); 600 } 601 if (PrintMemoryMapAtExit) { 602 MemMapPrinter::print_all_mappings(tty); 603 } 604 #endif 605 606 if (JvmtiExport::should_post_thread_life()) { 607 JvmtiExport::post_thread_end(thread); 608 } 609 610 // Always call even when there are not JVMTI environments yet, since environments 611 // may be attached late and JVMTI must track phases of VM execution 612 JvmtiExport::post_vm_death(); 613 JvmtiAgentList::unload_agents(); 614 615 // Terminate the signal thread 616 // Note: we don't wait until it actually dies. 617 os::terminate_signal_thread(); 618 619 if (AOTVerifyTrainingData) { 620 EXCEPTION_MARK; 621 CompilationPolicy::flush_replay_training_at_init(THREAD); 622 TrainingData::verify(); 623 } 624 625 print_statistics(); 626 Universe::heap()->print_tracing_info(); 627 628 { MutexLocker ml(BeforeExit_lock); 629 _before_exit_status = BEFORE_EXIT_DONE; 630 BeforeExit_lock->notify_all(); 631 } 632 633 if (VerifyStringTableAtExit) { 634 size_t fail_cnt = StringTable::verify_and_compare_entries(); 635 if (fail_cnt != 0) { 636 tty->print_cr("ERROR: fail_cnt=%zu", fail_cnt); 637 guarantee(fail_cnt == 0, "unexpected StringTable verification failures"); 638 } 639 } 640 641 #undef BEFORE_EXIT_NOT_RUN 642 #undef BEFORE_EXIT_RUNNING 643 #undef BEFORE_EXIT_DONE 644 } 645 646 void vm_exit(int code) { 647 Thread* thread = 648 ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : nullptr; 649 if (thread == nullptr) { 650 // very early initialization failure -- just exit 651 vm_direct_exit(code); 652 } 653 654 // We'd like to add an entry to the XML log to show that the VM is 655 // terminating, but we can't safely do that here. The logic to make 656 // XML termination logging safe is tied to the termination of the 657 // VMThread, and it doesn't terminate on this exit path. See 8222534. 658 659 if (VMThread::vm_thread() != nullptr) { 660 if (thread->is_Java_thread()) { 661 // We must be "in_vm" for the code below to work correctly. 662 // Historically there must have been some exit path for which 663 // that was not the case and so we set it explicitly - even 664 // though we no longer know what that path may be. 665 JavaThread::cast(thread)->set_thread_state(_thread_in_vm); 666 } 667 668 // Fire off a VM_Exit operation to bring VM to a safepoint and exit 669 VM_Exit op(code); 670 671 // 4945125 The vm thread comes to a safepoint during exit. 672 // GC vm_operations can get caught at the safepoint, and the 673 // heap is unparseable if they are caught. Grab the Heap_lock 674 // to prevent this. The GC vm_operations will not be able to 675 // queue until after we release it, but we never do that as we 676 // are terminating the VM process. 677 MutexLocker ml(Heap_lock); 678 679 VMThread::execute(&op); 680 // should never reach here; but in case something wrong with VM Thread. 681 vm_direct_exit(code); 682 } else { 683 // VM thread is gone, just exit 684 vm_direct_exit(code); 685 } 686 ShouldNotReachHere(); 687 } 688 689 void notify_vm_shutdown() { 690 // For now, just a dtrace probe. 691 HOTSPOT_VM_SHUTDOWN(); 692 } 693 694 void vm_direct_exit(int code) { 695 notify_vm_shutdown(); 696 os::wait_for_keypress_at_exit(); 697 os::exit(code); 698 } 699 700 void vm_direct_exit(int code, const char* message) { 701 if (message != nullptr) { 702 tty->print_cr("%s", message); 703 } 704 vm_direct_exit(code); 705 } 706 707 static void vm_perform_shutdown_actions() { 708 if (is_init_completed()) { 709 Thread* thread = Thread::current_or_null(); 710 if (thread != nullptr && thread->is_Java_thread()) { 711 // We are leaving the VM, set state to native (in case any OS exit 712 // handlers call back to the VM) 713 JavaThread* jt = JavaThread::cast(thread); 714 // Must always be walkable or have no last_Java_frame when in 715 // thread_in_native 716 jt->frame_anchor()->make_walkable(); 717 jt->set_thread_state(_thread_in_native); 718 } 719 } 720 notify_vm_shutdown(); 721 } 722 723 void vm_shutdown() 724 { 725 vm_perform_shutdown_actions(); 726 os::wait_for_keypress_at_exit(); 727 os::shutdown(); 728 } 729 730 void vm_abort(bool dump_core) { 731 vm_perform_shutdown_actions(); 732 os::wait_for_keypress_at_exit(); 733 734 // Flush stdout and stderr before abort. 735 fflush(stdout); 736 fflush(stderr); 737 738 os::abort(dump_core); 739 ShouldNotReachHere(); 740 } 741 742 static void vm_notify_during_cds_dumping(const char* error, const char* message) { 743 if (error != nullptr) { 744 tty->print_cr("Error occurred during CDS dumping"); 745 tty->print("%s", error); 746 if (message != nullptr) { 747 tty->print_cr(": %s", message); 748 } 749 else { 750 tty->cr(); 751 } 752 } 753 } 754 755 void vm_exit_during_cds_dumping(const char* error, const char* message) { 756 vm_notify_during_cds_dumping(error, message); 757 758 // Failure during CDS dumping, we don't want to dump core 759 vm_abort(false); 760 } 761 762 static void vm_notify_during_shutdown(const char* error, const char* message) { 763 if (error != nullptr) { 764 tty->print_cr("Error occurred during initialization of VM"); 765 tty->print("%s", error); 766 if (message != nullptr) { 767 tty->print_cr(": %s", message); 768 } 769 else { 770 tty->cr(); 771 } 772 } 773 if (ShowMessageBoxOnError && WizardMode) { 774 fatal("Error occurred during initialization of VM"); 775 } 776 } 777 778 void vm_exit_during_initialization() { 779 vm_notify_during_shutdown(nullptr, nullptr); 780 781 // Failure during initialization, we don't want to dump core 782 vm_abort(false); 783 } 784 785 void vm_exit_during_initialization(Handle exception) { 786 tty->print_cr("Error occurred during initialization of VM"); 787 // If there are exceptions on this thread it must be cleared 788 // first and here. Any future calls to EXCEPTION_MARK requires 789 // that no pending exceptions exist. 790 JavaThread* THREAD = JavaThread::current(); // can't be null 791 if (HAS_PENDING_EXCEPTION) { 792 CLEAR_PENDING_EXCEPTION; 793 } 794 java_lang_Throwable::print_stack_trace(exception, tty); 795 tty->cr(); 796 vm_notify_during_shutdown(nullptr, nullptr); 797 798 // Failure during initialization, we don't want to dump core 799 vm_abort(false); 800 } 801 802 void vm_exit_during_initialization(Symbol* ex, const char* message) { 803 ResourceMark rm; 804 vm_notify_during_shutdown(ex->as_C_string(), message); 805 806 // Failure during initialization, we don't want to dump core 807 vm_abort(false); 808 } 809 810 void vm_exit_during_initialization(const char* error, const char* message) { 811 vm_notify_during_shutdown(error, message); 812 813 // Failure during initialization, we don't want to dump core 814 vm_abort(false); 815 } 816 817 void vm_shutdown_during_initialization(const char* error, const char* message) { 818 vm_notify_during_shutdown(error, message); 819 vm_shutdown(); 820 } 821 822 JDK_Version JDK_Version::_current; 823 const char* JDK_Version::_java_version; 824 const char* JDK_Version::_runtime_name; 825 const char* JDK_Version::_runtime_version; 826 const char* JDK_Version::_runtime_vendor_version; 827 const char* JDK_Version::_runtime_vendor_vm_bug_url; 828 829 void JDK_Version::initialize() { 830 assert(!_current.is_valid(), "Don't initialize twice"); 831 832 int major = VM_Version::vm_major_version(); 833 int minor = VM_Version::vm_minor_version(); 834 int security = VM_Version::vm_security_version(); 835 int build = VM_Version::vm_build_number(); 836 int patch = VM_Version::vm_patch_version(); 837 _current = JDK_Version(major, minor, security, patch, build); 838 } 839 840 void JDK_Version_init() { 841 JDK_Version::initialize(); 842 } 843 844 static int64_t encode_jdk_version(const JDK_Version& v) { 845 return 846 ((int64_t)v.major_version() << (BitsPerByte * 4)) | 847 ((int64_t)v.minor_version() << (BitsPerByte * 3)) | 848 ((int64_t)v.security_version() << (BitsPerByte * 2)) | 849 ((int64_t)v.patch_version() << (BitsPerByte * 1)) | 850 ((int64_t)v.build_number() << (BitsPerByte * 0)); 851 } 852 853 int JDK_Version::compare(const JDK_Version& other) const { 854 assert(is_valid() && other.is_valid(), "Invalid version (uninitialized?)"); 855 uint64_t e = encode_jdk_version(*this); 856 uint64_t o = encode_jdk_version(other); 857 return (e > o) ? 1 : ((e == o) ? 0 : -1); 858 } 859 860 /* See JEP 223 */ 861 void JDK_Version::to_string(char* buffer, size_t buflen) const { 862 assert(buffer && buflen > 0, "call with useful buffer"); 863 size_t index = 0; 864 865 if (!is_valid()) { 866 jio_snprintf(buffer, buflen, "%s", "(uninitialized)"); 867 } else { 868 int rc = jio_snprintf( 869 &buffer[index], buflen - index, "%d.%d", _major, _minor); 870 if (rc == -1) return; 871 index += rc; 872 if (_patch > 0) { 873 rc = jio_snprintf(&buffer[index], buflen - index, ".%d.%d", _security, _patch); 874 if (rc == -1) return; 875 index += rc; 876 } else if (_security > 0) { 877 rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _security); 878 if (rc == -1) return; 879 index += rc; 880 } 881 if (_build > 0) { 882 rc = jio_snprintf(&buffer[index], buflen - index, "+%d", _build); 883 if (rc == -1) return; 884 index += rc; 885 } 886 } 887 }