/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotMetaspace.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/classListWriter.hpp"
#include "cds/dynamicArchive.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memMapPrinter.hpp"
#include "nmt/memTracker.hpp"
#include "oops/constantPool.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/klassVtable.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiAgentList.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/task.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "runtime/vm_version.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "sanitizers/leak.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "code/compiledIC.hpp"
#include "opto/compile.hpp"
#include "opto/indexSet.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GrowableArray<Method*>* collected_profiled_methods;

static int compare_methods(Method** a, Method** b) {
  // compiled_invocation_count() returns int64_t, forcing the entire expression
  // to be evaluated as int64_t. Overflow is not an issue.
  int64_t diff = (((*b)->invocation_count() + (*b)->compiled_invocation_count())
                - ((*a)->invocation_count() + (*a)->compiled_invocation_count()));
  return (diff < 0) ? -1 : (diff > 0) ? 1 : 0;
}

static void collect_profiled_methods(Method* m) {
  Thread* thread = Thread::current();
  methodHandle mh(thread, m);
  if ((m->method_data() != nullptr) &&
      (PrintMethodData || CompilerOracle::should_print(mh))) {
    collected_profiled_methods->push(m);
  }
}

static void print_method_profiling_data() {
  if ((ProfileInterpreter COMPILER1_PRESENT(|| C1UpdateMethodData)) &&
      (PrintMethodData || CompilerOracle::should_print_methods())) {
    ResourceMark rm;
    collected_profiled_methods = new GrowableArray<Method*>(1024);
    SystemDictionary::methods_do(collect_profiled_methods);
    collected_profiled_methods->sort(&compare_methods);

    int count = collected_profiled_methods->length();
    int total_size = 0;
    if (count > 0) {
      for (int index = 0; index < count; index++) {
        Method* m = collected_profiled_methods->at(index);

        // Instead of taking tty lock, we collect all lines into a string stream
        // and then print them all at once.
        ResourceMark rm2;
        stringStream ss;

        ss.print_cr("------------------------------------------------------------------------");
        m->print_invocation_count(&ss);
        ss.print_cr(" mdo size: %d bytes", m->method_data()->size_in_bytes());
        ss.cr();
        // Dump data on parameters if any
        if (m->method_data() != nullptr && m->method_data()->parameters_type_data() != nullptr) {
          ss.fill_to(2);
          m->method_data()->parameters_type_data()->print_data_on(&ss);
        }
        m->print_codes_on(&ss);
        tty->print("%s", ss.as_string()); // print all at once
        total_size += m->method_data()->size_in_bytes();
      }
      tty->print_cr("------------------------------------------------------------------------");
      tty->print_cr("Total MDO size: %d bytes", total_size);
    }
  }
}

#ifndef PRODUCT

// Statistics printing (method invocation histogram)

GrowableArray<Method*>* collected_invoked_methods;

static void collect_invoked_methods(Method* m) {
  if (m->invocation_count() + m->compiled_invocation_count() >= 1) {
    collected_invoked_methods->push(m);
  }
}


// Invocation count accumulators should be unsigned long to shift the
// overflow border. Longer-running workloads tend to create invocation
// counts which already overflow 32-bit counters for individual methods.
static void print_method_invocation_histogram() {
  ResourceMark rm;
  collected_invoked_methods = new GrowableArray<Method*>(1024);
  SystemDictionary::methods_do(collect_invoked_methods);
  collected_invoked_methods->sort(&compare_methods);
  //
  tty->cr();
  tty->print_cr("Histogram Over Method Invocation Counters (cutoff = %zd):", MethodHistogramCutoff);
  tty->cr();
  tty->print_cr("____Count_(I+C)____Method________________________Module_________________");
  uint64_t total         = 0,
           int_total     = 0,
           comp_total    = 0,
           special_total = 0,
           static_total  = 0,
           final_total   = 0,
           synch_total   = 0,
           native_total  = 0,
           access_total  = 0;
  for (int index = 0; index < collected_invoked_methods->length(); index++) {
    // Counter values returned from getter methods are signed int.
    // To shift the overflow border by a factor of two, we interpret
    // them here as unsigned long. A counter can't be negative anyway.
    Method* m = collected_invoked_methods->at(index);
    uint64_t iic = (uint64_t)m->invocation_count();
    uint64_t cic = (uint64_t)m->compiled_invocation_count();
    if ((iic + cic) >= (uint64_t)MethodHistogramCutoff) m->print_invocation_count(tty);
    int_total  += iic;
    comp_total += cic;
    if (m->is_final())        final_total  += iic + cic;
    if (m->is_static())       static_total += iic + cic;
    if (m->is_synchronized()) synch_total  += iic + cic;
    if (m->is_native())       native_total += iic + cic;
    if (m->is_accessor())     access_total += iic + cic;
  }
  tty->cr();
  total = int_total + comp_total;
  special_total = final_total + static_total + synch_total + native_total + access_total;
  tty->print_cr("Invocations summary for %d methods:", collected_invoked_methods->length());
  double total_div = (double)total;
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (100%%) total", total);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- interpreted", int_total, 100.0 * (double)int_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- compiled", comp_total, 100.0 * (double)comp_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- special methods (interpreted and compiled)",
                special_total, 100.0 * (double)special_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- synchronized", synch_total, 100.0 * (double)synch_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- final", final_total, 100.0 * (double)final_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- static", static_total, 100.0 * (double)static_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- native", native_total, 100.0 * (double)native_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- accessor", access_total, 100.0 * (double)access_total / total_div);
  tty->cr();
  SharedRuntime::print_call_statistics(comp_total);
}

static void print_bytecode_count() {
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
    tty->print_cr("[BytecodeCounter::counter_value = %zu]", BytecodeCounter::counter_value());
  }
}

#else

static void print_method_invocation_histogram() {}
static void print_bytecode_count() {}

#endif // PRODUCT


// General statistics printing (profiling ...)
void print_statistics() {
  if (CITime) {
    CompileBroker::print_times();
  }

#ifdef COMPILER1
  if ((PrintC1Statistics || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintC1Statistics);
    Runtime1::print_statistics();
    SharedRuntime::print_statistics();
  }
#endif /* COMPILER1 */

#ifdef COMPILER2
  if ((PrintOptoStatistics || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintOptoStatistics);
    Compile::print_statistics();
    Deoptimization::print_statistics();
#ifndef COMPILER1
    SharedRuntime::print_statistics();
#endif //COMPILER1
  }

  if (PrintLockStatistics) {
    OptoRuntime::print_named_counters();
  }
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    IndexSet::print_statistics();
  }
#endif // ASSERT
#else // COMPILER2
#if INCLUDE_JVMCI
#ifndef COMPILER1
  if ((TraceDeoptimization || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && TraceDeoptimization);
    Deoptimization::print_statistics();
    SharedRuntime::print_statistics();
  }
#endif // COMPILER1
#endif // INCLUDE_JVMCI
#endif // COMPILER2

  if (PrintNMethodStatistics) {
    nmethod::print_statistics();
  }
  if (CountCompiledCalls) {
    print_method_invocation_histogram();
  }

  print_method_profiling_data();

  if (TimeOopMap) {
    GenerateOopMap::print_time();
  }
  if (PrintSymbolTableSizeHistogram) {
    SymbolTable::print_histogram();
  }
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
    BytecodeCounter::print();
  }
  if (PrintBytecodePairHistogram) {
    BytecodePairHistogram::print();
  }

  if (PrintCodeCache) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::print();
  }

  // CodeHeap State Analytics.
  if (PrintCodeHeapAnalytics) {
    CompileBroker::print_heapinfo(nullptr, "all", 4096); // details
  }

#ifndef PRODUCT
  if (PrintCodeCache2) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::print_internals();
  }
#endif

  if (VerifyOops && Verbose) {
    tty->print_cr("+VerifyOops count: %d", StubRoutines::verify_oop_count());
  }

  print_bytecode_count();

  if (PrintVMInfoAtExit) {
    // Use an intermediate stream to prevent deadlocking on tty_lock
    stringStream ss;
    VMError::print_vm_info(&ss);
    tty->print_raw(ss.base());
  }

  if (PrintSystemDictionaryAtExit) {
    ResourceMark rm;
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    SystemDictionary::print();
  }

  if (PrintClassLoaderDataGraphAtExit) {
    ResourceMark rm;
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::print();
  }

  // Native memory tracking data
  if (PrintNMTStatistics) {
    MemTracker::final_report(tty);
  }

  if (PrintMetaspaceStatisticsAtExit) {
    MetaspaceUtils::print_basic_report(tty, 0);
  }

  if (PrintCompilerMemoryStatisticsAtExit) {
    CompilationMemoryStatistic::print_final_report(tty);
  }

  ThreadsSMRSupport::log_statistics();

  if (log_is_enabled(Info, perf, class, link)) {
    LogStreamHandle(Info, perf, class, link) log;
    log.print_cr("At VM exit:");
    ClassLoader::print_counters(&log);
  }
}

// Note: before_exit() can be executed only once. If more than one thread
// tries to shut down the VM at the same time, only one thread can run
// before_exit() and all other threads must wait.
void before_exit(JavaThread* thread, bool halt) {
  #define BEFORE_EXIT_NOT_RUN 0
  #define BEFORE_EXIT_RUNNING 1
  #define BEFORE_EXIT_DONE    2
  static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN;

  Events::log(thread, "Before exit entered");

  // Note: don't use a Mutex to guard the entire before_exit(), as
  // JVMTI post_thread_end_event and post_vm_death_event will run native code.
  // A CAS or OSMutex would work just fine but then we need to manipulate
  // thread state for Safepoint. Here we use Monitor wait() and notify_all()
  // for synchronization.
  { MonitorLocker ml(BeforeExit_lock);
    switch (_before_exit_status) {
    case BEFORE_EXIT_NOT_RUN:
      _before_exit_status = BEFORE_EXIT_RUNNING;
      break;
    case BEFORE_EXIT_RUNNING:
      while (_before_exit_status == BEFORE_EXIT_RUNNING) {
        ml.wait();
      }
      assert(_before_exit_status == BEFORE_EXIT_DONE, "invalid state");
      return;
    case BEFORE_EXIT_DONE:
      // need block to avoid SS compiler bug
      {
        return;
      }
    }
  }

  // At this point only one thread is executing this logic. Any other threads
  // attempting to invoke before_exit() will wait above and return early once
  // this thread finishes before_exit().

  // Do not add any additional shutdown logic between the above mutex logic and
  // leak sanitizer logic below. Any additional shutdown code which performs some
  // cleanup should be added after the leak sanitizer logic below.

#ifdef LEAK_SANITIZER
  // If we are built with LSan, we need to perform leak checking. If we are
  // terminating normally, not halting and no VM error, we perform a normal
  // leak check which terminates if leaks are found. If we are not terminating
  // normally, halting or VM error, we perform a recoverable leak check which
  // prints leaks but will not terminate.
  if (!halt && !VMError::is_error_reported()) {
    LSAN_DO_LEAK_CHECK();
  } else {
    // Ignore the return value.
    static_cast<void>(LSAN_DO_RECOVERABLE_LEAK_CHECK());
  }
#endif

#if INCLUDE_CDS
  // Dynamic CDS dumping must happen whilst we can still reliably
  // run Java code.
  DynamicArchive::dump_at_exit(thread);
  assert(!thread->has_pending_exception(), "must be");
#endif

  // Actual shutdown logic begins here.

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCI::shutdown(thread);
  }
#endif

#if INCLUDE_CDS
  ClassListWriter::write_resolved_constants();

  if (CDSConfig::is_dumping_preimage_static_archive()) {
    AOTMetaspace::preload_and_dump(thread);
  }
#endif

  // Hang forever on exit if we're reporting an error.
  if (ShowMessageBoxOnError && VMError::is_error_reported()) {
    os::infinite_sleep();
  }

  EventThreadEnd event;
  if (event.should_commit()) {
    event.set_thread(JFR_JVM_THREAD_ID(thread));
    event.commit();
  }

  // The 2nd argument (emit_event_shutdown) should be set to false
  // because EventShutdown would be emitted at Threads::destroy_vm()
  // (one of the callers of before_exit()).
  JFR_ONLY(Jfr::on_vm_shutdown(true, false, halt);)

  // Stop the WatcherThread. We do this before disenrolling various
  // PeriodicTasks to reduce the likelihood of races.
  WatcherThread::stop();

  NativeHeapTrimmer::cleanup();

  // Run before exit and then stop concurrent GC threads
  Universe::before_exit();

  if (PrintBytecodeHistogram) {
    BytecodeHistogram::print();
  }

#ifdef LINUX
  if (DumpPerfMapAtExit) {
    CodeCache::write_perf_map(nullptr, tty);
  }
  if (PrintMemoryMapAtExit) {
    MemMapPrinter::print_all_mappings(tty);
  }
#endif

  if (JvmtiExport::should_post_thread_life()) {
    JvmtiExport::post_thread_end(thread);
  }

  // Always call this, even when there are no JVMTI environments yet, since
  // environments may be attached late and JVMTI must track phases of VM execution.
  JvmtiExport::post_vm_death();
  JvmtiAgentList::unload_agents();

  // Terminate the signal thread.
  // Note: we don't wait until it actually dies.
  os::terminate_signal_thread();

#if INCLUDE_CDS
  if (AOTVerifyTrainingData) {
    TrainingData::verify();
  }
#endif

  print_statistics();

  { MutexLocker ml(BeforeExit_lock);
    _before_exit_status = BEFORE_EXIT_DONE;
    BeforeExit_lock->notify_all();
  }

  if (VerifyStringTableAtExit) {
    size_t fail_cnt = StringTable::verify_and_compare_entries();
    if (fail_cnt != 0) {
      tty->print_cr("ERROR: fail_cnt=%zu", fail_cnt);
      guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
    }
  }

  #undef BEFORE_EXIT_NOT_RUN
  #undef BEFORE_EXIT_RUNNING
  #undef BEFORE_EXIT_DONE
}

void vm_exit(int code) {
  Thread* thread =
      ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : nullptr;
  if (thread == nullptr) {
    // very early initialization failure -- just exit
    vm_direct_exit(code);
  }

  // We'd like to add an entry to the XML log to show that the VM is
  // terminating, but we can't safely do that here. The logic to make
The logic to make 544 // XML termination logging safe is tied to the termination of the 545 // VMThread, and it doesn't terminate on this exit path. See 8222534. 546 547 if (VMThread::vm_thread() != nullptr) { 548 if (thread->is_Java_thread()) { 549 // We must be "in_vm" for the code below to work correctly. 550 // Historically there must have been some exit path for which 551 // that was not the case and so we set it explicitly - even 552 // though we no longer know what that path may be. 553 JavaThread::cast(thread)->set_thread_state(_thread_in_vm); 554 } 555 556 // Fire off a VM_Exit operation to bring VM to a safepoint and exit 557 VM_Exit op(code); 558 559 // 4945125 The vm thread comes to a safepoint during exit. 560 // GC vm_operations can get caught at the safepoint, and the 561 // heap is unparseable if they are caught. Grab the Heap_lock 562 // to prevent this. The GC vm_operations will not be able to 563 // queue until after we release it, but we never do that as we 564 // are terminating the VM process. 565 MutexLocker ml(Heap_lock); 566 567 VMThread::execute(&op); 568 // should never reach here; but in case something wrong with VM Thread. 569 vm_direct_exit(code); 570 } else { 571 // VM thread is gone, just exit 572 vm_direct_exit(code); 573 } 574 ShouldNotReachHere(); 575 } 576 577 void notify_vm_shutdown() { 578 // For now, just a dtrace probe. 579 HOTSPOT_VM_SHUTDOWN(); 580 } 581 582 void vm_direct_exit(int code) { 583 notify_vm_shutdown(); 584 os::wait_for_keypress_at_exit(); 585 os::exit(code); 586 } 587 588 void vm_direct_exit(int code, const char* message) { 589 if (message != nullptr) { 590 tty->print_cr("%s", message); 591 } 592 vm_direct_exit(code); 593 } 594 595 static void vm_perform_shutdown_actions() { 596 if (is_init_completed()) { 597 Thread* thread = Thread::current_or_null(); 598 if (thread != nullptr && thread->is_Java_thread()) { 599 // We are leaving the VM, set state to native (in case any OS exit 600 // handlers call back to the VM) 601 JavaThread* jt = JavaThread::cast(thread); 602 // Must always be walkable or have no last_Java_frame when in 603 // thread_in_native 604 jt->frame_anchor()->make_walkable(); 605 jt->set_thread_state(_thread_in_native); 606 } 607 } 608 notify_vm_shutdown(); 609 } 610 611 void vm_shutdown() 612 { 613 vm_perform_shutdown_actions(); 614 os::wait_for_keypress_at_exit(); 615 os::shutdown(); 616 } 617 618 void vm_abort(bool dump_core) { 619 vm_perform_shutdown_actions(); 620 os::wait_for_keypress_at_exit(); 621 622 // Flush stdout and stderr before abort. 
  fflush(stdout);
  fflush(stderr);

  os::abort(dump_core);
  ShouldNotReachHere();
}

static void vm_notify_during_cds_dumping(const char* error, const char* message) {
  if (error != nullptr) {
    tty->print_cr("Error occurred during CDS dumping");
    tty->print("%s", error);
    if (message != nullptr) {
      tty->print_cr(": %s", message);
    }
    else {
      tty->cr();
    }
  }
}

void vm_exit_during_cds_dumping(const char* error, const char* message) {
  vm_notify_during_cds_dumping(error, message);

  // Failure during CDS dumping, we don't want to dump core
  vm_abort(false);
}

static void vm_notify_during_shutdown(const char* error, const char* message) {
  if (error != nullptr) {
    tty->print_cr("Error occurred during initialization of VM");
    tty->print("%s", error);
    if (message != nullptr) {
      tty->print_cr(": %s", message);
    }
    else {
      tty->cr();
    }
  }
  if (ShowMessageBoxOnError && WizardMode) {
    fatal("Error occurred during initialization of VM");
  }
}

void vm_exit_during_initialization() {
  vm_notify_during_shutdown(nullptr, nullptr);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_exit_during_initialization(Handle exception) {
  tty->print_cr("Error occurred during initialization of VM");
  // If there is a pending exception on this thread, it must be cleared
  // here first, since any future call to EXCEPTION_MARK requires
  // that no pending exception exists.
  JavaThread* THREAD = JavaThread::current(); // can't be null
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  java_lang_Throwable::print_stack_trace(exception, tty);
  tty->cr();
  vm_notify_during_shutdown(nullptr, nullptr);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_exit_during_initialization(Symbol* ex, const char* message) {
  ResourceMark rm;
  vm_notify_during_shutdown(ex->as_C_string(), message);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_exit_during_initialization(const char* error, const char* message) {
  vm_notify_during_shutdown(error, message);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_shutdown_during_initialization(const char* error, const char* message) {
  vm_notify_during_shutdown(error, message);
  vm_shutdown();
}

JDK_Version JDK_Version::_current;
const char* JDK_Version::_java_version;
const char* JDK_Version::_runtime_name;
const char* JDK_Version::_runtime_version;
const char* JDK_Version::_runtime_vendor_version;
const char* JDK_Version::_runtime_vendor_vm_bug_url;

void JDK_Version::initialize() {
  assert(!_current.is_valid(), "Don't initialize twice");

  int major    = VM_Version::vm_major_version();
  int minor    = VM_Version::vm_minor_version();
  int security = VM_Version::vm_security_version();
  int build    = VM_Version::vm_build_number();
  int patch    = VM_Version::vm_patch_version();
  _current = JDK_Version(major, minor, security, patch, build);
}

void JDK_Version_init() {
  JDK_Version::initialize();
}

// Pack the version components into a single value, one byte per component
// (major, minor, security, patch, build, most significant first), so that
// versions can be compared as plain integers.
static int64_t encode_jdk_version(const JDK_Version& v) {
  return
    ((int64_t)v.major_version()    << (BitsPerByte * 4)) |
    ((int64_t)v.minor_version()    << (BitsPerByte * 3)) |
    ((int64_t)v.security_version() << (BitsPerByte * 2)) |
    ((int64_t)v.patch_version()    << (BitsPerByte * 1)) |
    ((int64_t)v.build_number()     << (BitsPerByte * 0));
}

int JDK_Version::compare(const JDK_Version& other) const {
  assert(is_valid() && other.is_valid(), "Invalid version (uninitialized?)");
  uint64_t e = encode_jdk_version(*this);
  uint64_t o = encode_jdk_version(other);
  return (e > o) ? 1 : ((e == o) ? 0 : -1);
}

/* See JEP 223 */
void JDK_Version::to_string(char* buffer, size_t buflen) const {
  assert(buffer && buflen > 0, "call with useful buffer");
  size_t index = 0;

  if (!is_valid()) {
    jio_snprintf(buffer, buflen, "%s", "(uninitialized)");
  } else {
    int rc = jio_snprintf(
        &buffer[index], buflen - index, "%d.%d", _major, _minor);
    if (rc == -1) return;
    index += rc;
    if (_patch > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, ".%d.%d", _security, _patch);
      if (rc == -1) return;
      index += rc;
    } else if (_security > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _security);
      if (rc == -1) return;
      index += rc;
    }
    if (_build > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, "+%d", _build);
      if (rc == -1) return;
      index += rc;
    }
  }
}