/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cds_globals.hpp"
#include "cds/classListWriter.hpp"
#include "cds/dynamicArchive.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memMapPrinter.hpp"
#include "nmt/memTracker.hpp"
#include "oops/constantPool.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/klassVtable.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiAgentList.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/task.hpp"
#include "runtime/threads.hpp"
#include "runtime/timer.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_version.hpp"
#include "sanitizers/leak.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "code/compiledIC.hpp"
#include "opto/compile.hpp"
#include "opto/indexSet.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GrowableArray<Method*>* collected_profiled_methods;

static int compare_methods(Method** a, Method** b) {
  // compiled_invocation_count() returns int64_t, forcing the entire expression
  // to be evaluated as int64_t. Overflow is not an issue.
  int64_t diff = (((*b)->invocation_count() + (*b)->compiled_invocation_count())
                  - ((*a)->invocation_count() + (*a)->compiled_invocation_count()));
  return (diff < 0) ? -1 : (diff > 0) ? 1 : 0;
}

static void collect_profiled_methods(Method* m) {
  Thread* thread = Thread::current();
  methodHandle mh(thread, m);
  if ((m->method_data() != nullptr) &&
      (PrintMethodData || CompilerOracle::should_print(mh))) {
    collected_profiled_methods->push(m);
  }
}

static void print_method_profiling_data() {
  if ((ProfileInterpreter COMPILER1_PRESENT(|| C1UpdateMethodData)) &&
      (PrintMethodData || CompilerOracle::should_print_methods())) {
    ResourceMark rm;
    collected_profiled_methods = new GrowableArray<Method*>(1024);
    SystemDictionary::methods_do(collect_profiled_methods);
    collected_profiled_methods->sort(&compare_methods);

    int count = collected_profiled_methods->length();
    int total_size = 0;
    if (count > 0) {
      for (int index = 0; index < count; index++) {
        Method* m = collected_profiled_methods->at(index);

        // Instead of taking tty lock, we collect all lines into a string stream
        // and then print them all at once.
        ResourceMark rm2;
        stringStream ss;

        ss.print_cr("------------------------------------------------------------------------");
        m->print_invocation_count(&ss);
        ss.print_cr(" mdo size: %d bytes", m->method_data()->size_in_bytes());
        ss.cr();
        // Dump data on parameters if any
        if (m->method_data() != nullptr && m->method_data()->parameters_type_data() != nullptr) {
          ss.fill_to(2);
          m->method_data()->parameters_type_data()->print_data_on(&ss);
        }
        m->print_codes_on(&ss);
        tty->print("%s", ss.as_string()); // print all at once
        total_size += m->method_data()->size_in_bytes();
      }
      tty->print_cr("------------------------------------------------------------------------");
      tty->print_cr("Total MDO size: %d bytes", total_size);
    }
  }
}

#ifndef PRODUCT

// Statistics printing (method invocation histogram)

GrowableArray<Method*>* collected_invoked_methods;

static void collect_invoked_methods(Method* m) {
  if (m->invocation_count() + m->compiled_invocation_count() >= 1) {
    collected_invoked_methods->push(m);
  }
}


// Invocation count accumulators should be unsigned long to shift the
// overflow border. Longer-running workloads tend to create invocation
// counts which already overflow 32-bit counters for individual methods.
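// For illustration: summing the counters of ten thousand hot methods that
// average 10^6 invocations each already yields 10^10, well past the signed
// 32-bit limit of 2^31 - 1 (about 2.1 * 10^9); hence the totals below are
// accumulated in uint64_t.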
static void print_method_invocation_histogram() {
  ResourceMark rm;
  collected_invoked_methods = new GrowableArray<Method*>(1024);
  SystemDictionary::methods_do(collect_invoked_methods);
  collected_invoked_methods->sort(&compare_methods);
  //
  tty->cr();
  tty->print_cr("Histogram Over Method Invocation Counters (cutoff = %zd):", MethodHistogramCutoff);
  tty->cr();
  tty->print_cr("____Count_(I+C)____Method________________________Module_________________");
  uint64_t total         = 0,
           int_total     = 0,
           comp_total    = 0,
           special_total = 0,
           static_total  = 0,
           final_total   = 0,
           synch_total   = 0,
           native_total  = 0,
           access_total  = 0;
  for (int index = 0; index < collected_invoked_methods->length(); index++) {
    // Counter values returned from getter methods are signed int.
    // To shift the overflow border by a factor of two, we interpret
    // them here as unsigned long. A counter can't be negative anyway.
    Method* m = collected_invoked_methods->at(index);
    uint64_t iic = (uint64_t)m->invocation_count();
    uint64_t cic = (uint64_t)m->compiled_invocation_count();
    if ((iic + cic) >= (uint64_t)MethodHistogramCutoff) m->print_invocation_count(tty);
    int_total  += iic;
    comp_total += cic;
    if (m->is_final())        final_total  += iic + cic;
    if (m->is_static())       static_total += iic + cic;
    if (m->is_synchronized()) synch_total  += iic + cic;
    if (m->is_native())       native_total += iic + cic;
    if (m->is_accessor())     access_total += iic + cic;
  }
  tty->cr();
  total = int_total + comp_total;
  special_total = final_total + static_total + synch_total + native_total + access_total;
  tty->print_cr("Invocations summary for %d methods:", collected_invoked_methods->length());
  double total_div = (double)total;
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (100%%) total", total);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- interpreted", int_total, 100.0 * (double)int_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- compiled", comp_total, 100.0 * (double)comp_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- special methods (interpreted and compiled)",
                special_total, 100.0 * (double)special_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- synchronized", synch_total, 100.0 * (double)synch_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- final", final_total, 100.0 * (double)final_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- static", static_total, 100.0 * (double)static_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- native", native_total, 100.0 * (double)native_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- accessor", access_total, 100.0 * (double)access_total / total_div);
  tty->cr();
  SharedRuntime::print_call_statistics(comp_total);
}

static void print_bytecode_count() {
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
    tty->print_cr("[BytecodeCounter::counter_value = %d]", BytecodeCounter::counter_value());
  }
}

#else

static void print_method_invocation_histogram() {}
static void print_bytecode_count() {}

#endif // PRODUCT


// General statistics printing (profiling ...)
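// Called from before_exit(). Each block below is guarded by its own
// diagnostic flag (CITime, PrintNMethodStatistics, PrintNMTStatistics, ...),
// so with default flag settings this typically prints nothing.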
void print_statistics() {
  if (CITime) {
    CompileBroker::print_times();
  }

#ifdef COMPILER1
  if ((PrintC1Statistics || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintC1Statistics);
    Runtime1::print_statistics();
    SharedRuntime::print_statistics();
  }
#endif /* COMPILER1 */

#ifdef COMPILER2
  if ((PrintOptoStatistics || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintOptoStatistics);
    Compile::print_statistics();
    Deoptimization::print_statistics();
#ifndef COMPILER1
    SharedRuntime::print_statistics();
#endif //COMPILER1
  }

  if (PrintLockStatistics) {
    OptoRuntime::print_named_counters();
  }
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    IndexSet::print_statistics();
  }
#endif // ASSERT
#else // COMPILER2
#if INCLUDE_JVMCI
#ifndef COMPILER1
  if ((TraceDeoptimization || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && TraceDeoptimization);
    Deoptimization::print_statistics();
    SharedRuntime::print_statistics();
  }
#endif // COMPILER1
#endif // INCLUDE_JVMCI
#endif // COMPILER2

  if (PrintNMethodStatistics) {
    nmethod::print_statistics();
  }
  if (CountCompiledCalls) {
    print_method_invocation_histogram();
  }

  print_method_profiling_data();

  if (TimeOopMap) {
    GenerateOopMap::print_time();
  }
  if (PrintSymbolTableSizeHistogram) {
    SymbolTable::print_histogram();
  }
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
    BytecodeCounter::print();
  }
  if (PrintBytecodePairHistogram) {
    BytecodePairHistogram::print();
  }

  if (PrintCodeCache) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::print();
  }

  // CodeHeap State Analytics.
  if (PrintCodeHeapAnalytics) {
    CompileBroker::print_heapinfo(nullptr, "all", 4096); // details
  }

#ifndef PRODUCT
  if (PrintCodeCache2) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::print_internals();
  }
#endif

  if (VerifyOops && Verbose) {
    tty->print_cr("+VerifyOops count: %d", StubRoutines::verify_oop_count());
  }

  print_bytecode_count();

  if (PrintSystemDictionaryAtExit) {
    ResourceMark rm;
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    SystemDictionary::print();
  }

  if (PrintClassLoaderDataGraphAtExit) {
    ResourceMark rm;
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::print();
  }

  // Native memory tracking data
  if (PrintNMTStatistics) {
    MemTracker::final_report(tty);
  }

  if (PrintMetaspaceStatisticsAtExit) {
    MetaspaceUtils::print_basic_report(tty, 0);
  }

  if (CompilerOracle::should_print_final_memstat_report()) {
    CompilationMemoryStatistic::print_all_by_size(tty, false, 0);
  }

  ThreadsSMRSupport::log_statistics();

  if (log_is_enabled(Info, perf, class, link)) {
    LogStreamHandle(Info, perf, class, link) log;
    log.print_cr("At VM exit:");
    ClassLoader::print_counters(&log);
  }
}

// Note: before_exit() can be executed only once. If more than one thread
// is trying to shut down the VM at the same time, only one thread can run
// before_exit() and all other threads must wait.
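// The single-run guarantee is a small state machine protected by
// BeforeExit_lock:
//
//   BEFORE_EXIT_NOT_RUN --(first caller)--> BEFORE_EXIT_RUNNING --> BEFORE_EXIT_DONE
//
// Later callers either wait on BeforeExit_lock until the state reaches
// BEFORE_EXIT_DONE (they are woken by notify_all() below), or return
// immediately if the state is already DONE.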
void before_exit(JavaThread* thread, bool halt) {
  #define BEFORE_EXIT_NOT_RUN 0
  #define BEFORE_EXIT_RUNNING 1
  #define BEFORE_EXIT_DONE    2
  static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN;

  Events::log(thread, "Before exit entered");

  // Note: don't use a Mutex to guard the entire before_exit(), as
  // JVMTI post_thread_end_event and post_vm_death_event will run native code.
  // A CAS or OSMutex would work just fine but then we need to manipulate
  // thread state for Safepoint. Here we use Monitor wait() and notify_all()
  // for synchronization.
  { MonitorLocker ml(BeforeExit_lock);
    switch (_before_exit_status) {
    case BEFORE_EXIT_NOT_RUN:
      _before_exit_status = BEFORE_EXIT_RUNNING;
      break;
    case BEFORE_EXIT_RUNNING:
      while (_before_exit_status == BEFORE_EXIT_RUNNING) {
        ml.wait();
      }
      assert(_before_exit_status == BEFORE_EXIT_DONE, "invalid state");
      return;
    case BEFORE_EXIT_DONE:
      // need block to avoid SS compiler bug
      {
        return;
      }
    }
  }

  // At this point only one thread is executing this logic. Any other threads
  // attempting to invoke before_exit() will wait above and return early once
  // this thread finishes before_exit().

  // Do not add any additional shutdown logic between the above mutex logic and
  // leak sanitizer logic below. Any additional shutdown code which performs some
  // cleanup should be added after the leak sanitizer logic below.

#ifdef LEAK_SANITIZER
  // If we are built with LSan, we need to perform leak checking. If we are
  // terminating normally, not halting and no VM error, we perform a normal
  // leak check which terminates if leaks are found. If we are not terminating
  // normally, halting or VM error, we perform a recoverable leak check which
  // prints leaks but will not terminate.
  if (!halt && !VMError::is_error_reported()) {
    LSAN_DO_LEAK_CHECK();
  } else {
    // Ignore the return value.
    static_cast<void>(LSAN_DO_RECOVERABLE_LEAK_CHECK());
  }
#endif

#if INCLUDE_CDS
  // Dynamic CDS dumping must happen whilst we can still reliably
  // run Java code.
  DynamicArchive::dump_at_exit(thread, ArchiveClassesAtExit);
  assert(!thread->has_pending_exception(), "must be");
#endif


  // Actual shutdown logic begins here.

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCI::shutdown(thread);
  }
#endif

#if INCLUDE_CDS
  ClassListWriter::write_resolved_constants();
#endif

  // Hang forever on exit if we're reporting an error.
  if (ShowMessageBoxOnError && VMError::is_error_reported()) {
    os::infinite_sleep();
  }

  EventThreadEnd event;
  if (event.should_commit()) {
    event.set_thread(JFR_JVM_THREAD_ID(thread));
    event.commit();
  }

  JFR_ONLY(Jfr::on_vm_shutdown(false, halt);)

  // Stop the WatcherThread. We do this before disenrolling various
  // PeriodicTasks to reduce the likelihood of races.
  WatcherThread::stop();

  // shut down the StatSampler task
  StatSampler::disengage();
  StatSampler::destroy();

  NativeHeapTrimmer::cleanup();

  // Stop concurrent GC threads
  Universe::heap()->stop();

  // Print GC/heap related information.
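  // The summary below goes to unified logging rather than tty: running with
  // e.g. -Xlog:gc+heap+exit=info prints the heap usage summary, and
  // -Xlog:gc+heap+exit=trace additionally dumps the class loader data graph.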
  Log(gc, heap, exit) log;
  if (log.is_info()) {
    ResourceMark rm;
    LogStream ls_info(log.info());
    Universe::print_on(&ls_info);
    if (log.is_trace()) {
      LogStream ls_trace(log.trace());
      MutexLocker mcld(ClassLoaderDataGraph_lock);
      ClassLoaderDataGraph::print_on(&ls_trace);
    }
  }

  if (PrintBytecodeHistogram) {
    BytecodeHistogram::print();
  }

#ifdef LINUX
  if (DumpPerfMapAtExit) {
    CodeCache::write_perf_map(nullptr, tty);
  }
  if (PrintMemoryMapAtExit) {
    MemMapPrinter::print_all_mappings(tty);
  }
#endif

  if (JvmtiExport::should_post_thread_life()) {
    JvmtiExport::post_thread_end(thread);
  }

  // Always call even when there are no JVMTI environments yet, since environments
  // may be attached late and JVMTI must track phases of VM execution
  JvmtiExport::post_vm_death();
  JvmtiAgentList::unload_agents();

  // Terminate the signal thread
  // Note: we don't wait until it actually dies.
  os::terminate_signal_thread();

  print_statistics();
  Universe::heap()->print_tracing_info();

  { MutexLocker ml(BeforeExit_lock);
    _before_exit_status = BEFORE_EXIT_DONE;
    BeforeExit_lock->notify_all();
  }

  if (VerifyStringTableAtExit) {
    size_t fail_cnt = StringTable::verify_and_compare_entries();
    if (fail_cnt != 0) {
      tty->print_cr("ERROR: fail_cnt=%zu", fail_cnt);
      guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
    }
  }

  #undef BEFORE_EXIT_NOT_RUN
  #undef BEFORE_EXIT_RUNNING
  #undef BEFORE_EXIT_DONE
}

void vm_exit(int code) {
  Thread* thread =
      ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : nullptr;
  if (thread == nullptr) {
    // very early initialization failure -- just exit
    vm_direct_exit(code);
  }

  // We'd like to add an entry to the XML log to show that the VM is
  // terminating, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  if (VMThread::vm_thread() != nullptr) {
    if (thread->is_Java_thread()) {
      // We must be "in_vm" for the code below to work correctly.
      // Historically there must have been some exit path for which
      // that was not the case and so we set it explicitly - even
      // though we no longer know what that path may be.
      JavaThread::cast(thread)->set_thread_state(_thread_in_vm);
    }

    // Fire off a VM_Exit operation to bring VM to a safepoint and exit
    VM_Exit op(code);

    // 4945125 The vm thread comes to a safepoint during exit.
    // GC vm_operations can get caught at the safepoint, and the
    // heap is unparseable if they are caught. Grab the Heap_lock
    // to prevent this. The GC vm_operations will not be able to
    // queue until after we release it, but we never do that as we
    // are terminating the VM process.
    MutexLocker ml(Heap_lock);

    VMThread::execute(&op);
    // Should never reach here, but just in case something is wrong with the
    // VM thread, fall through to a direct exit.
    vm_direct_exit(code);
  } else {
    // VM thread is gone, just exit
    vm_direct_exit(code);
  }
  ShouldNotReachHere();
}

void notify_vm_shutdown() {
  // For now, just a dtrace probe.
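  // HOTSPOT_VM_SHUTDOWN() is a DTrace probe macro pulled in via
  // utilities/dtrace.hpp; on builds without DTrace support it expands to a
  // no-op, so this call is free in that configuration.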
  HOTSPOT_VM_SHUTDOWN();
}

void vm_direct_exit(int code) {
  notify_vm_shutdown();
  os::wait_for_keypress_at_exit();
  os::exit(code);
}

void vm_direct_exit(int code, const char* message) {
  if (message != nullptr) {
    tty->print_cr("%s", message);
  }
  vm_direct_exit(code);
}

static void vm_perform_shutdown_actions() {
  if (is_init_completed()) {
    Thread* thread = Thread::current_or_null();
    if (thread != nullptr && thread->is_Java_thread()) {
      // We are leaving the VM, set state to native (in case any OS exit
      // handlers call back to the VM)
      JavaThread* jt = JavaThread::cast(thread);
      // Must always be walkable or have no last_Java_frame when in
      // thread_in_native
      jt->frame_anchor()->make_walkable();
      jt->set_thread_state(_thread_in_native);
    }
  }
  notify_vm_shutdown();
}

void vm_shutdown()
{
  vm_perform_shutdown_actions();
  os::wait_for_keypress_at_exit();
  os::shutdown();
}

void vm_abort(bool dump_core) {
  vm_perform_shutdown_actions();
  os::wait_for_keypress_at_exit();

  // Flush stdout and stderr before abort.
  fflush(stdout);
  fflush(stderr);

  os::abort(dump_core);
  ShouldNotReachHere();
}

static void vm_notify_during_cds_dumping(const char* error, const char* message) {
  if (error != nullptr) {
    tty->print_cr("Error occurred during CDS dumping");
    tty->print("%s", error);
    if (message != nullptr) {
      tty->print_cr(": %s", message);
    }
    else {
      tty->cr();
    }
  }
}

void vm_exit_during_cds_dumping(const char* error, const char* message) {
  vm_notify_during_cds_dumping(error, message);

  // Failure during CDS dumping, we don't want to dump core
  vm_abort(false);
}

static void vm_notify_during_shutdown(const char* error, const char* message) {
  if (error != nullptr) {
    tty->print_cr("Error occurred during initialization of VM");
    tty->print("%s", error);
    if (message != nullptr) {
      tty->print_cr(": %s", message);
    }
    else {
      tty->cr();
    }
  }
  if (ShowMessageBoxOnError && WizardMode) {
    fatal("Error occurred during initialization of VM");
  }
}

void vm_exit_during_initialization() {
  vm_notify_during_shutdown(nullptr, nullptr);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_exit_during_initialization(Handle exception) {
  tty->print_cr("Error occurred during initialization of VM");
  // If there is a pending exception on this thread, it must be cleared here
  // first; any future use of EXCEPTION_MARK requires that no pending
  // exception exists.
  JavaThread* THREAD = JavaThread::current(); // can't be null
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  java_lang_Throwable::print_stack_trace(exception, tty);
  tty->cr();
  vm_notify_during_shutdown(nullptr, nullptr);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_exit_during_initialization(Symbol* ex, const char* message) {
  ResourceMark rm;
  vm_notify_during_shutdown(ex->as_C_string(), message);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_exit_during_initialization(const char* error, const char* message) {
  vm_notify_during_shutdown(error, message);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

void vm_shutdown_during_initialization(const char* error, const char* message) {
  vm_notify_during_shutdown(error, message);
  vm_shutdown();
}

JDK_Version JDK_Version::_current;
const char* JDK_Version::_java_version;
const char* JDK_Version::_runtime_name;
const char* JDK_Version::_runtime_version;
const char* JDK_Version::_runtime_vendor_version;
const char* JDK_Version::_runtime_vendor_vm_bug_url;

void JDK_Version::initialize() {
  assert(!_current.is_valid(), "Don't initialize twice");

  int major    = VM_Version::vm_major_version();
  int minor    = VM_Version::vm_minor_version();
  int security = VM_Version::vm_security_version();
  int build    = VM_Version::vm_build_number();
  int patch    = VM_Version::vm_patch_version();
  _current = JDK_Version(major, minor, security, patch, build);
}

void JDK_Version_init() {
  JDK_Version::initialize();
}

static int64_t encode_jdk_version(const JDK_Version& v) {
  return
    ((int64_t)v.major_version()    << (BitsPerByte * 4)) |
    ((int64_t)v.minor_version()    << (BitsPerByte * 3)) |
    ((int64_t)v.security_version() << (BitsPerByte * 2)) |
    ((int64_t)v.patch_version()    << (BitsPerByte * 1)) |
    ((int64_t)v.build_number()     << (BitsPerByte * 0));
}

int JDK_Version::compare(const JDK_Version& other) const {
  assert(is_valid() && other.is_valid(), "Invalid version (uninitialized?)");
  uint64_t e = encode_jdk_version(*this);
  uint64_t o = encode_jdk_version(other);
  return (e > o) ? 1 : ((e == o) ? 0 : -1);
}

/* See JEP 223 */
void JDK_Version::to_string(char* buffer, size_t buflen) const {
  assert(buffer && buflen > 0, "call with useful buffer");
  size_t index = 0;

  if (!is_valid()) {
    jio_snprintf(buffer, buflen, "%s", "(uninitialized)");
  } else {
    int rc = jio_snprintf(
        &buffer[index], buflen - index, "%d.%d", _major, _minor);
    if (rc == -1) return;
    index += rc;
    if (_patch > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, ".%d.%d", _security, _patch);
      if (rc == -1) return;
      index += rc;
    } else if (_security > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _security);
      if (rc == -1) return;
      index += rc;
    }
    if (_build > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, "+%d", _build);
      if (rc == -1) return;
      index += rc;
    }
  }
}
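// Worked example (illustrative only): for major=21, minor=0, security=2,
// patch=0, build=13, to_string() produces "21.0.2+13": "21.0" from
// major.minor, ".2" because _patch == 0 and _security > 0, and "+13" from
// the build number.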