1 /* 2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "cds/cds_globals.hpp" 27 #include "cds/dynamicArchive.hpp" 28 #include "classfile/classLoaderDataGraph.hpp" 29 #include "classfile/javaClasses.hpp" 30 #include "classfile/stringTable.hpp" 31 #include "classfile/symbolTable.hpp" 32 #include "classfile/systemDictionary.hpp" 33 #include "code/codeCache.hpp" 34 #include "compiler/compilationMemoryStatistic.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/compilerOracle.hpp" 37 #include "gc/shared/collectedHeap.hpp" 38 #include "gc/shared/stringdedup/stringDedup.hpp" 39 #include "interpreter/bytecodeHistogram.hpp" 40 #include "jfr/jfrEvents.hpp" 41 #include "jfr/support/jfrThreadId.hpp" 42 #include "jvm.h" 43 #include "logging/log.hpp" 44 #include "logging/logStream.hpp" 45 #include "memory/metaspaceUtils.hpp" 46 #include "memory/oopFactory.hpp" 47 #include "memory/resourceArea.hpp" 48 #include "memory/universe.hpp" 49 #include "nmt/memTracker.hpp" 50 #include "oops/constantPool.hpp" 51 #include "oops/generateOopMap.hpp" 52 #include "oops/instanceKlass.hpp" 53 #include "oops/instanceOop.hpp" 54 #include "oops/klassVtable.hpp" 55 #include "oops/method.hpp" 56 #include "oops/objArrayOop.hpp" 57 #include "oops/oop.inline.hpp" 58 #include "oops/symbol.hpp" 59 #include "prims/jvmtiAgentList.hpp" 60 #include "prims/jvmtiExport.hpp" 61 #include "runtime/continuation.hpp" 62 #include "runtime/deoptimization.hpp" 63 #include "runtime/flags/flagSetting.hpp" 64 #include "runtime/handles.inline.hpp" 65 #include "runtime/init.hpp" 66 #include "runtime/interfaceSupport.inline.hpp" 67 #include "runtime/java.hpp" 68 #include "runtime/javaThread.hpp" 69 #include "runtime/sharedRuntime.hpp" 70 #include "runtime/statSampler.hpp" 71 #include "runtime/stubRoutines.hpp" 72 #include "runtime/task.hpp" 73 #include "runtime/threads.hpp" 74 #include "runtime/timer.hpp" 75 #include "runtime/trimNativeHeap.hpp" 76 #include "runtime/vmOperations.hpp" 77 #include 
"runtime/vmThread.hpp"
#include "runtime/vm_version.hpp"
#include "sanitizers/leak.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "code/compiledIC.hpp"
#include "opto/compile.hpp"
#include "opto/indexSet.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

// Scratch list filled by collect_profiled_methods() via
// SystemDictionary::methods_do(); allocated in a ResourceMark scope in
// print_method_profiling_data(), so it must not outlive that call.
GrowableArray<Method*>* collected_profiled_methods;

// Comparator for GrowableArray::sort(): orders methods by DESCENDING total
// (interpreted + compiled) invocation count, so the hottest methods print first.
static int compare_methods(Method** a, Method** b) {
  // compiled_invocation_count() returns int64_t, forcing the entire expression
  // to be evaluated as int64_t. Overflow is not an issue.
  int64_t diff = (((*b)->invocation_count() + (*b)->compiled_invocation_count())
                  - ((*a)->invocation_count() + (*a)->compiled_invocation_count()));
  return (diff < 0) ? -1 : (diff > 0) ? 1 : 0;
}

// SystemDictionary::methods_do() callback: records methods that have profiling
// data (an MDO) and are selected either globally by -XX:+PrintMethodData or
// per-method by a CompilerOracle "print" directive.
static void collect_profiled_methods(Method* m) {
  Thread* thread = Thread::current();
  methodHandle mh(thread, m);
  if ((m->method_data() != nullptr) &&
      (PrintMethodData || CompilerOracle::should_print(mh))) {
    collected_profiled_methods->push(m);
  }
}

// Dumps per-method profiling data (MDO contents, invocation counts, bytecodes)
// for all selected methods, sorted by invocation count. Only runs when the
// interpreter (or C1) actually updates method data AND printing was requested.
static void print_method_profiling_data() {
  if ((ProfileInterpreter COMPILER1_PRESENT(|| C1UpdateMethodData)) &&
      (PrintMethodData || CompilerOracle::should_print_methods())) {
    ResourceMark rm;
    collected_profiled_methods = new GrowableArray<Method*>(1024);
    SystemDictionary::methods_do(collect_profiled_methods);
    collected_profiled_methods->sort(&compare_methods);

    int count = collected_profiled_methods->length();
    int total_size = 0;  // accumulated MDO size over all printed methods, in bytes
    if (count > 0) {
      for (int index = 0; index < count; index++) {
        Method* m = collected_profiled_methods->at(index);

        // Instead of taking tty lock, we collect all lines into a string stream
        // and then print them all at once.
        ResourceMark rm2;
        stringStream ss;

        ss.print_cr("------------------------------------------------------------------------");
        m->print_invocation_count(&ss);
        ss.print_cr(" mdo size: %d bytes", m->method_data()->size_in_bytes());
        ss.cr();
        // Dump data on parameters if any
        if (m->method_data() != nullptr && m->method_data()->parameters_type_data() != nullptr) {
          ss.fill_to(2);
          m->method_data()->parameters_type_data()->print_data_on(&ss);
        }
        m->print_codes_on(&ss);
        tty->print("%s", ss.as_string()); // print all at once
        total_size += m->method_data()->size_in_bytes();
      }
      tty->print_cr("------------------------------------------------------------------------");
      tty->print_cr("Total MDO size: %d bytes", total_size);
    }
  }
}


#ifndef PRODUCT

// Statistics printing (method invocation histogram)

// Scratch list filled by collect_invoked_methods(); same lifetime caveat as
// collected_profiled_methods above (ResourceArea allocation).
GrowableArray<Method*>* collected_invoked_methods;

// SystemDictionary::methods_do() callback: records every method that has been
// invoked at least once (interpreted or compiled).
static void collect_invoked_methods(Method* m) {
  if (m->invocation_count() + m->compiled_invocation_count() >= 1) {
    collected_invoked_methods->push(m);
  }
}


// Invocation count accumulators should be unsigned long to shift the
// overflow border. Longer-running workloads tend to create invocation
// counts which already overflow 32-bit counters for individual methods.
// Prints a histogram of per-method invocation counters to tty, followed by a
// summary broken down by method kind (final/static/synchronized/native/accessor).
// Methods below -XX:MethodHistogramCutoff are summed but not listed individually.
static void print_method_invocation_histogram() {
  ResourceMark rm;
  collected_invoked_methods = new GrowableArray<Method*>(1024);
  SystemDictionary::methods_do(collect_invoked_methods);
  collected_invoked_methods->sort(&compare_methods);
  //
  tty->cr();
  tty->print_cr("Histogram Over Method Invocation Counters (cutoff = " INTX_FORMAT "):", MethodHistogramCutoff);
  tty->cr();
  tty->print_cr("____Count_(I+C)____Method________________________Module_________________");
  uint64_t total        = 0,
           int_total    = 0,   // interpreted invocations
           comp_total   = 0,   // compiled invocations
           special_total= 0,   // sum over the method-kind buckets below
           static_total = 0,
           final_total  = 0,
           synch_total  = 0,
           native_total = 0,
           access_total = 0;
  for (int index = 0; index < collected_invoked_methods->length(); index++) {
    // Counter values returned from getter methods are signed int.
    // To shift the overflow border by a factor of two, we interpret
    // them here as unsigned long. A counter can't be negative anyway.
    Method* m = collected_invoked_methods->at(index);
    uint64_t iic = (uint64_t)m->invocation_count();
    uint64_t cic = (uint64_t)m->compiled_invocation_count();
    if ((iic + cic) >= (uint64_t)MethodHistogramCutoff) m->print_invocation_count(tty);
    int_total  += iic;
    comp_total += cic;
    // NOTE: a method may be counted in several buckets (e.g. static AND final),
    // so special_total can exceed 100% of the grand total.
    if (m->is_final())        final_total  += iic + cic;
    if (m->is_static())       static_total += iic + cic;
    if (m->is_synchronized()) synch_total  += iic + cic;
    if (m->is_native())       native_total += iic + cic;
    if (m->is_accessor())     access_total += iic + cic;
  }
  tty->cr();
  total = int_total + comp_total;
  special_total = final_total + static_total + synch_total + native_total + access_total;
  tty->print_cr("Invocations summary for %d methods:", collected_invoked_methods->length());
  double total_div = (double)total;
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (100%%) total", total);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- interpreted", int_total, 100.0 * (double)int_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- compiled", comp_total, 100.0 * (double)comp_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- special methods (interpreted and compiled)",
                special_total, 100.0 * (double)special_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- synchronized", synch_total, 100.0 * (double)synch_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- final", final_total, 100.0 * (double)final_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- static", static_total, 100.0 * (double)static_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- native", native_total, 100.0 * (double)native_total / total_div);
  tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- accessor", access_total, 100.0 * (double)access_total / total_div);
  tty->cr();
  SharedRuntime::print_call_statistics(comp_total);
}

// Prints the global bytecode execution counter when any of the counting /
// tracing interpreter flags is active.
static void print_bytecode_count() {
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
    tty->print_cr("[BytecodeCounter::counter_value = %d]", BytecodeCounter::counter_value());
  }
}

#else

// PRODUCT builds: the histogram/bytecode statistics are compiled out.
static void print_method_invocation_histogram() {}
static void print_bytecode_count() {}

#endif // PRODUCT


// General statistics printing (profiling ...)
// Prints all statistics that were requested via diagnostic flags (compiler
// times, C1/C2 statistics, code cache, NMT, metaspace, ...). Called from
// before_exit() during an orderly VM shutdown.
void print_statistics() {
  if (CITime) {
    CompileBroker::print_times();
  }

#ifdef COMPILER1
  if ((PrintC1Statistics || LogVMOutput || LogCompilation) && UseCompiler) {
    // Suppress tty output unless PrintC1Statistics was explicitly requested;
    // LogVMOutput/LogCompilation still capture it in the log file.
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintC1Statistics);
    Runtime1::print_statistics();
    SharedRuntime::print_statistics();
  }
#endif /* COMPILER1 */

#ifdef COMPILER2
  if ((PrintOptoStatistics || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintOptoStatistics);
    Compile::print_statistics();
    Deoptimization::print_statistics();
#ifndef COMPILER1
    // With both compilers present, the COMPILER1 branch above already did this.
    SharedRuntime::print_statistics();
#endif //COMPILER1
  }

  if (PrintLockStatistics || PrintPreciseRTMLockingStatistics) {
    OptoRuntime::print_named_counters();
  }
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    IndexSet::print_statistics();
  }
#endif // ASSERT
#else // COMPILER2
#if INCLUDE_JVMCI
#ifndef COMPILER1
  // JVMCI-only build: deoptimization/runtime statistics are still available.
  if ((TraceDeoptimization || LogVMOutput || LogCompilation) && UseCompiler) {
    FlagSetting fs(DisplayVMOutput, DisplayVMOutput && TraceDeoptimization);
    Deoptimization::print_statistics();
    SharedRuntime::print_statistics();
  }
#endif // COMPILER1
#endif // INCLUDE_JVMCI
#endif // COMPILER2

  if (PrintNMethodStatistics) {
    nmethod::print_statistics();
  }
  if (CountCompiledCalls) {
    print_method_invocation_histogram();
  }

  print_method_profiling_data();

  if (TimeOopMap) {
    GenerateOopMap::print_time();
  }
  if (PrintSymbolTableSizeHistogram) {
    SymbolTable::print_histogram();
  }
  if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
    BytecodeCounter::print();
  }
  if (PrintBytecodePairHistogram) {
    BytecodePairHistogram::print();
  }

  if (PrintCodeCache) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::print();
  }

  // CodeHeap State Analytics.
  if (PrintCodeHeapAnalytics) {
    CompileBroker::print_heapinfo(nullptr, "all", 4096); // details
  }

#ifndef PRODUCT
  if (PrintCodeCache2) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::print_internals();
  }
#endif

  if (VerifyOops && Verbose) {
    tty->print_cr("+VerifyOops count: %d", StubRoutines::verify_oop_count());
  }

  print_bytecode_count();

  if (PrintSystemDictionaryAtExit) {
    ResourceMark rm;
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    SystemDictionary::print();
  }

  if (PrintClassLoaderDataGraphAtExit) {
    ResourceMark rm;
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::print();
  }

  // Native memory tracking data
  if (PrintNMTStatistics) {
    MemTracker::final_report(tty);
  }

  if (PrintMetaspaceStatisticsAtExit) {
    MetaspaceUtils::print_basic_report(tty, 0);
  }

  if (CompilerOracle::should_print_final_memstat_report()) {
    CompilationMemoryStatistic::print_all_by_size(tty, false, 0);
  }

  ThreadsSMRSupport::log_statistics();
}

// Note: before_exit() can be executed only once, if more than one threads
// are trying to shutdown the VM at the same time, only one thread
// can run before_exit() and all other threads must wait.
//
// 'halt' is true when the VM is being terminated via a halt (it selects the
// recoverable LSan leak check below and is forwarded to Jfr::on_vm_shutdown).
void before_exit(JavaThread* thread, bool halt) {
  // Tri-state progress marker for the run-once protocol below,
  // guarded by BeforeExit_lock.
  #define BEFORE_EXIT_NOT_RUN 0
  #define BEFORE_EXIT_RUNNING 1
  #define BEFORE_EXIT_DONE 2
  static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN;

  Events::log(thread, "Before exit entered");

  // Note: don't use a Mutex to guard the entire before_exit(), as
  // JVMTI post_thread_end_event and post_vm_death_event will run native code.
  // A CAS or OSMutex would work just fine but then we need to manipulate
  // thread state for Safepoint. Here we use Monitor wait() and notify_all()
  // for synchronization.
  { MonitorLocker ml(BeforeExit_lock);
    switch (_before_exit_status) {
    case BEFORE_EXIT_NOT_RUN:
      // We are the first (and only) thread to run the shutdown work below.
      _before_exit_status = BEFORE_EXIT_RUNNING;
      break;
    case BEFORE_EXIT_RUNNING:
      // Another thread is already shutting down: wait for it to finish.
      while (_before_exit_status == BEFORE_EXIT_RUNNING) {
        ml.wait();
      }
      assert(_before_exit_status == BEFORE_EXIT_DONE, "invalid state");
      return;
    case BEFORE_EXIT_DONE:
      // need block to avoid SS compiler bug
      {
        return;
      }
    }
  }

  // At this point only one thread is executing this logic. Any other threads
  // attempting to invoke before_exit() will wait above and return early once
  // this thread finishes before_exit().

  // Do not add any additional shutdown logic between the above mutex logic and
  // leak sanitizer logic below. Any additional shutdown code which performs some
  // cleanup should be added after the leak sanitizer logic below.

#ifdef LEAK_SANITIZER
  // If we are built with LSan, we need to perform leak checking. If we are
  // terminating normally, not halting and no VM error, we perform a normal
  // leak check which terminates if leaks are found. If we are not terminating
  // normally, halting or VM error, we perform a recoverable leak check which
  // prints leaks but will not terminate.
  if (!halt && !VMError::is_error_reported()) {
    LSAN_DO_LEAK_CHECK();
  } else {
    // Ignore the return value.
    static_cast<void>(LSAN_DO_RECOVERABLE_LEAK_CHECK());
  }
#endif

#if INCLUDE_CDS
  // Dynamic CDS dumping must happen whilst we can still reliably
  // run Java code.
  DynamicArchive::dump_at_exit(thread, ArchiveClassesAtExit);
  assert(!thread->has_pending_exception(), "must be");
#endif


  // Actual shutdown logic begins here.

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCI::shutdown(thread);
  }
#endif

  // Hang forever on exit if we're reporting an error.
  if (ShowMessageBoxOnError && VMError::is_error_reported()) {
    os::infinite_sleep();
  }

  // Emit the JFR ThreadEnd event for the exiting thread.
  EventThreadEnd event;
  if (event.should_commit()) {
    event.set_thread(JFR_JVM_THREAD_ID(thread));
    event.commit();
  }

  JFR_ONLY(Jfr::on_vm_shutdown(false, halt);)

  // Stop the WatcherThread. We do this before disenrolling various
  // PeriodicTasks to reduce the likelihood of races.
  WatcherThread::stop();

  // shut down the StatSampler task
  StatSampler::disengage();
  StatSampler::destroy();

  NativeHeapTrimmer::cleanup();

  // Stop concurrent GC threads
  Universe::heap()->stop();

  // Print GC/heap related information.
  Log(gc, heap, exit) log;
  if (log.is_info()) {
    ResourceMark rm;
    LogStream ls_info(log.info());
    Universe::print_on(&ls_info);
    if (log.is_trace()) {
      LogStream ls_trace(log.trace());
      MutexLocker mcld(ClassLoaderDataGraph_lock);
      ClassLoaderDataGraph::print_on(&ls_trace);
    }
  }

  if (PrintBytecodeHistogram) {
    BytecodeHistogram::print();
  }

#ifdef LINUX
  if (DumpPerfMapAtExit) {
    CodeCache::write_perf_map();
  }
#endif

  if (JvmtiExport::should_post_thread_life()) {
    JvmtiExport::post_thread_end(thread);
  }

  // Always call even when there are not JVMTI environments yet, since environments
  // may be attached late and JVMTI must track phases of VM execution
  JvmtiExport::post_vm_death();
  JvmtiAgentList::unload_agents();

  // Terminate the signal thread
  // Note: we don't wait until it actually dies.
  os::terminate_signal_thread();

  print_statistics();
  Universe::heap()->print_tracing_info();

  // Mark shutdown work done and release any threads blocked in the
  // BEFORE_EXIT_RUNNING wait loop above.
  { MutexLocker ml(BeforeExit_lock);
    _before_exit_status = BEFORE_EXIT_DONE;
    BeforeExit_lock->notify_all();
  }

  if (VerifyStringTableAtExit) {
    size_t fail_cnt = StringTable::verify_and_compare_entries();
    if (fail_cnt != 0) {
      tty->print_cr("ERROR: fail_cnt=" SIZE_FORMAT, fail_cnt);
      guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
    }
  }

#undef BEFORE_EXIT_NOT_RUN
#undef BEFORE_EXIT_RUNNING
#undef BEFORE_EXIT_DONE
}

// Orderly VM termination: brings the VM to a safepoint via a VM_Exit operation
// (which runs exit handlers and terminates the process). Falls back to
// vm_direct_exit() when called too early or after the VMThread is gone.
// Never returns.
void vm_exit(int code) {
  Thread* thread =
      ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : nullptr;
  if (thread == nullptr) {
    // very early initialization failure -- just exit
    vm_direct_exit(code);
  }

  // We'd like to add an entry to the XML log to show that the VM is
  // terminating, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  if (VMThread::vm_thread() != nullptr) {
    if (thread->is_Java_thread()) {
      // We must be "in_vm" for the code below to work correctly.
      // Historically there must have been some exit path for which
      // that was not the case and so we set it explicitly - even
      // though we no longer know what that path may be.
      JavaThread::cast(thread)->set_thread_state(_thread_in_vm);
    }

    // Fire off a VM_Exit operation to bring VM to a safepoint and exit
    VM_Exit op(code);

    // 4945125 The vm thread comes to a safepoint during exit.
    // GC vm_operations can get caught at the safepoint, and the
    // heap is unparseable if they are caught. Grab the Heap_lock
    // to prevent this. The GC vm_operations will not be able to
    // queue until after we release it, but we never do that as we
    // are terminating the VM process.
    MutexLocker ml(Heap_lock);

    VMThread::execute(&op);
    // should never reach here; but in case something wrong with VM Thread.
    vm_direct_exit(code);
  } else {
    // VM thread is gone, just exit
    vm_direct_exit(code);
  }
  ShouldNotReachHere();
}

// Fires the dtrace VM-shutdown probe. Called on every exit/abort path.
void notify_vm_shutdown() {
  // For now, just a dtrace probe.
  HOTSPOT_VM_SHUTDOWN();
}

// Immediate process exit, bypassing the VM_Exit safepoint protocol.
// Does not return.
void vm_direct_exit(int code) {
  notify_vm_shutdown();
  os::wait_for_keypress_at_exit();
  os::exit(code);
}

// As vm_direct_exit(code), optionally printing 'message' to tty first.
void vm_direct_exit(int code, const char* message) {
  if (message != nullptr) {
    tty->print_cr("%s", message);
  }
  vm_direct_exit(code);
}

// Common pre-shutdown work shared by vm_shutdown() and vm_abort():
// transitions the calling JavaThread to native state and fires the probe.
static void vm_perform_shutdown_actions() {
  if (is_init_completed()) {
    Thread* thread = Thread::current_or_null();
    if (thread != nullptr && thread->is_Java_thread()) {
      // We are leaving the VM, set state to native (in case any OS exit
      // handlers call back to the VM)
      JavaThread* jt = JavaThread::cast(thread);
      // Must always be walkable or have no last_Java_frame when in
      // thread_in_native
      jt->frame_anchor()->make_walkable();
      jt->set_thread_state(_thread_in_native);
    }
  }
  notify_vm_shutdown();
}

// Shuts the process down via os::shutdown() (no exit code, no core dump).
void vm_shutdown()
{
  vm_perform_shutdown_actions();
  os::wait_for_keypress_at_exit();
  os::shutdown();
}

// Aborts the process, optionally dumping core. Does not return.
void vm_abort(bool dump_core) {
  vm_perform_shutdown_actions();
  os::wait_for_keypress_at_exit();

  // Flush stdout and stderr before abort.
  fflush(stdout);
  fflush(stderr);

  os::abort(dump_core);
  ShouldNotReachHere();
}

// Prints an error ('error' plus optional detail 'message') seen while dumping
// a CDS archive. No-op when 'error' is null.
static void vm_notify_during_cds_dumping(const char* error, const char* message) {
  if (error != nullptr) {
    tty->print_cr("Error occurred during CDS dumping");
    tty->print("%s", error);
    if (message != nullptr) {
      tty->print_cr(": %s", message);
    }
    else {
      tty->cr();
    }
  }
}

// Reports a CDS dumping failure and aborts the VM (without a core dump).
void vm_exit_during_cds_dumping(const char* error, const char* message) {
  vm_notify_during_cds_dumping(error, message);

  // Failure during CDS dumping, we don't want to dump core
  vm_abort(false);
}

// Prints an initialization error ('error' plus optional 'message'). With
// -XX:+ShowMessageBoxOnError in WizardMode this escalates to fatal() so a
// debugger can be attached.
static void vm_notify_during_shutdown(const char* error, const char* message) {
  if (error != nullptr) {
    tty->print_cr("Error occurred during initialization of VM");
    tty->print("%s", error);
    if (message != nullptr) {
      tty->print_cr(": %s", message);
    }
    else {
      tty->cr();
    }
  }
  if (ShowMessageBoxOnError && WizardMode) {
    fatal("Error occurred during initialization of VM");
  }
}

// Initialization failure with no diagnostic text; aborts without core dump.
void vm_exit_during_initialization() {
  vm_notify_during_shutdown(nullptr, nullptr);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

// Initialization failure carrying a Java exception: prints its stack trace,
// then aborts without core dump.
void vm_exit_during_initialization(Handle exception) {
  tty->print_cr("Error occurred during initialization of VM");
  // If there are exceptions on this thread it must be cleared
  // first and here. Any future calls to EXCEPTION_MARK requires
  // that no pending exceptions exist.
  JavaThread* THREAD = JavaThread::current(); // can't be null
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  java_lang_Throwable::print_stack_trace(exception, tty);
  tty->cr();
  vm_notify_during_shutdown(nullptr, nullptr);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

// Initialization failure identified by an exception-name Symbol plus detail
// message; aborts without core dump.
void vm_exit_during_initialization(Symbol* ex, const char* message) {
  ResourceMark rm;
  vm_notify_during_shutdown(ex->as_C_string(), message);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

// Initialization failure with plain C-string error/message; aborts without
// core dump.
void vm_exit_during_initialization(const char* error, const char* message) {
  vm_notify_during_shutdown(error, message);

  // Failure during initialization, we don't want to dump core
  vm_abort(false);
}

// Like the above, but uses os::shutdown() instead of abort().
void vm_shutdown_during_initialization(const char* error, const char* message) {
  vm_notify_during_shutdown(error, message);
  vm_shutdown();
}

// JDK_Version static storage; the string fields are populated elsewhere
// (outside this file) during VM startup.
JDK_Version JDK_Version::_current;
const char* JDK_Version::_java_version;
const char* JDK_Version::_runtime_name;
const char* JDK_Version::_runtime_version;
const char* JDK_Version::_runtime_vendor_version;
const char* JDK_Version::_runtime_vendor_vm_bug_url;

// Captures the running VM's version components from VM_Version into _current.
// Must only run once.
void JDK_Version::initialize() {
  assert(!_current.is_valid(), "Don't initialize twice");

  int major = VM_Version::vm_major_version();
  int minor = VM_Version::vm_minor_version();
  int security = VM_Version::vm_security_version();
  int build = VM_Version::vm_build_number();
  int patch = VM_Version::vm_patch_version();
  _current = JDK_Version(major, minor, security, patch, build);
}

// init.cpp-style entry point wrapping JDK_Version::initialize().
void JDK_Version_init() {
  JDK_Version::initialize();
}

// Packs the five version components into one int64_t, one byte per component,
// most-significant = major, so numeric comparison orders versions correctly.
// NOTE: assumes each component fits in a byte.
static int64_t encode_jdk_version(const JDK_Version& v) {
  return
    ((int64_t)v.major_version()    << (BitsPerByte * 4)) |
    ((int64_t)v.minor_version()    << (BitsPerByte * 3)) |
    ((int64_t)v.security_version() << (BitsPerByte * 2)) |
    ((int64_t)v.patch_version()    << (BitsPerByte * 1)) |
    ((int64_t)v.build_number()     << (BitsPerByte * 0));
}

// Three-way comparison of two (valid) versions: 1 / 0 / -1 for >, ==, <.
int JDK_Version::compare(const JDK_Version& other) const {
  assert(is_valid() && other.is_valid(), "Invalid version (uninitialized?)");
  uint64_t e = encode_jdk_version(*this);
  uint64_t o = encode_jdk_version(other);
  return (e > o) ? 1 : ((e == o) ? 0 : -1);
}

/* See JEP 223 */
// Formats the version as "MAJOR.MINOR[.SECURITY[.PATCH]][+BUILD]" into
// 'buffer' (at most 'buflen' bytes, always NUL-terminated by jio_snprintf).
// Returns silently on truncation (jio_snprintf reporting -1).
void JDK_Version::to_string(char* buffer, size_t buflen) const {
  assert(buffer && buflen > 0, "call with useful buffer");
  size_t index = 0;

  if (!is_valid()) {
    jio_snprintf(buffer, buflen, "%s", "(uninitialized)");
  } else {
    int rc = jio_snprintf(
        &buffer[index], buflen - index, "%d.%d", _major, _minor);
    if (rc == -1) return;
    index += rc;
    if (_patch > 0) {
      // Security is always printed when a patch level is present (JEP 223).
      rc = jio_snprintf(&buffer[index], buflen - index, ".%d.%d", _security, _patch);
      if (rc == -1) return;
      index += rc;
    } else if (_security > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _security);
      if (rc == -1) return;
      index += rc;
    }
    if (_build > 0) {
      rc = jio_snprintf(&buffer[index], buflen - index, "+%d", _build);
      if (rc == -1) return;
      index += rc;
    }
  }
}