/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/vmThreadCpuTimeScope.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "logging/logConfiguration.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/atomic.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"


//------------------------------------------------------------------------------------------------------------------
// Timeout machinery

void VMOperationTimeoutTask::task() {
  assert(AbortVMOnVMOperationTimeout, "only if enabled");
  if (is_armed()) {
    jlong delay = nanos_to_millis(os::javaTimeNanos() - _arm_time);
    if (delay > AbortVMOnVMOperationTimeoutDelay) {
      fatal("%s VM operation took too long: " JLONG_FORMAT " ms elapsed since VM-op start (timeout: %zd ms)",
            _vm_op_name, delay, AbortVMOnVMOperationTimeoutDelay);
    }
  }
}

bool VMOperationTimeoutTask::is_armed() {
  return Atomic::load_acquire(&_armed) != 0;
}

void VMOperationTimeoutTask::arm(const char* vm_op_name) {
  _vm_op_name = vm_op_name;
  _arm_time = os::javaTimeNanos();
  Atomic::release_store_fence(&_armed, 1);
}
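
// Note: arm() publishes _vm_op_name and _arm_time before the releasing store to
// _armed, and is_armed() reads _armed with an acquiring load, so a timeout task
// that observes the armed state also observes the operation name and arm time
// set above.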

void VMOperationTimeoutTask::disarm() {
  Atomic::release_store_fence(&_armed, 0);

  // The two stores to `_armed` are counted in the VM-op time, but they should be
  // insignificant compared to the actual VM-op duration.
  jlong vm_op_duration = nanos_to_millis(os::javaTimeNanos() - _arm_time);

  // Repeat the timeout-check logic on the VM thread, because
  // VMOperationTimeoutTask might miss the arm-disarm window depending on
  // the scheduling.
  if (vm_op_duration > AbortVMOnVMOperationTimeoutDelay) {
    fatal("%s VM operation took too long: completed in " JLONG_FORMAT " ms (timeout: %zd ms)",
          _vm_op_name, vm_op_duration, AbortVMOnVMOperationTimeoutDelay);
  }
  _vm_op_name = nullptr;
}

//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff

static VM_SafepointALot safepointALot_op;
static VM_ForceSafepoint no_op;

bool VMThread::_should_terminate = false;
bool VMThread::_terminated = false;
Monitor* VMThread::_terminate_lock = nullptr;
VMThread* VMThread::_vm_thread = nullptr;
VM_Operation* VMThread::_cur_vm_operation = nullptr;
VM_Operation* VMThread::_next_vm_operation = &no_op; // Prevent any thread from setting an operation until VM thread is ready.
PerfCounter* VMThread::_perf_accumulated_vm_operation_time = nullptr;
VMOperationTimeoutTask* VMThread::_timeout_task = nullptr;


void VMThread::create() {
  assert(vm_thread() == nullptr, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  if (AbortVMOnVMOperationTimeout) {
    // Make sure we call the timeout task frequently enough, but not too frequently.
    // Try to make the interval 10% of the timeout delay, so that we miss the timeout
    // by those 10% at max. The periodic task also expects it to fit min/max intervals.
    size_t interval = (size_t)AbortVMOnVMOperationTimeoutDelay / 10;
    interval = interval / PeriodicTask::interval_gran * PeriodicTask::interval_gran;
    interval = MAX2<size_t>(interval, PeriodicTask::min_interval);
    interval = MIN2<size_t>(interval, PeriodicTask::max_interval);
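
    // For example, a 1000 ms timeout delay yields roughly a 100 ms task interval
    // (after rounding to the task granularity and clamping), so a stuck VM
    // operation is detected at most ~10% past the configured delay.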

    _timeout_task = new VMOperationTimeoutTask(interval);
    _timeout_task->enroll();
  } else {
    assert(_timeout_task == nullptr, "sanity");
  }

  _terminate_lock = new Monitor(Mutex::nosafepoint, "VMThreadTerminate_lock");

  if (UsePerfData) {
    // jvmstat performance counters
    JavaThread* THREAD = JavaThread::current(); // For exception macros.
    _perf_accumulated_vm_operation_time =
        PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                        PerfData::U_Ticks, CHECK);
    CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::vm);
  }
}

VMThread::VMThread() : NamedThread(), _is_running(false) {
  set_name("VM Thread");
}

void VMThread::destroy() {
  _vm_thread = nullptr; // VM thread is gone
}

static VM_Halt halt_op;

void VMThread::run() {
  assert(this == vm_thread(), "check");

  // The Notify_lock waiter checks is_running() and re-waits in case of a
  // spurious wakeup; it should observe the last value set prior to the notify.
  Atomic::store(&_is_running, true);

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that I cannot call os::set_priority because it expects Java
  // priorities and I am *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority( this, prio );

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565 This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != nullptr) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887 let VM thread exit at Safepoint
  _cur_vm_operation = &halt_op;
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // The ObjectMonitor subsystem uses perf counters so do this before
  // we signal that the VM thread is gone. We don't want to run afoul
  // of perfMemory_exit() in exit_globals().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // Signal other threads that the VM process is gone.
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // the VM thread to enter any lock at a Safepoint as long as its _owner is null.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MonitorLocker ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    ml.notify();
  }

  // We are now racing with the VM termination being carried out in
  // another thread, so we don't "delete this". Numerous threads don't
  // get deleted when the VM terminates.

}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until the operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  assert(JavaThread::current()->is_terminated(), "Should be terminated");
  {
    MonitorLocker mu(VMOperation_lock);
    _should_terminate = true;
    mu.notify_all();
  }

  // Note: the VM thread leaves at a Safepoint. We are not stopped by the Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by the Safepoint should not be used after this point,
  // otherwise we will hang, since there is no one who can end the safepoint.

  // Wait until the VM thread is terminated.
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at a Safepoint. It's safer not to share a lock with other threads.
  {
    MonitorLocker ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while (!VMThread::is_terminated()) {
      ml.wait();
    }
  }
}

static void post_vm_operation_event(EventExecuteVMOperation* event, VM_Operation* op) {
  assert(event != nullptr, "invariant");
  assert(op != nullptr, "invariant");
  const bool evaluate_at_safepoint = op->evaluate_at_safepoint();
  event->set_operation(op->type());
  event->set_safepoint(evaluate_at_safepoint);
  event->set_blocking(true);
  event->set_caller(JFR_THREAD_ID(op->calling_thread()));
  event->set_safepointId(evaluate_at_safepoint ? SafepointSynchronize::safepoint_id() : 0);
  event->commit();
}

void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
    HOTSPOT_VMOPS_BEGIN(
                        (char*) op->name(), strlen(op->name()),
                        op->evaluate_at_safepoint() ? 0 : 1);

    EventExecuteVMOperation event;
    VMThreadCPUTimeScope CPUTimeScope(this, op->is_gc_operation());
    op->evaluate();
    if (event.should_commit()) {
      post_vm_operation_event(&event, op);
    }

    HOTSPOT_VMOPS_END(
                      (char*) op->name(), strlen(op->name()),
                      op->evaluate_at_safepoint() ? 0 : 1);
  }
}

class ALotOfHandshakeClosure : public HandshakeClosure {
 public:
  ALotOfHandshakeClosure() : HandshakeClosure("ALotOfHandshakeClosure") {}
  void do_thread(Thread* thread) {
#ifdef ASSERT
    JavaThread::cast(thread)->verify_states_for_handshake();
#endif
  }
};

bool VMThread::handshake_or_safepoint_alot() {
  assert(_cur_vm_operation == nullptr, "should not have an op yet");
  assert(_next_vm_operation == nullptr, "should not have an op yet");
  if (!HandshakeALot && !SafepointALot) {
    return false;
  }
  static jlong last_alot_ms = 0;
  jlong now_ms = nanos_to_millis(os::javaTimeNanos());
  // If HandshakeALot or SafepointALot are set, but GuaranteedSafepointInterval is explicitly
  // set to 0 on the command line, we emit the operation if it's been more than a second
  // since the last one.
  jlong interval = GuaranteedSafepointInterval != 0 ? GuaranteedSafepointInterval : 1000;
  jlong deadline_ms = interval + last_alot_ms;
  if (now_ms > deadline_ms) {
    last_alot_ms = now_ms;
    return true;
  }
  return false;
}

bool VMThread::set_next_operation(VM_Operation *op) {
  if (_next_vm_operation != nullptr) {
    return false;
  }
  log_debug(vmthread)("Adding VM operation: %s", op->name());

  _next_vm_operation = op;

  HOTSPOT_VMOPS_REQUEST(
                        (char *) op->name(), strlen(op->name()),
                        op->evaluate_at_safepoint() ? 0 : 1);
  return true;
}
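
// Block the calling thread until `op` has been installed as the next VM
// operation and subsequently executed by the VM thread. Java threads wait with
// safepoint checks so they can take part in safepoints and handshakes while
// blocked here; other threads wait without safepoint checks.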
void VMThread::wait_until_executed(VM_Operation* op) {
  MonitorLocker ml(VMOperation_lock,
                   Thread::current()->is_Java_thread() ?
                   Mutex::_safepoint_check_flag :
                   Mutex::_no_safepoint_check_flag);
  {
    TraceTime timer("Installing VM operation", TRACETIME_LOG(Trace, vmthread));
    while (true) {
      if (VMThread::vm_thread()->set_next_operation(op)) {
        ml.notify_all();
        break;
      }
      // Wait to install this operation as the next operation in the VM Thread
      log_trace(vmthread)("A VM operation already set, waiting");
      ml.wait();
    }
  }
  {
    // Wait until the operation has been processed
    TraceTime timer("Waiting for VM operation to be completed", TRACETIME_LOG(Trace, vmthread));
    // _next_vm_operation is cleared while holding VMOperation_lock after it has
    // been executed. We wait here until _next_vm_operation is no longer our op.
    while (_next_vm_operation == op) {
      // The VM thread can process it once we unlock the mutex on wait.
      ml.wait();
    }
  }
}

static void self_destruct_if_needed() {
  // Support for self destruction
  if ((SelfDestructTimer != 0.0) && !VMError::is_error_reported() &&
      (os::elapsedTime() > SelfDestructTimer * 60.0)) {
    tty->print_cr("VM self-destructed");
    os::exit(-1);
  }
}

void VMThread::inner_execute(VM_Operation* op) {
  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");

  VM_Operation* prev_vm_operation = nullptr;
  if (_cur_vm_operation != nullptr) {
    // Check that the VM operation allows nested VM operation.
    // This is normally not the case, e.g., the compiler
    // does not allow nested scavenges or compiles.
    if (!_cur_vm_operation->allow_nested_vm_operations()) {
      fatal("Unexpected nested VM operation %s requested by operation %s",
            op->name(), _cur_vm_operation->name());
    }
    op->set_calling_thread(_cur_vm_operation->calling_thread());
    prev_vm_operation = _cur_vm_operation;
  }

  _cur_vm_operation = op;

  HandleMark hm(VMThread::vm_thread());

  const char* const cause = op->cause();
  stringStream ss;
  ss.print("Executing%s%s VM operation: %s",
           prev_vm_operation != nullptr ? " nested" : "",
           op->evaluate_at_safepoint() ? " safepoint" : " non-safepoint",
           op->name());
  if (cause != nullptr) {
    ss.print(" (%s)", cause);
  }

  EventMarkVMOperation em("%s", ss.freeze());
  log_debug(vmthread)("%s", ss.freeze());

  bool end_safepoint = false;
  bool has_timeout_task = (_timeout_task != nullptr);
  if (_cur_vm_operation->evaluate_at_safepoint() &&
      !SafepointSynchronize::is_at_safepoint()) {
    SafepointSynchronize::begin();
    if (has_timeout_task) {
      _timeout_task->arm(_cur_vm_operation->name());
    }
    end_safepoint = true;
  }

  evaluate_operation(_cur_vm_operation);

  if (end_safepoint) {
    if (has_timeout_task) {
      _timeout_task->disarm();
    }
    SafepointSynchronize::end();
  }

  _cur_vm_operation = prev_vm_operation;
}

void VMThread::wait_for_operation() {
  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
  MonitorLocker ml_op_lock(VMOperation_lock, Mutex::_no_safepoint_check_flag);

  // Clear the previous operation.
  // On the first call this clears a dummy place-holder.
  _next_vm_operation = nullptr;
  // Notify that the operation is done and that the next operation can be installed.
  ml_op_lock.notify_all();

  while (!should_terminate()) {
    self_destruct_if_needed();
    if (_next_vm_operation != nullptr) {
      return;
    }
    if (handshake_or_safepoint_alot()) {
      if (HandshakeALot) {
        MutexUnlocker mul(VMOperation_lock);
        ALotOfHandshakeClosure aohc;
        Handshake::execute(&aohc);
      }
      // While we were unlocked above, someone might have set up a new op.
      if (_next_vm_operation != nullptr) {
        return;
      }
      if (SafepointALot) {
        _next_vm_operation = &safepointALot_op;
        return;
      }
    }
    assert(_next_vm_operation == nullptr, "Must be");
    assert(_cur_vm_operation == nullptr, "Must be");

    // We didn't find anything to execute; notify any waiter so they can install an op.
    ml_op_lock.notify_all();
    ml_op_lock.wait(GuaranteedSafepointInterval);
  }
}
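
// The VM thread's main dispatch loop: repeatedly wait for the next VM operation
// (or a HandshakeALot/SafepointALot filler op) and execute it, until termination
// has been requested.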
void VMThread::loop() {
  assert(_cur_vm_operation == nullptr, "no current one should be executing");

  SafepointSynchronize::init(_vm_thread);

  // Need to set a calling thread for ops that are not passed in the normal way.
  no_op.set_calling_thread(_vm_thread);
  safepointALot_op.set_calling_thread(_vm_thread);

  while (true) {
    if (should_terminate()) break;
    wait_for_operation();
    if (should_terminate()) break;
    assert(_next_vm_operation != nullptr, "Must have one");
    inner_execute(_next_vm_operation);
  }
}

// A SkipGCALot object is used to elide the usual effect of gc-a-lot
// over a section of execution by a thread. Currently, it's used only to
// prevent re-entrant calls to GC.
class SkipGCALot : public StackObj {
 private:
  bool _saved;
  Thread* _t;

 public:
#ifdef ASSERT
  SkipGCALot(Thread* t) : _t(t) {
    _saved = _t->skip_gcalot();
    _t->set_skip_gcalot(true);
  }

  ~SkipGCALot() {
    assert(_t->skip_gcalot(), "Save-restore protocol invariant");
    _t->set_skip_gcalot(_saved);
  }
#else
  SkipGCALot(Thread* t) { }
  ~SkipGCALot() { }
#endif
};

void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();

  if (t->is_VM_thread()) {
    op->set_calling_thread(t);
    ((VMThread*)t)->inner_execute(op);
    return;
  }

  // The current thread must not belong to the SuspendibleThreadSet, because an
  // on-the-fly safepoint can be waiting for the current thread, and the
  // current thread will be blocked in wait_until_executed, resulting in
  // deadlock.
  assert(!t->is_suspendible_thread(), "precondition");
  assert(!t->is_indirectly_suspendible_thread(), "precondition");

  // Avoid re-entrant attempts to gc-a-lot
  SkipGCALot sgcalot(t);

  // JavaThread or WatcherThread
  if (t->is_Java_thread()) {
    JavaThread::cast(t)->check_for_valid_safepoint_state();
  }

  // New request from a Java thread; evaluate the prologue.
  if (!op->doit_prologue()) {
    return; // op was cancelled
  }

  op->set_calling_thread(t);

  wait_until_executed(op);

  op->doit_epilogue();
}

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop, nullptr);
}
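
// Illustrative sketch (not part of the logic above): how a client thread
// typically requests a VM operation. VM_ForceSafepoint stands in for any
// VM_Operation subclass.
//
//   VM_ForceSafepoint op;
//   VMThread::execute(&op);   // runs op->doit_prologue() on this thread, hands
//                             // the op to the VM thread, blocks in
//                             // wait_until_executed() until it has been
//                             // evaluated (at a safepoint for this op), then
//                             // runs op->doit_epilogue() on this thread.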