1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "code/codeCache.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/isGCActiveMark.hpp"
33 #include "logging/log.hpp"
34 #include "logging/logConfiguration.hpp"
35 #include "logging/logStream.hpp"
36 #include "memory/heapInspection.hpp"
37 #include "memory/metaspace/metaspaceReporter.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/symbol.hpp"
41 #include "runtime/arguments.hpp"
42 #include "runtime/deoptimization.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/interfaceSupport.inline.hpp"
45 #include "runtime/javaThread.inline.hpp"
46 #include "runtime/jniHandles.hpp"
47 #include "runtime/objectMonitor.inline.hpp"
48 #include "runtime/stackFrameStream.inline.hpp"
49 #include "runtime/synchronizer.hpp"
50 #include "runtime/threads.hpp"
51 #include "runtime/threadSMR.inline.hpp"
52 #include "runtime/vmOperations.hpp"
53 #include "services/threadService.hpp"
54 #include "utilities/growableArray.hpp"
55 #include "utilities/ticks.hpp"
56
// Stringifies each VM operation enum name; used to populate _names below.
#define VM_OP_NAME_INITIALIZE(name) #name,

// Human-readable names for all VM operations, indexed by VMOp_Type.
const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };
61
// Records the thread that requested this VM operation (reported later by
// print_on_error()); this is not necessarily the thread that executes it.
void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}
65
66 void VM_Operation::evaluate() {
67 ResourceMark rm;
68 LogTarget(Debug, vmoperation) lt;
69 if (lt.is_enabled()) {
70 LogStream ls(lt);
71 ls.print("begin ");
72 print_on_error(&ls);
73 ls.cr();
74 }
75 doit();
76 if (lt.is_enabled()) {
77 LogStream ls(lt);
78 ls.print("end ");
79 print_on_error(&ls);
80 ls.cr();
81 }
82 }
83
84 // Called by fatal error handler.
85 void VM_Operation::print_on_error(outputStream* st) const {
86 st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
87 st->print("%s", name());
88
89 st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");
90
91 if (calling_thread()) {
92 st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
93 }
94 }
95
96 void VM_ClearICs::doit() {
97 if (_preserve_static_stubs) {
98 CodeCache::cleanup_inline_caches_whitebox();
99 } else {
100 CodeCache::clear_inline_caches();
101 }
102 }
103
// Walks class loader metadata and cleans up metaspaces via the
// ClassLoaderDataGraph.
void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}
107
// Triggers a rehash of the interned string table.
void VM_RehashStringTable::doit() {
  StringTable::rehash_table();
}
111
// Triggers a rehash of the symbol table.
void VM_RehashSymbolTable::doit() {
  SymbolTable::rehash_table();
}
115
// Captures the target thread, frame id and deopt reason; the actual
// deoptimization is performed later in doit().
VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id = id;
  _reason = reason;
}
121
122
// Deoptimizes the frame identified by _id in _thread, for the reason
// captured at construction time.
void VM_DeoptimizeFrame::doit() {
  // _reason is stored as a raw int; validate it is a real DeoptReason here.
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}
127
128
129 #ifndef PRODUCT
130
// Stress-testing operation (non-product): deoptimizes either all Java
// threads (DeoptimizeALot) or a pseudo-random selection of threads and
// frames (DeoptimizeRandom).
void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // deoptimize all java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;  // deoptimize every (tnum+1)-th eligible thread
    int fnum = os::random() & 0x3;  // within it, every (fnum+1)-th deoptimizable frame
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          for(StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}
165
166
// Non-product debugging aid: asks the calling JavaThread to make zombies.
void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}
170
171 #endif // !PRODUCT
172
// Runs in the requesting thread before the safepoint.
// Returns true to allow the operation to proceed.
bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}
180
// Prints all threads to _out, optionally with concurrent lock and
// extended info, and optionally JNI handle details.
void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}
187
// Runs in the requesting thread after the safepoint; undoes the
// Heap_lock acquisition done in doit_prologue().
void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}
194
// Prints a metaspace report to _out using the configured scale and flags.
void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}
198
199 VM_FindDeadlocks::~VM_FindDeadlocks() {
200 if (_deadlocks != nullptr) {
201 DeadlockCycle* cycle = _deadlocks;
202 while (cycle != nullptr) {
203 DeadlockCycle* d = cycle;
204 cycle = cycle->next();
205 delete d;
206 }
207 }
208 }
209
// Detects deadlock cycles at a safepoint and, when _out is set, prints
// each cycle followed by a summary line.
void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection and those are the
  // JavaThreads we need to be protected when we return info to the
  // originating thread.
  _setter.set();

  // The resulting list is owned by this object and freed in the destructor.
  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}
235
// Constructor for dumping all live threads (_num_threads == 0 means all).
VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = nullptr;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}
247
// Constructor for dumping an explicit set of threads; dummy snapshots
// are added in doit() for entries that do not map to live JavaThreads.
VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}
261
// Runs in the requesting thread before the safepoint; always allows the
// operation to proceed.
bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}
270
// Runs in the requesting thread after the safepoint; pairs with the
// Heap_lock acquisition in doit_prologue().
void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}
277
// Hash table of int64_t to a list of ObjectMonitor* owned by the JavaThread.
// The JavaThread's owner key is either a JavaThread* or a stack lock
// address in the JavaThread so we use "int64_t".
//
class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
 private:
  // Hash function for owner keys.
  static unsigned int ptr_hash(int64_t const& s1) {
    // 2654435761 = 2^32 * Phi (golden ratio)
    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
  }

 private:
  using ObjectMonitorList = GrowableArrayCHeap<ObjectMonitor*, mtThread>;

  // HashTable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  typedef HashTable<int64_t, ObjectMonitorList, 1031, AnyObj::C_HEAP, mtThread,
                    &ObjectMonitorsDump::ptr_hash> PtrTable;
  PtrTable* _ptrs;       // owner key -> list of monitors owned by that owner
  size_t _key_count;     // number of distinct owner keys
  size_t _om_count;      // total number of monitors collected

  // Appends the monitor to its owner's list, creating the list on first use.
  void add(ObjectMonitor* monitor) {
    int64_t key = monitor->owner();

    bool created = false;
    ObjectMonitorList* list = _ptrs->put_if_absent(key, &created);
    if (created) {
      _key_count++;
    }

    assert(list->find(monitor) == -1, "Should not contain duplicates");
    list->push(monitor);  // Add the ObjectMonitor to the list.
    _om_count++;
  }

 public:
  // HashTable is passed to various functions and populated in
  // different places so we allocate it using C_HEAP to make it immune
  // from any ResourceMarks that happen to be in the code paths.
  ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}

  ~ObjectMonitorsDump() {
    delete _ptrs;
  }

  // Implements MonitorClosure used to collect all owned monitors in the system
  void do_monitor(ObjectMonitor* monitor) override {
    assert(monitor->has_owner(), "Expects only owned monitors");

    if (monitor->has_anonymous_owner()) {
      // There's no need to collect anonymous owned monitors
      // because the caller of this code is only interested
      // in JNI owned monitors.
      return;
    }

    if (monitor->object_peek() == nullptr) {
      // JNI code doesn't necessarily keep the monitor object
      // alive. Filter out monitors with dead objects.
      return;
    }

    add(monitor);
  }

  // Implements the ObjectMonitorsView interface: applies the closure to
  // every collected monitor owned by the given thread (if any).
  void visit(MonitorClosure* closure, JavaThread* thread) override {
    int64_t key = ObjectMonitor::owner_id_from(thread);
    ObjectMonitorList* list = _ptrs->get(key);
    if (list == nullptr) {
      return;
    }
    for (int i = 0; i < list->length(); i++) {
      closure->do_monitor(list->at(i));
    }
  }

  size_t key_count() { return _key_count; }
  size_t om_count() { return _om_count; }
};
359
// Takes thread snapshots at the safepoint: either all live threads
// (_num_threads == 0) or only those in the _threads array, adding dummy
// snapshots for entries that do not correspond to live, visible threads.
void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump and those are the JavaThreads we need
  // to be protected when we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsDump object_monitors;
  if (_with_locked_monitors) {
    // Gather information about owned monitors.
    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);

    // If there are many object monitors in the system then the above iteration
    // can start to take time. Be friendly to following thread dumps by telling
    // the MonitorDeflationThread to deflate monitors.
    //
    // This is trying to be somewhat backwards compatible with the previous
    // implementation, which performed monitor deflation right here. We might
    // want to reconsider the need to trigger monitor deflation from the thread
    // dumping and instead maybe tweak the deflation heuristics.
    ObjectSynchronizer::request_deflate_idle_monitors();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // skip if the thread doesn't exist
        // Add a dummy snapshot
        _result->add_thread_snapshot();
        continue;
      }

      // Dump thread stack only if the thread is alive and not exiting
      // and not VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // add a null snapshot if skipped
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  }
}
443
// Adds a snapshot for java_thread to _result, dumping its stack up to
// _max_depth frames and attaching its concurrent-lock info (may be null).
void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsView* monitors) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
  snapshot->set_concurrent_locks(tcl);
}
450
// Set by set_vm_exited() at the final safepoint; checked by
// wait_if_vm_exited() to park threads once the VM has exited.
volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;
453
454 int VM_Exit::set_vm_exited() {
455
456 Thread * thr_cur = Thread::current();
457
458 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
459
460 int num_active = 0;
461
462 _shutdown_thread = thr_cur;
463 _vm_exited = true; // global flag
464 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
465 if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
466 ++num_active;
467 thr->set_terminated(JavaThread::_vm_exited); // per-thread flag
468 }
469 }
470
471 return num_active;
472 }
473
// Polls, with exponential backoff, until no other thread is in
// _thread_in_native state or the applicable deadline passes. Returns the
// number of threads still active in native (0 when fully quiescent).
int VM_Exit::wait_for_threads_in_native_to_block() {
  // VM exits at safepoint. This function must be called at the final safepoint
  // to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();

  // Compiler threads need longer wait because they can access VM data directly
  // while in native. If they are active and some structures being used are
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate VM when current thread is the only active thread, so
  // wait for user threads too.

  // Time per attempt. It is practical to start waiting with 10us delays
  // (around scheduling delay / timer slack), and exponentially ramp up
  // to 10ms if compiler threads are not responding.
  jlong max_wait_time = millis_to_nanos(10);
  jlong wait_time = 10000;  // initial sleep: 10us, in nanoseconds

  jlong start_time = os::javaTimeNanos();

  // Deadline for user threads in native code.
  // User-settable flag counts "attempts" in 10ms units, to a maximum of 10s.
  jlong user_threads_deadline = start_time + (UserThreadWaitAttemptsAtExit * millis_to_nanos(10));

  // Deadline for compiler threads: at least 10 seconds.
  jlong compiler_threads_deadline = start_time + millis_to_nanos(10000);

  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    // Count threads (other than this one) still in native; compiler
    // threads are tracked separately because they get the longer deadline.
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr!=thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    jlong time = os::javaTimeNanos();

    if (num_active == 0) {
      return 0;
    }
    if (time >= compiler_threads_deadline) {
      return num_active;
    }
    if ((num_active_compiler_thread == 0) && (time >= user_threads_deadline)) {
      return num_active;
    }

    // Back off: double the sleep up to max_wait_time (10ms).
    os::naked_short_nanosleep(wait_time);
    wait_time = MIN2(max_wait_time, wait_time * 2);
  }
}
549
// Runs the VM shutdown sequence at the final safepoint and terminates the
// process with _exit_code. This function does not return: both tails of
// the exit-hook branch end in vm_direct_exit().
void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // The ObjectMonitor subsystem uses perf counters so do this before
  // we call exit_globals() so we don't run afoul of perfMemory_exit().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // cleanup globals resources before exiting. exit_globals() currently
  // cleans up outputStream resources and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for exit hook
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != nullptr) {
    // exit hook should exit.
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}
598
599
// If the VM has already exited, blocks the calling (attached, non-shutdown)
// thread forever by acquiring the never-released Threads_lock.
void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited) {
    // Need to check for an unattached thread as only attached threads
    // can acquire the lock.
    Thread* current = Thread::current_or_null();
    if (current != nullptr && current != _shutdown_thread) {
      // _vm_exited is set at safepoint, and the Threads_lock is never released
      // so we will block here until the process dies.
      Threads_lock->lock();
      ShouldNotReachHere();
    }
  }
}
613
// Prints the compiler queues to _out.
void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}
617
618 #if INCLUDE_SERVICES
// Prints the class hierarchy (optionally filtered to _classname) to _out.
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}
622
// Prints the field layout of the named class to _out.
void VM_PrintClassLayout::doit() {
  PrintClassLayout::print_class_layout(_out, _class_name);
}
626 #endif