1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "code/codeCache.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shared/isGCActiveMark.hpp"
33 #include "logging/log.hpp"
34 #include "logging/logConfiguration.hpp"
35 #include "logging/logStream.hpp"
36 #include "memory/heapInspection.hpp"
37 #include "memory/metaspace/metaspaceReporter.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/symbol.hpp"
41 #include "runtime/arguments.hpp"
42 #include "runtime/deoptimization.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/interfaceSupport.inline.hpp"
45 #include "runtime/javaThread.inline.hpp"
46 #include "runtime/jniHandles.hpp"
47 #include "runtime/objectMonitor.inline.hpp"
48 #include "runtime/stackFrameStream.inline.hpp"
49 #include "runtime/synchronizer.hpp"
50 #include "runtime/threads.hpp"
51 #include "runtime/threadSMR.inline.hpp"
52 #include "runtime/vmOperations.hpp"
53 #include "services/threadService.hpp"
54 #include "utilities/growableArray.hpp"
55 #include "utilities/ticks.hpp"
56
57 #define VM_OP_NAME_INITIALIZE(name) #name,
58
// Human-readable name for each VM operation, indexed by VMOp_Type.
// Built by stringizing every entry of VM_OPS_DO via VM_OP_NAME_INITIALIZE.
const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };
61
// Records the thread that requested this VM operation (as opposed to the
// thread that will eventually evaluate it).
void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}
65
66 void VM_Operation::evaluate() {
67 ResourceMark rm;
68 LogTarget(Debug, vmoperation) lt;
69 if (lt.is_enabled()) {
70 LogStream ls(lt);
71 ls.print("begin ");
72 print_on_error(&ls);
73 ls.cr();
74 }
75 doit();
76 if (lt.is_enabled()) {
77 LogStream ls(lt);
78 ls.print("end ");
79 print_on_error(&ls);
80 ls.cr();
81 }
82 }
83
84 // Called by fatal error handler.
85 void VM_Operation::print_on_error(outputStream* st) const {
86 st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
87 st->print("%s", name());
88
89 st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");
90
91 if (calling_thread()) {
92 st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
93 }
94 }
95
96 void VM_ClearICs::doit() {
97 if (_preserve_static_stubs) {
98 CodeCache::cleanup_inline_caches_whitebox();
99 } else {
100 CodeCache::clear_inline_caches();
101 }
102 }
103
// Walks class loader metadata and cleans up metaspaces at a safepoint.
void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}
107
// Rehashes the string table (typically done to defeat hash collisions).
void VM_RehashStringTable::doit() {
  StringTable::rehash_table();
}
111
// Rehashes the symbol table (typically done to defeat hash collisions).
void VM_RehashSymbolTable::doit() {
  SymbolTable::rehash_table();
}
115
116 VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
117 _thread = thread;
118 _id = id;
119 _reason = reason;
120 }
121
122
// Deoptimizes the frame identified by _id in _thread.
void VM_DeoptimizeFrame::doit() {
  // _reason was passed through the constructor as a raw int; it must map to
  // a real DeoptReason strictly between Reason_none and Reason_LIMIT.
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}
127
128
129 #ifndef PRODUCT
130
// Debug-only operation: deoptimize either every Java thread (DeoptimizeALot)
// or a pseudo-random selection of threads and frames (DeoptimizeRandom).
void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // deoptimize all java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;  // pick every (tnum+1)-th eligible thread
    int fnum = os::random() & 0x3;  // within it, every (fnum+1)-th deoptimizable frame
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;  // reset so a new thread is selected after another tnum threads
          int fcount = 0;
          // Deoptimize some selected frames.
          for(StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}
165
166
// Debug-only operation: turns the calling thread's frames into "zombies".
// NOTE(review): make_zombies() is defined elsewhere; its exact effect is not
// visible from this file.
void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}
170
171 #endif // !PRODUCT
172
// Runs in the requesting thread before the safepoint operation.
// Returns true to indicate the operation should proceed.
bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped; released again in
  // doit_epilogue() after the operation completes.
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}
180
// Prints all threads to _out, optionally including concurrent locks,
// extended info, and JNI handle statistics.
void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}
187
// Runs in the requesting thread after the safepoint operation; undoes the
// locking done in doit_prologue().
void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}
194
// Prints a metaspace report to _out using the configured scale and flags.
void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}
198
199 VM_FindDeadlocks::~VM_FindDeadlocks() {
200 if (_deadlocks != nullptr) {
201 DeadlockCycle* cycle = _deadlocks;
202 while (cycle != nullptr) {
203 DeadlockCycle* d = cycle;
204 cycle = cycle->next();
205 delete d;
206 }
207 }
208 }
209
// Detects deadlock cycles at a safepoint and, if an output stream was
// supplied, prints each cycle plus a summary line.
void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection and those are the
  // JavaThreads we need to be protected when we return info to the
  // originating thread.
  _setter.set();

  // The resulting cycle list is owned by this object and freed in the
  // destructor.
  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    // Summary uses singular/plural phrasing; nothing is printed when no
    // deadlocks were found.
    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}
235
236 VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
237 int max_depth,
238 bool with_locked_monitors,
239 bool with_locked_synchronizers) {
240 _result = result;
241 _num_threads = 0; // 0 indicates all threads
242 _threads = nullptr;
243 _max_depth = max_depth;
244 _with_locked_monitors = with_locked_monitors;
245 _with_locked_synchronizers = with_locked_synchronizers;
246 }
247
248 VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
249 GrowableArray<instanceHandle>* threads,
250 int num_threads,
251 int max_depth,
252 bool with_locked_monitors,
253 bool with_locked_synchronizers) {
254 _result = result;
255 _num_threads = num_threads;
256 _threads = threads;
257 _max_depth = max_depth;
258 _with_locked_monitors = with_locked_monitors;
259 _with_locked_synchronizers = with_locked_synchronizers;
260 }
261
// Runs in the requesting thread before the safepoint operation.
// Returns true to indicate the operation should proceed.
bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks; released in doit_epilogue().
    Heap_lock->lock();
  }

  return true;
}
270
// Runs in the requesting thread after the safepoint operation; undoes the
// locking done in doit_prologue().
void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}
277
278 // Hash table of int64_t to a list of ObjectMonitor* owned by the JavaThread.
279 class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
280 private:
281 static unsigned int ptr_hash(int64_t const& s1) {
282 // 2654435761 = 2^32 * Phi (golden ratio)
283 return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
284 }
285
286 private:
287 using ObjectMonitorList = GrowableArrayCHeap<ObjectMonitor*, mtThread>;
288
289 // HashTable SIZE is specified at compile time so we
290 // use 1031 which is the first prime after 1024.
291 typedef HashTable<int64_t, ObjectMonitorList, 1031, AnyObj::C_HEAP, mtThread,
292 &ObjectMonitorsDump::ptr_hash> PtrTable;
293 PtrTable* _ptrs;
294 size_t _key_count;
295 size_t _om_count;
296
297 void add(ObjectMonitor* monitor) {
298 int64_t key = monitor->owner();
299
300 bool created = false;
301 ObjectMonitorList* list = _ptrs->put_if_absent(key, &created);
302 if (created) {
303 _key_count++;
304 }
305
306 assert(list->find(monitor) == -1, "Should not contain duplicates");
307 list->push(monitor); // Add the ObjectMonitor to the list.
308 _om_count++;
309 }
310
311 public:
312 // HashTable is passed to various functions and populated in
313 // different places so we allocate it using C_HEAP to make it immune
314 // from any ResourceMarks that happen to be in the code paths.
315 ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
316
317 ~ObjectMonitorsDump() {
318 delete _ptrs;
319 }
320
321 // Implements MonitorClosure used to collect all owned monitors in the system
322 void do_monitor(ObjectMonitor* monitor) override {
323 assert(monitor->has_owner(), "Expects only owned monitors");
324
325 if (monitor->has_anonymous_owner()) {
326 // There's no need to collect anonymous owned monitors
327 // because the caller of this code is only interested
328 // in JNI owned monitors.
329 return;
330 }
331
332 if (monitor->object_peek() == nullptr) {
333 // JNI code doesn't necessarily keep the monitor object
334 // alive. Filter out monitors with dead objects.
335 return;
336 }
337
338 add(monitor);
339 }
340
341 // Implements the ObjectMonitorsView interface
342 void visit(MonitorClosure* closure, JavaThread* thread) override {
343 int64_t key = ObjectMonitor::owner_id_from(thread);
344 ObjectMonitorList* list = _ptrs->get(key);
345 if (list == nullptr) {
346 return;
347 }
348 for (int i = 0; i < list->length(); i++) {
349 closure->do_monitor(list->at(i));
350 }
351 }
352
353 size_t key_count() { return _key_count; }
354 size_t om_count() { return _om_count; }
355 };
356
// Takes a snapshot of either all live threads (_num_threads == 0) or the
// specific threads in _threads, recording stacks and (optionally) owned
// monitors and concurrent locks into _result.
void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump and those are the JavaThreads we need
  // to be protected when we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsDump object_monitors;
  if (_with_locked_monitors) {
    // Gather information about owned monitors.
    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);

    // If there are many object monitors in the system then the above iteration
    // can start to take time. Be friendly to following thread dumps by telling
    // the MonitorDeflationThread to deflate monitors.
    //
    // This is trying to be somewhat backwards compatible with the previous
    // implementation, which performed monitor deflation right here. We might
    // want to reconsider the need to trigger monitor deflation from the thread
    // dumping and instead maybe tweak the deflation heuristics.
    ObjectSynchronizer::request_deflate_idle_monitors();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // skip if the thread doesn't exist
        // Add a dummy snapshot
        _result->add_thread_snapshot();
        continue;
      }

      // Dump thread stack only if the thread is alive and not exiting
      // and not VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // add a null snapshot if skipped
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  }
}
440
// Adds one snapshot for java_thread to _result: its stack (up to _max_depth
// frames, with owned monitors if requested) plus its concurrent locks.
void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsView* monitors) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
  snapshot->set_concurrent_locks(tcl);
}
447
// Set at the final safepoint in set_vm_exited(); checked by threads returning
// from native code via wait_if_vm_exited().
volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;
450
// Marks the VM as exited (global flag plus per-thread flags for threads
// still in native) and returns the number of such active native threads.
// Must be called at a safepoint.
int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true; // global flag
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    // Only threads other than the shutdown thread that are currently
    // executing native code are flagged and counted.
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited); // per-thread flag
    }
  }

  return num_active;
}
470
// Polls (with exponential backoff) until no other thread remains in
// _thread_in_native state, or until the relevant deadline expires.
// Returns the number of threads still active in native at that point
// (0 if all became quiescent). Must be called at the final safepoint.
int VM_Exit::wait_for_threads_in_native_to_block() {
  // VM exits at safepoint. This function must be called at the final safepoint
  // to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();

  // Compiler threads need longer wait because they can access VM data directly
  // while in native. If they are active and some structures being used are
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate VM when current thread is the only active thread, so
  // wait for user threads too.

  // Time per attempt. It is practical to start waiting with 10us delays
  // (around scheduling delay / timer slack), and exponentially ramp up
  // to 10ms if compiler threads are not responding.
  jlong max_wait_time = millis_to_nanos(10);
  jlong wait_time = 10000;  // initial wait: 10us, in nanoseconds

  jlong start_time = os::javaTimeNanos();

  // Deadline for user threads in native code.
  // User-settable flag counts "attempts" in 10ms units, to a maximum of 10s.
  jlong user_threads_deadline = start_time + (UserThreadWaitAttemptsAtExit * millis_to_nanos(10));

  // Deadline for compiler threads: at least 10 seconds.
  jlong compiler_threads_deadline = start_time + millis_to_nanos(10000);

  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    // Re-scan the thread list on every iteration; counts are recomputed
    // from scratch each pass.
    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr!=thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    jlong time = os::javaTimeNanos();

    // All quiescent: done.
    if (num_active == 0) {
      return 0;
    }
    // Hard deadline reached: give up even with compiler threads active.
    if (time >= compiler_threads_deadline) {
      return num_active;
    }
    // Only user threads remain and their (shorter) deadline has passed.
    if ((num_active_compiler_thread == 0) && (time >= user_threads_deadline)) {
      return num_active;
    }

    // Back off: double the sleep up to max_wait_time (10ms).
    os::naked_short_nanosleep(wait_time);
    wait_time = MIN2(max_wait_time, wait_time * 2);
  }
}
546
547 void VM_Exit::doit() {
548
549 if (VerifyBeforeExit) {
550 HandleMark hm(VMThread::vm_thread());
551 // Among other things, this ensures that Eden top is correct.
552 Universe::heap()->prepare_for_verify();
553 // Silent verification so as not to pollute normal output,
554 // unless we really asked for it.
555 Universe::verify();
556 }
557
558 CompileBroker::set_should_block();
559
560 // Wait for a short period for threads in native to block. Any thread
561 // still executing native code after the wait will be stopped at
562 // native==>Java/VM barriers.
563 // Among 16276 JCK tests, 94% of them come here without any threads still
564 // running in native; the other 6% are quiescent within 250ms (Ultra 80).
565 wait_for_threads_in_native_to_block();
566
567 set_vm_exited();
568
569 // The ObjectMonitor subsystem uses perf counters so do this before
570 // we call exit_globals() so we don't run afoul of perfMemory_exit().
571 ObjectSynchronizer::do_final_audit_and_print_stats();
572
573 // We'd like to call IdealGraphPrinter::clean_up() to finalize the
574 // XML logging, but we can't safely do that here. The logic to make
575 // XML termination logging safe is tied to the termination of the
576 // VMThread, and it doesn't terminate on this exit path. See 8222534.
577
578 // cleanup globals resources before exiting. exit_globals() currently
579 // cleans up outputStream resources and PerfMemory resources.
580 exit_globals();
581
582 LogConfiguration::finalize();
583
584 // Check for exit hook
585 exit_hook_t exit_hook = Arguments::exit_hook();
586 if (exit_hook != nullptr) {
587 // exit hook should exit.
588 exit_hook(_exit_code);
589 // ... but if it didn't, we must do it here
590 vm_direct_exit(_exit_code);
591 } else {
592 vm_direct_exit(_exit_code);
593 }
594 }
595
596
// If the VM has already exited, blocks the current (non-shutdown) thread
// forever so it never observes torn-down VM state. Unattached threads and
// the shutdown thread itself pass through.
void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited) {
    // Need to check for an unattached thread as only attached threads
    // can acquire the lock.
    Thread* current = Thread::current_or_null();
    if (current != nullptr && current != _shutdown_thread) {
      // _vm_exited is set at safepoint, and the Threads_lock is never released
      // so we will block here until the process dies.
      Threads_lock->lock();
      ShouldNotReachHere();
    }
  }
}
610
// Prints the compiler queues to _out.
void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}
614
615 #if INCLUDE_SERVICES
// Prints the class hierarchy (optionally filtered to _classname, with
// interfaces and/or subclasses) to _out.
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}
619
// Prints the field layout of _class_name to _out.
void VM_PrintClassLayout::doit() {
  PrintClassLayout::print_class_layout(_out, _class_name);
}
623 #endif