/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "logging/log.hpp"
#include "logging/logConfiguration.hpp"
#include "logging/logStream.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/symbol.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"
#include "utilities/ticks.hpp"

#define VM_OP_NAME_INITIALIZE(name) #name,

const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}

void VM_Operation::evaluate() {
  ResourceMark rm;
  LogTarget(Debug, vmoperation) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("begin ");
    print_on_error(&ls);
    ls.cr();
  }
  doit();
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("end ");
    print_on_error(&ls);
    ls.cr();
  }
}

// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
  st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
  st->print("%s", name());

  st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");

  if (calling_thread()) {
    st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
  }
}

void VM_ClearICs::doit() {
  if (_preserve_static_stubs) {
    CodeCache::cleanup_inline_caches_whitebox();
  } else {
    CodeCache::clear_inline_caches();
  }
}

void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}

void VM_RehashStringTable::doit() {
  StringTable::rehash_table();
}

void VM_RehashSymbolTable::doit() {
  SymbolTable::rehash_table();
}

VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id = id;
  _reason = reason;
}


void VM_DeoptimizeFrame::doit() {
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}


#ifndef PRODUCT

void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // deoptimize all java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;
    int fnum = os::random() & 0x3;
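    // tnum/fnum are in the range 0..3: the loops below deoptimize every
    // (tnum+1)-th thread that has a last Java frame and, within such a
    // thread, every (fnum+1)-th frame that can be deoptimized.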
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          for (StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}


void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}

#endif // !PRODUCT

bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}

void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}

void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}

VM_FindDeadlocks::~VM_FindDeadlocks() {
  if (_deadlocks != nullptr) {
    DeadlockCycle* cycle = _deadlocks;
    while (cycle != nullptr) {
      DeadlockCycle* d = cycle;
      cycle = cycle->next();
      delete d;
    }
  }
}

void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection, and those are the
  // JavaThreads that need to remain protected when we return info
  // to the originating thread.
  _setter.set();

  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = nullptr;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}

void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

// Hash table of int64_t to a list of ObjectMonitor* owned by the JavaThread.
// The JavaThread's owner key is either a JavaThread* or a stack lock
// address in the JavaThread so we use "int64_t".
//
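// For example, each monitor collected by do_monitor() below is added to a
// list keyed by its owner's id, so visit() only has to walk the single list
// for the given JavaThread rather than every monitor in the system.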
class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
 private:
  static unsigned int ptr_hash(int64_t const& s1) {
    // 2654435761 ~= 2^32 / Phi (golden ratio); multiplying by it spreads
    // nearby keys across the full 32-bit hash range (Fibonacci hashing).
    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
  }

 private:
  class ObjectMonitorLinkedList :
    public LinkedListImpl<ObjectMonitor*,
                          AnyObj::C_HEAP, mtThread,
                          AllocFailStrategy::RETURN_NULL> {};

  // HashTable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  typedef HashTable<int64_t, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
                    &ObjectMonitorsDump::ptr_hash> PtrTable;
  PtrTable* _ptrs;
  size_t _key_count;
  size_t _om_count;

  void add_list(int64_t key, ObjectMonitorLinkedList* list) {
    _ptrs->put(key, list);
    _key_count++;
  }

  ObjectMonitorLinkedList* get_list(int64_t key) {
    ObjectMonitorLinkedList** listpp = _ptrs->get(key);
    return (listpp == nullptr) ? nullptr : *listpp;
  }

  void add(ObjectMonitor* monitor) {
    int64_t key = monitor->owner();

    ObjectMonitorLinkedList* list = get_list(key);
    if (list == nullptr) {
      // Create new list and add it to the hash table:
      list = new (mtThread) ObjectMonitorLinkedList;
      add_list(key, list);
    }

    assert(list->find(monitor) == nullptr, "Should not contain duplicates");
    list->add(monitor); // Add the ObjectMonitor to the list.
    _om_count++;
  }

 public:
  // HashTable is passed to various functions and populated in
  // different places so we allocate it using C_HEAP to make it immune
  // from any ResourceMarks that happen to be in the code paths.
  ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}

  ~ObjectMonitorsDump() {
    class CleanupObjectMonitorsDump: StackObj {
     public:
      bool do_entry(int64_t& key, ObjectMonitorLinkedList*& list) {
        list->clear();  // clear the LinkedList nodes
        delete list;    // then delete the LinkedList
        return true;
      }
    } cleanup;

    _ptrs->unlink(&cleanup); // clean up the LinkedLists
    delete _ptrs;            // then delete the hash table
  }

  // Implements MonitorClosure used to collect all owned monitors in the system
  void do_monitor(ObjectMonitor* monitor) override {
    assert(monitor->has_owner(), "Expects only owned monitors");

    if (monitor->has_anonymous_owner()) {
      // There's no need to collect anonymous owned monitors
      // because the caller of this code is only interested
      // in JNI owned monitors.
      return;
    }

    if (monitor->object_peek() == nullptr) {
      // JNI code doesn't necessarily keep the monitor object
      // alive. Filter out monitors with dead objects.
      return;
    }

    add(monitor);
  }

  // Implements the ObjectMonitorsView interface
  void visit(MonitorClosure* closure, JavaThread* thread) override {
    int64_t key = ObjectMonitor::owner_id_from(thread);
    ObjectMonitorLinkedList* list = get_list(key);
    LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
    while (!iter.is_empty()) {
      ObjectMonitor* monitor = *iter.next();
      closure->do_monitor(monitor);
    }
  }

  size_t key_count() { return _key_count; }
  size_t om_count() { return _om_count; }
};

void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump, and those are the JavaThreads that
  // need to remain protected when we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsDump object_monitors;
  if (_with_locked_monitors) {
    // Gather information about owned monitors.
    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);

    // If there are many object monitors in the system then the above iteration
    // can start to take time. Be friendly to following thread dumps by telling
    // the MonitorDeflationThread to deflate monitors.
    //
    // This is trying to be somewhat backwards compatible with the previous
    // implementation, which performed monitor deflation right here. We might
    // want to reconsider the need to trigger monitor deflation from the thread
    // dumping and instead maybe tweak the deflation heuristics.
    ObjectSynchronizer::request_deflate_idle_monitors();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // skip if the thread doesn't exist
        // Add a dummy snapshot
        _result->add_thread_snapshot();
        continue;
      }

      // Dump the thread stack only if the thread is alive, not exiting,
      // and not a VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // add a null snapshot if skipped
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  }
}

void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsView* monitors) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
  snapshot->set_concurrent_locks(tcl);
}

volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;

int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true; // global flag
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited); // per-thread flag
    }
  }

  return num_active;
}

int VM_Exit::wait_for_threads_in_native_to_block() {
  // The VM exits at a safepoint. This function must be called at the final
  // safepoint to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();

  // Compiler threads need a longer wait because they can access VM data directly
  // while in native. If they are active while structures they use are being
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during that state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate the VM when the current thread is the only active
  // thread, so wait for user threads too.

  // Time per attempt. It is practical to start waiting with 10us delays
  // (around scheduling delay / timer slack), and exponentially ramp up
  // to 10ms if compiler threads are not responding.
  jlong max_wait_time = millis_to_nanos(10);
  jlong wait_time = 10000;
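  // wait_time is in nanoseconds: 10000ns == 10us. Doubling it on every
  // attempt gives 10us, 20us, 40us, ..., hitting the 10ms cap after ten
  // doublings (about 10ms of cumulative sleeping before the polling
  // settles into 10ms intervals).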

  jlong start_time = os::javaTimeNanos();

  // Deadline for user threads in native code.
  // User-settable flag counts "attempts" in 10ms units, to a maximum of 10s.
  jlong user_threads_deadline = start_time + (UserThreadWaitAttemptsAtExit * millis_to_nanos(10));

  // Deadline for compiler threads: at least 10 seconds.
  jlong compiler_threads_deadline = start_time + millis_to_nanos(10000);

  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    jlong time = os::javaTimeNanos();

    if (num_active == 0) {
      return 0;
    }
    if (time >= compiler_threads_deadline) {
      return num_active;
    }
    if ((num_active_compiler_thread == 0) && (time >= user_threads_deadline)) {
      return num_active;
    }

    os::naked_short_nanosleep(wait_time);
    wait_time = MIN2(max_wait_time, wait_time * 2);
  }
}

void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // The ObjectMonitor subsystem uses perf counters, so do this before
  // calling exit_globals(); otherwise we would run afoul of perfMemory_exit().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // Clean up global resources before exiting. exit_globals() currently
  // cleans up outputStream resources and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for exit hook
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != nullptr) {
    // exit hook should exit.
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}


void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited) {
    // Need to check for an unattached thread as only attached threads
    // can acquire the lock.
    Thread* current = Thread::current_or_null();
    if (current != nullptr && current != _shutdown_thread) {
      // _vm_exited is set at safepoint, and the Threads_lock is never released
      // so we will block here until the process dies.
      Threads_lock->lock();
      ShouldNotReachHere();
    }
  }
}

void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}

#if INCLUDE_SERVICES
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}
#endif