/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/isGCActiveMark.hpp"
 33 #include "logging/log.hpp"
 34 #include "logging/logStream.hpp"
 35 #include "logging/logConfiguration.hpp"
 36 #include "memory/heapInspection.hpp"
 37 #include "memory/metaspace/metaspaceReporter.hpp"
 38 #include "memory/resourceArea.hpp"
 39 #include "memory/universe.hpp"
 40 #include "oops/symbol.hpp"
 41 #include "runtime/arguments.hpp"
 42 #include "runtime/deoptimization.hpp"
 43 #include "runtime/frame.inline.hpp"
 44 #include "runtime/interfaceSupport.inline.hpp"
 45 #include "runtime/javaThread.inline.hpp"
 46 #include "runtime/jniHandles.hpp"
 47 #include "runtime/objectMonitor.inline.hpp"
 48 #include "runtime/stackFrameStream.inline.hpp"
 49 #include "runtime/synchronizer.hpp"
 50 #include "runtime/threads.hpp"
 51 #include "runtime/threadSMR.inline.hpp"
 52 #include "runtime/vmOperations.hpp"
 53 #include "services/threadService.hpp"
 54 #include "utilities/ticks.hpp"
 55 
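// Build the table of VM operation names: VM_OP_NAME_INITIALIZE stringifies
// each operation listed in VM_OPS_DO, and _names is indexed by VMOp_Type.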
#define VM_OP_NAME_INITIALIZE(name) #name,

const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}

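// Evaluate the operation, emitting "begin"/"end" log lines around doit()
// when vmoperation debug logging (-Xlog:vmoperation=debug) is enabled.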
void VM_Operation::evaluate() {
  ResourceMark rm;
  LogTarget(Debug, vmoperation) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("begin ");
    print_on_error(&ls);
    ls.cr();
  }
  doit();
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("end ");
    print_on_error(&ls);
    ls.cr();
  }
}

// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
  st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
  st->print("%s", name());

  st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");

  if (calling_thread() != nullptr) {
    st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
  }
}

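// Clear inline caches in the code cache; when _preserve_static_stubs is set,
// take the whitebox cleanup path instead of a full clear.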
void VM_ClearICs::doit() {
  if (_preserve_static_stubs) {
    CodeCache::cleanup_inline_caches_whitebox();
  } else {
    CodeCache::clear_inline_caches();
  }
}

void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}

void VM_RehashStringTable::doit() {
  StringTable::rehash_table();
}

void VM_RehashSymbolTable::doit() {
  SymbolTable::rehash_table();
}

VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id     = id;
  _reason = reason;
}


void VM_DeoptimizeFrame::doit() {
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}


#ifndef PRODUCT

void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // Deoptimize all Java threads in the system.
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;
    int fnum = os::random() & 0x3;
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          for (StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}


void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}

#endif // !PRODUCT

bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}

void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}

void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}

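// Free the DeadlockCycle list, if any, that doit() built.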
VM_FindDeadlocks::~VM_FindDeadlocks() {
  if (_deadlocks != nullptr) {
    DeadlockCycle* cycle = _deadlocks;
    while (cycle != nullptr) {
      DeadlockCycle* d = cycle;
      cycle = cycle->next();
      delete d;
    }
  }
}

void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection, and those are the
  // JavaThreads that must remain protected while we return info to
  // the originating thread.
  _setter.set();

  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}

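// Dump all live Java threads (_num_threads == 0 means "all threads").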
VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = nullptr;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

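// Dump the given set of threads; doit() adds a dummy snapshot for any entry
// that no longer refers to a live JavaThread.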
VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}

void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

// Hash table mapping an int64_t owner key to the list of ObjectMonitor*
// owned by that JavaThread. The owner key is either a JavaThread* or a
// stack-lock address within the JavaThread, hence the int64_t key type.
//
class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
 private:
  static unsigned int ptr_hash(int64_t const& s1) {
    // 2654435761 ~= 2^32 / Phi (the golden ratio), a Knuth-style
    // multiplicative hash constant.
    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
  }

 private:
  class ObjectMonitorLinkedList :
    public LinkedListImpl<ObjectMonitor*,
                          AnyObj::C_HEAP, mtThread,
                          AllocFailStrategy::RETURN_NULL> {};

  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  typedef ResourceHashtable<int64_t, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
                            &ObjectMonitorsDump::ptr_hash> PtrTable;
  PtrTable* _ptrs;
  size_t _key_count;
  size_t _om_count;

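  // Insert a new list for the given owner key into the hash table.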
  void add_list(int64_t key, ObjectMonitorLinkedList* list) {
    _ptrs->put(key, list);
    _key_count++;
  }

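  // Look up the list for the given owner key; returns nullptr if absent.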
  ObjectMonitorLinkedList* get_list(int64_t key) {
    ObjectMonitorLinkedList** listpp = _ptrs->get(key);
    return (listpp == nullptr) ? nullptr : *listpp;
  }

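  // Add the monitor to the list keyed by its owner, creating the list on
  // first use.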
  void add(ObjectMonitor* monitor) {
    int64_t key = monitor->owner();

    ObjectMonitorLinkedList* list = get_list(key);
    if (list == nullptr) {
      // Create new list and add it to the hash table:
      list = new (mtThread) ObjectMonitorLinkedList;
      add_list(key, list);
    }

    assert(list->find(monitor) == nullptr, "Should not contain duplicates");
    list->add(monitor);  // Add the ObjectMonitor to the list.
    _om_count++;
  }


 public:
  // ResourceHashtable is passed to various functions and populated in
  // different places so we allocate it using C_HEAP to make it immune
  // from any ResourceMarks that happen to be in the code paths.
  ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}

  ~ObjectMonitorsDump() {
    class CleanupObjectMonitorsDump: StackObj {
     public:
      bool do_entry(int64_t& key, ObjectMonitorLinkedList*& list) {
        list->clear();  // Clear the LinkedList nodes,
        delete list;    // then delete the LinkedList.
        return true;
      }
    } cleanup;

    _ptrs->unlink(&cleanup);  // Clean up the LinkedLists,
    delete _ptrs;             // then delete the hash table.
  }

  // Implements MonitorClosure used to collect all owned monitors in the system
  void do_monitor(ObjectMonitor* monitor) override {
    assert(monitor->has_owner(), "Expects only owned monitors");

    if (monitor->has_anonymous_owner()) {
      // There's no need to collect anonymously owned monitors
      // because the caller of this code is only interested
      // in JNI owned monitors.
      return;
    }

    if (monitor->object_peek() == nullptr) {
      // JNI code doesn't necessarily keep the monitor object
      // alive. Filter out monitors with dead objects.
      return;
    }

    add(monitor);
  }

  // Implements the ObjectMonitorsView interface
  void visit(MonitorClosure* closure, JavaThread* thread) override {
    int64_t key = ObjectMonitor::owner_id_from(thread);
    ObjectMonitorLinkedList* list = get_list(key);
    LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
    while (!iter.is_empty()) {
      ObjectMonitor* monitor = *iter.next();
      closure->do_monitor(monitor);
    }
  }

  size_t key_count() { return _key_count; }
  size_t om_count() { return _om_count; }
};

void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump, and those are the JavaThreads that
  // must remain protected while we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsDump object_monitors;
  if (_with_locked_monitors) {
    // Gather information about owned monitors.
    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);

    // If there are many object monitors in the system then the above iteration
    // can start to take time. Be friendly to following thread dumps by telling
    // the MonitorDeflationThread to deflate monitors.
    //
    // This is trying to be somewhat backwards compatible with the previous
    // implementation, which performed monitor deflation right here. We might
    // want to reconsider the need to trigger monitor deflation from the thread
    // dumping and instead maybe tweak the deflation heuristics.
    ObjectSynchronizer::request_deflate_idle_monitors();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads.

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // Skip terminating threads and hidden threads.
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  } else {
    // Snapshot the threads in the given _threads array.
    // A dummy snapshot is created if a thread doesn't exist.

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // Skip if the thread doesn't exist;
        // add a dummy snapshot instead.
        _result->add_thread_snapshot();
        continue;
      }

      // Dump the thread stack only if the thread is alive, not exiting,
      // and not a VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // Add a null snapshot if skipped.
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  }
}

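// Record one thread's stack (including owned monitors if requested) and its
// concurrent locks into the ThreadDumpResult.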
void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsView* monitors) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
  snapshot->set_concurrent_locks(tcl);
}

volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;

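// Mark the VM as exited: record the shutdown thread, set the global flag,
// and tag every other JavaThread still in _thread_in_native with the
// per-thread _vm_exited state. Returns the number of threads so tagged.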
int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true;                                // global flag
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited);  // per-thread flag
    }
  }

  return num_active;
}

int VM_Exit::wait_for_threads_in_native_to_block() {
  // The VM exits at a safepoint. This function must be called at the final
  // safepoint to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();
  Monitor timer(Mutex::nosafepoint, "VM_ExitTimer_lock");

  // Compiler threads need a longer wait because they can access VM data
  // directly while in native. If they are active and some structures being
  // used are deleted by the shutdown sequence, they will crash. On the other
  // hand, user threads must go through native=>Java/VM transitions first to
  // access VM data, and they will be stopped during the state transition. In
  // theory, we don't have to wait for user threads to be quiescent, but it's
  // always better to terminate the VM when the current thread is the only
  // active thread, so wait for user threads too. Each wait attempt below is
  // 10 milliseconds.
  int wait_time_per_attempt = 10;               // in milliseconds
  int max_wait_attempts_user_thread = UserThreadWaitAttemptsAtExit;
  int max_wait_attempts_compiler_thread = 1000; // at least 10 seconds

  int attempts = 0;
  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    if (num_active == 0) {
      return 0;
    } else if (attempts >= max_wait_attempts_compiler_thread) {
      return num_active;
    } else if (num_active_compiler_thread == 0 &&
               attempts >= max_wait_attempts_user_thread) {
      return num_active;
    }

    attempts++;

    MonitorLocker ml(&timer, Mutex::_no_safepoint_check_flag);
    ml.wait(wait_time_per_attempt);
  }
}

void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // The ObjectMonitor subsystem uses perf counters, so do this before
  // we call exit_globals() to avoid running afoul of perfMemory_exit().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // Clean up global resources before exiting. exit_globals() currently
  // cleans up outputStream resources and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for an exit hook.
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != nullptr) {
    // The exit hook is expected to exit the process ...
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here.
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}


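// Block the calling thread once the VM has exited, unless it is unattached
// or is the shutdown thread itself; a blocked caller never returns.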
void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited) {
    // Need to check for an unattached thread as only attached threads
    // can acquire the lock.
    Thread* current = Thread::current_or_null();
    if (current != nullptr && current != _shutdown_thread) {
      // _vm_exited is set at a safepoint, and the Threads_lock is never
      // released, so we will block here until the process dies.
      Threads_lock->lock();
      ShouldNotReachHere();
    }
  }
}

void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}

#if INCLUDE_SERVICES
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}

void VM_PrintClassLayout::doit() {
  PrintClassLayout::print_class_layout(_out, _class_name);
}
#endif // INCLUDE_SERVICES