/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "logging/log.hpp"
#include "logging/logConfiguration.hpp"
#include "logging/logStream.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/symbol.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"
#include "utilities/ticks.hpp"

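// VM_OP_NAME_INITIALIZE stringifies each VM operation name listed in
// VM_OPS_DO, producing the _names table used by VM_Operation::name().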
#define VM_OP_NAME_INITIALIZE(name) #name,

const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}

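// Evaluate the operation, logging "begin"/"end" messages around doit() when
// -Xlog:vmoperation=debug logging is enabled.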
void VM_Operation::evaluate() {
  ResourceMark rm;
  LogTarget(Debug, vmoperation) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("begin ");
    print_on_error(&ls);
    ls.cr();
  }
  doit();
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("end ");
    print_on_error(&ls);
    ls.cr();
  }
}

// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
  st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
  st->print("%s", name());

  st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");

  if (calling_thread()) {
    st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
  }
}

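// Clear inline caches across the code cache; when static stubs are to be
// preserved, use the whitebox cleanup path instead of a full clear.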
void VM_ClearICs::doit() {
  if (_preserve_static_stubs) {
    CodeCache::cleanup_inline_caches_whitebox();
  } else {
    CodeCache::clear_inline_caches();
  }
}

void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}

VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id     = id;
  _reason = reason;
}


void VM_DeoptimizeFrame::doit() {
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}


#ifndef PRODUCT

void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // deoptimize all java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;
    int fnum = os::random() & 0x3;
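    // tnum and fnum are in [0, 3]: every (tnum+1)-th thread with a Java frame
    // is selected, and within it every (fnum+1)-th deoptimizable frame is
    // deoptimized.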
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          for (StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}


void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}

#endif // !PRODUCT

bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}

void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}

void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}

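// Free the DeadlockCycle list built by doit(), if any.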
VM_FindDeadlocks::~VM_FindDeadlocks() {
  if (_deadlocks != nullptr) {
    DeadlockCycle* cycle = _deadlocks;
    while (cycle != nullptr) {
      DeadlockCycle* d = cycle;
      cycle = cycle->next();
      delete d;
    }
  }
}

void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection, and those are the
  // JavaThreads that need to be protected when we return info to the
  // originating thread.
  _setter.set();

  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = nullptr;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}

void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
// The JavaThread's owner key is either a JavaThread* or a stack lock
// address in the JavaThread so we use "void*".
//
class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
 private:
  static unsigned int ptr_hash(void* const& s1) {
    // 2654435761 = 2^32 * Phi (golden ratio)
    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
  }

 private:
  class ObjectMonitorLinkedList :
    public LinkedListImpl<ObjectMonitor*,
                          AnyObj::C_HEAP, mtThread,
                          AllocFailStrategy::RETURN_NULL> {};

  // ResourceHashtable SIZE is specified at compile time so we
  // use 1031 which is the first prime after 1024.
  typedef ResourceHashtable<void*, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
                            &ObjectMonitorsDump::ptr_hash> PtrTable;
  PtrTable* _ptrs;
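  // _key_count: number of distinct owner keys in _ptrs.
  // _om_count:  total number of owned ObjectMonitors collected.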
  size_t _key_count;
  size_t _om_count;

  void add_list(void* key, ObjectMonitorLinkedList* list) {
    _ptrs->put(key, list);
    _key_count++;
  }

  ObjectMonitorLinkedList* get_list(void* key) {
    ObjectMonitorLinkedList** listpp = _ptrs->get(key);
    return (listpp == nullptr) ? nullptr : *listpp;
  }

  void add(ObjectMonitor* monitor) {
    void* key = monitor->owner();

    ObjectMonitorLinkedList* list = get_list(key);
    if (list == nullptr) {
      // Create new list and add it to the hash table:
      list = new (mtThread) ObjectMonitorLinkedList;
      add_list(key, list);
    }

    assert(list->find(monitor) == nullptr, "Should not contain duplicates");
    list->add(monitor);  // Add the ObjectMonitor to the list.
    _om_count++;
  }

 public:
  // ResourceHashtable is passed to various functions and populated in
  // different places so we allocate it using C_HEAP to make it immune
  // from any ResourceMarks that happen to be in the code paths.
  ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}

  ~ObjectMonitorsDump() {
    class CleanupObjectMonitorsDump: StackObj {
     public:
      bool do_entry(void*& key, ObjectMonitorLinkedList*& list) {
        list->clear();  // clear the LinkListNodes
        delete list;    // then delete the LinkedList
        return true;
      }
    } cleanup;

    _ptrs->unlink(&cleanup);  // cleanup the LinkedLists
    delete _ptrs;             // then delete the hash table
  }

  // Implements MonitorClosure used to collect all owned monitors in the system
  void do_monitor(ObjectMonitor* monitor) override {
    assert(monitor->has_owner(), "Expects only owned monitors");

    if (monitor->is_owner_anonymous()) {
      // There's no need to collect anonymous owned monitors
      // because the caller of this code is only interested
      // in JNI owned monitors.
      return;
    }

    if (monitor->object_peek() == nullptr) {
      // JNI code doesn't necessarily keep the monitor object
      // alive. Filter out monitors with dead objects.
      return;
    }

    add(monitor);
  }

  // Implements the ObjectMonitorsView interface
  void visit(MonitorClosure* closure, JavaThread* thread) override {
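    // Look up the list of monitors recorded under this JavaThread* key and
    // pass each one to the closure.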
    ObjectMonitorLinkedList* list = get_list(thread);
    LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
    while (!iter.is_empty()) {
      ObjectMonitor* monitor = *iter.next();
      closure->do_monitor(monitor);
    }
  }

  size_t key_count() { return _key_count; }
  size_t om_count() { return _om_count; }
};

void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump, and those are the JavaThreads that
  // need to be protected when we return info to the originating thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsDump object_monitors;
  if (_with_locked_monitors) {
    // Gather information about owned monitors.
    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);

    // If there are many object monitors in the system then the above iteration
    // can start to take time. Be friendly to following thread dumps by telling
    // the MonitorDeflationThread to deflate monitors.
    //
    // This is trying to be somewhat backwards compatible with the previous
    // implementation, which performed monitor deflation right here. We might
    // want to reconsider the need to trigger monitor deflation from the thread
    // dumping and instead maybe tweak the deflation heuristics.
    ObjectSynchronizer::request_deflate_idle_monitors();
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // skip if the thread doesn't exist
        // Add a dummy snapshot
        _result->add_thread_snapshot();
        continue;
      }

      // Dump thread stack only if the thread is alive and not exiting
      // and not VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // add a null snapshot if skipped
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, &object_monitors);
    }
  }
}

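// Record one thread in the result: take a stack snapshot up to _max_depth
// frames, including locked monitors if requested, and attach its concurrent
// locks.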
void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsView* monitors) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
  snapshot->set_concurrent_locks(tcl);
}

volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;

int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true;                                // global flag
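  // Count the threads still executing native code and mark each one
  // _vm_exited so that it blocks if it attempts to re-enter Java/VM.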
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited);  // per-thread flag
    }
  }

  return num_active;
}

int VM_Exit::wait_for_threads_in_native_to_block() {
  // VM exits at safepoint. This function must be called at the final safepoint
  // to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();
  Monitor timer(Mutex::nosafepoint, "VM_ExitTimer_lock");

  // Compiler threads need longer wait because they can access VM data directly
  // while in native. If they are active and some structures being used are
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate VM when current thread is the only active thread, so
  // wait for user threads too. Numbers are in 10 milliseconds.
  int max_wait_user_thread = 30;                  // at least 300 milliseconds
  int max_wait_compiler_thread = 1000;            // at least 10 seconds

  int max_wait = max_wait_compiler_thread;

  int attempts = 0;
  JavaThreadIteratorWithHandle jtiwh;
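  // Poll every 10 ms: return 0 once no threads remain in native, or the
  // count of remaining threads once the applicable timeout has elapsed.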
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    if (num_active == 0) {
      return 0;
    } else if (attempts > max_wait) {
      return num_active;
    } else if (num_active_compiler_thread == 0 && attempts > max_wait_user_thread) {
      return num_active;
    }

    attempts++;

    MonitorLocker ml(&timer, Mutex::_no_safepoint_check_flag);
    ml.wait(10);
  }
}

void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // The ObjectMonitor subsystem uses perf counters so do this before
  // we call exit_globals() so we don't run afoul of perfMemory_exit().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // Clean up global resources before exiting. exit_globals() currently
  // cleans up outputStream resources and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for exit hook
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != nullptr) {
    // exit hook should exit.
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}


void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited &&
      Thread::current_or_null() != _shutdown_thread) {
    // _vm_exited is set at safepoint, and the Threads_lock is never released
    // so we will block here until the process dies.
    Threads_lock->lock();
    ShouldNotReachHere();
  }
}

void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}

#if INCLUDE_SERVICES
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}
#endif