/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "logging/log.hpp"
#include "logging/logConfiguration.hpp"
#include "logging/logStream.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/symbol.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

#define VM_OP_NAME_INITIALIZE(name) #name,

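// The table of human-readable VM operation names, built by stringifying every
// entry of the VM_OPS_DO x-macro list via VM_OP_NAME_INITIALIZE.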
const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
  { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

void VM_Operation::set_calling_thread(Thread* thread) {
  _calling_thread = thread;
}

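// Run the operation: doit() is bracketed by optional "begin"/"end" trace
// output, enabled with -Xlog:vmoperation=debug.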
void VM_Operation::evaluate() {
  ResourceMark rm;
  LogTarget(Debug, vmoperation) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("begin ");
    print_on_error(&ls);
    ls.cr();
  }
  doit();
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("end ");
    print_on_error(&ls);
    ls.cr();
  }
}

// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
  st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
  st->print("%s", name());

  st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");

  if (calling_thread()) {
    st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));
  }
}

void VM_ClearICs::doit() {
  if (_preserve_static_stubs) {
    CodeCache::cleanup_inline_caches_whitebox();
  } else {
    CodeCache::clear_inline_caches();
  }
}

void VM_CleanClassLoaderDataMetaspaces::doit() {
  ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces();
}

VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id, int reason) {
  _thread = thread;
  _id     = id;
  _reason = reason;
}


void VM_DeoptimizeFrame::doit() {
  assert(_reason > Deoptimization::Reason_none && _reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
  Deoptimization::deoptimize_frame_internal(_thread, _id, (Deoptimization::DeoptReason)_reason);
}


#ifndef PRODUCT

void VM_DeoptimizeAll::doit() {
  JavaThreadIteratorWithHandle jtiwh;
  // Deoptimize all Java threads in the system
  if (DeoptimizeALot) {
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        thread->deoptimize();
      }
    }
  } else if (DeoptimizeRandom) {

    // Deoptimize some selected threads and frames
    int tnum = os::random() & 0x3;
    int fnum = os::random() & 0x3;
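    // tnum/fnum act as small random strides: every (tnum + 1)-th thread with
    // a last Java frame is selected, and within it every (fnum + 1)-th
    // deoptimizable frame is deoptimized.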
    int tcount = 0;
    for (; JavaThread *thread = jtiwh.next(); ) {
      if (thread->has_last_Java_frame()) {
        if (tcount++ == tnum) {
          tcount = 0;
          int fcount = 0;
          // Deoptimize some selected frames.
          for (StackFrameStream fst(thread, false /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
            if (fst.current()->can_be_deoptimized()) {
              if (fcount++ == fnum) {
                fcount = 0;
                Deoptimization::deoptimize(thread, *fst.current());
              }
            }
          }
        }
      }
    }
  }
}


void VM_ZombieAll::doit() {
  JavaThread::cast(calling_thread())->make_zombies();
}

#endif // !PRODUCT

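// doit_prologue() runs in the requesting thread before the safepoint is
// reached, so it can take Heap_lock here; the lock is released again in
// doit_epilogue() once the operation has completed.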
bool VM_PrintThreads::doit_prologue() {
  // Get Heap_lock if concurrent locks will be dumped
  if (_print_concurrent_locks) {
    Heap_lock->lock();
  }
  return true;
}

void VM_PrintThreads::doit() {
  Threads::print_on(_out, true, false, _print_concurrent_locks, _print_extended_info);
  if (_print_jni_handle_info) {
    JNIHandles::print_on(_out);
  }
}

void VM_PrintThreads::doit_epilogue() {
  if (_print_concurrent_locks) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_PrintMetadata::doit() {
  metaspace::MetaspaceReporter::print_report(_out, _scale, _flags);
}

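// The deadlock cycles produced by doit() are heap-allocated; free the whole
// list when the operation is destroyed.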
VM_FindDeadlocks::~VM_FindDeadlocks() {
  if (_deadlocks != nullptr) {
    DeadlockCycle* cycle = _deadlocks;
    while (cycle != nullptr) {
      DeadlockCycle* d = cycle;
      cycle = cycle->next();
      delete d;
    }
  }
}

void VM_FindDeadlocks::doit() {
  // Update the hazard ptr in the originating thread to the current
  // list of threads. This VM operation needs the current list of
  // threads for proper deadlock detection, and those are the
  // JavaThreads that need to remain protected when we return
  // information to the originating thread.
  _setter.set();

  _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks);
  if (_out != nullptr) {
    int num_deadlocks = 0;
    for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) {
      num_deadlocks++;
      cycle->print_on_with(_setter.list(), _out);
    }

    if (num_deadlocks == 1) {
      _out->print_cr("\nFound 1 deadlock.\n");
      _out->flush();
    } else if (num_deadlocks > 1) {
      _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
      _out->flush();
    }
  }
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = 0; // 0 indicates all threads
  _threads = nullptr;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}

VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
                             GrowableArray<instanceHandle>* threads,
                             int num_threads,
                             int max_depth,
                             bool with_locked_monitors,
                             bool with_locked_synchronizers) {
  _result = result;
  _num_threads = num_threads;
  _threads = threads;
  _max_depth = max_depth;
  _with_locked_monitors = with_locked_monitors;
  _with_locked_synchronizers = with_locked_synchronizers;
}


bool VM_ThreadDump::doit_prologue() {
  if (_with_locked_synchronizers) {
    // Acquire Heap_lock to dump concurrent locks
    Heap_lock->lock();
  }

  return true;
}

void VM_ThreadDump::doit_epilogue() {
  if (_with_locked_synchronizers) {
    // Release Heap_lock
    Heap_lock->unlock();
  }
}

void VM_ThreadDump::doit() {
  ResourceMark rm;

  // Set the hazard ptr in the originating thread to protect the
  // current list of threads. This VM operation needs the current list
  // of threads for a proper dump, and those are the JavaThreads that
  // need to remain protected when we return info to the originating
  // thread.
  _result->set_t_list();

  ConcurrentLocksDump concurrent_locks(true);
  if (_with_locked_synchronizers) {
    concurrent_locks.dump_at_safepoint();
  }

  ObjectMonitorsHashtable table;
  ObjectMonitorsHashtable* tablep = nullptr;
  if (_with_locked_monitors) {
    // The caller wants locked monitor information and that's expensive to gather
    // when there are a lot of inflated monitors. So we deflate idle monitors and
    // gather information about owned monitors at the same time.
    tablep = &table;
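    // Each deflation pass also gathers owned-monitor information into 'table';
    // loop until a pass finds nothing left to deflate.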
    while (ObjectSynchronizer::deflate_idle_monitors(tablep) > 0) {
      ; /* empty */
    }
  }

  if (_num_threads == 0) {
    // Snapshot all live threads

    for (uint i = 0; i < _result->t_list()->length(); i++) {
      JavaThread* jt = _result->t_list()->thread_at(i);
      if (jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // skip terminating threads and hidden threads
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, tablep);
    }
  } else {
    // Snapshot threads in the given _threads array
    // A dummy snapshot is created if a thread doesn't exist

    for (int i = 0; i < _num_threads; i++) {
      instanceHandle th = _threads->at(i);
      if (th() == nullptr) {
        // The thread does not exist; add a dummy snapshot and skip it.
        _result->add_thread_snapshot();
        continue;
      }

      // Dump the thread stack only if the thread is alive, not exiting,
      // and not a VM internal thread.
      JavaThread* jt = java_lang_Thread::thread(th());
      if (jt != nullptr && !_result->t_list()->includes(jt)) {
        // _threads[i] doesn't refer to a valid JavaThread; this check
        // is primarily for JVM_DumpThreads() which doesn't have a good
        // way to validate the _threads array.
        jt = nullptr;
      }
      if (jt == nullptr || /* thread not alive */
          jt->is_exiting() ||
          jt->is_hidden_from_external_view()) {
        // Add a dummy snapshot for the skipped thread.
        _result->add_thread_snapshot();
        continue;
      }
      ThreadConcurrentLocks* tcl = nullptr;
      if (_with_locked_synchronizers) {
        tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
      snapshot_thread(jt, tcl, tablep);
    }
  }
}

void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
                                    ObjectMonitorsHashtable* table) {
  ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, table, false);
  snapshot->set_concurrent_locks(tcl);
}

volatile bool VM_Exit::_vm_exited = false;
Thread * volatile VM_Exit::_shutdown_thread = nullptr;

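// Mark the VM as exited and flag every other thread that is still executing
// native code; returns the number of such threads.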
int VM_Exit::set_vm_exited() {

  Thread * thr_cur = Thread::current();

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  int num_active = 0;

  _shutdown_thread = thr_cur;
  _vm_exited = true;                                // global flag
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
      ++num_active;
      thr->set_terminated(JavaThread::_vm_exited);  // per-thread flag
    }
  }

  return num_active;
}

int VM_Exit::wait_for_threads_in_native_to_block() {
  // VM exits at safepoint. This function must be called at the final safepoint
  // to wait for threads in _thread_in_native state to be quiescent.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");

  Thread * thr_cur = Thread::current();
  Monitor timer(Mutex::nosafepoint, "VM_ExitTimer_lock");

  // Compiler threads need a longer wait because they can access VM data directly
  // while in native. If they are active and some structures being used are
  // deleted by the shutdown sequence, they will crash. On the other hand, user
  // threads must go through native=>Java/VM transitions first to access VM
  // data, and they will be stopped during state transition. In theory, we
  // don't have to wait for user threads to be quiescent, but it's always
  // better to terminate the VM when the current thread is the only active
  // thread, so wait for user threads too. The numbers below are in units of
  // 10 milliseconds.
  int max_wait_user_thread = 30;                  // at least 300 milliseconds
  int max_wait_compiler_thread = 1000;            // at least 10 seconds

  int max_wait = max_wait_compiler_thread;

  int attempts = 0;
  JavaThreadIteratorWithHandle jtiwh;
  while (true) {
    int num_active = 0;
    int num_active_compiler_thread = 0;

    jtiwh.rewind();
    for (; JavaThread *thr = jtiwh.next(); ) {
      if (thr != thr_cur && thr->thread_state() == _thread_in_native) {
        num_active++;
        if (thr->is_Compiler_thread()) {
#if INCLUDE_JVMCI
          CompilerThread* ct = (CompilerThread*) thr;
          if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) {
            num_active_compiler_thread++;
          } else {
            // A JVMCI compiler thread never accesses VM data structures
            // while in _thread_in_native state so there's no need to wait
            // for it and potentially add a 300 millisecond delay to VM
            // shutdown.
            num_active--;
          }
#else
          num_active_compiler_thread++;
#endif
        }
      }
    }

    if (num_active == 0) {
      return 0;
    } else if (attempts > max_wait) {
      return num_active;
    } else if (num_active_compiler_thread == 0 && attempts > max_wait_user_thread) {
      return num_active;
    }

    attempts++;

    MonitorLocker ml(&timer, Mutex::_no_safepoint_check_flag);
    ml.wait(10);
  }
}

void VM_Exit::doit() {

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify();
  }

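  // Ask compiler threads to block at their next blocking point instead of
  // starting new work while the VM is shutting down.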
  CompileBroker::set_should_block();

  // Wait for a short period for threads in native to block. Any thread
  // still executing native code after the wait will be stopped at
  // native==>Java/VM barriers.
  // Among 16276 JCK tests, 94% of them come here without any threads still
  // running in native; the other 6% are quiescent within 250ms (Ultra 80).
  wait_for_threads_in_native_to_block();

  set_vm_exited();

  // The ObjectMonitor subsystem uses perf counters, so run this audit before
  // exit_globals() shuts them down via perfMemory_exit().
  ObjectSynchronizer::do_final_audit_and_print_stats();

  // We'd like to call IdealGraphPrinter::clean_up() to finalize the
  // XML logging, but we can't safely do that here. The logic to make
  // XML termination logging safe is tied to the termination of the
  // VMThread, and it doesn't terminate on this exit path. See 8222534.

  // Clean up global resources before exiting. exit_globals() currently
  // cleans up outputStream and PerfMemory resources.
  exit_globals();

  LogConfiguration::finalize();

  // Check for an exit hook
  exit_hook_t exit_hook = Arguments::exit_hook();
  if (exit_hook != nullptr) {
    // The exit hook is expected to exit the process ...
    exit_hook(_exit_code);
    // ... but if it didn't, we must do it here.
    vm_direct_exit(_exit_code);
  } else {
    vm_direct_exit(_exit_code);
  }
}


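// Block the calling thread (unless it is the shutdown thread) once the VM has
// exited; it waits on Threads_lock until the process terminates.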
void VM_Exit::wait_if_vm_exited() {
  if (_vm_exited &&
      Thread::current_or_null() != _shutdown_thread) {
    // _vm_exited is set at safepoint, and the Threads_lock is never released
    // so we will block here until the process dies.
    Threads_lock->lock();
    ShouldNotReachHere();
  }
}

void VM_PrintCompileQueue::doit() {
  CompileBroker::print_compile_queues(_out);
}

#if INCLUDE_SERVICES
void VM_PrintClassHierarchy::doit() {
  KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname);
}

void VM_PrintClassLayout::doit() {
  PrintClassLayout::print_class_layout(_out, _class_name);
}
#endif