/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VM_OPERATIONS_HPP
#define SHARE_VM_RUNTIME_VM_OPERATIONS_HPP

#include "classfile/javaClasses.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/thread.hpp"
#include "utilities/top.hpp"

// The following classes are used for operations
// initiated by a Java thread but that must
// take place in the VMThread.

#define VM_OP_ENUM(type)   VMOp_##type,

// Note: When new VM_XXX comes up, add 'XXX' to the template table.
#define VM_OPS_DO(template)                       \
  template(Dummy)                                 \
  template(ThreadStop)                            \
  template(ThreadDump)                            \
  template(PrintThreads)                          \
  template(FindDeadlocks)                         \
  template(ForceSafepoint)                        \
  template(ForceAsyncSafepoint)                   \
  template(Deoptimize)                            \
  template(DeoptimizeFrame)                       \
  template(DeoptimizeAll)                         \
  template(ZombieAll)                             \
  template(UnlinkSymbols)                         \
  template(Verify)                                \
  template(PrintJNI)                              \
  template(HeapDumper)                            \
  template(DeoptimizeTheWorld)                    \
  template(CollectForMetadataAllocation)          \
  template(GC_HeapInspection)                     \
  template(GenCollectFull)                        \
  template(GenCollectFullConcurrent)              \
  template(GenCollectForAllocation)               \
  template(ParallelGCFailedAllocation)            \
  template(ParallelGCSystemGC)                    \
  template(CGC_Operation)                         \
  template(CMS_Initial_Mark)                      \
  template(CMS_Final_Remark)                      \
  template(G1CollectFull)                         \
  template(G1CollectForAllocation)                \
  template(G1IncCollectionPause)                  \
  template(DestroyAllocationContext)              \
  template(EnableBiasedLocking)                   \
  template(RevokeBias)                            \
  template(BulkRevokeBias)                        \
  template(PopulateDumpSharedSpace)               \
  template(JNIFunctionTableCopier)                \
  template(RedefineClasses)                       \
  template(GetOwnedMonitorInfo)                   \
  template(GetObjectMonitorUsage)                 \
  template(GetCurrentContendedMonitor)            \
  template(GetStackTrace)                         \
  template(GetMultipleStackTraces)                \
  template(GetAllStackTraces)                     \
  template(GetThreadListStackTraces)              \
  template(GetFrameCount)                         \
  template(GetFrameLocation)                      \
  template(ChangeBreakpoints)                     \
  template(GetOrSetLocal)                         \
  template(GetCurrentLocation)                    \
  template(EnterInterpOnlyMode)                   \
  template(ChangeSingleStep)                      \
  template(HeapWalkOperation)                     \
  template(HeapIterateOperation)                  \
  template(ReportJavaOutOfMemory)                 \
  template(JFRCheckpoint)                         \
  template(ShenandoahFullGC)                      \
  template(ShenandoahInitMark)                    \
  template(ShenandoahFinalMarkStartEvac)          \
  template(ShenandoahInitUpdateRefs)              \
  template(ShenandoahFinalUpdateRefs)             \
  template(ShenandoahDegeneratedGC)               \
  template(Exit)                                  \
  template(LinuxDllLoad)                          \
  template(RotateGCLog)                           \
  template(WhiteBoxOperation)                     \
  template(ClassLoaderStatsOperation)             \
  template(JFROldObject)
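
// For reference: expanding the table with VM_OP_ENUM (as done for
// VM_Operation::VMOp_Type below) yields one enumerator per entry, i.e.
//
//   VMOp_Dummy, VMOp_ThreadStop, VMOp_ThreadDump, ..., VMOp_JFROldObject,
//
// followed by the explicit VMOp_Terminating sentinel. The corresponding
// VM_Operation::_names[] array is expected to be generated from the same
// table in the implementation file, so the two stay in sync.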

class VM_Operation: public CHeapObj<mtInternal> {
 public:
  enum Mode {
    _safepoint,       // blocking,     safepoint,    vm_op C-heap allocated
    _no_safepoint,    // blocking,     no safepoint, vm_op C-heap allocated
    _concurrent,      // non-blocking, no safepoint, vm_op C-heap allocated
    _async_safepoint  // non-blocking, safepoint,    vm_op C-heap allocated
  };

  enum VMOp_Type {
    VM_OPS_DO(VM_OP_ENUM)
    VMOp_Terminating
  };

 private:
  Thread*         _calling_thread;
  ThreadPriority  _priority;
  long            _timestamp;
  VM_Operation*   _next;
  VM_Operation*   _prev;

  // The VM operation name array
  static const char* _names[];

 public:
  VM_Operation()  { _calling_thread = NULL; _next = NULL; _prev = NULL; }
  virtual ~VM_Operation() {}

  // VM operation support (used by VM thread)
  Thread* calling_thread() const                 { return _calling_thread; }
  ThreadPriority priority()                      { return _priority; }
  void set_calling_thread(Thread* thread, ThreadPriority priority);

  long timestamp() const                         { return _timestamp; }
  void set_timestamp(long timestamp)             { _timestamp = timestamp; }

  // Called by the VM thread; in turn invokes doit(). Do not override this.
  void evaluate();

  // evaluate() is called by the VMThread and in turn calls doit().
  // If the thread invoking VMThread::execute(VM_Operation*) is a JavaThread,
  // doit_prologue() is called in that thread before transferring control to
  // the VMThread.
  // If doit_prologue() returns true the VM operation will proceed, and
  // doit_epilogue() will be called by the JavaThread once the VM operation
  // completes. If doit_prologue() returns false the VM operation is cancelled.
  virtual void doit()                            = 0;
  virtual bool doit_prologue()                   { return true; }
  virtual void doit_epilogue()                   {}  // Note: Not called if mode is: _concurrent

  // Type test
  virtual bool is_methodCompiler() const         { return false; }

  // Linking
  VM_Operation* next() const                     { return _next; }
  VM_Operation* prev() const                     { return _prev; }
  void set_next(VM_Operation* next)              { _next = next; }
  void set_prev(VM_Operation* prev)              { _prev = prev; }

  // Configuration. Override these appropriately in subclasses.
  virtual VMOp_Type type() const = 0;
  virtual Mode evaluation_mode() const            { return _safepoint; }
  virtual bool allow_nested_vm_operations() const { return false; }
  virtual bool is_cheap_allocated() const         { return false; }
  virtual void oops_do(OopClosure* f)             { /* do nothing */ }

  // CAUTION: <don't hang yourself with the following rope>
  // If you override these methods, make sure that the evaluation
  // of these methods is race-free and non-blocking, since these
  // methods may be evaluated either by the mutators or by the
  // vm thread, either concurrently with mutators or with the mutators
  // stopped. In other words, taking locks is verboten, and if there
  // are any races in evaluating the conditions, they'd better be benign.
  virtual bool evaluate_at_safepoint() const {
    return evaluation_mode() == _safepoint  ||
           evaluation_mode() == _async_safepoint;
  }
  virtual bool evaluate_concurrently() const {
    return evaluation_mode() == _concurrent ||
           evaluation_mode() == _async_safepoint;
  }

  static const char* mode_to_string(Mode mode);

  // Debugging
  virtual void print_on_error(outputStream* st) const;
  const char* name() const { return _names[type()]; }
  static const char* name(int type) {
    assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");
    return _names[type];
  }
#ifndef PRODUCT
  void print_on(outputStream* st) const { print_on_error(st); }
#endif
};
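
// Illustrative sketch only (not part of this file): a typical blocking,
// safepoint VM operation. The class name and doit() body are hypothetical;
// a real operation would also get its own entry in the VM_OPS_DO table above
// instead of reusing VMOp_Dummy.
//
//   class VM_PrintSomething: public VM_Operation {
//    public:
//     VMOp_Type type() const { return VMOp_Dummy; }  // placeholder type
//     void doit()            { tty->print_cr("runs in the VMThread at a safepoint"); }
//   };
//
//   // Requesting thread (e.g. a JavaThread):
//   //   VM_PrintSomething op;
//   //   VMThread::execute(&op);  // runs doit_prologue() here, blocks until the
//   //                            // VMThread has evaluated doit(), then runs
//   //                            // doit_epilogue() here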

class VM_ThreadStop: public VM_Operation {
 private:
  oop _thread;        // The Thread that the Throwable is thrown against
  oop _throwable;     // The Throwable thrown at the target Thread
 public:
  // The raw thread and throwable oops are kept alive via oops_do() below,
  // since there is no guarantee that a GC will not happen before the
  // VM operation is executed.
  VM_ThreadStop(oop thread, oop throwable) {
    _thread    = thread;
    _throwable = throwable;
  }
  VMOp_Type type() const                         { return VMOp_ThreadStop; }
  oop target_thread() const                      { return _thread; }
  oop throwable() const                          { return _throwable; }
  void doit();
  // We deoptimize if the top-most frame is compiled - this might require a C2I adapter to be generated
  bool allow_nested_vm_operations() const        { return true; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }

  // GC support
  void oops_do(OopClosure* f) {
    f->do_oop(&_thread); f->do_oop(&_throwable);
  }
};

// dummy vm op, evaluated just to force a safepoint
class VM_ForceSafepoint: public VM_Operation {
 public:
  VM_ForceSafepoint() {}
  void doit()                                    {}
  VMOp_Type type() const                         { return VMOp_ForceSafepoint; }
};

// dummy vm op, evaluated just to force a safepoint
class VM_ForceAsyncSafepoint: public VM_Operation {
 public:
  VM_ForceAsyncSafepoint() {}
  void doit()                                    {}
  VMOp_Type type() const                         { return VMOp_ForceAsyncSafepoint; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }
};
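
// Illustrative sketch only (hypothetical call site). For a non-blocking
// (_async_safepoint) operation the requesting thread does not wait, so the
// operation is allocated on the C heap and reports is_cheap_allocated();
// the VMThread is then expected to free it after evaluation.
//
//   VM_ForceAsyncSafepoint* op = new VM_ForceAsyncSafepoint();
//   VMThread::execute(op);   // returns immediately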

class VM_Deoptimize: public VM_Operation {
 public:
  VM_Deoptimize() {}
  VMOp_Type type() const                         { return VMOp_Deoptimize; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};


// Deopt helper that can deoptimize frames in threads other than the
// current thread.  Only used through Deoptimization::deoptimize_frame.
class VM_DeoptimizeFrame: public VM_Operation {
  friend class Deoptimization;

 private:
  JavaThread* _thread;
  intptr_t*   _id;
  VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id);

 public:
  VMOp_Type type() const                         { return VMOp_DeoptimizeFrame; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

#ifndef PRODUCT
class VM_DeoptimizeAll: public VM_Operation {
 private:
  KlassHandle _dependee;
 public:
  VM_DeoptimizeAll() {}
  VMOp_Type type() const                         { return VMOp_DeoptimizeAll; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};


class VM_ZombieAll: public VM_Operation {
 public:
  VM_ZombieAll() {}
  VMOp_Type type() const                         { return VMOp_ZombieAll; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};
#endif // PRODUCT

class VM_UnlinkSymbols: public VM_Operation {
 public:
  VM_UnlinkSymbols() {}
  VMOp_Type type() const                         { return VMOp_UnlinkSymbols; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

class VM_Verify: public VM_Operation {
 private:
  bool _silent;
 public:
  VM_Verify(bool silent = VerifySilently) : _silent(silent) {}
  VMOp_Type type() const { return VMOp_Verify; }
  void doit();
};


class VM_PrintThreads: public VM_Operation {
 private:
  outputStream* _out;
  bool          _print_concurrent_locks;
 public:
  VM_PrintThreads()                                               { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; }
  VM_PrintThreads(outputStream* out, bool print_concurrent_locks) { _out = out; _print_concurrent_locks = print_concurrent_locks; }
  VMOp_Type type() const                                          { return VMOp_PrintThreads; }
  void doit();
  bool doit_prologue();
  void doit_epilogue();
};

class VM_PrintJNI: public VM_Operation {
 private:
  outputStream* _out;
 public:
  VM_PrintJNI()                        { _out = tty; }
  VM_PrintJNI(outputStream* out)       { _out = out; }
  VMOp_Type type() const               { return VMOp_PrintJNI; }
  void doit();
};

class DeadlockCycle;
class VM_FindDeadlocks: public VM_Operation {
 private:
  bool              _concurrent_locks;
  DeadlockCycle*    _deadlocks;
  outputStream*     _out;

 public:
  VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _deadlocks(NULL), _out(NULL) {}
  VM_FindDeadlocks(outputStream* st)      : _concurrent_locks(true), _deadlocks(NULL), _out(st) {}
  ~VM_FindDeadlocks();

  DeadlockCycle* result()      { return _deadlocks; }
  VMOp_Type type() const       { return VMOp_FindDeadlocks; }
  void doit();
  bool doit_prologue();
};
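
// Illustrative sketch only (hypothetical call site, e.g. a thread-dump or
// diagnostic path): detect deadlocks and print any cycles to a stream. The
// single-argument outputStream constructor also includes concurrent locks.
//
//   VM_FindDeadlocks op(tty);
//   VMThread::execute(&op);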

class ThreadDumpResult;
class ThreadSnapshot;
class ThreadConcurrentLocks;

class VM_ThreadDump : public VM_Operation {
 private:
  ThreadDumpResult*              _result;
  int                            _num_threads;
  GrowableArray<instanceHandle>* _threads;
  int                            _max_depth;
  bool                           _with_locked_monitors;
  bool                           _with_locked_synchronizers;

  ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl);

 public:
  VM_ThreadDump(ThreadDumpResult* result,
                int max_depth,   // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  VM_ThreadDump(ThreadDumpResult* result,
                GrowableArray<instanceHandle>* threads,
                int num_threads,
                int max_depth,   // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  VMOp_Type type() const { return VMOp_ThreadDump; }
  void doit();
  bool doit_prologue();
  void doit_epilogue();
};


class VM_Exit: public VM_Operation {
 private:
  int  _exit_code;
  static volatile bool _vm_exited;
  static Thread*       _shutdown_thread;
  static void wait_if_vm_exited();
 public:
  VM_Exit(int exit_code) {
    _exit_code = exit_code;
  }
  static int  wait_for_threads_in_native_to_block();
  static int  set_vm_exited();
  static bool vm_exited()                      { return _vm_exited; }
  static void block_if_vm_exited() {
    if (_vm_exited) {
      wait_if_vm_exited();
    }
  }
  VMOp_Type type() const { return VMOp_Exit; }
  void doit();
};


class VM_RotateGCLog: public VM_Operation {
 private:
  outputStream* _out;

 public:
  VM_RotateGCLog(outputStream* st) : _out(st) {}
  VMOp_Type type() const { return VMOp_RotateGCLog; }
  void doit()            { gclog_or_tty->rotate_log(true, _out); }
};

#endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP