/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "jni.h"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif

class CompilerThread;
class HandleArea;
class HandleMark;
class JvmtiRawMonitor;
class NMethodClosure;
class Metadata;
class OopClosure;
class OSThread;
class ParkEvent;
class PerfTraceTime;
class ResourceArea;
class SafeThreadsListPtr;
class ThreadClosure;
class ThreadsList;
class ThreadsSMRSupport;
class VMErrorCallback;
class WorkerThread;
class JavaThread;

DEBUG_ONLY(class ResourceMark;)

// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses, e.g. CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//     - WatcherThread
//     - JfrThreadSampler
//     - LogAsyncWriter
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.
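//
// For example (an illustrative sketch only; JavaThread::cast is declared in
// javaThread.hpp), code that dispatches on the hierarchy can rely on the
// invariant above:
//
//   void visit(Thread* t) {
//     if (t->is_Java_thread()) {
//       JavaThread* jt = JavaThread::cast(t);  // safe: t is a JavaThread
//       // ... JavaThread-specific handling ...
//     } else {
//       // t is a NonJavaThread (or a partially constructed/destroyed Thread)
//     }
//   }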

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//    - stack initialization
//    - other OS-level initialization (signal masks etc)
//    - handshake with creating thread (if not started suspended)
//    - this->call_run()  // common shared entry point
//      - shared common initialization
//      - this->pre_run()   // virtual per-thread-type initialization
//      - this->run()       // virtual per-thread-type "main" logic
//      - shared common tear-down
//      - this->post_run()  // virtual per-thread-type tear-down
//      - // 'this' no longer referenceable
//    - OS-level tear-down (minimal)
//    - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread

class Thread: public ThreadShadow {
  friend class VMError;
  friend class VMErrorCallbackMark;
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class JavaThread;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL Thread* _thr_current;
#endif

  // On AArch64, the high order 32 bits are used by a "patching epoch" number,
  // which reflects whether this thread has executed the required fences after
  // an nmethod gets disarmed. The low order 32 bits denote the disarmed value.
  uint64_t _nmethod_disarmed_guard_value;

 public:
  void set_nmethod_disarmed_guard_value(int value) {
    _nmethod_disarmed_guard_value = (uint64_t)(uint32_t)value;
  }

  static ByteSize nmethod_disarmed_guard_value_offset() {
    ByteSize offset = byte_offset_of(Thread, _nmethod_disarmed_guard_value);
    // At least on x86_64, the nmethod entry barrier encodes the disarmed-value
    // offset in the instruction as a disp8 immediate, so it must fit in 7 bits.
    assert(in_bytes(offset) < 128, "Offset >= 128");
    return offset;
  }

 private:
  // Poll data is used in generated code for safepoint polls.
  // It is important for performance to put this at a low offset
  // in Thread. The accessors are in JavaThread.
  SafepointMechanism::ThreadData _poll_data;

  // Thread-local data area available to the GC. The internal
  // structure and contents of this data area is GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }
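  // A minimal usage sketch (hypothetical GC-private type, for illustration
  // only): a collector reinterprets the data area as its own thread-local
  // structure, e.g.
  //
  //   struct MyGCThreadLocal { HeapWord* alloc_ptr; };  // hypothetical
  //   MyGCThreadLocal* d = thread->gc_data<MyGCThreadLocal>();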

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop _pending_exception;        // pending exception for current thread
  // const char* _exception_file;  // file information for exception (debugging only)
  // int _exception_line;          // line information for exception (debugging only)

 protected:
  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ThreadsListHandleTest;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ValidateHazardPtrsClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr*   _threads_list_ptr;
  ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList*          get_threads_hazard_ptr() const;
  void                  set_threads_hazard_ptr(ThreadsList* new_list);
  // The low order bit of a ThreadsList* is used as a tag bit by the SMR protocol:
  static bool is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList* tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList* untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  // Is the target JavaThread protected by the calling Thread or by some other
  // mechanism?
  static bool is_JavaThread_protected(const JavaThread* target);
  // Is the target JavaThread protected by a ThreadsListHandle (TLH) associated
  // with the calling Thread?
  static bool is_JavaThread_protected_by_TLH(const JavaThread* target);
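  // A minimal sketch (illustration only; see ThreadsListHandle in
  // threadSMR.hpp for the actual API) of how a caller keeps a target
  // JavaThread protected while operating on it:
  //
  //   ThreadsListHandle tlh;
  //   if (tlh.includes(target_jt)) {
  //     // target_jt cannot be freed while 'tlh' is in scope
  //   }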

 private:
  DEBUG_ONLY(static Thread* _starting_thread;)
  DEBUG_ONLY(bool _suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_safepoint_thread;)

 public:
#ifdef ASSERT
  static bool is_starting_thread(const Thread* t);

  void set_suspendible_thread()   { _suspendible_thread = true; }
  void clear_suspendible_thread() { _suspendible_thread = false; }
  bool is_suspendible_thread()    { return _suspendible_thread; }

  void set_indirectly_suspendible_thread()   { _indirectly_suspendible_thread = true; }
  void clear_indirectly_suspendible_thread() { _indirectly_suspendible_thread = false; }
  bool is_indirectly_suspendible_thread()    { return _indirectly_suspendible_thread; }

  void set_indirectly_safepoint_thread()   { _indirectly_safepoint_thread = true; }
  void clear_indirectly_safepoint_thread() { _indirectly_safepoint_thread = false; }
  bool is_indirectly_safepoint_thread()    { return _indirectly_safepoint_thread; }
#endif

 private:
  // Pointer to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }

 public:
  void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const        { return _last_handle_mark; }

 private:
  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)  // Should we elide gc-a-lot?

  friend class GCLocker;

 private:
  ThreadLocalAllocBuffer _tlab;             // Thread-local eden
  jlong _allocated_bytes;                   // Cumulative number of bytes allocated on
                                            // the Java heap
  ThreadHeapSampler _heap_sampler;          // For use when sampling heap allocations

  ThreadStatisticalInfo _statistical_info;  // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)  // Thread-local data for JFR

  JvmtiRawMonitor* _current_pending_raw_monitor;  // JvmtiRawMonitor this thread
                                                  // is waiting to lock
 public:
  // Constructor
  Thread(MemTag mem_tag = mtThread);
  virtual ~Thread() = 0;  // Thread is abstract.

  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current();  // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by children.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif

 public:
  // Invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();
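  // A minimal sketch (hypothetical subclass, for illustration only) of how
  // the run-state sequence described at the top of this file maps onto the
  // virtual hooks above:
  //
  //   class DemoThread : public NamedThread {
  //     void pre_run() override  { /* per-type setup; runs inside call_run() */ }
  //     void run() override      { /* the thread's "main" logic */ }
  //     void post_run() override { /* tear-down; 'this' may be freed afterwards */ }
  //   };
  //
  // call_run() drives pre_run(), run() and post_run() in that order, with the
  // shared initialization and tear-down in between.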

  // Testers
  virtual bool is_VM_thread() const                 { return false; }
  virtual bool is_Java_thread() const               { return false; }
  virtual bool is_Compiler_thread() const           { return false; }
  virtual bool is_service_thread() const            { return false; }
  virtual bool is_hidden_from_external_view() const { return false; }
  virtual bool is_jvmti_agent_thread() const        { return false; }
  virtual bool is_Watcher_thread() const            { return false; }
  virtual bool is_ConcurrentGC_thread() const       { return false; }
  virtual bool is_Named_thread() const              { return false; }
  virtual bool is_Worker_thread() const             { return false; }
  virtual bool is_JfrSampler_thread() const         { return false; }
  virtual bool is_AttachListener_thread() const     { return false; }
  virtual bool is_monitor_deflation_thread() const  { return false; }

  // Convenience cast functions
  CompilerThread* as_Compiler_thread() const {
    assert(is_Compiler_thread(), "Must be compiler thread");
    return (CompilerThread*)this;
  }

  // Can this thread make Java upcalls?
  virtual bool can_call_java() const { return false; }

  // Is this a JavaThread that is on the VM's current ThreadsList?
  // If so it must participate in the safepoint protocol.
  virtual bool is_active_Java_thread() const { return false; }

  // All threads are given names. For singleton subclasses we can
  // just hard-wire the known name of the instance. JavaThreads and
  // NamedThreads support multiple named instances, and dynamically
  // changing the name of an instance.
  virtual const char* name() const { return "Unknown thread"; }

  // A thread's type name is also made available for debugging
  // and logging.
  virtual const char* type_name() const { return "Thread"; }

  // Returns the current thread (asserts if null)
  static inline Thread* current();
  // Returns the current thread, or null if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or null if not attached; safe for use
  // from signal handlers
  static inline Thread* current_or_null_safe();

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread* thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static void start(Thread* thread);

  void set_native_thread_name(const char* name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  // Support for Unhandled Oop detection.
  // Add the field for both fastdebug and debug builds to keep
  // Thread's field layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark an oop safe for GC. It may be stack allocated but won't move.
  void allow_unhandled_oop(oop* op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to the unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v; }
#endif

  // Resource area
  ResourceArea* resource_area() const        { return _resource_area; }
  void set_resource_area(ResourceArea* area) { _resource_area = area; }

  OSThread* osthread() const          { return _osthread; }
  void set_osthread(OSThread* thread) { _osthread = thread; }

  // Internal handle support
  HandleArea* handle_area() const        { return _handle_area; }
  void set_handle_area(HandleArea* area) { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const           { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles) { _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab() { return _tlab; }
  void initialize_tlab();

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler() { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  // For tracking the JVMTI raw monitor the thread is pending on.
  JvmtiRawMonitor* current_pending_raw_monitor() {
    return _current_pending_raw_monitor;
  }
  void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
    _current_pending_raw_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  // Used by JavaThread::oops_do.
  // Apply "cf->do_nmethod" (if not nullptr) to all nmethods active in frames.
  virtual void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
  virtual void oops_do_frames(OopClosure* f, NMethodClosure* cf) {}
  void oops_do(OopClosure* f, NMethodClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true". If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already. Returns "true" iff the
  // calling thread does the update; this indicates that the calling
  // thread has claimed the thread in the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }

  uintx threads_do_token() const { return _threads_do_token; }

  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

 private:
  // Check if address is within the given range of this thread's
  // stack: stack_base() > adr >/>= limit
  // The check is inclusive of limit if "inclusive" is true, else exclusive.
  bool is_in_stack_range(address adr, address limit, bool inclusive) const {
    assert(stack_base() > limit && limit >= stack_end(), "limit is outside of stack");
    return stack_base() > adr && (inclusive ? adr >= limit : adr > limit);
  }
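  // For example, with stack_base() == B: the inclusive form accepts addresses
  // in [limit, B), while the exclusive form accepts (limit, B). Stacks grow
  // downward here, so stack_base() is the high end and stack_end() the low end.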

 public:
  // Check if address is within the given range of this thread's
  // stack: stack_base() > adr >= limit
  bool is_in_stack_range_incl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, true);
  }

  // Check if address is within the given range of this thread's
  // stack: stack_base() > adr > limit
  bool is_in_stack_range_excl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, false);
  }

  // Check if address is in the stack mapped to this thread. Used mainly in
  // error reporting (so it has to include the guard zone) and frame printing.
  // Expects _stack_base to be initialized - checked with assert.
  bool is_in_full_stack_checked(address adr) const {
    return is_in_stack_range_incl(adr, stack_end());
  }

  // Like is_in_full_stack_checked but without the assertions as this
  // may be called in a thread before _stack_base is initialized.
  bool is_in_full_stack(address adr) const {
    address stack_end = _stack_base - _stack_size;
    return _stack_base > adr && adr >= stack_end;
  }

  // Check if address is in the live stack of this thread (not just for locks).
  // Warning: can only be called by the current thread on itself.
  bool is_in_live_stack(address adr) const {
    assert(Thread::current() == this, "is_in_live_stack can only be called from current thread");
    return is_in_stack_range_incl(adr, os::current_stack_pointer());
  }

  // Sets the argument thread as the starting thread. Returns false if thread
  // setup fails, e.g. due to lack of memory or too many threads.
  static bool set_as_starting_thread(JavaThread* jt);

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread-local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread-local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address _stack_base;
  size_t  _stack_size;
  int     _lgrp_id;

 public:
  // Stack overflow support
  address stack_base() const DEBUG_ONLY(;) NOT_DEBUG({ return _stack_base; })
  // Needed for code that can query a new thread before the stack has been set.
  address stack_base_or_null() const   { return _stack_base; }
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end() const            { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT();
  void    unregister_thread_stack_with_NMT();

  int  lgrp_id() const        { return _lgrp_id; }
  void set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  // Basic, non-virtual, printing support that is simple and always safe.
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by this thread.
  Mutex* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // thus the friendship
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const { print_owned_locks_on(tty); }
  Mutex* owned_locks() const     { return _owned_locks; }
  bool owns_locks() const        { return owned_locks() != nullptr; }

  // Deadlock detection
  ResourceMark* current_resource_mark()            { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration()  { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()   { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }

  static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }

  static ByteSize tlab_start_offset()  { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()    { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()    { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  ParkEvent* volatile _ParkEvent;  // for Object monitors, JVMTI raw monitors,
                                   // and ObjectSynchronizer::read_stable_mark

  // Termination indicator used by the signal handler.
  // _ParkEvent is just a convenient field we can null out after setting the
  // JavaThread termination state (which can't itself be read from the signal
  // handler if a signal hits during the Thread destructor).
  bool has_terminated() { return Atomic::load(&_ParkEvent) == nullptr; }

  jint _hashStateW;  // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;  // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
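  // These four words carry the state of Marsaglia's xorshift generator. A
  // sketch of the update step (the real implementation lives in the consumer,
  // e.g. the hashCode generator in the synchronizer code):
  //
  //   jint t = _hashStateX;
  //   t ^= (t << 11);
  //   _hashStateX = _hashStateY; _hashStateY = _hashStateZ; _hashStateZ = _hashStateW;
  //   _hashStateW = _hashStateW ^ (_hashStateW >> 19) ^ (t ^ (t >> 8));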

  // Low-level leaf-lock primitives used to implement synchronization.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int* Lock, const char* Name);
  static void SpinRelease(volatile int* Lock);

#if defined(__APPLE__) && defined(AARCH64)
 private:
  DEBUG_ONLY(bool _wx_init);
  WXMode _wx_state;
 public:
  void init_wx();
  WXMode enable_wx(WXMode new_state);

  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64

 private:
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  static bool current_in_asgct() {
    Thread* cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  VMErrorCallback* _vm_error_callbacks;

  bool _profile_vm_locks;
  bool _profile_vm_calls;
  bool _profile_vm_ops;
  bool _profile_rt_calls;
  bool _profile_upcalls;

  jlong _all_bc_counter_value;
  jlong _clinit_bc_counter_value;

  PerfTraceTime* _current_rt_call_timer;
 public:
  bool profile_vm_locks() const        { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v)    { _profile_vm_locks = v; }

  bool profile_vm_calls() const        { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v)    { _profile_vm_calls = v; }

  bool profile_vm_ops() const          { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v)      { _profile_vm_ops = v; }

  bool profile_rt_calls() const        { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v)    { _profile_rt_calls = v; }

  bool profile_upcalls() const         { return _profile_upcalls; }
  void set_profile_upcalls(bool v)     { _profile_upcalls = v; }

  PerfTraceTime* current_rt_call_timer() const     { return _current_rt_call_timer; }
  void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
  bool has_current_rt_call_timer() const           { return _current_rt_call_timer != nullptr; }

  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong bc_counter_value() const { return _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
};

class ProfileVMCallContext : StackObj {
 private:
  Thread* _thread;
  bool _enabled;
  PerfTraceTime* _timer;

  static int _perf_nested_runtime_calls_count;

  static const char* name(PerfTraceTime* t);
  static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
 public:
  inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
    : _thread(current), _enabled(is_on), _timer(timer) {
    if (_enabled) {
      assert(timer != nullptr, "");
      assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
      _thread->set_current_rt_call_timer(timer);
    } else if (current->profile_rt_calls()) {
      notify_nested_rt_call(current->current_rt_call_timer(), timer);
    }
  }

  inline ~ProfileVMCallContext() {
    if (_enabled) {
      assert(_timer == _thread->current_rt_call_timer(),
             "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
      _thread->set_current_rt_call_timer(nullptr);
    }
  }

  static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; }
};

class PauseRuntimeCallProfiling : public StackObj {
 protected:
  Thread* _thread;
  bool _enabled;
  PerfTraceTime* _timer;

 public:
  inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
    : _thread(current), _enabled(is_on), _timer(nullptr) {
    if (_enabled) {
      _timer = _thread->current_rt_call_timer();
      _thread->set_current_rt_call_timer(nullptr);
    }
  }

  inline ~PauseRuntimeCallProfiling() {
    if (_enabled) {
      guarantee(_thread->current_rt_call_timer() == nullptr, "");
      _thread->set_current_rt_call_timer(_timer);  // restore
    }
  }
};

class ThreadInAsgct {
 private:
  Thread* _thread;
  bool _saved_in_asgct;
 public:
  ThreadInAsgct(Thread* thread) : _thread(thread) {
    assert(thread != nullptr, "invariant");
    // Allow AsyncGetCallTrace to be reentrant - save the previous state.
    _saved_in_asgct = thread->in_asgct();
    thread->set_in_asgct(true);
  }
  ~ThreadInAsgct() {
    assert(_thread->in_asgct(), "invariant");
    _thread->set_in_asgct(_saved_in_asgct);
  }
};

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != nullptr, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
}

#endif // SHARE_RUNTIME_THREAD_HPP