1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_RUNTIME_THREAD_HPP
27 #define SHARE_RUNTIME_THREAD_HPP
28
29 #include "gc/shared/gcThreadLocalData.hpp"
30 #include "gc/shared/threadLocalAllocBuffer.hpp"
31 #include "jni.h"
32 #include "memory/allocation.hpp"
33 #include "runtime/atomicAccess.hpp"
34 #include "runtime/globals.hpp"
35 #include "runtime/os.hpp"
36 #include "runtime/safepointMechanism.hpp"
37 #include "runtime/threadHeapSampler.hpp"
38 #include "runtime/threadLocalStorage.hpp"
39 #include "runtime/threadStatisticalInfo.hpp"
40 #include "runtime/unhandledOops.hpp"
41 #include "utilities/globalDefinitions.hpp"
42 #include "utilities/macros.hpp"
43 #if INCLUDE_JFR
44 #include "jfr/support/jfrThreadExtension.hpp"
45 #endif
46
47 class CompilerThread;
48 class HandleArea;
49 class HandleMark;
50 class JvmtiRawMonitor;
51 class NMethodClosure;
52 class Metadata;
53 class OopClosure;
54 class OSThread;
55 class ParkEvent;
56 class ResourceArea;
57 class SafeThreadsListPtr;
58 class ThreadClosure;
59 class ThreadsList;
60 class ThreadsSMRSupport;
61 class VMErrorCallback;
62
63
64 class PerfTraceTime;
65
66 DEBUG_ONLY(class ResourceMark;)
67
68 class WorkerThread;
69
70 class JavaThread;
71
72 // Class hierarchy
73 // - Thread
74 // - JavaThread
//     - various subclasses e.g. CompilerThread, ServiceThread
76 // - NonJavaThread
77 // - NamedThread
78 // - VMThread
79 // - ConcurrentGCThread
80 // - WorkerThread
81 // - WatcherThread
82 // - JfrThreadSampler
83 // - JfrCPUSamplerThread
84 // - LogAsyncWriter
85 //
86 // All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread or t is
// a partially constructed/destroyed Thread.
89
90 // Thread execution sequence and actions:
91 // All threads:
92 // - thread_native_entry // per-OS native entry point
93 // - stack initialization
94 // - other OS-level initialization (signal masks etc)
95 // - handshake with creating thread (if not started suspended)
96 // - this->call_run() // common shared entry point
97 // - shared common initialization
98 // - this->pre_run() // virtual per-thread-type initialization
99 // - this->run() // virtual per-thread-type "main" logic
100 // - shared common tear-down
101 // - this->post_run() // virtual per-thread-type tear-down
102 // - // 'this' no longer referenceable
103 // - OS-level tear-down (minimal)
104 // - final logging
105 //
106 // For JavaThread:
107 // - this->run() // virtual but not normally overridden
108 // - this->thread_main_inner() // extra call level to ensure correct stack calculations
109 // - this->entry_point() // set differently for each kind of JavaThread
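//
// A simplified sketch of the shared entry point (illustrative only; the
// authoritative implementation in thread.cpp also performs JFR
// notification, logging and debug-state tracking):
//
//   void Thread::call_run() {
//     // shared common initialization
//     register_thread_stack_with_NMT();
//     this->pre_run();    // virtual per-thread-type initialization
//     this->run();        // virtual per-thread-type "main" logic
//     // shared common tear-down, then per-thread-type tear-down
//     this->post_run();   // 'this' may already be deleted upon return
//   }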
110
111 class Thread: public ThreadShadow {
112 friend class VMError;
113 friend class VMErrorCallbackMark;
114 friend class VMStructs;
115 friend class JVMCIVMStructs;
116 friend class JavaThread;
117 private:
118
119 // Current thread is maintained as a thread-local variable
120 static THREAD_LOCAL Thread* _thr_current;
121
122 // On AArch64, the high order 32 bits are used by a "patching epoch" number
  // which reflects whether this thread has executed the required fences after
  // an nmethod gets disarmed. The low order 32 bits denote the disarmed value.
125 uint64_t _nmethod_disarmed_guard_value;
126
127 public:
128 void set_nmethod_disarmed_guard_value(int value) {
129 _nmethod_disarmed_guard_value = (uint64_t)(uint32_t)value;
130 }
131
132 static ByteSize nmethod_disarmed_guard_value_offset() {
133 ByteSize offset = byte_offset_of(Thread, _nmethod_disarmed_guard_value);
    // At least on x86_64, the nmethod entry barrier encodes the disarmed value
    // offset in the instruction as a disp8 immediate
136 assert(in_bytes(offset) < 128, "Offset >= 128");
137 return offset;
138 }
139
140 private:
141 // Poll data is used in generated code for safepoint polls.
142 // It is important for performance to put this at lower offset
143 // in Thread. The accessors are in JavaThread.
144 SafepointMechanism::ThreadData _poll_data;
145
146 // Thread local data area available to the GC. The internal
147 // structure and contents of this data area is GC-specific.
148 // Only GC and GC barrier code should access this data area.
149 GCThreadLocalData _gc_data;
150
151 public:
152 static ByteSize gc_data_offset() {
153 return byte_offset_of(Thread, _gc_data);
154 }
155
156 template <typename T> T* gc_data() {
157 STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
158 return reinterpret_cast<T*>(&_gc_data);
159 }
160
161 // Exception handling
162 // (Note: _pending_exception and friends are in ThreadShadow)
  //   oop         _pending_exception;  // pending exception for current thread
  //   const char* _exception_file;     // file information for exception (debugging only)
  //   int         _exception_line;     // line information for exception (debugging only)
166 protected:
167 // JavaThread lifecycle support:
168 friend class SafeThreadsListPtr; // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
169 friend class ScanHazardPtrGatherProtectedThreadsClosure; // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
170 friend class ScanHazardPtrGatherThreadsListClosure; // for get_threads_hazard_ptr(), untag_hazard_ptr() access
171 friend class ScanHazardPtrPrintMatchingThreadsClosure; // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
172 friend class ThreadsSMRSupport; // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
173 friend class ThreadsListHandleTest; // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
174 friend class ValidateHazardPtrsClosure; // for get_threads_hazard_ptr(), untag_hazard_ptr() access
175
176 ThreadsList* volatile _threads_hazard_ptr;
177 SafeThreadsListPtr* _threads_list_ptr;
178 ThreadsList* cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
179 ThreadsList* get_threads_hazard_ptr() const;
180 void set_threads_hazard_ptr(ThreadsList* new_list);
181 static bool is_hazard_ptr_tagged(ThreadsList* list) {
182 return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
183 }
184 static ThreadsList* tag_hazard_ptr(ThreadsList* list) {
185 return (ThreadsList*)(intptr_t(list) | intptr_t(1));
186 }
187 static ThreadsList* untag_hazard_ptr(ThreadsList* list) {
188 return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
189 }
190 // This field is enabled via -XX:+EnableThreadSMRStatistics:
191 uint _nested_threads_hazard_ptr_cnt;
192 void dec_nested_threads_hazard_ptr_cnt() {
193 assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
194 _nested_threads_hazard_ptr_cnt--;
195 }
196 void inc_nested_threads_hazard_ptr_cnt() {
197 _nested_threads_hazard_ptr_cnt++;
198 }
199 uint nested_threads_hazard_ptr_cnt() {
200 return _nested_threads_hazard_ptr_cnt;
201 }
202
203 public:
204 // Is the target JavaThread protected by the calling Thread or by some other
205 // mechanism?
206 static bool is_JavaThread_protected(const JavaThread* target);
207 // Is the target JavaThread protected by a ThreadsListHandle (TLH) associated
208 // with the calling Thread?
209 static bool is_JavaThread_protected_by_TLH(const JavaThread* target);
210
211 private:
212 DEBUG_ONLY(static Thread* _starting_thread;)
213 DEBUG_ONLY(bool _suspendible_thread;)
214 DEBUG_ONLY(bool _indirectly_suspendible_thread;)
215 DEBUG_ONLY(bool _indirectly_safepoint_thread;)
216
217 public:
218 #ifdef ASSERT
219 static bool is_starting_thread(const Thread* t);
220
221 void set_suspendible_thread() { _suspendible_thread = true; }
222 void clear_suspendible_thread() { _suspendible_thread = false; }
223 bool is_suspendible_thread() { return _suspendible_thread; }
224
225 void set_indirectly_suspendible_thread() { _indirectly_suspendible_thread = true; }
226 void clear_indirectly_suspendible_thread() { _indirectly_suspendible_thread = false; }
227 bool is_indirectly_suspendible_thread() { return _indirectly_suspendible_thread; }
228
229 void set_indirectly_safepoint_thread() { _indirectly_safepoint_thread = true; }
230 void clear_indirectly_safepoint_thread() { _indirectly_safepoint_thread = false; }
231 bool is_indirectly_safepoint_thread() { return _indirectly_safepoint_thread; }
232 #endif
233
234 private:
235 // Point to the last handle mark
236 HandleMark* _last_handle_mark;
237
238 // Claim value for parallel iteration over threads.
239 uintx _threads_do_token;
240
241 // Support for GlobalCounter
242 private:
243 volatile uintx _rcu_counter;
244 public:
245 volatile uintx* get_rcu_counter() {
246 return &_rcu_counter;
247 }
248
249 public:
250 void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
251 HandleMark* last_handle_mark() const { return _last_handle_mark; }
252
253 private:
254 // Used by SkipGCALot class.
255 NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
256
257 friend class GCLocker;
258
259 private:
260 ThreadLocalAllocBuffer _tlab; // Thread-local eden
261 jlong _allocated_bytes; // Cumulative number of bytes allocated on
262 // the Java heap
  ThreadHeapSampler _heap_sampler; // For sampling heap allocations.
264
265 ThreadStatisticalInfo _statistical_info; // Statistics about the thread
266
267 JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;) // Thread-local data for jfr
268
269 JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
270 // is waiting to lock
271 public:
272 // Constructor
273 Thread(MemTag mem_tag = mtThread);
274 virtual ~Thread() = 0; // Thread is abstract.
275
276 // Manage Thread::current()
277 void initialize_thread_current();
278 static void clear_thread_current(); // TLS cleanup needed before threads terminate
279
280 protected:
281 // To be implemented by children.
282 virtual void run() = 0;
283 virtual void pre_run() = 0;
284 virtual void post_run() = 0; // Note: Thread must not be deleted prior to calling this!
285
286 #ifdef ASSERT
287 enum RunState {
288 PRE_CALL_RUN,
289 CALL_RUN,
290 PRE_RUN,
291 RUN,
292 POST_RUN
293 // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
294 };
295 RunState _run_state; // for lifecycle checks
296 #endif
297
298
299 public:
300 // invokes <ChildThreadClass>::run(), with common preparations and cleanups.
301 void call_run();
302
303 // Testers
304 virtual bool is_VM_thread() const { return false; }
305 virtual bool is_Java_thread() const { return false; }
306 virtual bool is_Compiler_thread() const { return false; }
307 virtual bool is_service_thread() const { return false; }
308 virtual bool is_hidden_from_external_view() const { return false; }
309 virtual bool is_jvmti_agent_thread() const { return false; }
310 virtual bool is_Watcher_thread() const { return false; }
311 virtual bool is_ConcurrentGC_thread() const { return false; }
312 virtual bool is_Named_thread() const { return false; }
313 virtual bool is_Worker_thread() const { return false; }
314 virtual bool is_JfrSampler_thread() const { return false; }
315 virtual bool is_JfrRecorder_thread() const { return false; }
316 virtual bool is_AttachListener_thread() const { return false; }
317 virtual bool is_monitor_deflation_thread() const { return false; }
318
319 // Convenience cast functions
320 CompilerThread* as_Compiler_thread() const {
321 assert(is_Compiler_thread(), "Must be compiler thread");
322 return (CompilerThread*)this;
323 }
324
325 // Can this thread make Java upcalls
326 virtual bool can_call_java() const { return false; }
327
328 // Is this a JavaThread that is on the VM's current ThreadsList?
329 // If so it must participate in the safepoint protocol.
330 virtual bool is_active_Java_thread() const { return false; }
331
332 // All threads are given names. For singleton subclasses we can
333 // just hard-wire the known name of the instance. JavaThreads and
334 // NamedThreads support multiple named instances, and dynamic
335 // changing of the name of an instance.
336 virtual const char* name() const { return "Unknown thread"; }
337
338 // A thread's type name is also made available for debugging
339 // and logging.
340 virtual const char* type_name() const { return "Thread"; }
341
342 // Returns the current thread (ASSERTS if null)
343 static inline Thread* current();
344 // Returns the current thread, or null if not attached
345 static inline Thread* current_or_null();
346 // Returns the current thread, or null if not attached, and is
347 // safe for use from signal-handlers
348 static inline Thread* current_or_null_safe();
349
350 // Common thread operations
351 #ifdef ASSERT
352 static void check_for_dangling_thread_pointer(Thread *thread);
353 #endif
354 static void set_priority(Thread* thread, ThreadPriority priority);
355 static void start(Thread* thread);
356
357 void set_native_thread_name(const char *name) {
358 assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
359 os::set_native_thread_name(name);
360 }
361
362 // Support for Unhandled Oop detection
  // Add the field for both fastdebug and debug builds to keep
  // Thread's field layout the same.
365 // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug build.
366 #ifdef CHECK_UNHANDLED_OOPS
367 private:
368 UnhandledOops* _unhandled_oops;
369 #elif defined(ASSERT)
370 private:
371 void* _unhandled_oops;
372 #endif
373 #ifdef CHECK_UNHANDLED_OOPS
374 public:
375 UnhandledOops* unhandled_oops() { return _unhandled_oops; }
376 // Mark oop safe for gc. It may be stack allocated but won't move.
377 void allow_unhandled_oop(oop *op) {
378 if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
379 }
380 // Clear oops at safepoint so crashes point to unhandled oop violator
381 void clear_unhandled_oops() {
382 if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
383 }
384 #endif // CHECK_UNHANDLED_OOPS
385
386 public:
387 #ifndef PRODUCT
388 bool skip_gcalot() { return _skip_gcalot; }
389 void set_skip_gcalot(bool v) { _skip_gcalot = v; }
390 #endif
391
392 // Resource area
393 ResourceArea* resource_area() const { return _resource_area; }
394 void set_resource_area(ResourceArea* area) { _resource_area = area; }
395
396 OSThread* osthread() const { return _osthread; }
397 void set_osthread(OSThread* thread) { _osthread = thread; }
398
399 // Internal handle support
400 HandleArea* handle_area() const { return _handle_area; }
401 void set_handle_area(HandleArea* area) { _handle_area = area; }
402
403 GrowableArray<Metadata*>* metadata_handles() const { return _metadata_handles; }
404 void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
405
406 // Thread-Local Allocation Buffer (TLAB) support
407 ThreadLocalAllocBuffer& tlab() { return _tlab; }
408 void initialize_tlab();
409 void retire_tlab(ThreadLocalAllocStats* stats = nullptr);
410 void fill_tlab(HeapWord* start, size_t pre_reserved, size_t new_size);
411
412 jlong allocated_bytes() { return _allocated_bytes; }
413 void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
414 void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
415 inline jlong cooked_allocated_bytes();
416
417 ThreadHeapSampler& heap_sampler() { return _heap_sampler; }
418
419 ThreadStatisticalInfo& statistical_info() { return _statistical_info; }
420
421 JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
422
423 // For tracking the Jvmti raw monitor the thread is pending on.
424 JvmtiRawMonitor* current_pending_raw_monitor() {
425 return _current_pending_raw_monitor;
426 }
427 void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
428 _current_pending_raw_monitor = monitor;
429 }
430
431 // GC support
432 // Apply "f->do_oop" to all root oops in "this".
433 // Used by JavaThread::oops_do.
434 // Apply "cf->do_nmethod" (if !nullptr) to all nmethods active in frames
435 virtual void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
436 virtual void oops_do_frames(OopClosure* f, NMethodClosure* cf) {}
437 void oops_do(OopClosure* f, NMethodClosure* cf);
438
439 // Handles the parallel case for claim_threads_do.
440 private:
441 bool claim_par_threads_do(uintx claim_token);
442 public:
443 // Requires that "claim_token" is that of the current iteration.
444 // If "is_par" is false, sets the token of "this" to
445 // "claim_token", and returns "true". If "is_par" is true,
446 // uses an atomic instruction to set the current thread's token to
447 // "claim_token", if it is not already. Returns "true" iff the
  // calling thread does the update; this indicates that the calling thread
  // has claimed the thread in the current iteration.
450 bool claim_threads_do(bool is_par, uintx claim_token) {
451 if (!is_par) {
452 _threads_do_token = claim_token;
453 return true;
454 } else {
455 return claim_par_threads_do(claim_token);
456 }
457 }
458
459 uintx threads_do_token() const { return _threads_do_token; }
460
461 // jvmtiRedefineClasses support
462 void metadata_handles_do(void f(Metadata*));
463
464 private:
465 // Check if address is within the given range of this thread's
466 // stack: stack_base() > adr >/>= limit
  // The check is inclusive of limit if 'inclusive' is true, else exclusive.
468 bool is_in_stack_range(address adr, address limit, bool inclusive) const {
469 assert(stack_base() > limit && limit >= stack_end(), "limit is outside of stack");
470 return stack_base() > adr && (inclusive ? adr >= limit : adr > limit);
471 }
472
473 public:
474 // Check if address is within the given range of this thread's
475 // stack: stack_base() > adr >= limit
476 bool is_in_stack_range_incl(address adr, address limit) const {
477 return is_in_stack_range(adr, limit, true);
478 }
479
480 // Check if address is within the given range of this thread's
481 // stack: stack_base() > adr > limit
482 bool is_in_stack_range_excl(address adr, address limit) const {
483 return is_in_stack_range(adr, limit, false);
484 }
485
486 // Check if address is in the stack mapped to this thread. Used mainly in
487 // error reporting (so has to include guard zone) and frame printing.
488 // Expects _stack_base to be initialized - checked with assert.
489 bool is_in_full_stack_checked(address adr) const {
490 return is_in_stack_range_incl(adr, stack_end());
491 }
492
493 // Like is_in_full_stack_checked but without the assertions as this
494 // may be called in a thread before _stack_base is initialized.
495 bool is_in_full_stack(address adr) const {
496 address stack_end = _stack_base - _stack_size;
497 return _stack_base > adr && adr >= stack_end;
498 }
499
500 // Check if address is in the live stack of this thread (not just for locks).
501 // Warning: can only be called by the current thread on itself.
502 bool is_in_live_stack(address adr) const {
503 assert(Thread::current() == this, "is_in_live_stack can only be called from current thread");
504 return is_in_stack_range_incl(adr, os::current_stack_pointer());
505 }
506
  // Sets the argument thread as the starting thread. Returns false if thread
  // creation fails, e.g. due to lack of memory or too many threads.
509 static bool set_as_starting_thread(JavaThread* jt);
510
511 protected:
512 // OS data associated with the thread
513 OSThread* _osthread; // Platform-specific thread information
514
515 // Thread local resource area for temporary allocation within the VM
516 ResourceArea* _resource_area;
517
518 DEBUG_ONLY(ResourceMark* _current_resource_mark;)
519
520 // Thread local handle area for allocation of handles within the VM
521 HandleArea* _handle_area;
522 GrowableArray<Metadata*>* _metadata_handles;
523
524 // Support for stack overflow handling, get_thread, etc.
525 address _stack_base;
526 size_t _stack_size;
527 int _lgrp_id;
528
529 public:
530 // Stack overflow support
531 address stack_base() const DEBUG_ONLY(;) NOT_DEBUG({ return _stack_base; })
532 // Needed for code that can query a new thread before the stack has been set.
533 address stack_base_or_null() const { return _stack_base; }
534 void set_stack_base(address base) { _stack_base = base; }
535 size_t stack_size() const { return _stack_size; }
536 void set_stack_size(size_t size) { _stack_size = size; }
537 address stack_end() const { return stack_base() - stack_size(); }
538 void record_stack_base_and_size();
539 void register_thread_stack_with_NMT();
540 void unregister_thread_stack_with_NMT();
541
542 int lgrp_id() const { return _lgrp_id; }
543 void update_lgrp_id() { _lgrp_id = os::numa_get_group_id(); }
544
545 // Printing
546 void print_on(outputStream* st, bool print_extended_info) const;
547 virtual void print_on(outputStream* st) const { print_on(st, false); }
548 void print() const;
549 virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
550 // Basic, non-virtual, printing support that is simple and always safe.
551 void print_value_on(outputStream* st) const;
552
553 // Debug-only code
554 #ifdef ASSERT
555 private:
  // Deadlock detection support for Mutex locks. List of locks owned by this thread.
557 Mutex* _owned_locks;
558 // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
559 // thus the friendship
560 friend class Mutex;
561 friend class Monitor;
562
563 public:
564 void print_owned_locks_on(outputStream* st) const;
565 void print_owned_locks() const { print_owned_locks_on(tty); }
566 Mutex* owned_locks() const { return _owned_locks; }
567 bool owns_locks() const { return owned_locks() != nullptr; }
568
569 // Deadlock detection
570 ResourceMark* current_resource_mark() { return _current_resource_mark; }
571 void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
572 #endif // ASSERT
573
574 private:
575 volatile int _jvmti_env_iteration_count;
576
577 public:
578 void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
579 void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
580 bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
581
582 // Code generation
583 static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
584 static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
585
586 static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
587 static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
588
589 static ByteSize tlab_start_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
590 static ByteSize tlab_end_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
591 static ByteSize tlab_top_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
592 static ByteSize tlab_pf_top_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }
593
594 JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)
595
596 public:
597 ParkEvent * volatile _ParkEvent; // for Object monitors, JVMTI raw monitors,
598 // and ObjectSynchronizer::read_stable_mark
599
600 // Termination indicator used by the signal handler.
601 // _ParkEvent is just a convenient field we can null out after setting the JavaThread termination state
602 // (which can't itself be read from the signal handler if a signal hits during the Thread destructor).
  bool has_terminated() { return AtomicAccess::load(&_ParkEvent) == nullptr; }
604
605 jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG
606 jint _hashStateX; // thread-specific hashCode generator state
607 jint _hashStateY;
608 jint _hashStateZ;
609
610 // Low-level leaf-lock primitives used to implement synchronization.
611 // Not for general synchronization use.
612 static void SpinAcquire(volatile int * Lock);
613 static void SpinRelease(volatile int * Lock);
614
615 #if defined(__APPLE__) && defined(AARCH64)
616 private:
617 DEBUG_ONLY(bool _wx_init);
618 WXMode _wx_state;
619 public:
620 void init_wx();
621 WXMode enable_wx(WXMode new_state);
622
623 void assert_wx_state(WXMode expected) {
624 assert(_wx_state == expected, "wrong state");
625 }
626 #endif // __APPLE__ && AARCH64
627
628 private:
629 bool _in_asgct = false;
630 public:
631 bool in_asgct() const { return _in_asgct; }
632 void set_in_asgct(bool value) { _in_asgct = value; }
633 static bool current_in_asgct() {
634 Thread *cur = Thread::current_or_null_safe();
635 return cur != nullptr && cur->in_asgct();
636 }
637
638 private:
639 VMErrorCallback* _vm_error_callbacks;
640
641 bool _profile_vm_locks;
642 bool _profile_vm_calls;
643 bool _profile_vm_ops;
644 bool _profile_rt_calls;
645 bool _profile_upcalls;
646
647 jlong _all_bc_counter_value;
648 jlong _clinit_bc_counter_value;
649
650 PerfTraceTime* _current_rt_call_timer;
651 public:
652 bool profile_vm_locks() const { return _profile_vm_locks; }
653 void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }
654
655 bool profile_vm_calls() const { return _profile_vm_calls; }
656 void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }
657
658 bool profile_vm_ops() const { return _profile_vm_ops; }
659 void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }
660
661 bool profile_rt_calls() const { return _profile_rt_calls; }
662 void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }
663
664 bool profile_upcalls() const { return _profile_upcalls; }
665 void set_profile_upcalls(bool v) { _profile_upcalls = v; }
666
667 PerfTraceTime* current_rt_call_timer() const { return _current_rt_call_timer; }
668 void set_current_rt_call_timer(PerfTraceTime* c) { _current_rt_call_timer = c; }
669 bool has_current_rt_call_timer() const { return _current_rt_call_timer != nullptr; }
670
671 bool do_profile_rt_call() const {
672 return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
673 }
674
675 jlong bc_counter_value() const { return _all_bc_counter_value; }
676
677 jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }
678
679 void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }
680
681 static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
682 };
683
class ProfileVMCallContext : public StackObj {
685 private:
686 Thread* _thread;
687 bool _enabled;
688 PerfTraceTime* _timer;
689
690 static int _perf_nested_runtime_calls_count;
691
692 static const char* name(PerfTraceTime* t);
693 static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
694 public:
695 inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
696 : _thread(current), _enabled(is_on), _timer(timer) {
697 if (_enabled) {
698 assert(timer != nullptr, "");
699 assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
700 _thread->set_current_rt_call_timer(timer);
701 } else if (current->profile_rt_calls()) {
702 notify_nested_rt_call(current->current_rt_call_timer(), timer);
703 }
704 }
705
706 inline ~ProfileVMCallContext() {
707 if (_enabled) {
708 assert(_timer == _thread->current_rt_call_timer(),
709 "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
710 _thread->set_current_rt_call_timer(nullptr);
711 }
712 }
713
  static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; }
715 };
716
717 class PauseRuntimeCallProfiling : public StackObj {
718 protected:
719 Thread* _thread;
720 bool _enabled;
721 PerfTraceTime* _timer;
722
723 public:
724 inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
725 : _thread(current), _enabled(is_on), _timer(nullptr) {
726 if (_enabled) {
727 _timer = _thread->current_rt_call_timer();
728 _thread->set_current_rt_call_timer(nullptr);
729 }
730 }
731
  inline ~PauseRuntimeCallProfiling() {
733 if (_enabled) {
734 guarantee(_thread->current_rt_call_timer() == nullptr, "");
735 _thread->set_current_rt_call_timer(_timer); // restore
736 }
737 }
738 };
739
740 class ThreadInAsgct {
741 private:
742 Thread* _thread;
743 bool _saved_in_asgct;
744 public:
745 ThreadInAsgct(Thread* thread) : _thread(thread) {
746 assert(thread != nullptr, "invariant");
747 // Allow AsyncGetCallTrace to be reentrant - save the previous state.
748 _saved_in_asgct = thread->in_asgct();
749 thread->set_in_asgct(true);
750 }
751 ~ThreadInAsgct() {
752 assert(_thread->in_asgct(), "invariant");
753 _thread->set_in_asgct(_saved_in_asgct);
754 }
755 };
756
757 // Inline implementation of Thread::current()
758 inline Thread* Thread::current() {
759 Thread* current = current_or_null();
760 assert(current != nullptr, "Thread::current() called on detached thread");
761 return current;
762 }
763
764 inline Thread* Thread::current_or_null() {
765 return _thr_current;
766 }
767
768 inline Thread* Thread::current_or_null_safe() {
769 if (ThreadLocalStorage::is_initialized()) {
770 return ThreadLocalStorage::thread();
771 }
772 return nullptr;
773 }
774
775 #endif // SHARE_RUNTIME_THREAD_HPP