/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "jni.h"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif

class CompilerThread;
class HandleArea;
class HandleMark;
class JvmtiRawMonitor;
class NMethodClosure;
class Metadata;
class OopClosure;
class OSThread;
class ParkEvent;
class ResourceArea;
class SafeThreadsListPtr;
class ThreadClosure;
class ThreadsList;
class ThreadsSMRSupport;
class VMErrorCallback;


DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

class JavaThread;
// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses, e.g. CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//     - WatcherThread
//     - JfrThreadSampler
//     - LogAsyncWriter
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//    - stack initialization
//    - other OS-level initialization (signal masks etc)
//    - handshake with creating thread (if not started suspended)
//    - this->call_run()  // common shared entry point
//      - shared common initialization
//      - this->pre_run()  // virtual per-thread-type initialization
//      - this->run()      // virtual per-thread-type "main" logic
//      - shared common tear-down
//      - this->post_run()  // virtual per-thread-type tear-down
//      - // 'this' no longer referenceable
//    - OS-level tear-down (minimal)
//    - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread
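//
// As an illustrative sketch only (MyHelperThread is hypothetical, not a
// real VM thread type), a minimal NonJavaThread subclass plugs into the
// sequence above roughly like this:
//
//   class MyHelperThread : public NonJavaThread {
//     const char* name() const override { return "My Helper Thread"; }
//     void run() override {
//       // per-thread-type "main" logic, bracketed by pre_run()/post_run()
//     }
//   };
//
// Concrete subclasses layer their own setup and tear-down into pre_run()
// and post_run() as needed.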

class Thread: public ThreadShadow {
  friend class VMError;
  friend class VMErrorCallbackMark;
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL Thread* _thr_current;
#endif
  // On AArch64, the high order 32 bits are used by a "patching epoch" number,
  // which reflects whether this thread has executed the required fences
  // after an nmethod gets disarmed. The low order 32 bits denote the
  // disarmed value.
  uint64_t _nmethod_disarmed_guard_value;

 public:
  void set_nmethod_disarmed_guard_value(int value) {
    _nmethod_disarmed_guard_value = (uint64_t)(uint32_t)value;
  }

  static ByteSize nmethod_disarmed_guard_value_offset() {
    ByteSize offset = byte_offset_of(Thread, _nmethod_disarmed_guard_value);
    // At least on x86_64, the nmethod entry barrier encodes the disarmed-value
    // offset in the instruction as a disp8 immediate.
    assert(in_bytes(offset) < 128, "Offset >= 128");
    return offset;
  }
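
  // For illustration: set_nmethod_disarmed_guard_value(42) stores 0x2A in
  // the low order 32 bits; on AArch64 the high order 32 bits may
  // concurrently hold the patching epoch, which is why only the low order
  // 32 bits denote the disarmed value itself.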

 private:
  // Thread local data area available to the GC. The internal
  // structure and contents of this data area are GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }
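
  // Example use (MyGCThreadLocalData is a hypothetical GC-private type):
  //   MyGCThreadLocalData* data = thread->gc_data<MyGCThreadLocalData>();
  // The STATIC_ASSERT above guarantees the GC-private type fits inside
  // the opaque _gc_data area.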

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  // oop      _pending_exception;                // pending exception for current thread
  // const char* _exception_file;                   // file information for exception (debugging only)
  // int         _exception_line;                   // line information for exception (debugging only)
 protected:

  DEBUG_ONLY(static Thread* _starting_thread;)

  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ThreadsListHandleTest;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ValidateHazardPtrsClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr*   _threads_list_ptr;
  ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList*          get_threads_hazard_ptr() const;
  void                  set_threads_hazard_ptr(ThreadsList* new_list);
  static bool           is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList*   tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList*   untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
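
  // The tag is the low order bit of the pointer, so tagging round-trips,
  // e.g. (sketch):
  //   ThreadsList* tagged = tag_hazard_ptr(list);
  //   assert(is_hazard_ptr_tagged(tagged), "just tagged");
  //   assert(untag_hazard_ptr(tagged) == list, "round trip");
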
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  // Is the target JavaThread protected by the calling Thread or by some other
  // mechanism?
  static bool is_JavaThread_protected(const JavaThread* target);
  // Is the target JavaThread protected by a ThreadsListHandle (TLH) associated
  // with the calling Thread?
  static bool is_JavaThread_protected_by_TLH(const JavaThread* target);

 private:
  DEBUG_ONLY(bool _suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_safepoint_thread;)

 public:
#ifdef ASSERT
  void set_suspendible_thread()   { _suspendible_thread = true; }
  void clear_suspendible_thread() { _suspendible_thread = false; }
  bool is_suspendible_thread()    { return _suspendible_thread; }

  void set_indirectly_suspendible_thread()   { _indirectly_suspendible_thread = true; }
  void clear_indirectly_suspendible_thread() { _indirectly_suspendible_thread = false; }
  bool is_indirectly_suspendible_thread()    { return _indirectly_suspendible_thread; }

  void set_indirectly_safepoint_thread()   { _indirectly_safepoint_thread = true; }
  void clear_indirectly_safepoint_thread() { _indirectly_safepoint_thread = false; }
  bool is_indirectly_safepoint_thread()    { return _indirectly_safepoint_thread; }
#endif

 private:
  // Points to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }

 public:
  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const          { return _last_handle_mark; }

 private:
  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  friend class GCLocker;

 private:
  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  ThreadHeapSampler _heap_sampler;              // For use when sampling the memory.

  ThreadStatisticalInfo _statistical_info;      // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
                                                 // is waiting to lock
 public:
  // Constructor
  Thread(MemTag mem_tag = mtThread);
  virtual ~Thread() = 0;        // Thread is abstract.

  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current(); // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by children.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif


 public:
  // invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();

  // Testers
  virtual bool is_VM_thread()       const            { return false; }
  virtual bool is_Java_thread()     const            { return false; }
  virtual bool is_Compiler_thread() const            { return false; }
  virtual bool is_service_thread() const             { return false; }
  virtual bool is_hidden_from_external_view() const  { return false; }
  virtual bool is_jvmti_agent_thread() const         { return false; }
  virtual bool is_Watcher_thread() const             { return false; }
  virtual bool is_ConcurrentGC_thread() const        { return false; }
  virtual bool is_Named_thread() const               { return false; }
  virtual bool is_Worker_thread() const              { return false; }
  virtual bool is_JfrSampler_thread() const          { return false; }
  virtual bool is_AttachListener_thread() const      { return false; }
  virtual bool is_monitor_deflation_thread() const   { return false; }

  // Convenience cast functions
  CompilerThread* as_Compiler_thread() const {
    assert(is_Compiler_thread(), "Must be compiler thread");
    return (CompilerThread*)this;
  }

  // Can this thread make Java upcalls
  virtual bool can_call_java() const                 { return false; }

  // Is this a JavaThread that is on the VM's current ThreadsList?
  // If so it must participate in the safepoint protocol.
  virtual bool is_active_Java_thread() const         { return false; }

  // All threads are given names. For singleton subclasses we can
  // just hard-wire the known name of the instance. JavaThreads and
  // NamedThreads support multiple named instances, and dynamically
  // changing the name of an instance.
  virtual const char* name() const { return "Unknown thread"; }

  // A thread's type name is also made available for debugging
  // and logging.
  virtual const char* type_name() const { return "Thread"; }

  // Returns the current thread (ASSERTS if null)
  static inline Thread* current();
  // Returns the current thread, or null if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or null if not attached; safe
  // for use from signal handlers
  static inline Thread* current_or_null_safe();
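
  // For example (sketch):
  //   Thread* t = Thread::current();          // asserts if detached
  //   Thread* u = Thread::current_or_null();  // nullptr if detached
  // Only current_or_null_safe() may be used from a signal handler.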

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static void start(Thread* thread);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  // Support for Unhandled Oop detection
  // Add the field to both fastdebug and debug builds to keep
  // Thread's field layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc.  It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif

  // Resource area
  ResourceArea* resource_area() const            { return _resource_area; }
  void set_resource_area(ResourceArea* area)     { _resource_area = area; }

  OSThread* osthread() const                     { return _osthread;   }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  void initialize_tlab();

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  // For tracking the JVMTI raw monitor the thread is pending on.
  JvmtiRawMonitor* current_pending_raw_monitor() {
    return _current_pending_raw_monitor;
  }
  void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
    _current_pending_raw_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  //   Used by JavaThread::oops_do.
  // Apply "cf->do_nmethod" (if !nullptr) to all nmethods active in frames
  virtual void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
  virtual void oops_do_frames(OopClosure* f, NMethodClosure* cf) {}
  void oops_do(OopClosure* f, NMethodClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true".  If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already.  Returns "true" iff the
  // calling thread does the update; this indicates that the calling
  // thread has claimed the thread in the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }

  uintx threads_do_token() const { return _threads_do_token; }
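
  // A parallel iteration would typically guard its per-thread work with
  // the claim, e.g. (sketch; 'token' is the current iteration's token):
  //   if (t->claim_threads_do(true /* is_par */, token)) {
  //     // this worker claimed 't' for this iteration; process it
  //   }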

  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

 private:
  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >/>= limit
  // The check is inclusive of limit if 'inclusive' is true, else exclusive.
  bool is_in_stack_range(address adr, address limit, bool inclusive) const {
    assert(stack_base() > limit && limit >= stack_end(), "limit is outside of stack");
    return stack_base() > adr && (inclusive ? adr >= limit : adr > limit);
  }

 public:
  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >= limit
  bool is_in_stack_range_incl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, true);
  }

  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr > limit
  bool is_in_stack_range_excl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, false);
  }
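
  // For example, with a (hypothetical) stack_base() of 0x2000 and a
  // limit of 0x1000:
  //   is_in_stack_range_incl(0x1000, 0x1000) is true  (adr >= limit)
  //   is_in_stack_range_excl(0x1000, 0x1000) is false (adr >  limit)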

  // Check if address is in the stack mapped to this thread. Used mainly in
  // error reporting (so has to include guard zone) and frame printing.
  // Expects _stack_base to be initialized - checked with assert.
  bool is_in_full_stack_checked(address adr) const {
    return is_in_stack_range_incl(adr, stack_end());
  }

  // Like is_in_full_stack_checked but without the assertions as this
  // may be called in a thread before _stack_base is initialized.
  bool is_in_full_stack(address adr) const {
    address stack_end = _stack_base - _stack_size;
    return _stack_base > adr && adr >= stack_end;
  }

  // Check if address is in the live stack of this thread (not just for locks).
  // Warning: can only be called by the current thread on itself.
  bool is_in_live_stack(address adr) const {
    assert(Thread::current() == this, "is_in_live_stack can only be called from current thread");
    return is_in_stack_range_incl(adr, os::current_stack_pointer());
  }

  // Sets this thread as the starting thread. Returns false if thread
  // creation fails, e.g. due to lack of memory or too many threads.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address          _stack_base;
  size_t           _stack_size;
  int              _lgrp_id;

 public:
  // Stack overflow support
  address stack_base() const DEBUG_ONLY(;) NOT_DEBUG({ return _stack_base; })
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end()  const           { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT();
  void    unregister_thread_stack_with_NMT();

  int     lgrp_id() const        { return _lgrp_id; }
  void    set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  // Basic, non-virtual, printing support that is simple and always safe.
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by thread.
  Mutex* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // hence the friendship.
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const                 { print_owned_locks_on(tty);    }
  Mutex* owned_locks() const                     { return _owned_locks;          }
  bool owns_locks() const                        { return owned_locks() != nullptr; }

  // Deadlock detection
  ResourceMark* current_resource_mark()          { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset()        { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset()        { return byte_offset_of(Thread, _exception_line); }

  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }

  static ByteSize tlab_start_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset()           { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  ParkEvent * volatile _ParkEvent;            // for Object monitors, JVMTI raw monitors,
                                              // and ObjectSynchronizer::read_stable_mark

  // Termination indicator used by the signal handler.
  // _ParkEvent is just a convenient field we can null out after setting the JavaThread termination state
  // (which can't itself be read from the signal handler if a signal hits during the Thread destructor).
  bool has_terminated()                       { return Atomic::load(&_ParkEvent) == nullptr; }

  jint _hashStateW;                           // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;                           // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
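
  // These words seed Marsaglia's xor-shift scheme; a sketch of the
  // update step (the VM's actual use lives in the synchronizer code):
  //   unsigned t = _hashStateX;
  //   t ^= (t << 11);
  //   _hashStateX = _hashStateY;
  //   _hashStateY = _hashStateZ;
  //   _hashStateZ = _hashStateW;
  //   unsigned v = _hashStateW;
  //   v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
  //   _hashStateW = v;   // v is the next pseudo-random value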

  // Low-level leaf-lock primitives used to implement synchronization.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);
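
  // Illustrative pairing (the lock word and name are hypothetical):
  //   static volatile int _SpinLock = 0;
  //   Thread::SpinAcquire(&_SpinLock, "MySpinLock");
  //   // ... short leaf-level critical section; must not block ...
  //   Thread::SpinRelease(&_SpinLock);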

#if defined(__APPLE__) && defined(AARCH64)
 private:
  DEBUG_ONLY(bool _wx_init);
  WXMode _wx_state;
 public:
  void init_wx();
  WXMode enable_wx(WXMode new_state);

  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64

 private:
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  VMErrorCallback* _vm_error_callbacks;
};

class ThreadInAsgct {
 private:
  Thread* _thread;
  bool _saved_in_asgct;
 public:
  ThreadInAsgct(Thread* thread) : _thread(thread) {
    assert(thread != nullptr, "invariant");
    // Allow AsyncGetCallTrace to be reentrant - save the previous state.
    _saved_in_asgct = thread->in_asgct();
    thread->set_in_asgct(true);
  }
  ~ThreadInAsgct() {
    assert(_thread->in_asgct(), "invariant");
    _thread->set_in_asgct(_saved_in_asgct);
  }
};
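
// ThreadInAsgct is a stack-allocated (RAII) guard. A sketch of intended
// use (the enclosing function here is hypothetical):
//   void sample_stack(Thread* t) {
//     ThreadInAsgct tia(t);
//     // ... Thread::current_in_asgct() now reports true ...
//   } // destructor restores the saved state, so nesting is safe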

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != nullptr, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
}

#endif // SHARE_RUNTIME_THREAD_HPP