/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "jni.h"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif

class HandleArea;
class HandleMark;
class ICRefillVerifier;
class JvmtiRawMonitor;
class Metadata;
class OSThread;
class ParkEvent;
class ResourceArea;
class SafeThreadsListPtr;
class ThreadClosure;
class ThreadsList;
class ThreadsSMRSupport;

class OopClosure;
class CodeBlobClosure;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

class JavaThread;

// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses eg CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//     - WatcherThread
//     - JfrThreadSampler
//     - LogAsyncWriter
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//    - stack initialization
//    - other OS-level initialization (signal masks etc)
//    - handshake with creating thread (if not started suspended)
//    - this->call_run()  // common shared entry point
//      - shared common initialization
//      - this->pre_run()  // virtual per-thread-type initialization
//      - this->run()      // virtual per-thread-type "main" logic
//      - shared common tear-down
//      - this->post_run()  // virtual per-thread-type tear-down
//      - // 'this' no longer referenceable
//    - OS-level tear-down (minimal)
//    - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread
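
// Illustrative sketch of the sequence above (not part of this header's API):
// a minimal NonJavaThread-style subclass. The class name and _stop flag are
// hypothetical; real subclasses add their own termination protocols, and
// pre_run()/post_run() are supplied by the NonJavaThread hierarchy.
//
//   class MyHelperThread : public NamedThread {
//     volatile bool _stop;
//    public:
//     MyHelperThread() : _stop(false) {}
//     const char* name() const { return "My Helper Thread"; }
//     void run() {
//       while (!_stop) {
//         // one unit of background work
//       }
//     }
//   };
//   // created and started roughly like:
//   //   MyHelperThread* t = new MyHelperThread();
//   //   if (os::create_thread(t, os::vm_thread)) os::start_thread(t);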

class Thread: public ThreadShadow {
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL Thread* _thr_current;
#endif

  // On AArch64, the high-order 32 bits hold a "patching epoch" number,
  // which records whether this thread has executed the required fences
  // after an nmethod has been disarmed. The low-order 32 bits hold the
  // disarm value.
  uint64_t _nmethod_disarm_value;

 public:
  int nmethod_disarm_value() {
    return (int)(uint32_t)_nmethod_disarm_value;
  }

  void set_nmethod_disarm_value(int value) {
    _nmethod_disarm_value = (uint64_t)(uint32_t)value;
  }

  static ByteSize nmethod_disarmed_offset() {
    ByteSize offset = byte_offset_of(Thread, _nmethod_disarm_value);
    // At least on x86_64, the nmethod entry barrier encodes the disarmed-value
    // offset in the instruction as a disp8 immediate.
    assert(in_bytes(offset) < 128, "Offset >= 128");
    return offset;
  }

 private:
  // Thread-local data area available to the GC. The internal
  // structure and contents of this data area are GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }
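
  // Illustrative sketch (the struct below is hypothetical, not defined here):
  // a collector keeps its per-thread state in this area and retrieves it via
  // gc_data<T>(); the STATIC_ASSERT above enforces that T fits in _gc_data.
  //
  //   struct MyGCThreadLocalData { HeapWord* alloc_top; };
  //   MyGCThreadLocalData* d = thread->gc_data<MyGCThreadLocalData>();
  //   d->alloc_top = ...;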

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop       _pending_exception;                // pending exception for current thread
  // const char* _exception_file;                   // file information for exception (debugging only)
  // int         _exception_line;                   // line information for exception (debugging only)
 protected:

  DEBUG_ONLY(static Thread* _starting_thread;)

  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ThreadsListHandleTest;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ValidateHazardPtrsClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr*   _threads_list_ptr;
  ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList*          get_threads_hazard_ptr() const;
  void                  set_threads_hazard_ptr(ThreadsList* new_list);
  static bool           is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList*   tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList*   untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
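
  // Worked example of the low-bit tagging scheme above (addresses illustrative):
  // ThreadsList objects are at least pointer-aligned, so bit 0 of a real
  // ThreadsList* is always 0 and can carry a mark.
  //   list                    == 0x...b0   (bit 0 clear)
  //   tag_hazard_ptr(list)    == 0x...b1   (bit 0 set: hazard ptr marked)
  //   untag_hazard_ptr(tagged)== 0x...b0   (mark stripped before dereferencing)
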
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  // Is the target JavaThread protected by the calling Thread or by some other
  // mechanism?
  static bool is_JavaThread_protected(const JavaThread* target);
  // Is the target JavaThread protected by a ThreadsListHandle (TLH) associated
  // with the calling Thread?
  static bool is_JavaThread_protected_by_TLH(const JavaThread* target);

  void* operator new(size_t size) throw() { return allocate(size, true); }
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    return allocate(size, false); }
  void  operator delete(void* p);

 protected:
  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);

 private:
  DEBUG_ONLY(bool _suspendible_thread;)

 public:
  // Determines if a heap allocation failure will be retried
  // (e.g., by deoptimizing and re-executing in the interpreter).
  // In this case, the failed allocation must raise
  // Universe::out_of_memory_error_retry() and omit side effects
  // such as JVMTI events and handling -XX:+HeapDumpOnOutOfMemoryError
  // and -XX:OnOutOfMemoryError.
  virtual bool in_retryable_allocation() const { return false; }

#ifdef ASSERT
  void set_suspendible_thread() {
    _suspendible_thread = true;
  }

  void clear_suspendible_thread() {
    _suspendible_thread = false;
  }

  bool is_suspendible_thread() { return _suspendible_thread; }
#endif

 private:
  // Points to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }
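
  // _rcu_counter backs GlobalCounter's RCU-style synchronization. A hedged
  // sketch of typical use (see utilities/globalCounter.hpp for the actual API):
  //
  //   // reader side: RAII critical section over this thread's counter
  //   { GlobalCounter::CriticalSection cs(Thread::current());
  //     /* read shared data */ }
  //   // writer side: wait until all readers have observed the new epoch
  //   GlobalCounter::write_synchronize();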

 public:
  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const          { return _last_handle_mark; }
 private:

#ifdef ASSERT
  ICRefillVerifier* _missed_ic_stub_refill_verifier;

 public:
  ICRefillVerifier* missed_ic_stub_refill_verifier() {
    return _missed_ic_stub_refill_verifier;
  }

  void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
    _missed_ic_stub_refill_verifier = verifier;
  }
#endif // ASSERT

 private:
  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  friend class GCLocker;

 private:
  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  ThreadHeapSampler _heap_sampler;              // For sampling heap allocations

  ThreadStatisticalInfo _statistical_info;      // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for JFR

  JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
                                                 // is waiting to lock
 public:
  // Constructor
  Thread();
  virtual ~Thread() = 0;        // Thread is abstract.

  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current(); // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by children.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif

 public:
  // Invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();

  // Testers
  virtual bool is_VM_thread()       const            { return false; }
  virtual bool is_Java_thread()     const            { return false; }
  virtual bool is_Compiler_thread() const            { return false; }
  virtual bool is_service_thread() const             { return false; }
  virtual bool is_monitor_deflation_thread() const   { return false; }
  virtual bool is_hidden_from_external_view() const  { return false; }
  virtual bool is_jvmti_agent_thread() const         { return false; }
  virtual bool is_Watcher_thread() const             { return false; }
  virtual bool is_ConcurrentGC_thread() const        { return false; }
  virtual bool is_Named_thread() const               { return false; }
  virtual bool is_Worker_thread() const              { return false; }
  virtual bool is_JfrSampler_thread() const          { return false; }

  // Can this thread make Java upcalls?
  virtual bool can_call_java() const                 { return false; }

  // Is this a JavaThread that is on the VM's current ThreadsList?
  // If so it must participate in the safepoint protocol.
  virtual bool is_active_Java_thread() const         { return false; }

  // All threads are given names. For singleton subclasses we can
  // just hard-wire the known name of the instance. JavaThreads and
  // NamedThreads support multiple named instances, and dynamic
  // changing of the name of an instance.
  virtual const char* name() const { return "Unknown thread"; }

  // A thread's type name is also made available for debugging
  // and logging.
  virtual const char* type_name() const { return "Thread"; }

  // Returns the current thread (asserts if NULL)
  static inline Thread* current();
  // Returns the current thread, or NULL if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or NULL if not attached; safe for
  // use from signal handlers
  static inline Thread* current_or_null_safe();
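
  // Guidance for choosing among the three accessors (illustrative):
  //
  //   Thread* t = Thread::current();               // caller must be attached
  //   Thread* t = Thread::current_or_null();       // tolerate unattached callers
  //   Thread* t = Thread::current_or_null_safe();  // async-signal context
  //
  // current_or_null() may rely on compiler-based TLS, which is not guaranteed
  // to be async-signal-safe, so signal handlers use current_or_null_safe(),
  // which goes through ThreadLocalStorage (see the inline definitions below).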

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static ThreadPriority get_priority(const Thread* const thread);
  static void start(Thread* thread);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  // Support for Unhandled Oop detection
  // Add the field for both fastdebug and debug builds to keep
  // Thread's field layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc.  It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif

  // Resource area
  ResourceArea* resource_area() const            { return _resource_area; }
  void set_resource_area(ResourceArea* area)     { _resource_area = area; }

  OSThread* osthread() const                     { return _osthread;   }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  void initialize_tlab();

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  // For tracking the Jvmti raw monitor the thread is pending on.
  JvmtiRawMonitor* current_pending_raw_monitor() {
    return _current_pending_raw_monitor;
  }
  void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
    _current_pending_raw_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  //   Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
  virtual void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf);
  virtual void oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {}
  void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true".  If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already.  Returns "true" iff the
  // calling thread does the update, which indicates that the calling
  // thread has claimed this thread for the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }

  uintx threads_do_token() const { return _threads_do_token; }
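
  // A hedged sketch of how a parallel iteration uses the claim protocol
  // (Threads::possibly_parallel_threads_do is the real consumer; this loop
  // is illustrative only):
  //
  //   uintx token = ...;  // fresh token for this iteration
  //   for (each Thread* t) {
  //     if (t->claim_threads_do(/* is_par */ true, token)) {
  //       // this worker won the race and processes t exactly once
  //     }
  //   }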

  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

 private:
  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >/>= limit
  // The check is inclusive of limit if "inclusive" is true, else exclusive.
  bool is_in_stack_range(address adr, address limit, bool inclusive) const {
    assert(stack_base() > limit && limit >= stack_end(), "limit is outside of stack");
    return stack_base() > adr && (inclusive ? adr >= limit : adr > limit);
  }

 public:
  // Used by fast lock support
  virtual bool is_lock_owned(address adr) const;

  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >= limit
  bool is_in_stack_range_incl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, true);
  }

  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr > limit
  bool is_in_stack_range_excl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, false);
  }

  // Check if address is in the stack mapped to this thread. Used mainly in
  // error reporting (so has to include guard zone) and frame printing.
  // Expects _stack_base to be initialized - checked with assert.
  bool is_in_full_stack_checked(address adr) const {
    return is_in_stack_range_incl(adr, stack_end());
  }

  // Like is_in_full_stack_checked but without the assertions as this
  // may be called in a thread before _stack_base is initialized.
  bool is_in_full_stack(address adr) const {
    address stack_end = _stack_base - _stack_size;
    return _stack_base > adr && adr >= stack_end;
  }

  // Check if address is in the live stack of this thread (not just for locks).
  // Warning: can only be called by the current thread on itself.
  bool is_in_live_stack(address adr) const {
    assert(Thread::current() == this, "is_in_live_stack can only be called from current thread");
    return is_in_stack_range_incl(adr, os::current_stack_pointer());
  }
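
  // Worked example of the stack range checks (stacks grow downward; values
  // illustrative): with stack_base() == B and stack_size() == S,
  //
  //   is_in_full_stack(adr)  <==>  B > adr && adr >= B - S
  //
  // so for B = 0x7f0000100000 and S = 0x100000 (stack_end() = 0x7f0000000000),
  // adr = 0x7f00000f8000 is in the stack, while adr == B itself is not
  // (the base is exclusive, the end inclusive).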

  // Sets this thread as starting thread. Returns failure if thread
  // creation fails due to lack of memory, too many threads etc.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread-local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread-local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address          _stack_base;
  size_t           _stack_size;
  int              _lgrp_id;

 public:
  // Stack overflow support
  address stack_base() const           { assert(_stack_base != NULL, "Sanity check"); return _stack_base; }
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end()  const           { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT();
  void    unregister_thread_stack_with_NMT();

  int     lgrp_id() const        { return _lgrp_id; }
  void    set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  // Basic, non-virtual, printing support that is simple and always safe.
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by the thread.
  Mutex* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // thus the friendship
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const                 { print_owned_locks_on(tty);    }
  Mutex* owned_locks() const                     { return _owned_locks;          }
  bool owns_locks() const                        { return owned_locks() != NULL; }

  // Deadlock detection
  ResourceMark* current_resource_mark()          { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset()        { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset()        { return byte_offset_of(Thread, _exception_line); }

  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }

  static ByteSize tlab_start_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset()           { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  ParkEvent * volatile _ParkEvent;            // for Object monitors, JVMTI raw monitors,
                                              // and ObjectSynchronizer::read_stable_mark

  // Termination indicator used by the signal handler.
  // _ParkEvent is just a convenient field we can NULL out after setting the JavaThread termination state
  // (which can't itself be read from the signal handler if a signal hits during the Thread destructor).
  bool has_terminated()                       { return Atomic::load(&_ParkEvent) == NULL; }

  jint _hashStateW;                           // Marsaglia shift-xor thread-local RNG state
  jint _hashStateX;                           // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
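
  // A hedged sketch of one Marsaglia xorshift step over this 128-bit state
  // (the authoritative consumer is ObjectSynchronizer's hashCode generation;
  // the exact shift constants used there may differ):
  //
  //   jint t = _hashStateX;
  //   t ^= (t << 11);
  //   _hashStateX = _hashStateY; _hashStateY = _hashStateZ; _hashStateZ = _hashStateW;
  //   _hashStateW = (_hashStateW ^ (_hashStateW >> 19)) ^ (t ^ (t >> 8));
  //   return _hashStateW;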

  // Low-level leaf-lock primitives used to implement synchronization.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);

#if defined(__APPLE__) && defined(AARCH64)
 private:
  DEBUG_ONLY(bool _wx_init);
  WXMode _wx_state;
 public:
  void init_wx();
  WXMode enable_wx(WXMode new_state);

  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64
};

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != NULL, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return NULL;
}

#endif // SHARE_RUNTIME_THREAD_HPP