/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "jni.h"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif

class CompilerThread;
class HandleArea;
class HandleMark;
class ICRefillVerifier;
class JvmtiRawMonitor;
class NMethodClosure;
class Metadata;
class OopClosure;
class OSThread;
class ParkEvent;
class ResourceArea;
class SafeThreadsListPtr;
class ThreadClosure;
class ThreadsList;
class ThreadsSMRSupport;
class VMErrorCallback;


class PerfTraceTime;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

class JavaThread;
// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses, e.g. CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//     - WatcherThread
//     - JfrThreadSampler
//     - LogAsyncWriter
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//    - stack initialization
//    - other OS-level initialization (signal masks, etc.)
//    - handshake with creating thread (if not started suspended)
//    - this->call_run()  // common shared entry point
//      - shared common initialization
//      - this->pre_run()  // virtual per-thread-type initialization
//      - this->run()      // virtual per-thread-type "main" logic
//      - shared common tear-down
//      - this->post_run()  // virtual per-thread-type tear-down
//      - // 'this' no longer referenceable
//    - OS-level tear-down (minimal)
//    - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread
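//
// Illustrative sketch (not part of the VM sources): a minimal per-thread-type
// override following the sequence above, using NamedThread from the hierarchy
// as the assumed base class:
//
//   class SketchThread : public NamedThread {   // hypothetical subclass
//     const char* name() const override { return "Sketch Thread"; }
//     void run() override {
//       // per-thread-type "main" logic, bracketed by pre_run()/post_run()
//     }
//   };
//   // Started via os::create_thread() and Thread::start(), which drive
//   // thread_native_entry -> call_run() -> pre_run() -> run() -> post_run().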

class Thread: public ThreadShadow {
  friend class VMError;
  friend class VMErrorCallbackMark;
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL Thread* _thr_current;
#endif

  // On AArch64, the high order 32 bits are used by a "patching epoch" number,
  // which records whether this thread has executed the required fences after
  // an nmethod gets disarmed. The low order 32 bits hold the disarmed value
  // itself.
  uint64_t _nmethod_disarmed_guard_value;

 public:
  void set_nmethod_disarmed_guard_value(int value) {
    _nmethod_disarmed_guard_value = (uint64_t)(uint32_t)value;
  }

  static ByteSize nmethod_disarmed_guard_value_offset() {
    ByteSize offset = byte_offset_of(Thread, _nmethod_disarmed_guard_value);
    // At least on x86_64, the nmethod entry barrier encodes the disarmed
    // value offset in the instruction as a disp8 immediate.
    assert(in_bytes(offset) < 128, "Offset >= 128");
    return offset;
  }

 private:
  // Thread-local data area available to the GC. The internal
  // structure and contents of this data area are GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }
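
  // Illustrative sketch (not part of this header): a GC typically
  // reinterprets the opaque area as its own per-thread type, e.g.
  //
  //   struct MyGCThreadData { HeapWord* alloc_top; };          // hypothetical
  //   MyGCThreadData* d = thread->gc_data<MyGCThreadData>();
  //
  // The STATIC_ASSERT above guarantees the type fits in the reserved area.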

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop       _pending_exception;                // pending exception for current thread
  // const char* _exception_file;                   // file information for exception (debugging only)
  // int         _exception_line;                   // line information for exception (debugging only)
 protected:

  DEBUG_ONLY(static Thread* _starting_thread;)

  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ThreadsListHandleTest;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ValidateHazardPtrsClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr*   _threads_list_ptr;
  ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList*          get_threads_hazard_ptr() const;
  void                  set_threads_hazard_ptr(ThreadsList* new_list);
  static bool           is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList*   tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList*   untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
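  // The tag is the low bit of the (aligned) list address, so for any list:
  //
  //   ThreadsList* t = tag_hazard_ptr(list);      // list | 1
  //   assert(is_hazard_ptr_tagged(t), "tagged");
  //   assert(untag_hazard_ptr(t) == list, "round-trips");
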
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  // Is the target JavaThread protected by the calling Thread or by some other
  // mechanism?
  static bool is_JavaThread_protected(const JavaThread* target);
  // Is the target JavaThread protected by a ThreadsListHandle (TLH) associated
  // with the calling Thread?
  static bool is_JavaThread_protected_by_TLH(const JavaThread* target);

 private:
  DEBUG_ONLY(bool _suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_safepoint_thread;)

 public:
#ifdef ASSERT
  void set_suspendible_thread()   { _suspendible_thread = true; }
  void clear_suspendible_thread() { _suspendible_thread = false; }
  bool is_suspendible_thread()    { return _suspendible_thread; }

  void set_indirectly_suspendible_thread()   { _indirectly_suspendible_thread = true; }
  void clear_indirectly_suspendible_thread() { _indirectly_suspendible_thread = false; }
  bool is_indirectly_suspendible_thread()    { return _indirectly_suspendible_thread; }

  void set_indirectly_safepoint_thread()   { _indirectly_safepoint_thread = true; }
  void clear_indirectly_safepoint_thread() { _indirectly_safepoint_thread = false; }
  bool is_indirectly_safepoint_thread()    { return _indirectly_safepoint_thread; }
#endif

 private:
  // Points to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }

 public:
  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const          { return _last_handle_mark; }
 private:

#ifdef ASSERT
  ICRefillVerifier* _missed_ic_stub_refill_verifier;

 public:
  ICRefillVerifier* missed_ic_stub_refill_verifier() {
    return _missed_ic_stub_refill_verifier;
  }

  void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
    _missed_ic_stub_refill_verifier = verifier;
  }
#endif // ASSERT

 private:
  // Used by the SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  friend class GCLocker;

 private:
  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  ThreadHeapSampler _heap_sampler;              // For use when sampling heap allocations

  ThreadStatisticalInfo _statistical_info;      // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for JFR

  JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
                                                 // is waiting to lock
 public:
  // Constructor
  Thread();
  virtual ~Thread() = 0;        // Thread is abstract.

  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current(); // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by subclasses.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif


 public:
  // Invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();

  // Testers
  virtual bool is_VM_thread()       const            { return false; }
  virtual bool is_Java_thread()     const            { return false; }
  virtual bool is_Compiler_thread() const            { return false; }
  virtual bool is_service_thread() const             { return false; }
  virtual bool is_hidden_from_external_view() const  { return false; }
  virtual bool is_jvmti_agent_thread() const         { return false; }
  virtual bool is_Watcher_thread() const             { return false; }
  virtual bool is_ConcurrentGC_thread() const        { return false; }
  virtual bool is_Named_thread() const               { return false; }
  virtual bool is_Worker_thread() const              { return false; }
  virtual bool is_JfrSampler_thread() const          { return false; }
  virtual bool is_AttachListener_thread() const      { return false; }
  virtual bool is_monitor_deflation_thread() const   { return false; }

  // Convenience cast functions
  CompilerThread* as_Compiler_thread() const {
    assert(is_Compiler_thread(), "Must be compiler thread");
    return (CompilerThread*)this;
  }

  // Can this thread make Java upcalls?
  virtual bool can_call_java() const                 { return false; }

  // Is this a JavaThread that is on the VM's current ThreadsList?
  // If so it must participate in the safepoint protocol.
  virtual bool is_active_Java_thread() const         { return false; }

  // All threads are given names. For singleton subclasses we can
  // just hard-wire the known name of the instance. JavaThreads and
  // NamedThreads support multiple named instances, and dynamic
  // changing of the name of an instance.
  virtual const char* name() const { return "Unknown thread"; }

  // A thread's type name is also made available for debugging
  // and logging.
  virtual const char* type_name() const { return "Thread"; }

  // Returns the current thread (ASSERTS if null)
  static inline Thread* current();
  // Returns the current thread, or null if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or null if not attached; safe for use
  // from signal handlers
  static inline Thread* current_or_null_safe();
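
  // Illustrative usage (sketch): only the *_safe variant is intended for
  // signal-handler context; elsewhere prefer current() / current_or_null():
  //
  //   Thread* t = Thread::current_or_null_safe();  // in a signal handler
  //   if (t != nullptr && t->is_Java_thread()) { /* ... */ }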

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static void start(Thread* thread);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  // Support for Unhandled Oop detection
  // Add the field to both fastdebug and debug builds to keep
  // Thread's field layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for GC. It may be stack-allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to the unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif

  // Resource area
  ResourceArea* resource_area() const            { return _resource_area; }
  void set_resource_area(ResourceArea* area)     { _resource_area = area; }

  OSThread* osthread() const                     { return _osthread;   }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  void initialize_tlab();

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  // For tracking the JVMTI raw monitor the thread is pending on.
  JvmtiRawMonitor* current_pending_raw_monitor() {
    return _current_pending_raw_monitor;
  }
  void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
    _current_pending_raw_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  //   Used by JavaThread::oops_do.
  // Apply "cf->do_nmethod" (if not nullptr) to all nmethods active in frames
  virtual void oops_do_no_frames(OopClosure* f, NMethodClosure* cf);
  virtual void oops_do_frames(OopClosure* f, NMethodClosure* cf) {}
  void oops_do(OopClosure* f, NMethodClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true".  If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already.  Returns "true" iff the
  // calling thread does the update, which indicates that it has
  // claimed this thread for the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }
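
  // Illustrative sketch (assumed caller, not part of this header): during a
  // parallel iteration each worker attempts to claim a visited thread, and
  // only the winner processes it:
  //
  //   uintx token = ...;       // token of the current iteration
  //   JavaThread* t = ...;     // thread being visited
  //   if (t->claim_threads_do(true /* is_par */, token)) {
  //     // this worker won the atomic update and processes t
  //   }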

  uintx threads_do_token() const { return _threads_do_token; }

  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

 private:
  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >/>= limit
  // The check is inclusive of limit if passed true, else exclusive.
  bool is_in_stack_range(address adr, address limit, bool inclusive) const {
    assert(stack_base() > limit && limit >= stack_end(), "limit is outside of stack");
    return stack_base() > adr && (inclusive ? adr >= limit : adr > limit);
  }

 public:
  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >= limit
  bool is_in_stack_range_incl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, true);
  }

  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr > limit
  bool is_in_stack_range_excl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, false);
  }

  // Check if address is in the stack mapped to this thread. Used mainly in
  // error reporting (so it has to include the guard zone) and frame printing.
  // Expects _stack_base to be initialized - checked with assert.
  bool is_in_full_stack_checked(address adr) const {
    return is_in_stack_range_incl(adr, stack_end());
  }

  // Like is_in_full_stack_checked but without the assertions, as this
  // may be called on a thread before _stack_base is initialized.
  bool is_in_full_stack(address adr) const {
    address stack_end = _stack_base - _stack_size;
    return _stack_base > adr && adr >= stack_end;
  }

  // Check if address is in the live stack of this thread (not just for locks).
  // Warning: can only be called by the current thread on itself.
  bool is_in_live_stack(address adr) const {
    assert(Thread::current() == this, "is_in_live_stack can only be called from current thread");
    return is_in_stack_range_incl(adr, os::current_stack_pointer());
  }
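
  // Illustrative sketch: classifying a pointer against this thread's stack
  // (e.g. to decide whether a lock is stack-allocated):
  //
  //   address p = (address)some_ptr;                 // hypothetical pointer
  //   if (thread->is_in_full_stack(p)) {
  //     // p lies in the mapped stack, guard/unused areas included
  //   }
  //   if (thread == Thread::current() && thread->is_in_live_stack(p)) {
  //     // p lies between the current SP and the stack base
  //   }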

  // Sets this thread as the starting thread. Returns failure if thread
  // creation fails due to lack of memory, too many threads, etc.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread-local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread-local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address          _stack_base;
  size_t           _stack_size;
  int              _lgrp_id;

 public:
  // Stack overflow support
  address stack_base() const           { assert(_stack_base != nullptr, "Sanity check"); return _stack_base; }
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end()  const           { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT();
  void    unregister_thread_stack_with_NMT();

  int     lgrp_id() const        { return _lgrp_id; }
  void    set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  // Basic, non-virtual, printing support that is simple and always safe.
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by this thread.
  Mutex* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // hence the friendship.
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const                 { print_owned_locks_on(tty);    }
  Mutex* owned_locks() const                     { return _owned_locks;          }
  bool owns_locks() const                        { return owned_locks() != nullptr; }

  // Deadlock detection
  ResourceMark* current_resource_mark()          { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset()        { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset()        { return byte_offset_of(Thread, _exception_line); }

  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }

  static ByteSize tlab_start_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset()           { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  ParkEvent * volatile _ParkEvent;            // for Object monitors, JVMTI raw monitors,
                                              // and ObjectSynchronizer::read_stable_mark

  // Termination indicator used by the signal handler.
  // _ParkEvent is just a convenient field we can null out after setting the JavaThread termination state
  // (which can't itself be read from the signal handler if a signal hits during the Thread destructor).
  bool has_terminated()                       { return Atomic::load(&_ParkEvent) == nullptr; }

  jint _hashStateW;                           // Marsaglia Shift-XOR thread-local RNG state,
  jint _hashStateX;                           // used by the identity hashCode generator
  jint _hashStateY;
  jint _hashStateZ;
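
  // Illustrative sketch of the Marsaglia xorshift step that consumes this
  // state (roughly the scheme ObjectSynchronizer uses to produce identity
  // hash codes; see synchronizer.cpp for the authoritative version):
  //
  //   jint t = _hashStateX;
  //   t ^= (t << 11);
  //   _hashStateX = _hashStateY;
  //   _hashStateY = _hashStateZ;
  //   _hashStateZ = _hashStateW;
  //   jint v = _hashStateW;
  //   v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
  //   _hashStateW = v;   // v is the next pseudo-random value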

  // Low-level leaf-lock primitives used to implement synchronization.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);

#if defined(__APPLE__) && defined(AARCH64)
 private:
  DEBUG_ONLY(bool _wx_init);
  WXMode _wx_state;
 public:
  void init_wx();
  WXMode enable_wx(WXMode new_state);

  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64
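
  // Illustrative sketch: on macOS/AArch64, code that must write to executable
  // memory flips the thread's W^X mode and restores it afterwards (WXWrite and
  // WXExec are the assumed mode names):
  //
  //   WXMode old_mode = thread->enable_wx(WXWrite);
  //   // ... patch code ...
  //   thread->enable_wx(old_mode);   // restore the previous mode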

 private:
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  VMErrorCallback* _vm_error_callbacks;

  bool  _profile_vm_locks;
  bool  _profile_vm_calls;
  bool  _profile_vm_ops;
  bool  _profile_rt_calls;
  bool  _profile_upcalls;

  jlong    _all_bc_counter_value;
  jlong _clinit_bc_counter_value;

  PerfTraceTime* _current_rt_call_timer;
 public:
  bool     profile_vm_locks() const { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }

  bool     profile_vm_calls() const { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }

  bool     profile_vm_ops() const { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }

  bool     profile_rt_calls() const { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }

  bool     profile_upcalls() const { return _profile_upcalls; }
  void set_profile_upcalls(bool v) { _profile_upcalls = v; }

  PerfTraceTime*     current_rt_call_timer() const           { return _current_rt_call_timer;            }
  void           set_current_rt_call_timer(PerfTraceTime* c) {        _current_rt_call_timer = c;        }
  bool           has_current_rt_call_timer() const           { return _current_rt_call_timer != nullptr; }

  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong        bc_counter_value() const { return    _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
};

class ProfileVMCallContext : StackObj {
 private:
  Thread* _thread;
  bool _enabled;
  PerfTraceTime* _timer;

  static int _perf_nested_runtime_calls_count;

  static const char* name(PerfTraceTime* t);
  static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
 public:
  inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
  : _thread(current), _enabled(is_on), _timer(timer) {
    if (_enabled) {
      assert(timer != nullptr, "");
      assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
      _thread->set_current_rt_call_timer(timer);
    } else if (current->profile_rt_calls()) {
      notify_nested_rt_call(current->current_rt_call_timer(), timer);
    }
  }

  inline ~ProfileVMCallContext() {
    if (_enabled) {
      assert(_timer == _thread->current_rt_call_timer(),
             "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
      _thread->set_current_rt_call_timer(nullptr);
    }
  }

  static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; }
};
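
// Illustrative sketch (assumed call site, not part of this header): a runtime
// entry installs itself as the thread's current timer for the duration of the
// scope; nested calls are reported rather than re-timed:
//
//   Thread* current = Thread::current();
//   PerfTraceTime* timer = ...;   // perf counter for this runtime call
//   {
//     ProfileVMCallContext ctx(current, timer, current->do_profile_rt_call());
//     // ... runtime call body ...
//   }   // destructor clears current->current_rt_call_timer()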

class PauseRuntimeCallProfiling : public StackObj {
 protected:
  Thread* _thread;
  bool _enabled;
  PerfTraceTime* _timer;

 public:
  inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
  : _thread(current), _enabled(is_on), _timer(nullptr) {
    if (_enabled) {
      _timer = _thread->current_rt_call_timer();
      _thread->set_current_rt_call_timer(nullptr);
    }
  }

  inline ~PauseRuntimeCallProfiling() {
    if (_enabled) {
      guarantee(_thread->current_rt_call_timer() == nullptr, "");
      _thread->set_current_rt_call_timer(_timer); // restore
    }
  }
};

class ThreadInAsgct {
 private:
  Thread* _thread;
  bool _saved_in_asgct;
 public:
  ThreadInAsgct(Thread* thread) : _thread(thread) {
    assert(thread != nullptr, "invariant");
    // Allow AsyncGetCallTrace to be reentrant - save the previous state.
    _saved_in_asgct = thread->in_asgct();
    thread->set_in_asgct(true);
  }
  ~ThreadInAsgct() {
    assert(_thread->in_asgct(), "invariant");
    _thread->set_in_asgct(_saved_in_asgct);
  }
};
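
// Illustrative usage (sketch): an AsyncGetCallTrace-style entry point brackets
// its work so crash reporting can tell the thread is inside ASGCT:
//
//   void asgct_entry(/* ... */) {              // hypothetical entry point
//     Thread* t = Thread::current_or_null_safe();
//     if (t == nullptr) return;
//     ThreadInAsgct tia(t);   // reentrancy-safe: saves the prior state
//     // ... walk the stack ...
//   }                         // destructor restores the prior state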

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != nullptr, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
}

#endif // SHARE_RUNTIME_THREAD_HPP