/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_THREAD_HPP
#define SHARE_RUNTIME_THREAD_HPP

#include "jni.h"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/unhandledOops.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrThreadExtension.hpp"
#endif

class CompilerThread;
class HandleArea;
class HandleMark;
class ICRefillVerifier;
class JvmtiRawMonitor;
class Metadata;
class OSThread;
class ParkEvent;
class ResourceArea;
class SafeThreadsListPtr;
class ThreadClosure;
class ThreadsList;
class ThreadsSMRSupport;
class VMErrorCallback;

class OopClosure;
class CodeBlobClosure;

class PerfTraceTime;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

class JavaThread;

// Class hierarchy
// - Thread
//   - JavaThread
//     - various subclasses eg CompilerThread, ServiceThread
//   - NonJavaThread
//     - NamedThread
//       - VMThread
//       - ConcurrentGCThread
//       - WorkerThread
//     - WatcherThread
//     - JfrThreadSampler
//     - LogAsyncWriter
//
// All Thread subclasses must be either JavaThread or NonJavaThread.
// This means !t->is_Java_thread() iff t is a NonJavaThread, or t is
// a partially constructed/destroyed Thread.

// Thread execution sequence and actions:
// All threads:
//  - thread_native_entry  // per-OS native entry point
//    - stack initialization
//    - other OS-level initialization (signal masks etc)
//    - handshake with creating thread (if not started suspended)
//    - this->call_run()  // common shared entry point
//      - shared common initialization
//      - this->pre_run()  // virtual per-thread-type initialization
//      - this->run()      // virtual per-thread-type "main" logic
//      - shared common tear-down
//      - this->post_run()  // virtual per-thread-type tear-down
//      - // 'this' no longer referenceable
//    - OS-level tear-down (minimal)
//    - final logging
//
// For JavaThread:
//   - this->run()  // virtual but not normally overridden
//     - this->thread_main_inner()  // extra call level to ensure correct stack calculations
//       - this->entry_point()  // set differently for each kind of JavaThread

class Thread: public ThreadShadow {
  friend class VMError;
  friend class VMErrorCallbackMark;
  friend class VMStructs;
  friend class JVMCIVMStructs;
 private:

#ifndef USE_LIBRARY_BASED_TLS_ONLY
  // Current thread is maintained as a thread-local variable
  static THREAD_LOCAL Thread* _thr_current;
#endif
  // On AArch64, the high-order 32 bits are used by a "patching epoch" number,
  // which reflects whether this thread has executed the required fences
  // after an nmethod gets disarmed. The low-order 32 bits denote the
  // disarmed value.
  uint64_t _nmethod_disarmed_guard_value;

 public:
  void set_nmethod_disarmed_guard_value(int value) {
    _nmethod_disarmed_guard_value = (uint64_t)(uint32_t)value;
  }

  static ByteSize nmethod_disarmed_guard_value_offset() {
    ByteSize offset = byte_offset_of(Thread, _nmethod_disarmed_guard_value);
    // At least on x86_64, nmethod entry barrier encodes disarmed value offset
    // in instruction as disp8 immed
    assert(in_bytes(offset) < 128, "Offset >= 128");
    return offset;
  }
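  // Illustrative sketch (assumed encoding, for intuition only): on x86_64
  // the entry barrier is conceptually
  //   cmp dword ptr [r15 + disp8], imm32   ; disp8 = this offset
  // where r15 holds the current thread, so the offset must fit in a
  // signed 8-bit displacement, hence the assert above.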

 private:
  // Thread local data area available to the GC. The internal
  // structure and contents of this data area is GC-specific.
  // Only GC and GC barrier code should access this data area.
  GCThreadLocalData _gc_data;

 public:
  static ByteSize gc_data_offset() {
    return byte_offset_of(Thread, _gc_data);
  }

  template <typename T> T* gc_data() {
    STATIC_ASSERT(sizeof(T) <= sizeof(_gc_data));
    return reinterpret_cast<T*>(&_gc_data);
  }
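  // Illustrative use (sketch; MyGCData stands for a hypothetical
  // GC-specific type that must fit within sizeof(GCThreadLocalData)):
  //   MyGCData* data = thread->gc_data<MyGCData>();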

  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop       _pending_exception;                // pending exception for current thread
  // const char* _exception_file;                   // file information for exception (debugging only)
  // int         _exception_line;                   // line information for exception (debugging only)
 protected:

  DEBUG_ONLY(static Thread* _starting_thread;)

  // JavaThread lifecycle support:
  friend class SafeThreadsListPtr;  // for _threads_list_ptr, cmpxchg_threads_hazard_ptr(), {dec_,inc_,}nested_threads_hazard_ptr_cnt(), {g,s}et_threads_hazard_ptr(), inc_nested_handle_cnt(), tag_hazard_ptr() access
  friend class ScanHazardPtrGatherProtectedThreadsClosure;  // for cmpxchg_threads_hazard_ptr(), get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ScanHazardPtrGatherThreadsListClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access
  friend class ScanHazardPtrPrintMatchingThreadsClosure;  // for get_threads_hazard_ptr(), is_hazard_ptr_tagged() access
  friend class ThreadsSMRSupport;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ThreadsListHandleTest;  // for _nested_threads_hazard_ptr_cnt, _threads_hazard_ptr, _threads_list_ptr access
  friend class ValidateHazardPtrsClosure;  // for get_threads_hazard_ptr(), untag_hazard_ptr() access

  ThreadsList* volatile _threads_hazard_ptr;
  SafeThreadsListPtr*   _threads_list_ptr;
  ThreadsList*          cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value);
  ThreadsList*          get_threads_hazard_ptr() const;
  void                  set_threads_hazard_ptr(ThreadsList* new_list);
  static bool           is_hazard_ptr_tagged(ThreadsList* list) {
    return (intptr_t(list) & intptr_t(1)) == intptr_t(1);
  }
  static ThreadsList*   tag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) | intptr_t(1));
  }
  static ThreadsList*   untag_hazard_ptr(ThreadsList* list) {
    return (ThreadsList*)(intptr_t(list) & ~intptr_t(1));
  }
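  // Tagging sketch (addresses illustrative): the low-order bit marks a
  // hazard ptr that has not yet been validated, e.g.
  //   tag_hazard_ptr((ThreadsList*)0x1000)       -> (ThreadsList*)0x1001
  //   untag_hazard_ptr((ThreadsList*)0x1001)     -> (ThreadsList*)0x1000
  //   is_hazard_ptr_tagged((ThreadsList*)0x1001) -> true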
  // This field is enabled via -XX:+EnableThreadSMRStatistics:
  uint _nested_threads_hazard_ptr_cnt;
  void dec_nested_threads_hazard_ptr_cnt() {
    assert(_nested_threads_hazard_ptr_cnt != 0, "mismatched {dec,inc}_nested_threads_hazard_ptr_cnt()");
    _nested_threads_hazard_ptr_cnt--;
  }
  void inc_nested_threads_hazard_ptr_cnt() {
    _nested_threads_hazard_ptr_cnt++;
  }
  uint nested_threads_hazard_ptr_cnt() {
    return _nested_threads_hazard_ptr_cnt;
  }

 public:
  // Is the target JavaThread protected by the calling Thread or by some other
  // mechanism?
  static bool is_JavaThread_protected(const JavaThread* target);
  // Is the target JavaThread protected by a ThreadsListHandle (TLH) associated
  // with the calling Thread?
  static bool is_JavaThread_protected_by_TLH(const JavaThread* target);

 private:
  DEBUG_ONLY(bool _suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_suspendible_thread;)
  DEBUG_ONLY(bool _indirectly_safepoint_thread;)

 public:
  // Determines if a heap allocation failure will be retried
  // (e.g., by deoptimizing and re-executing in the interpreter).
  // In this case, the failed allocation must raise
  // Universe::out_of_memory_error_retry() and omit side effects
  // such as JVMTI events and handling -XX:+HeapDumpOnOutOfMemoryError
  // and -XX:OnOutOfMemoryError.
  virtual bool in_retryable_allocation() const { return false; }

#ifdef ASSERT
  void set_suspendible_thread()   { _suspendible_thread = true; }
  void clear_suspendible_thread() { _suspendible_thread = false; }
  bool is_suspendible_thread()    { return _suspendible_thread; }

  void set_indirectly_suspendible_thread()   { _indirectly_suspendible_thread = true; }
  void clear_indirectly_suspendible_thread() { _indirectly_suspendible_thread = false; }
  bool is_indirectly_suspendible_thread()    { return _indirectly_suspendible_thread; }

  void set_indirectly_safepoint_thread()   { _indirectly_safepoint_thread = true; }
  void clear_indirectly_safepoint_thread() { _indirectly_safepoint_thread = false; }
  bool is_indirectly_safepoint_thread()    { return _indirectly_safepoint_thread; }
#endif

 private:
  // Points to the last handle mark
  HandleMark* _last_handle_mark;

  // Claim value for parallel iteration over threads.
  uintx _threads_do_token;

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
    return &_rcu_counter;
  }
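  // Usage sketch (see utilities/globalCounter.hpp): reader threads publish
  // their critical section into _rcu_counter via
  // GlobalCounter::critical_section_begin()/critical_section_end(), and a
  // writer calls GlobalCounter::write_synchronize() to wait until all such
  // readers have left their critical sections.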

 public:
  void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
  HandleMark* last_handle_mark() const          { return _last_handle_mark; }
 private:

#ifdef ASSERT
  ICRefillVerifier* _missed_ic_stub_refill_verifier;

 public:
  ICRefillVerifier* missed_ic_stub_refill_verifier() {
    return _missed_ic_stub_refill_verifier;
  }

  void set_missed_ic_stub_refill_verifier(ICRefillVerifier* verifier) {
    _missed_ic_stub_refill_verifier = verifier;
  }
#endif // ASSERT

 private:
  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  friend class GCLocker;

 private:
  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  ThreadHeapSampler _heap_sampler;              // For use when sampling heap allocations.

  ThreadStatisticalInfo _statistical_info;      // Statistics about the thread

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
                                                 // is waiting to lock
 public:
  // Constructor
  Thread();
  virtual ~Thread() = 0;        // Thread is abstract.

  // Manage Thread::current()
  void initialize_thread_current();
  static void clear_thread_current(); // TLS cleanup needed before threads terminate

 protected:
  // To be implemented by children.
  virtual void run() = 0;
  virtual void pre_run() = 0;
  virtual void post_run() = 0;  // Note: Thread must not be deleted prior to calling this!

#ifdef ASSERT
  enum RunState {
    PRE_CALL_RUN,
    CALL_RUN,
    PRE_RUN,
    RUN,
    POST_RUN
    // POST_CALL_RUN - can't define this one as 'this' may be deleted when we want to set it
  };
  RunState _run_state;  // for lifecycle checks
#endif


 public:
  // Invokes <ChildThreadClass>::run(), with common preparations and cleanups.
  void call_run();

  // Testers
  virtual bool is_VM_thread()       const            { return false; }
  virtual bool is_Java_thread()     const            { return false; }
  virtual bool is_Compiler_thread() const            { return false; }
  virtual bool is_service_thread() const             { return false; }
  virtual bool is_hidden_from_external_view() const  { return false; }
  virtual bool is_jvmti_agent_thread() const         { return false; }
  virtual bool is_Watcher_thread() const             { return false; }
  virtual bool is_ConcurrentGC_thread() const        { return false; }
  virtual bool is_Named_thread() const               { return false; }
  virtual bool is_Worker_thread() const              { return false; }
  virtual bool is_JfrSampler_thread() const          { return false; }
  virtual bool is_AttachListener_thread() const      { return false; }
  virtual bool is_monitor_deflation_thread() const   { return false; }

  // Convenience cast functions
  CompilerThread* as_Compiler_thread() const {
    assert(is_Compiler_thread(), "Must be compiler thread");
    return (CompilerThread*)this;
  }

  // Can this thread make Java upcalls
  virtual bool can_call_java() const                 { return false; }

  // Is this a JavaThread that is on the VM's current ThreadsList?
  // If so it must participate in the safepoint protocol.
  virtual bool is_active_Java_thread() const         { return false; }

  // All threads are given names. For singleton subclasses we can
  // just hard-wire the known name of the instance. JavaThreads and
  // NamedThreads support multiple named instances, and dynamic
  // changing of the name of an instance.
  virtual const char* name() const { return "Unknown thread"; }

  // A thread's type name is also made available for debugging
  // and logging.
  virtual const char* type_name() const { return "Thread"; }

  // Returns the current thread (ASSERTS if null)
  static inline Thread* current();
  // Returns the current thread, or null if not attached
  static inline Thread* current_or_null();
  // Returns the current thread, or null if not attached, and is
  // safe for use from signal-handlers
  static inline Thread* current_or_null_safe();

  // Common thread operations
#ifdef ASSERT
  static void check_for_dangling_thread_pointer(Thread *thread);
#endif
  static void set_priority(Thread* thread, ThreadPriority priority);
  static void start(Thread* thread);

  void set_native_thread_name(const char *name) {
    assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
    os::set_native_thread_name(name);
  }

  // Support for Unhandled Oop detection
  // The field is added in both fastdebug and debug builds to keep
  // Thread's field layout the same.
  // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug builds.
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
#elif defined(ASSERT)
 private:
  void* _unhandled_oops;
#endif
#ifdef CHECK_UNHANDLED_OOPS
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for GC. It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

 public:
#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif

  // Resource area
  ResourceArea* resource_area() const            { return _resource_area; }
  void set_resource_area(ResourceArea* area)     { _resource_area = area; }

  OSThread* osthread() const                     { return _osthread;   }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  void initialize_tlab();

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }

  ThreadStatisticalInfo& statistical_info() { return _statistical_info; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)

  // For tracking the Jvmti raw monitor the thread is pending on.
  JvmtiRawMonitor* current_pending_raw_monitor() {
    return _current_pending_raw_monitor;
  }
  void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) {
    _current_pending_raw_monitor = monitor;
  }

  // GC support
  // Apply "f->do_oop" to all root oops in "this".
  //   Used by JavaThread::oops_do.
  // Apply "cf->do_code_blob" (if !nullptr) to all code blobs active in frames
  virtual void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf);
  virtual void oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {}
  void oops_do(OopClosure* f, CodeBlobClosure* cf);

  // Handles the parallel case for claim_threads_do.
 private:
  bool claim_par_threads_do(uintx claim_token);
 public:
  // Requires that "claim_token" is that of the current iteration.
  // If "is_par" is false, sets the token of "this" to
  // "claim_token", and returns "true".  If "is_par" is true,
  // uses an atomic instruction to set the current thread's token to
  // "claim_token", if it is not already.  Returns "true" iff the
  // calling thread did the update; this indicates that the calling
  // thread has claimed the thread for the current iteration.
  bool claim_threads_do(bool is_par, uintx claim_token) {
    if (!is_par) {
      _threads_do_token = claim_token;
      return true;
    } else {
      return claim_par_threads_do(claim_token);
    }
  }
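  // Usage sketch (illustrative): a parallel thread iteration hands every
  // worker the same claim token, and a thread is processed only by the
  // worker whose claim succeeds:
  //   if (t->claim_threads_do(true /* is_par */, claim_token)) {
  //     tc->do_thread(t);  // tc is some ThreadClosure
  //   }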

  uintx threads_do_token() const { return _threads_do_token; }

  // jvmtiRedefineClasses support
  void metadata_handles_do(void f(Metadata*));

 private:
  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >/>= limit
  // The check is inclusive of limit if passed true, else exclusive.
  bool is_in_stack_range(address adr, address limit, bool inclusive) const {
    assert(stack_base() > limit && limit >= stack_end(), "limit is outside of stack");
    return stack_base() > adr && (inclusive ? adr >= limit : adr > limit);
  }
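  // Example (illustrative addresses): with stack_base() == 0x2000 and
  // limit == 0x1000, the inclusive check accepts 0x1000 <= adr < 0x2000,
  // while the exclusive check rejects adr == 0x1000.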

 public:
  // Used by fast lock support
  virtual bool is_lock_owned(address adr) const;

  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr >= limit
  bool is_in_stack_range_incl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, true);
  }

  // Check if address is within the given range of this thread's
  // stack:  stack_base() > adr > limit
  bool is_in_stack_range_excl(address adr, address limit) const {
    return is_in_stack_range(adr, limit, false);
  }

  // Check if address is in the stack mapped to this thread. Used mainly in
  // error reporting (so has to include guard zone) and frame printing.
  // Expects _stack_base to be initialized - checked with assert.
  bool is_in_full_stack_checked(address adr) const {
    return is_in_stack_range_incl(adr, stack_end());
  }

  // Like is_in_full_stack_checked but without the assertions as this
  // may be called in a thread before _stack_base is initialized.
  bool is_in_full_stack(address adr) const {
    address stack_end = _stack_base - _stack_size;
    return _stack_base > adr && adr >= stack_end;
  }

  // Check if address is in the live stack of this thread (not just for locks).
  // Warning: can only be called by the current thread on itself.
  bool is_in_live_stack(address adr) const {
    assert(Thread::current() == this, "is_in_live_stack can only be called from current thread");
    return is_in_stack_range_incl(adr, os::current_stack_pointer());
  }

  // Sets this thread as starting thread. Returns failure if thread
  // creation fails due to lack of memory, too many threads etc.
  bool set_as_starting_thread();

 protected:
  // OS data associated with the thread
  OSThread* _osthread;  // Platform-specific thread information

  // Thread local resource area for temporary allocation within the VM
  ResourceArea* _resource_area;

  DEBUG_ONLY(ResourceMark* _current_resource_mark;)

  // Thread local handle area for allocation of handles within the VM
  HandleArea* _handle_area;
  GrowableArray<Metadata*>* _metadata_handles;

  // Support for stack overflow handling, get_thread, etc.
  address          _stack_base;
  size_t           _stack_size;
  int              _lgrp_id;

 public:
  // Stack overflow support
  address stack_base() const           { assert(_stack_base != nullptr,"Sanity check"); return _stack_base; }
  void    set_stack_base(address base) { _stack_base = base; }
  size_t  stack_size() const           { return _stack_size; }
  void    set_stack_size(size_t size)  { _stack_size = size; }
  address stack_end()  const           { return stack_base() - stack_size(); }
  void    record_stack_base_and_size();
  void    register_thread_stack_with_NMT();
  void    unregister_thread_stack_with_NMT();
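
  // Stack layout sketch (the stack grows downward):
  //
  //   stack_base() ->  +---------------------+  high address
  //                    |   used / live stack |
  //                    |         ...         |
  //                    |     guard zones     |
  //   stack_end()  ->  +---------------------+  low address
  //
  // stack_end() == stack_base() - stack_size(); the is_in_stack_range_*()
  // queries above check against this [stack_end(), stack_base()) range.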

  int     lgrp_id() const        { return _lgrp_id; }
  void    set_lgrp_id(int value) { _lgrp_id = value; }

  // Printing
  void print_on(outputStream* st, bool print_extended_info) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  void print() const;
  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
  // Basic, non-virtual, printing support that is simple and always safe.
  void print_value_on(outputStream* st) const;

  // Debug-only code
#ifdef ASSERT
 private:
  // Deadlock detection support for Mutex locks. List of locks owned by this thread.
  Mutex* _owned_locks;
  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
  // thus the friendship
  friend class Mutex;
  friend class Monitor;

 public:
  void print_owned_locks_on(outputStream* st) const;
  void print_owned_locks() const                 { print_owned_locks_on(tty);    }
  Mutex* owned_locks() const                     { return _owned_locks;          }
  bool owns_locks() const                        { return owned_locks() != nullptr; }

  // Deadlock detection
  ResourceMark* current_resource_mark()          { return _current_resource_mark; }
  void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT

 private:
  volatile int _jvmti_env_iteration_count;

 public:
  void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
  void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
  bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }

  // Code generation
  static ByteSize exception_file_offset()        { return byte_offset_of(Thread, _exception_file); }
  static ByteSize exception_line_offset()        { return byte_offset_of(Thread, _exception_line); }

  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }

  static ByteSize tlab_start_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize tlab_end_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
  static ByteSize tlab_top_offset()              { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize tlab_pf_top_offset()           { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::pf_top_offset(); }

  static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  ParkEvent * volatile _ParkEvent;            // for Object monitors, JVMTI raw monitors,
                                              // and ObjectSynchronizer::read_stable_mark

  // Termination indicator used by the signal handler.
  // _ParkEvent is just a convenient field we can null out after setting the JavaThread termination state
  // (which can't itself be read from the signal handler if a signal hits during the Thread destructor).
  bool has_terminated()                       { return Atomic::load(&_ParkEvent) == nullptr; }

  jint _hashStateW;                           // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;                           // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
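  // Sketch of the Marsaglia xor-shift step that consumes this state (as
  // used by the object hashCode generator; shift constants illustrative):
  //   jint t = _hashStateX;
  //   t ^= (t << 11);
  //   _hashStateX = _hashStateY; _hashStateY = _hashStateZ; _hashStateZ = _hashStateW;
  //   _hashStateW = (_hashStateW ^ (_hashStateW >> 19)) ^ (t ^ (t >> 8));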

  // Low-level leaf-lock primitives used to implement synchronization.
  // Not for general synchronization use.
  static void SpinAcquire(volatile int * Lock, const char * Name);
  static void SpinRelease(volatile int * Lock);

#if defined(__APPLE__) && defined(AARCH64)
 private:
  DEBUG_ONLY(bool _wx_init);
  WXMode _wx_state;
 public:
  void init_wx();
  WXMode enable_wx(WXMode new_state);

  void assert_wx_state(WXMode expected) {
    assert(_wx_state == expected, "wrong state");
  }
#endif // __APPLE__ && AARCH64

 private:
  bool _in_asgct = false;
 public:
  bool in_asgct() const { return _in_asgct; }
  void set_in_asgct(bool value) { _in_asgct = value; }
  static bool current_in_asgct() {
    Thread *cur = Thread::current_or_null_safe();
    return cur != nullptr && cur->in_asgct();
  }

 private:
  VMErrorCallback* _vm_error_callbacks;

  bool  _profile_vm_locks;
  bool  _profile_vm_calls;
  bool  _profile_vm_ops;
  bool  _profile_rt_calls;
  bool  _profile_upcalls;

  jlong    _all_bc_counter_value;
  jlong _clinit_bc_counter_value;

  PerfTraceTime* _current_rt_call_timer;
 public:
  bool     profile_vm_locks() const { return _profile_vm_locks; }
  void set_profile_vm_locks(bool v) { _profile_vm_locks = v; }

  bool     profile_vm_calls() const { return _profile_vm_calls; }
  void set_profile_vm_calls(bool v) { _profile_vm_calls = v; }

  bool     profile_vm_ops() const { return _profile_vm_ops; }
  void set_profile_vm_ops(bool v) { _profile_vm_ops = v; }

  bool     profile_rt_calls() const { return _profile_rt_calls; }
  void set_profile_rt_calls(bool v) { _profile_rt_calls = v; }

  bool     profile_upcalls() const { return _profile_upcalls; }
  void set_profile_upcalls(bool v) { _profile_upcalls = v; }

  PerfTraceTime*     current_rt_call_timer() const           { return _current_rt_call_timer;            }
  void           set_current_rt_call_timer(PerfTraceTime* c) {        _current_rt_call_timer = c;        }
  bool           has_current_rt_call_timer() const           { return _current_rt_call_timer != nullptr; }

  bool do_profile_rt_call() const {
    return ProfileRuntimeCalls && profile_rt_calls() && !has_current_rt_call_timer();
  }

  jlong        bc_counter_value() const { return    _all_bc_counter_value; }

  jlong clinit_bc_counter_value() const { return _clinit_bc_counter_value; }

  void inc_clinit_bc_counter_value(jlong l) { _clinit_bc_counter_value += l; }

  static ByteSize bc_counter_offset() { return byte_offset_of(Thread, _all_bc_counter_value); }
};

class ProfileVMCallContext : StackObj {
 private:
  Thread* _thread;
  bool _enabled;
  PerfTraceTime* _timer;

  static int _perf_nested_runtime_calls_count;

  static const char* name(PerfTraceTime* t);
  static void notify_nested_rt_call(PerfTraceTime* current, PerfTraceTime* inner_timer);
 public:
  inline ProfileVMCallContext(Thread* current, PerfTraceTime* timer, bool is_on)
  : _thread(current), _enabled(is_on), _timer(timer) {
    if (_enabled) {
      assert(timer != nullptr, "");
      assert(_thread->current_rt_call_timer() == nullptr, "%s", name(_thread->current_rt_call_timer()));
      _thread->set_current_rt_call_timer(timer);
    } else if (current->profile_rt_calls()) {
      notify_nested_rt_call(current->current_rt_call_timer(), timer);
    }
  }

  inline ~ProfileVMCallContext() {
    if (_enabled) {
      assert(_timer == _thread->current_rt_call_timer(),
             "%s vs %s", name(_timer), name(_thread->current_rt_call_timer()));
      _thread->set_current_rt_call_timer(nullptr);
    }
  }

  static int nested_runtime_calls_count() { return _perf_nested_runtime_calls_count; }
};
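
// Usage sketch (illustrative): installed at a VM runtime entry point so the
// timer is attributed to the current thread for the scope of the call:
//   ProfileVMCallContext ctx(current, &timer, current->do_profile_rt_call());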

class PauseRuntimeCallProfiling : public StackObj {
 protected:
  Thread* _thread;
  bool _enabled;
  PerfTraceTime* _timer;

 public:
  inline PauseRuntimeCallProfiling(Thread* current, bool is_on)
  : _thread(current), _enabled(is_on), _timer(nullptr) {
    if (_enabled) {
      _timer = _thread->current_rt_call_timer();
      _thread->set_current_rt_call_timer(nullptr);
    }
  }

  inline ~PauseRuntimeCallProfiling() {
    if (_enabled) {
      guarantee(_thread->current_rt_call_timer() == nullptr, "");
      _thread->set_current_rt_call_timer(_timer); // restore
    }
  }
};
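
// Usage sketch (illustrative): temporarily detaches the current runtime-call
// timer so a nested region is not double-counted, restoring it on scope exit:
//   PauseRuntimeCallProfiling pause(current, current->profile_rt_calls());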

class ThreadInAsgct {
 private:
  Thread* _thread;
 public:
  ThreadInAsgct(Thread* thread) : _thread(thread) {
    assert(thread != nullptr, "invariant");
    assert(!thread->in_asgct(), "invariant");
    thread->set_in_asgct(true);
  }
  ~ThreadInAsgct() {
    assert(_thread->in_asgct(), "invariant");
    _thread->set_in_asgct(false);
  }
};
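
// Usage sketch: AsyncGetCallTrace-style code brackets its stack walk with
// this RAII guard so crash reporting can recognize the state:
//   {
//     ThreadInAsgct guard(thread);
//     // ... walk frames; Thread::current_in_asgct() is true here ...
//   }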

// Inline implementation of Thread::current()
inline Thread* Thread::current() {
  Thread* current = current_or_null();
  assert(current != nullptr, "Thread::current() called on detached thread");
  return current;
}

inline Thread* Thread::current_or_null() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  return _thr_current;
#else
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
#endif
}

inline Thread* Thread::current_or_null_safe() {
  if (ThreadLocalStorage::is_initialized()) {
    return ThreadLocalStorage::thread();
  }
  return nullptr;
}

#endif // SHARE_RUNTIME_THREAD_HPP