< prev index next >

src/share/vm/runtime/thread.hpp

Print this page




  85 template <class T, MEMFLAGS F> class ChunkedList;
  86 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
  87 
  88 DEBUG_ONLY(class ResourceMark;)
  89 
  90 class WorkerThread;
  91 
  92 // Class hierarchy
  93 // - Thread
  94 //   - NamedThread
  95 //     - VMThread
  96 //     - ConcurrentGCThread
  97 //     - WorkerThread
  98 //       - GangWorker
  99 //       - GCTaskThread
 100 //   - JavaThread
 101 //   - WatcherThread
 102 
 103 class Thread: public ThreadShadow {
 104   friend class VMStructs;










 105  private:
 106   // Exception handling
 107   // (Note: _pending_exception and friends are in ThreadShadow)
 108   //oop       _pending_exception;                // pending exception for current thread
 109   // const char* _exception_file;                   // file information for exception (debugging only)
 110   // int         _exception_line;                   // line information for exception (debugging only)
 111  protected:
 112   // Support for forcing alignment of thread objects for biased locking
 113   void*       _real_malloc_address;
 114  public:
 115   void* operator new(size_t size) throw() { return allocate(size, true); }
 116   void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
 117     return allocate(size, false); }
 118   void  operator delete(void* p);
 119 
 120  protected:
 121    static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 122  private:
 123 
 124   // ***************************************************************


 240   // mutex, or blocking on an object synchronizer (Java locking).
 241   // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
 242   // If !allow_allocation(), then an assertion failure will happen during allocation
 243   // (Hence, !allow_safepoint() => !allow_allocation()).
 244   //
 245   // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
 246   //
 247   NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, thread allow a safepoint to happen
 248   debug_only (int _allow_allocation_count;)     // If 0, the thread is allowed to allocate oops.
 249 
 250   // Used by SkipGCALot class.
 251   NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
 252 
 253   friend class No_Alloc_Verifier;
 254   friend class No_Safepoint_Verifier;
 255   friend class Pause_No_Safepoint_Verifier;
 256   friend class ThreadLocalStorage;
 257   friend class GC_locker;
 258 
 259   ThreadLocalAllocBuffer _tlab;                 // Thread-local eden





 260   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
 261                                                 // the Java heap


 262 
 263   // Thread-local buffer used by MetadataOnStackMark.
 264   MetadataOnStackBuffer* _metadata_on_stack_buffer;
 265 
 266   JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr
 267 
 268   ThreadExt _ext;
 269 
 270   int   _vm_operation_started_count;            // VM_Operation support
 271   int   _vm_operation_completed_count;          // VM_Operation support
 272 


 273   ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
 274                                                 // is waiting to lock
 275   bool _current_pending_monitor_is_from_java;   // locking is from Java code
 276 
 277   // ObjectMonitor on which this thread called Object.wait()
 278   ObjectMonitor* _current_waiting_monitor;
 279 
 280   // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 281  public:
 282   ObjectMonitor* omFreeList;
 283   int omFreeCount;                              // length of omFreeList
 284   int omFreeProvision;                          // reload chunk size
 285   ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
 286   int omInUseCount;                             // length of omInUseList
 287 
 288 #ifdef ASSERT
 289  private:
 290   bool _visited_for_critical_count;
 291 
 292  public:


 369                            (volatile jint*)&_suspend_flags,
 370                            (jint)flags) != (jint)flags);
 371   }
 372 
 373   void set_has_async_exception() {
         // Sets the _has_async_exception bit in _suspend_flags via the
         // atomic CAS loop in set_suspend_flag() above.
 374     set_suspend_flag(_has_async_exception);
 375   }
 376   void clear_has_async_exception() {
         // Clears the _has_async_exception bit (same atomic protocol).
 377     clear_suspend_flag(_has_async_exception);
 378   }
 379 
       // True when the _critical_native_unlock bit is set in _suspend_flags.
       // NOTE(review): presumably signals that this thread must exit a
       // critical native region before proceeding — confirm at the callers.
 380   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 381 
 382   void set_critical_native_unlock() {
         // Set/clear the _critical_native_unlock bit with the same atomic
         // suspend-flag helpers used above.
 383     set_suspend_flag(_critical_native_unlock);
 384   }
 385   void clear_critical_native_unlock() {
 386     clear_suspend_flag(_critical_native_unlock);
 387   }
 388 








 389   // Support for Unhandled Oop detection
 390 #ifdef CHECK_UNHANDLED_OOPS
 391  private:
       // Per-thread bookkeeping object for the CheckUnhandledOops debug
       // facility; all accessors below guard on the CheckUnhandledOops flag.
 392   UnhandledOops* _unhandled_oops;
 393  public:
 394   UnhandledOops* unhandled_oops() { return _unhandled_oops; }
 395   // Mark oop safe for gc.  It may be stack allocated but won't move.
 396   void allow_unhandled_oop(oop *op) {
 397     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
 398   }
 399   // Clear oops at safepoint so crashes point to unhandled oop violator
 400   void clear_unhandled_oops() {
 401     if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
 402   }
 403 #endif // CHECK_UNHANDLED_OOPS
 404 
 405 #ifndef PRODUCT
       // Accessors for the _skip_gcalot field declared earlier (used by the
       // SkipGCALot class to temporarily elide gc-a-lot stress testing).
 406   bool skip_gcalot()           { return _skip_gcalot; }
 407   void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
 408 #endif


       // Accessors for the underlying OS-level thread object.
 418   OSThread* osthread() const                     { return _osthread;   }
 419   void set_osthread(OSThread* thread)            { _osthread = thread; }
 420 
 421   // JNI handle support
 422   JNIHandleBlock* active_handles() const         { return _active_handles; }
 423   void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
 424   JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
 425   void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
 426 
 427   // Internal handle support
 428   HandleArea* handle_area() const                { return _handle_area; }
 429   void set_handle_area(HandleArea* area)         { _handle_area = area; }
 430 
       // Thread-local list of Metadata* kept alive via handles.
 431   GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
 432   void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
 433 
 434   // Thread-Local Allocation Buffer (TLAB) support
 435   ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
       // Initializes this thread's TLAB, but only when TLABs are enabled
       // via the UseTLAB flag; otherwise a no-op.
 436   void initialize_tlab() {
 437     if (UseTLAB) {
 438       tlab().initialize();



 439     }
 440   }
 441 


















       // Cumulative Java-heap allocation counter (_allocated_bytes above).
       // NOTE(review): plain read/write, no atomics visible here — presumably
       // only ever touched by this thread or under external synchronization.
 442   jlong allocated_bytes()               { return _allocated_bytes; }
 443   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
 444   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
 445   inline jlong cooked_allocated_bytes();
 446 




 447   JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
 448   JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS)
 449 
       // Const and mutable access to the thread extension hook (_ext).
 450   const ThreadExt& ext() const          { return _ext; }
 451   ThreadExt& ext()                      { return _ext; }
 452 
 453   // VM operation support
       // Pre-increments the started counter and returns it as a ticket.
 454   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
 455   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
 456   void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
 457 
 458   // For tracking the heavyweight monitor the thread is pending on.
 459   ObjectMonitor* current_pending_monitor() {
 460     return _current_pending_monitor;
 461   }
 462   void set_current_pending_monitor(ObjectMonitor* monitor) {
 463     _current_pending_monitor = monitor;
 464   }
 465   void set_current_pending_monitor_is_from_java(bool from_java) {
 466     _current_pending_monitor_is_from_java = from_java;


 611   static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles   ); }
 612 
 613   static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base ); }
 614   static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size ); }
 615 
 616 #define TLAB_FIELD_OFFSET(name) \
 617   static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
 618 
 619   TLAB_FIELD_OFFSET(start)
 620   TLAB_FIELD_OFFSET(end)
 621   TLAB_FIELD_OFFSET(top)
 622   TLAB_FIELD_OFFSET(pf_top)
 623   TLAB_FIELD_OFFSET(size)                   // desired_size
 624   TLAB_FIELD_OFFSET(refill_waste_limit)
 625   TLAB_FIELD_OFFSET(number_of_refills)
 626   TLAB_FIELD_OFFSET(fast_refill_waste)
 627   TLAB_FIELD_OFFSET(slow_allocations)
 628 
 629 #undef TLAB_FIELD_OFFSET
 630 




 631   static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes ); }
 632 
 633   JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)
 634 
 635  public:
 636   volatile intptr_t _Stalled ;
 637   volatile int _TypeTag ;
 638   ParkEvent * _ParkEvent ;                     // for synchronized()
 639   ParkEvent * _SleepEvent ;                    // for Thread.sleep
 640   ParkEvent * _MutexEvent ;                    // for native internal Mutex/Monitor
 641   ParkEvent * _MuxEvent ;                      // for low-level muxAcquire-muxRelease
 642   int NativeSyncRecursion ;                    // diagnostic
 643 
 644   volatile int _OnTrap ;                       // Resume-at IP delta
 645   jint _hashStateW ;                           // Marsaglia Shift-XOR thread-local RNG
 646   jint _hashStateX ;                           // thread-specific hashCode generator state
 647   jint _hashStateY ;
 648   jint _hashStateZ ;
 649   void * _schedctl ;
 650 


1026 
1027 
       // The native entry function this thread was started with.
1028   ThreadFunction entry_point() const             { return _entry_point; }
1029 
1030   // Allocates a new Java level thread object for this thread. thread_name may be NULL.
1031   void allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS);
1032 
1033   // Last frame anchor routines
1034 
1035   JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }
1036 
1037   // last_Java_sp
1038   bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
1039   intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }
1040 
1041   // last_Java_pc
1042 
1043   address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }
1044 
1045   // Safepoint support
1046 #ifndef PPC64
       // Direct volatile access on most platforms; PPC64 uses the inline
       // membar-protected versions declared in the #else branch.
1047   JavaThreadState thread_state() const           { return _thread_state; }
1048   void set_thread_state(JavaThreadState s)       { _thread_state = s;    }
1049 #else
1050   // Use membars when accessing volatile _thread_state. See
1051   // Threads::create_vm() for size checks.
1052   inline JavaThreadState thread_state() const;
1053   inline void set_thread_state(JavaThreadState s);
1054 #endif
1055   ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
1056   void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
1057   bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }
1058 
1059   // thread has called JavaThread::exit() or is terminated
1060   bool is_exiting()                              { return _terminated == _thread_exiting || is_terminated(); }
1061   // thread is terminated (no longer on the threads list); we compare
1062   // against the two non-terminated values so that a freed JavaThread
1063   // will also be considered terminated.
1064   bool is_terminated()                           { return _terminated != _not_terminated && _terminated != _thread_exiting; }
1065   void set_terminated(TerminatedTypes t)         { _terminated = t; }
1066   // special for Threads::remove() which is static:


1357   static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2         ); }
1358   static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }
1359   static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc  ); }
1360   static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread            ); }
1361   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
1362   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
1363   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
1364   static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
1365   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
1366   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
1367   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
1368 
1369   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
1370   static ByteSize should_post_on_exceptions_flag_offset() {
1371     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
1372   }
1373 
1374 #if INCLUDE_ALL_GCS
1375   static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
1376   static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }



1377 #endif // INCLUDE_ALL_GCS
1378 
1379   // Returns the jni environment for this thread
1380   JNIEnv* jni_environment()                      { return &_jni_environment; }
1381 
       // Recovers the owning JavaThread from a JNIEnv* by subtracting the
       // byte offset of the embedded _jni_environment field — the exact
       // inverse of jni_environment() above.
1382   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
1383     JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
1384     // Only return NULL if thread is off the thread list; starting to
1385     // exit should not return NULL.
1386     if (thread_from_jni_env->is_terminated()) {
           // Terminated thread: park here if the VM is exiting, then report
           // no usable thread to the caller.
1387        thread_from_jni_env->block_if_vm_exited();
1388        return NULL;
1389     } else {
1390        return thread_from_jni_env;
1391     }
1392   }
1393 
1394   // JNI critical regions. These can nest.
       // _jni_active_critical is the nesting depth; "last" means exactly one
       // level remains.
1395   bool in_critical()    { return _jni_active_critical > 0; }
1396   bool in_last_critical()  { return _jni_active_critical == 1; }


1655  public:
       // Class-wide default stack size used when creating threads.
1656   static inline size_t stack_size_at_create(void) {
1657     return _stack_size_at_create;
1658   }
1659   static inline void set_stack_size_at_create(size_t value) {
1660     _stack_size_at_create = value;
1661   }
1662 
1663 #if INCLUDE_ALL_GCS
1664   // SATB marking queue support
       // Per-thread SATB queue plus the process-wide queue set it belongs to.
1665   ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
1666   static SATBMarkQueueSet& satb_mark_queue_set() {
1667     return _satb_mark_queue_set;
1668   }
1669 
1670   // Dirty card queue support
       // Per-thread dirty-card queue plus its shared queue set.
1671   DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
1672   static DirtyCardQueueSet& dirty_card_queue_set() {
1673     return _dirty_card_queue_set;
1674   }









1675 #endif // INCLUDE_ALL_GCS
1676 
1677   // This method initializes the SATB and dirty card queues before a
1678   // JavaThread is added to the Java thread list. Right now, we don't
1679   // have to do anything to the dirty card queue (it should have been
1680   // activated when the thread was created), but we have to activate
1681   // the SATB queue if the thread is created while a marking cycle is
1682   // in progress. The activation / de-activation of the SATB queues at
1683   // the beginning / end of a marking cycle is done during safepoints
1684   // so we have to make sure this method is called outside one to be
1685   // able to safely read the active field of the SATB queue set. Right
1686   // now, it is called just before the thread is added to the Java
1687   // thread list in the Threads::add() method. That method is holding
1688   // the Threads_lock which ensures we are outside a safepoint. We
1689   // cannot do the obvious and set the active field of the SATB queue
1690   // when the thread is created given that, in some cases, safepoints
1691   // might happen between the JavaThread constructor being called and the
1692   // thread being added to the Java thread list (an example of this is
1693   // when the structure for the DestroyJavaVM thread is created).
1694 #if INCLUDE_ALL_GCS
1695   void initialize_queues();
1696 #else  // INCLUDE_ALL_GCS
1697   void initialize_queues() { }
1698 #endif // INCLUDE_ALL_GCS
1699 
1700   // Machine dependent stuff
1701 #ifdef TARGET_OS_ARCH_linux_x86
1702 # include "thread_linux_x86.hpp"
1703 #endif



1704 #ifdef TARGET_OS_ARCH_linux_sparc
1705 # include "thread_linux_sparc.hpp"
1706 #endif
1707 #ifdef TARGET_OS_ARCH_linux_zero
1708 # include "thread_linux_zero.hpp"
1709 #endif
1710 #ifdef TARGET_OS_ARCH_solaris_x86
1711 # include "thread_solaris_x86.hpp"
1712 #endif
1713 #ifdef TARGET_OS_ARCH_solaris_sparc
1714 # include "thread_solaris_sparc.hpp"
1715 #endif
1716 #ifdef TARGET_OS_ARCH_windows_x86
1717 # include "thread_windows_x86.hpp"
1718 #endif
1719 #ifdef TARGET_OS_ARCH_linux_arm
1720 # include "thread_linux_arm.hpp"
1721 #endif
1722 #ifdef TARGET_OS_ARCH_linux_ppc
1723 # include "thread_linux_ppc.hpp"


1893 class Threads: AllStatic {
1894   friend class VMStructs;
1895  private:
1896   static JavaThread* _thread_list;
1897   static int         _number_of_threads;
1898   static int         _number_of_non_daemon_threads;
1899   static int         _return_code;
1900 #ifdef ASSERT
1901   static bool        _vm_complete;
1902 #endif
1903 
1904  public:
1905   // Thread management
1906   // force_daemon is a concession to JNI, where we may need to add a
1907   // thread to the thread list before allocating its thread object
1908   static void add(JavaThread* p, bool force_daemon = false);
1909   static void remove(JavaThread* p);
1910   static bool includes(JavaThread* p);
1911   static JavaThread* first()                     { return _thread_list; }
1912   static void threads_do(ThreadClosure* tc);

1913 
1914   // Initializes the vm and creates the vm thread
1915   static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
1916   static void convert_vm_init_libraries_to_agents();
1917   static void create_vm_init_libraries();
1918   static void create_vm_init_agents();
1919   static void shutdown_vm_agents();
1920   static bool destroy_vm();
1921   // Supported VM versions via JNI
1922   // Includes JNI_VERSION_1_1
1923   static jboolean is_supported_jni_version_including_1_1(jint version);
1924   // Does not include JNI_VERSION_1_1
1925   static jboolean is_supported_jni_version(jint version);
1926 
1927   // Garbage collection
1928   static void follow_other_roots(void f(oop*));
1929 
1930   // Apply "f->do_oop" to all root oops in all threads.
1931   // This version may only be called by sequential code.
1932   static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);




  85 template <class T, MEMFLAGS F> class ChunkedList;
  86 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
  87 
  88 DEBUG_ONLY(class ResourceMark;)
  89 
  90 class WorkerThread;
  91 
  92 // Class hierarchy
  93 // - Thread
  94 //   - NamedThread
  95 //     - VMThread
  96 //     - ConcurrentGCThread
  97 //     - WorkerThread
  98 //       - GangWorker
  99 //       - GCTaskThread
 100 //   - JavaThread
 101 //   - WatcherThread
 102 
 103 class Thread: public ThreadShadow {
 104   friend class VMStructs;
 105 
 106 #if INCLUDE_ALL_GCS
 107 protected:
 108   // Support for Shenandoah barriers. This is only accessible from JavaThread,
 109   // but we really want to keep this field at lower Thread offset (below first
 110   // 128 bytes), because that makes barrier fastpaths optimally encoded.
 111   char _gc_state;
 112   static char _gc_state_global;
 113 #endif
 114 
 115  private:
 116   // Exception handling
 117   // (Note: _pending_exception and friends are in ThreadShadow)
 118   //oop       _pending_exception;                // pending exception for current thread
 119   // const char* _exception_file;                   // file information for exception (debugging only)
 120   // int         _exception_line;                   // line information for exception (debugging only)
 121  protected:
 122   // Support for forcing alignment of thread objects for biased locking
 123   void*       _real_malloc_address;
 124  public:
 125   void* operator new(size_t size) throw() { return allocate(size, true); }
 126   void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
 127     return allocate(size, false); }
 128   void  operator delete(void* p);
 129 
 130  protected:
 131    static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 132  private:
 133 
 134   // ***************************************************************


 250   // mutex, or blocking on an object synchronizer (Java locking).
 251   // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
 252   // If !allow_allocation(), then an assertion failure will happen during allocation
 253   // (Hence, !allow_safepoint() => !allow_allocation()).
 254   //
 255   // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
 256   //
 257   NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, thread allow a safepoint to happen
 258   debug_only (int _allow_allocation_count;)     // If 0, the thread is allowed to allocate oops.
 259 
 260   // Used by SkipGCALot class.
 261   NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
 262 
 263   friend class No_Alloc_Verifier;
 264   friend class No_Safepoint_Verifier;
 265   friend class Pause_No_Safepoint_Verifier;
 266   friend class ThreadLocalStorage;
 267   friend class GC_locker;
 268 
 269   ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
 270   ThreadLocalAllocBuffer _gclab;                // Thread-local allocation buffer for GC (e.g. evacuation)
 271   uint _worker_id;                              // Worker ID
 272   bool _force_satb_flush;                       // Force SATB flush
 273   double _paced_time;                           // Accumulated paced time
 274 
 275   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
 276                                                 // the Java heap
 277   jlong _allocated_bytes_gclab;                 // Cumulative number of bytes allocated on
 278                                                 // the Java heap, in GCLABs
 279 
 280   // Thread-local buffer used by MetadataOnStackMark.
 281   MetadataOnStackBuffer* _metadata_on_stack_buffer;
 282 
 283   JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr
 284 
 285   ThreadExt _ext;
 286 
 287   int   _vm_operation_started_count;            // VM_Operation support
 288   int   _vm_operation_completed_count;          // VM_Operation support
 289 
 290   char _oom_during_evac;
 291 
 292   ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
 293                                                 // is waiting to lock
 294   bool _current_pending_monitor_is_from_java;   // locking is from Java code
 295 
 296   // ObjectMonitor on which this thread called Object.wait()
 297   ObjectMonitor* _current_waiting_monitor;
 298 
 299   // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 300  public:
 301   ObjectMonitor* omFreeList;
 302   int omFreeCount;                              // length of omFreeList
 303   int omFreeProvision;                          // reload chunk size
 304   ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
 305   int omInUseCount;                             // length of omInUseList
 306 
 307 #ifdef ASSERT
 308  private:
 309   bool _visited_for_critical_count;
 310 
 311  public:


 388                            (volatile jint*)&_suspend_flags,
 389                            (jint)flags) != (jint)flags);
 390   }
 391 
 392   void set_has_async_exception() {
         // Sets the _has_async_exception bit in _suspend_flags via the
         // atomic CAS loop in set_suspend_flag() above.
 393     set_suspend_flag(_has_async_exception);
 394   }
 395   void clear_has_async_exception() {
         // Clears the _has_async_exception bit (same atomic protocol).
 396     clear_suspend_flag(_has_async_exception);
 397   }
 398 
       // True when the _critical_native_unlock bit is set in _suspend_flags.
 399   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 400 
 401   void set_critical_native_unlock() {
 402     set_suspend_flag(_critical_native_unlock);
 403   }
 404   void clear_critical_native_unlock() {
 405     clear_suspend_flag(_critical_native_unlock);
 406   }
 407 
       // Shenandoah: accessors for the _oom_during_evac byte declared above.
       // NOTE(review): presumably records an out-of-memory condition hit
       // while evacuating — confirm semantics in thread.cpp.
 408   bool is_oom_during_evac() const;
 409   void set_oom_during_evac(bool oom);
 410 
 411 #ifdef ASSERT
       // Debug-only toggle consulted by evacuation assertions.
 412   bool is_evac_allowed() const;
 413   void set_evac_allowed(bool evac_allowed);
 414 #endif
 415 
 416   // Support for Unhandled Oop detection
 417 #ifdef CHECK_UNHANDLED_OOPS
 418  private:
       // Per-thread bookkeeping object for the CheckUnhandledOops debug
       // facility; all accessors below guard on the CheckUnhandledOops flag.
 419   UnhandledOops* _unhandled_oops;
 420  public:
 421   UnhandledOops* unhandled_oops() { return _unhandled_oops; }
 422   // Mark oop safe for gc.  It may be stack allocated but won't move.
 423   void allow_unhandled_oop(oop *op) {
 424     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
 425   }
 426   // Clear oops at safepoint so crashes point to unhandled oop violator
 427   void clear_unhandled_oops() {
 428     if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
 429   }
 430 #endif // CHECK_UNHANDLED_OOPS
 431 
 432 #ifndef PRODUCT
       // Accessors for _skip_gcalot (used by SkipGCALot to elide gc-a-lot).
 433   bool skip_gcalot()           { return _skip_gcalot; }
 434   void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
 435 #endif


       // Accessors for the underlying OS-level thread object.
 445   OSThread* osthread() const                     { return _osthread;   }
 446   void set_osthread(OSThread* thread)            { _osthread = thread; }
 447 
 448   // JNI handle support
 449   JNIHandleBlock* active_handles() const         { return _active_handles; }
 450   void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
 451   JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
 452   void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
 453 
 454   // Internal handle support
 455   HandleArea* handle_area() const                { return _handle_area; }
 456   void set_handle_area(HandleArea* area)         { _handle_area = area; }
 457 
       // Thread-local list of Metadata* kept alive via handles.
 458   GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
 459   void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
 460 
 461   // Thread-Local Allocation Buffer (TLAB) support
 462   ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
       // Initializes the TLAB when UseTLAB is on; under Shenandoah, Java
       // and GC worker threads additionally get a GCLAB.
       // NOTE(review): the bool passed to initialize() presumably tags the
       // buffer as TLAB (false) vs GCLAB (true) — confirm against
       // ThreadLocalAllocBuffer::initialize in this patch.
 463   void initialize_tlab() {
 464     if (UseTLAB) {
 465       tlab().initialize(false);
 466       if (UseShenandoahGC && (is_Java_thread() || is_Worker_thread())) {
 467         gclab().initialize(true);
 468       }
 469     }
 470   }
 471 
 472   // Thread-Local GC Allocation Buffer (GCLAB) support
       // Asserts that GCLABs are only used under Shenandoah and only by
       // Java or GC worker threads, then hands out the buffer.
 473   ThreadLocalAllocBuffer& gclab()                {
 474     assert (UseShenandoahGC, "Only for Shenandoah");
 475     assert (!_gclab.is_initialized() || (is_Java_thread() || is_Worker_thread()),
 476             "Only Java and GC worker threads are allowed to get GCLABs");
 477     return _gclab;
 478   }
 479 
       // GC worker identity for this thread (_worker_id declared above).
 480   void set_worker_id(uint id)           { _worker_id = id; }
 481   uint worker_id()                      { return _worker_id; }
 482 
       // Flag forcing a SATB buffer flush for this thread.
 483   void set_force_satb_flush(bool value) { _force_satb_flush = value; }
 484   bool is_force_satb_flush()            { return _force_satb_flush; }
 485 
       // Accumulated pacing delay (_paced_time, in unspecified time units —
       // TODO confirm units at the ShenandoahPacer call sites).
 486   void add_paced_time(double v)         { _paced_time += v; }
 487   double paced_time()                   { return _paced_time; }
 488   void reset_paced_time()               { _paced_time = 0; }
 489 
       // Cumulative Java-heap allocation counters; the *_gclab variants
       // track bytes allocated in GCLABs separately.
 490   jlong allocated_bytes()               { return _allocated_bytes; }
 491   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
 492   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
 493   inline jlong cooked_allocated_bytes();
 494 
 495   jlong allocated_bytes_gclab()                { return _allocated_bytes_gclab; }
 496   void set_allocated_bytes_gclab(jlong value)  { _allocated_bytes_gclab = value; }
 497   void incr_allocated_bytes_gclab(jlong size)  { _allocated_bytes_gclab += size; }
 498 
 499   JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
 500   JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS)
 501 
       // Const and mutable access to the thread extension hook (_ext).
 502   const ThreadExt& ext() const          { return _ext; }
 503   ThreadExt& ext()                      { return _ext; }
 504 
 505   // VM operation support
       // Pre-increments the started counter and returns it as a ticket.
 506   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
 507   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
 508   void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
 509 
 510   // For tracking the heavyweight monitor the thread is pending on.
 511   ObjectMonitor* current_pending_monitor() {
 512     return _current_pending_monitor;
 513   }
 514   void set_current_pending_monitor(ObjectMonitor* monitor) {
 515     _current_pending_monitor = monitor;
 516   }
 517   void set_current_pending_monitor_is_from_java(bool from_java) {
 518     _current_pending_monitor_is_from_java = from_java;


 663   static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles   ); }
 664 
 665   static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base ); }
 666   static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size ); }
 667 
 668 #define TLAB_FIELD_OFFSET(name) \
 669   static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
 670 
 671   TLAB_FIELD_OFFSET(start)
 672   TLAB_FIELD_OFFSET(end)
 673   TLAB_FIELD_OFFSET(top)
 674   TLAB_FIELD_OFFSET(pf_top)
 675   TLAB_FIELD_OFFSET(size)                   // desired_size
 676   TLAB_FIELD_OFFSET(refill_waste_limit)
 677   TLAB_FIELD_OFFSET(number_of_refills)
 678   TLAB_FIELD_OFFSET(fast_refill_waste)
 679   TLAB_FIELD_OFFSET(slow_allocations)
 680 
 681 #undef TLAB_FIELD_OFFSET
 682 
 683   static ByteSize gclab_start_offset()         { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::start_offset(); }
 684   static ByteSize gclab_top_offset()           { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::top_offset(); }
 685   static ByteSize gclab_end_offset()           { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::end_offset(); }
 686 
 687   static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes ); }
 688 
 689   JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)
 690 
 public:
  // Fields used by the VM's low-level (native) synchronization machinery;
  // they are manipulated directly by monitor/mutex implementation code.
  volatile intptr_t _Stalled ;                 // NOTE(review): presumably identifies what the thread is stalled on -- confirm at usage sites
  volatile int _TypeTag ;
  ParkEvent * _ParkEvent ;                     // for synchronized()
  ParkEvent * _SleepEvent ;                    // for Thread.sleep
  ParkEvent * _MutexEvent ;                    // for native internal Mutex/Monitor
  ParkEvent * _MuxEvent ;                      // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion ;                    // diagnostic

  volatile int _OnTrap ;                       // Resume-at IP delta
  jint _hashStateW ;                           // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX ;                           // thread-specific hashCode generator state
  jint _hashStateY ;
  jint _hashStateZ ;
  void * _schedctl ;                           // NOTE(review): looks like an OS scheduler-control hook (Solaris schedctl?) -- verify


1082 
1083 
  // Function this thread's native thread was created to run (set at construction).
  ThreadFunction entry_point() const             { return _entry_point; }

  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
  void allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS);

  // Last frame anchor routines
  // The anchor records the last Java frame (sp/pc) of this thread; it is
  // what stack walking starts from.

  JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }

  // last_Java_sp
  bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
  intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }

  // last_Java_pc

  address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }

  // Safepoint support
#if !(defined(PPC64) || defined(AARCH64))
  // Plain loads/stores are sufficient on strongly-ordered platforms.
  JavaThreadState thread_state() const           { return _thread_state; }
  void set_thread_state(JavaThreadState s)       { _thread_state = s;    }
#else
  // Use membars when accessing volatile _thread_state. See
  // Threads::create_vm() for size checks.
  inline JavaThreadState thread_state() const;
  inline void set_thread_state(JavaThreadState s);
#endif
  ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
  void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
  bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }

  // thread has called JavaThread::exit() or is terminated
  bool is_exiting()                              { return _terminated == _thread_exiting || is_terminated(); }
  // thread is terminated (no longer on the threads list); we compare
  // against the two non-terminated values so that a freed JavaThread
  // will also be considered terminated.
  bool is_terminated()                           { return _terminated != _not_terminated && _terminated != _thread_exiting; }
  void set_terminated(TerminatedTypes t)         { _terminated = t; }
  // special for Threads::remove() which is static:


  // Code-generation support: byte offsets of JavaThread fields, used by the
  // interpreter and JIT compilers to access thread state from generated code.
  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2         ); }
  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }
  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc  ); }
  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread            ); }
  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }

#if INCLUDE_ALL_GCS
  // GC write-barrier queue offsets (only present when all GCs are built in).
  static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
  static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }

  static ByteSize gc_state_offset()              { return byte_offset_of(JavaThread, _gc_state); }

#endif // INCLUDE_ALL_GCS

  // Returns the jni environment for this thread
  JNIEnv* jni_environment()                      { return &_jni_environment; }
1441   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
1442     JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
1443     // Only return NULL if thread is off the thread list; starting to
1444     // exit should not return NULL.
1445     if (thread_from_jni_env->is_terminated()) {
1446        thread_from_jni_env->block_if_vm_exited();
1447        return NULL;
1448     } else {
1449        return thread_from_jni_env;
1450     }
1451   }
1452 
  // JNI critical regions. These can nest; _jni_active_critical counts the
  // current nesting depth.
  bool in_critical()    { return _jni_active_critical > 0; }
  // True when exactly one level of critical-region nesting remains.
  bool in_last_critical()  { return _jni_active_critical == 1; }


 public:
  // Stack size to use when creating new threads.
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

#if INCLUDE_ALL_GCS
  // SATB marking queue support
  ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
  static SATBMarkQueueSet& satb_mark_queue_set() {
    return _satb_mark_queue_set;
  }

  // Dirty card queue support
  DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
  static DirtyCardQueueSet& dirty_card_queue_set() {
    return _dirty_card_queue_set;
  }

  // Per-thread GC state; definition is out-of-line (inline elsewhere).
  inline char gc_state() const;

private:
  void set_gc_state(char in_prog);

public:
  // Broadcast variants: update the GC state / SATB flush flag on all threads.
  static void set_gc_state_all_threads(char in_prog);
  static void set_force_satb_flush_all_threads(bool value);
#endif // INCLUDE_ALL_GCS

  // This method initializes the SATB and dirty card queues before a
  // JavaThread is added to the Java thread list. Right now, we don't
  // have to do anything to the dirty card queue (it should have been
  // activated when the thread was created), but we have to activate
  // the SATB queue if the thread is created while a marking cycle is
  // in progress. The activation / de-activation of the SATB queues at
  // the beginning / end of a marking cycle is done during safepoints
  // so we have to make sure this method is called outside one to be
  // able to safely read the active field of the SATB queue set. Right
  // now, it is called just before the thread is added to the Java
  // thread list in the Threads::add() method. That method is holding
  // the Threads_lock which ensures we are outside a safepoint. We
  // cannot do the obvious and set the active field of the SATB queue
  // when the thread is created given that, in some cases, safepoints
  // might happen between the JavaThread constructor being called and the
  // thread being added to the Java thread list (an example of this is
  // when the structure for the DestroyJavaVM thread is created).
#if INCLUDE_ALL_GCS
  void initialize_queues();
#else  // INCLUDE_ALL_GCS
  void initialize_queues() { }
#endif // INCLUDE_ALL_GCS
1767 
1768   // Machine dependent stuff
1769 #ifdef TARGET_OS_ARCH_linux_x86
1770 # include "thread_linux_x86.hpp"
1771 #endif
1772 #ifdef TARGET_OS_ARCH_linux_aarch64
1773 # include "thread_linux_aarch64.hpp"
1774 #endif
1775 #ifdef TARGET_OS_ARCH_linux_sparc
1776 # include "thread_linux_sparc.hpp"
1777 #endif
1778 #ifdef TARGET_OS_ARCH_linux_zero
1779 # include "thread_linux_zero.hpp"
1780 #endif
1781 #ifdef TARGET_OS_ARCH_solaris_x86
1782 # include "thread_solaris_x86.hpp"
1783 #endif
1784 #ifdef TARGET_OS_ARCH_solaris_sparc
1785 # include "thread_solaris_sparc.hpp"
1786 #endif
1787 #ifdef TARGET_OS_ARCH_windows_x86
1788 # include "thread_windows_x86.hpp"
1789 #endif
1790 #ifdef TARGET_OS_ARCH_linux_arm
1791 # include "thread_linux_arm.hpp"
1792 #endif
1793 #ifdef TARGET_OS_ARCH_linux_ppc
1794 # include "thread_linux_ppc.hpp"


// The Threads class maintains the global list of JavaThreads and provides
// static operations over all of them: adding/removing threads, VM creation
// and destruction, iteration, and GC root scanning.
class Threads: AllStatic {
  friend class VMStructs;
 private:
  static JavaThread* _thread_list;               // head of the global list of JavaThreads
  static int         _number_of_threads;
  static int         _number_of_non_daemon_threads;
  static int         _return_code;               // NOTE(review): presumably the VM's exit code -- confirm in thread.cpp
#ifdef ASSERT
  static bool        _vm_complete;
#endif

 public:
  // Thread management
  // force_daemon is a concession to JNI, where we may need to add a
  // thread to the thread list before allocating its thread object
  static void add(JavaThread* p, bool force_daemon = false);
  static void remove(JavaThread* p);
  static bool includes(JavaThread* p);
  static JavaThread* first()                     { return _thread_list; }
  static void threads_do(ThreadClosure* tc);
  static void java_threads_do(ThreadClosure* tc);

  // Initializes the vm and creates the vm thread
  static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
  static void convert_vm_init_libraries_to_agents();
  static void create_vm_init_libraries();
  static void create_vm_init_agents();
  static void shutdown_vm_agents();
  static bool destroy_vm();
  // Supported VM versions via JNI
  // Includes JNI_VERSION_1_1
  static jboolean is_supported_jni_version_including_1_1(jint version);
  // Does not include JNI_VERSION_1_1
  static jboolean is_supported_jni_version(jint version);

  // Garbage collection
  static void follow_other_roots(void f(oop*));

  // Apply "f->do_oop" to all root oops in all threads.
  // This version may only be called by sequential code.
  static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);


< prev index next >