< prev index next >

src/share/vm/runtime/thread.hpp

Print this page




  85 template <class T, MEMFLAGS F> class ChunkedList;
  86 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
  87 
  88 DEBUG_ONLY(class ResourceMark;)
  89 
  90 class WorkerThread;
  91 
  92 // Class hierarchy
  93 // - Thread
  94 //   - NamedThread
  95 //     - VMThread
  96 //     - ConcurrentGCThread
  97 //     - WorkerThread
  98 //       - GangWorker
  99 //       - GCTaskThread
 100 //   - JavaThread
 101 //   - WatcherThread
 102 
 103 class Thread: public ThreadShadow {
 104   friend class VMStructs;










 private:
  // Exception handling state lives in the ThreadShadow base class; the
  // commented-out fields below are retained only to document that move.
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop       _pending_exception;                // pending exception for current thread
  // const char* _exception_file;                   // file information for exception (debugging only)
  // int         _exception_line;                   // line information for exception (debugging only)
 protected:
  // Support for forcing alignment of thread objects for biased locking
  void*       _real_malloc_address;
 public:
  // Both operator new variants funnel into allocate(); the bool selects
  // whether allocation failure raises a VM error (true) or returns NULL (false).
  void* operator new(size_t size) throw() { return allocate(size, true); }
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    return allocate(size, false); }
  void  operator delete(void* p);

 protected:
   // Allocates storage for a Thread object, tracked under MEMFLAGS 'flags'
   // (mtThread by default); throw_excpt selects fail-fast vs NULL-on-failure.
   // Presumably also records _real_malloc_address for alignment -- confirm in thread.cpp.
   static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 private:
 123 
 124   // ***************************************************************


 240   // mutex, or blocking on an object synchronizer (Java locking).
 241   // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
 242   // If !allow_allocation(), then an assertion failure will happen during allocation
 243   // (Hence, !allow_safepoint() => !allow_allocation()).
 244   //
 245   // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
 246   //
  // Debug-build counters consulted by the verifier classes befriended below;
  // a nonzero value forbids the corresponding operation on this thread.
  NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, thread allow a safepoint to happen
  debug_only (int _allow_allocation_count;)     // If 0, the thread is allowed to allocate oops.

  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  // Friends that need direct access to the verifier counters / TLS state.
  friend class No_Alloc_Verifier;
  friend class No_Safepoint_Verifier;
  friend class Pause_No_Safepoint_Verifier;
  friend class ThreadLocalStorage;
  friend class GC_locker;

  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap

  // Thread-local buffer used by MetadataOnStackMark.
  MetadataOnStackBuffer* _metadata_on_stack_buffer;

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  ThreadExt _ext;                               // extension hook; see ThreadExt declaration

  int   _vm_operation_started_count;            // VM_Operation support
  int   _vm_operation_completed_count;          // VM_Operation support

  ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
                                                // is waiting to lock
  bool _current_pending_monitor_is_from_java;   // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount;                              // length of omFreeList
  int omFreeProvision;                          // reload chunk size
  ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
  int omInUseCount;                             // length of omInUseList
 287 
 288 #ifdef ASSERT
 289  private:
 290   bool _visited_for_critical_count;
 291 
 292  public:


 369                            (volatile jint*)&_suspend_flags,
 370                            (jint)flags) != (jint)flags);
 371   }
 372 
  // Atomically sets/clears _has_async_exception in _suspend_flags via the
  // CAS-loop helpers defined above.
  void set_has_async_exception() {
    set_suspend_flag(_has_async_exception);
  }
  void clear_has_async_exception() {
    clear_suspend_flag(_has_async_exception);
  }

  // NOTE(review): _critical_native_unlock appears to implement GC_locker's
  // critical-native protocol (GC_locker is a friend of Thread) -- confirm
  // against the JNI critical-region handling in the runtime.
  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

  void set_critical_native_unlock() {
    set_suspend_flag(_critical_native_unlock);
  }
  void clear_critical_native_unlock() {
    clear_suspend_flag(_critical_native_unlock);
  }
 388 








  // Support for Unhandled Oop detection
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc.  It may be stack allocated but won't move.
  // No-op unless -XX:+CheckUnhandledOops is enabled.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

#ifndef PRODUCT
  // Debug-only toggle consulted via the _skip_gcalot field above.
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif


  // Underlying OS-level thread representation.
  OSThread* osthread() const                     { return _osthread;   }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const         { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  // Metadata kept alive via handles, parallel to the oop handle areas above.
  GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  // No-op unless -XX:+UseTLAB is enabled.
  void initialize_tlab() {
    if (UseTLAB) {
      tlab().initialize();
    }
  }
 441 














  // Cumulative Java-heap allocation accounting for this thread.
  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  // NOTE(review): "cooked" presumably also folds in the not-yet-retired TLAB
  // contents -- confirm against the inline definition in thread.inline.hpp.
  inline jlong cooked_allocated_bytes();

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
  JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS)

  const ThreadExt& ext() const          { return _ext; }
  ThreadExt& ext()                      { return _ext; }

  // VM operation support
  // Tickets pair the started/completed counters so a requester can tell
  // when its VM_Operation has been processed.
  int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
  int vm_operation_completed_count()             { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
 465   void set_current_pending_monitor_is_from_java(bool from_java) {
 466     _current_pending_monitor_is_from_java = from_java;


  static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles   ); }

  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base ); }
  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size ); }

  // Generates tlab_<field>_offset() accessors so TLAB sub-fields can be
  // addressed directly off the Thread pointer (presumably by generated
  // code -- confirm against the interpreter/compiler users).
#define TLAB_FIELD_OFFSET(name) \
  static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }

  TLAB_FIELD_OFFSET(start)
  TLAB_FIELD_OFFSET(end)
  TLAB_FIELD_OFFSET(top)
  TLAB_FIELD_OFFSET(pf_top)
  TLAB_FIELD_OFFSET(size)                   // desired_size
  TLAB_FIELD_OFFSET(refill_waste_limit)
  TLAB_FIELD_OFFSET(number_of_refills)
  TLAB_FIELD_OFFSET(fast_refill_waste)
  TLAB_FIELD_OFFSET(slow_allocations)

#undef TLAB_FIELD_OFFSET
 630 




  static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes ); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  // Low-level synchronization state (ParkEvents and diagnostics).
  volatile intptr_t _Stalled ;
  volatile int _TypeTag ;
  ParkEvent * _ParkEvent ;                     // for synchronized()
  ParkEvent * _SleepEvent ;                    // for Thread.sleep
  ParkEvent * _MutexEvent ;                    // for native internal Mutex/Monitor
  ParkEvent * _MuxEvent ;                      // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion ;                    // diagnostic

  volatile int _OnTrap ;                       // Resume-at IP delta
  jint _hashStateW ;                           // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX ;                           // thread-specific hashCode generator state
  jint _hashStateY ;
  jint _hashStateZ ;
  void * _schedctl ;
 650 


1026 
1027 
  ThreadFunction entry_point() const             { return _entry_point; }

  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
  void allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS);

  // Last frame anchor routines

  // Anchor recording the last Java frame before a transition to native/VM code.
  JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }

  // last_Java_sp
  bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
  intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }

  // last_Java_pc

  address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }

  // Safepoint support
#ifndef PPC64
  JavaThreadState thread_state() const           { return _thread_state; }
  void set_thread_state(JavaThreadState s)       { _thread_state = s;    }
#else
  // Use membars when accessing volatile _thread_state. See
  // Threads::create_vm() for size checks.
  inline JavaThreadState thread_state() const;
  inline void set_thread_state(JavaThreadState s);
#endif
  ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
  void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
  bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }

  // thread has called JavaThread::exit() or is terminated
  bool is_exiting()                              { return _terminated == _thread_exiting || is_terminated(); }
  // thread is terminated (no longer on the threads list); we compare
  // against the two non-terminated values so that a freed JavaThread
  // will also be considered terminated.
  bool is_terminated()                           { return _terminated != _not_terminated && _terminated != _thread_exiting; }
  void set_terminated(TerminatedTypes t)         { _terminated = t; }
1066   // special for Threads::remove() which is static:


  // Field offsets, presumably consumed by the interpreter and JIT-generated
  // code -- confirm against the platform templateTable/macroAssembler users.
  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2         ); }
  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }
  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc  ); }
  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread            ); }
  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }

#if INCLUDE_ALL_GCS
  static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
  static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }
#endif // INCLUDE_ALL_GCS

  // Returns the jni environment for this thread
  JNIEnv* jni_environment()                      { return &_jni_environment; }

  // Recovers the owning JavaThread from a JNIEnv* by subtracting the embedded
  // field's offset (the inverse of jni_environment() above).
  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
       thread_from_jni_env->block_if_vm_exited();
       return NULL;
    } else {
       return thread_from_jni_env;
    }
  }

  // JNI critical regions. These can nest.
  bool in_critical()    { return _jni_active_critical > 0; }
  bool in_last_critical()  { return _jni_active_critical == 1; }


 public:
  // Stack size used for threads created after VM initialization; presumably
  // set from command-line arguments during startup -- confirm in arguments.cpp.
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

#if INCLUDE_ALL_GCS
  // SATB marking queue support
  ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
  static SATBMarkQueueSet& satb_mark_queue_set() {
    return _satb_mark_queue_set;
  }

  // Dirty card queue support
  DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
  static DirtyCardQueueSet& dirty_card_queue_set() {
    return _dirty_card_queue_set;
  }
#endif // INCLUDE_ALL_GCS

  // This method initializes the SATB and dirty card queues before a
  // JavaThread is added to the Java thread list. Right now, we don't
  // have to do anything to the dirty card queue (it should have been
  // activated when the thread was created), but we have to activate
  // the SATB queue if the thread is created while a marking cycle is
  // in progress. The activation / de-activation of the SATB queues at
  // the beginning / end of a marking cycle is done during safepoints
  // so we have to make sure this method is called outside one to be
  // able to safely read the active field of the SATB queue set. Right
  // now, it is called just before the thread is added to the Java
  // thread list in the Threads::add() method. That method is holding
  // the Threads_lock which ensures we are outside a safepoint. We
  // cannot do the obvious and set the active field of the SATB queue
  // when the thread is created given that, in some cases, safepoints
  // might happen between the JavaThread constructor being called and the
  // thread being added to the Java thread list (an example of this is
  // when the structure for the DestroyJavaVM thread is created).
#if INCLUDE_ALL_GCS
  void initialize_queues();
#else  // INCLUDE_ALL_GCS
  void initialize_queues() { }
#endif // INCLUDE_ALL_GCS

  // Machine dependent stuff
#ifdef TARGET_OS_ARCH_linux_x86
# include "thread_linux_x86.hpp"
#endif
1699 
1700   // Machine dependent stuff
1701 #ifdef TARGET_OS_ARCH_linux_x86
1702 # include "thread_linux_x86.hpp"
1703 #endif



1704 #ifdef TARGET_OS_ARCH_linux_sparc
1705 # include "thread_linux_sparc.hpp"
1706 #endif
1707 #ifdef TARGET_OS_ARCH_linux_zero
1708 # include "thread_linux_zero.hpp"
1709 #endif
1710 #ifdef TARGET_OS_ARCH_solaris_x86
1711 # include "thread_solaris_x86.hpp"
1712 #endif
1713 #ifdef TARGET_OS_ARCH_solaris_sparc
1714 # include "thread_solaris_sparc.hpp"
1715 #endif
1716 #ifdef TARGET_OS_ARCH_windows_x86
1717 # include "thread_windows_x86.hpp"
1718 #endif
1719 #ifdef TARGET_OS_ARCH_linux_arm
1720 # include "thread_linux_arm.hpp"
1721 #endif
1722 #ifdef TARGET_OS_ARCH_linux_ppc
1723 # include "thread_linux_ppc.hpp"


1893 class Threads: AllStatic {
1894   friend class VMStructs;
1895  private:
1896   static JavaThread* _thread_list;
1897   static int         _number_of_threads;
1898   static int         _number_of_non_daemon_threads;
1899   static int         _return_code;
1900 #ifdef ASSERT
1901   static bool        _vm_complete;
1902 #endif
1903 
1904  public:
1905   // Thread management
1906   // force_daemon is a concession to JNI, where we may need to add a
1907   // thread to the thread list before allocating its thread object
1908   static void add(JavaThread* p, bool force_daemon = false);
1909   static void remove(JavaThread* p);
1910   static bool includes(JavaThread* p);
1911   static JavaThread* first()                     { return _thread_list; }
1912   static void threads_do(ThreadClosure* tc);

1913 
1914   // Initializes the vm and creates the vm thread
1915   static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
1916   static void convert_vm_init_libraries_to_agents();
1917   static void create_vm_init_libraries();
1918   static void create_vm_init_agents();
1919   static void shutdown_vm_agents();
1920   static bool destroy_vm();
1921   // Supported VM versions via JNI
1922   // Includes JNI_VERSION_1_1
1923   static jboolean is_supported_jni_version_including_1_1(jint version);
1924   // Does not include JNI_VERSION_1_1
1925   static jboolean is_supported_jni_version(jint version);
1926 
1927   // Garbage collection
1928   static void follow_other_roots(void f(oop*));
1929 
1930   // Apply "f->do_oop" to all root oops in all threads.
1931   // This version may only be called by sequential code.
1932   static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);




  85 template <class T, MEMFLAGS F> class ChunkedList;
  86 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
  87 
  88 DEBUG_ONLY(class ResourceMark;)
  89 
  90 class WorkerThread;
  91 
  92 // Class hierarchy
  93 // - Thread
  94 //   - NamedThread
  95 //     - VMThread
  96 //     - ConcurrentGCThread
  97 //     - WorkerThread
  98 //       - GangWorker
  99 //       - GCTaskThread
 100 //   - JavaThread
 101 //   - WatcherThread
 102 
 103 class Thread: public ThreadShadow {
 104   friend class VMStructs;
 105 
#if INCLUDE_ALL_GCS
protected:
  // Support for Shenandoah barriers. This is only accessible from JavaThread,
  // but we really want to keep this field at lower Thread offset (below first
  // 128 bytes), because that makes barrier fastpaths optimally encoded.
  // NOTE(review): _gc_state appears to be a per-thread copy of the static
  // _gc_state_global -- confirm update protocol in the Shenandoah heap code.
  char _gc_state;
  static char _gc_state_global;
#endif
 114 
 115  private:
 116   // Exception handling
 117   // (Note: _pending_exception and friends are in ThreadShadow)
 118   //oop       _pending_exception;                // pending exception for current thread
 119   // const char* _exception_file;                   // file information for exception (debugging only)
 120   // int         _exception_line;                   // line information for exception (debugging only)
 121  protected:
 122   // Support for forcing alignment of thread objects for biased locking
 123   void*       _real_malloc_address;
 124  public:
 125   void* operator new(size_t size) throw() { return allocate(size, true); }
 126   void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
 127     return allocate(size, false); }
 128   void  operator delete(void* p);
 129 
 130  protected:
 131    static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
 132  private:
 133 
 134   // ***************************************************************


 250   // mutex, or blocking on an object synchronizer (Java locking).
 251   // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
 252   // If !allow_allocation(), then an assertion failure will happen during allocation
 253   // (Hence, !allow_safepoint() => !allow_allocation()).
 254   //
 255   // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
 256   //
 257   NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, thread allow a safepoint to happen
 258   debug_only (int _allow_allocation_count;)     // If 0, the thread is allowed to allocate oops.
 259 
 260   // Used by SkipGCALot class.
 261   NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
 262 
 263   friend class No_Alloc_Verifier;
 264   friend class No_Safepoint_Verifier;
 265   friend class Pause_No_Safepoint_Verifier;
 266   friend class ThreadLocalStorage;
 267   friend class GC_locker;
 268 
  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  ThreadLocalAllocBuffer _gclab;                // Thread-local allocation buffer for GC (e.g. evacuation)
  uint _worker_id;                              // Worker ID
  bool _force_satb_flush;                       // Force SATB flush

  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  jlong _allocated_bytes_gclab;                 // Cumulative number of bytes allocated on
                                                // the Java heap, in GCLABs

  // Thread-local buffer used by MetadataOnStackMark.
  MetadataOnStackBuffer* _metadata_on_stack_buffer;

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  ThreadExt _ext;

  int   _vm_operation_started_count;            // VM_Operation support
  int   _vm_operation_completed_count;          // VM_Operation support

  // NOTE(review): backs is_oom_during_evac()/set_oom_during_evac(); declared
  // char rather than bool -- confirm whether generated code or atomics depend
  // on the exact field width.
  char _oom_during_evac;

  ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
                                                // is waiting to lock
  bool _current_pending_monitor_is_from_java;   // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount;                              // length of omFreeList
  int omFreeProvision;                          // reload chunk size
  ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
  int omInUseCount;                             // length of omInUseList
 305 
 306 #ifdef ASSERT
 307  private:
 308   bool _visited_for_critical_count;
 309 
 310  public:


 387                            (volatile jint*)&_suspend_flags,
 388                            (jint)flags) != (jint)flags);
 389   }
 390 
  // Atomically sets/clears _has_async_exception in _suspend_flags via the
  // CAS-loop helpers defined above.
  void set_has_async_exception() {
    set_suspend_flag(_has_async_exception);
  }
  void clear_has_async_exception() {
    clear_suspend_flag(_has_async_exception);
  }

  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

  void set_critical_native_unlock() {
    set_suspend_flag(_critical_native_unlock);
  }
  void clear_critical_native_unlock() {
    clear_suspend_flag(_critical_native_unlock);
  }

  // Shenandoah evacuation-OOM protocol, backed by _oom_during_evac
  // (definitions live outside this header).
  bool is_oom_during_evac() const;
  void set_oom_during_evac(bool oom);

#ifdef ASSERT
  // Debug-only guard on when evacuation may be attempted by this thread.
  bool is_evac_allowed() const;
  void set_evac_allowed(bool evac_allowed);
#endif

  // Support for Unhandled Oop detection
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc.  It may be stack allocated but won't move.
  void allow_unhandled_oop(oop *op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
#endif


 444   OSThread* osthread() const                     { return _osthread;   }
 445   void set_osthread(OSThread* thread)            { _osthread = thread; }
 446 
 447   // JNI handle support
 448   JNIHandleBlock* active_handles() const         { return _active_handles; }
 449   void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
 450   JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
 451   void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
 452 
 453   // Internal handle support
 454   HandleArea* handle_area() const                { return _handle_area; }
 455   void set_handle_area(HandleArea* area)         { _handle_area = area; }
 456 
 457   GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
 458   void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
 459 
  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  // Initializes the TLAB (gclab=false) and, under Shenandoah, also the GCLAB
  // -- but only for thread kinds that may evacuate (matches gclab()'s assert).
  void initialize_tlab() {
    if (UseTLAB) {
      tlab().initialize(false);
      if (UseShenandoahGC && (is_Java_thread() || is_Worker_thread())) {
        gclab().initialize(true);
      }
    }
  }

  // Thread-Local GC Allocation Buffer (GCLAB) support
  ThreadLocalAllocBuffer& gclab()                {
    assert (UseShenandoahGC, "Only for Shenandoah");
    assert (!_gclab.is_initialized() || (is_Java_thread() || is_Worker_thread()),
            "Only Java and GC worker threads are allowed to get GCLABs");
    return _gclab;
  }

  // GC worker identity and SATB flush forcing (Shenandoah).
  void set_worker_id(uint id)           { _worker_id = id; }
  uint worker_id()                      { return _worker_id; }

  void set_force_satb_flush(bool value) { _force_satb_flush = value; }
  bool is_force_satb_flush()            { return _force_satb_flush; }

  // Cumulative Java-heap allocation accounting for this thread.
  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  // GCLAB counterpart of the allocated-bytes accounting above.
  jlong allocated_bytes_gclab()                { return _allocated_bytes_gclab; }
  void set_allocated_bytes_gclab(jlong value)  { _allocated_bytes_gclab = value; }
  void incr_allocated_bytes_gclab(jlong size)  { _allocated_bytes_gclab += size; }
 494   JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
 495   JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS)
 496 
 497   const ThreadExt& ext() const          { return _ext; }
 498   ThreadExt& ext()                      { return _ext; }
 499 
 500   // VM operation support
 501   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
 502   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
 503   void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
 504 
 505   // For tracking the heavyweight monitor the thread is pending on.
 506   ObjectMonitor* current_pending_monitor() {
 507     return _current_pending_monitor;
 508   }
 509   void set_current_pending_monitor(ObjectMonitor* monitor) {
 510     _current_pending_monitor = monitor;
 511   }
 512   void set_current_pending_monitor_is_from_java(bool from_java) {
 513     _current_pending_monitor_is_from_java = from_java;


 658   static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles   ); }
 659 
 660   static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base ); }
 661   static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size ); }
 662 
 663 #define TLAB_FIELD_OFFSET(name) \
 664   static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
 665 
 666   TLAB_FIELD_OFFSET(start)
 667   TLAB_FIELD_OFFSET(end)
 668   TLAB_FIELD_OFFSET(top)
 669   TLAB_FIELD_OFFSET(pf_top)
 670   TLAB_FIELD_OFFSET(size)                   // desired_size
 671   TLAB_FIELD_OFFSET(refill_waste_limit)
 672   TLAB_FIELD_OFFSET(number_of_refills)
 673   TLAB_FIELD_OFFSET(fast_refill_waste)
 674   TLAB_FIELD_OFFSET(slow_allocations)
 675 
 676 #undef TLAB_FIELD_OFFSET
 677 
  // GCLAB field offsets, mirroring the TLAB_FIELD_OFFSET accessors above;
  // presumably referenced from Shenandoah's generated allocation/barrier
  // fast paths -- confirm against the platform assembler stubs.
  static ByteSize gclab_start_offset()         { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize gclab_top_offset()           { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize gclab_end_offset()           { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::end_offset(); }
 681 
 682   static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes ); }
 683 
 684   JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)
 685 
       // Low-level platform synchronization support.  These fields are
       // public; the per-field comments below describe each user.  Field
       // order matters here -- do not reorder (offsets may be consumed
       // outside this file).
 686  public:
 687   volatile intptr_t _Stalled ;
 688   volatile int _TypeTag ;
 689   ParkEvent * _ParkEvent ;                     // for synchronized()
 690   ParkEvent * _SleepEvent ;                    // for Thread.sleep
 691   ParkEvent * _MutexEvent ;                    // for native internal Mutex/Monitor
 692   ParkEvent * _MuxEvent ;                      // for low-level muxAcquire-muxRelease
 693   int NativeSyncRecursion ;                    // diagnostic
 694 
 695   volatile int _OnTrap ;                       // Resume-at IP delta
       // _hashState{X,Y,Z,W}: state words of the per-thread Marsaglia
       // shift-XOR RNG used for identity hashCode generation (per the
       // comments on W and X below).
 696   jint _hashStateW ;                           // Marsaglia Shift-XOR thread-local RNG
 697   jint _hashStateX ;                           // thread-specific hashCode generator state
 698   jint _hashStateY ;
 699   jint _hashStateZ ;
       // NOTE(review): presumably a Solaris schedctl(2) preemption-control
       // block (opaque here) -- confirm what non-Solaris builds store.
 700   void * _schedctl ;
 701 


1077 
1078 
       // The start routine this thread was created with.
1079   ThreadFunction entry_point() const             { return _entry_point; }
1080 
1081   // Allocates a new Java level thread object for this thread. thread_name may be NULL.
1082   void allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS);
1083 
1084   // Last frame anchor routines
1085 
       // All of the accessors below simply delegate to the embedded
       // JavaFrameAnchor (_anchor), which records the last Java sp/pc.
1086   JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }
1087 
1088   // last_Java_sp
1089   bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
1090   intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }
1091 
1092   // last_Java_pc
1093 
1094   address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }
1095 
1096   // Safepoint support
       // _thread_state accessors: most platforms use the trivial inline
       // load/store; PPC64 and AARCH64 instead get out-of-line inline
       // definitions that add memory barriers (see the comment in the
       // #else branch).
1097 #if !(defined(PPC64) || defined(AARCH64))
1098   JavaThreadState thread_state() const           { return _thread_state; }
1099   void set_thread_state(JavaThreadState s)       { _thread_state = s;    }
1100 #else
1101   // Use membars when accessing volatile _thread_state. See
1102   // Threads::create_vm() for size checks.
1103   inline JavaThreadState thread_state() const;
1104   inline void set_thread_state(JavaThreadState s);
1105 #endif
1106   ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
1107   void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
       // Delegates to the per-thread ThreadSafepointState; true when this
       // thread stopped at a safepoint poll.
1108   bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }
1109 
1110   // thread has called JavaThread::exit() or is terminated
       // i.e. true in both the _thread_exiting state and any terminated
       // state (see is_terminated() below).
1111   bool is_exiting()                              { return _terminated == _thread_exiting || is_terminated(); }
1112   // thread is terminated (no longer on the threads list); we compare
1113   // against the two non-terminated values so that a freed JavaThread
1114   // will also be considered terminated.
1115   bool is_terminated()                           { return _terminated != _not_terminated && _terminated != _thread_exiting; }
       // Unconditionally records the new termination state.
1116   void set_terminated(TerminatedTypes t)         { _terminated = t; }
1117   // special for Threads::remove() which is static:
1117   // special for Threads::remove() which is static:


       // Byte offsets of JavaThread fields, exported via byte_offset_of so
       // the fields can be addressed directly (NOTE(review): presumably by
       // generated interpreter/compiler code -- confirm against callers).
1408   static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2         ); }
1409   static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }
1410   static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc  ); }
1411   static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread            ); }
1412   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
1413   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
1414   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
1415   static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
1416   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
1417   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
1418   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
1419 
1420   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
1421   static ByteSize should_post_on_exceptions_flag_offset() {
1422     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
1423   }
1424 
       // GC write-barrier queue offsets; these fields exist only when all
       // collectors are built in.
1425 #if INCLUDE_ALL_GCS
1426   static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
1427   static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }
1428 
1429   static ByteSize gc_state_offset()              { return byte_offset_of(JavaThread, _gc_state); }
1430 
1431 #endif // INCLUDE_ALL_GCS
1432 
1433   // Returns the jni environment for this thread
1434   JNIEnv* jni_environment()                      { return &_jni_environment; }
1435 
       // Recovers the owning JavaThread from a JNIEnv* by subtracting the
       // offset of the embedded _jni_environment field.  Returns NULL only
       // for a thread that is already terminated (after blocking if the VM
       // has exited); a thread that has merely started to exit is still
       // returned -- see the comment inside.
1436   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
1437     JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
1438     // Only return NULL if thread is off the thread list; starting to
1439     // exit should not return NULL.
1440     if (thread_from_jni_env->is_terminated()) {
1441        thread_from_jni_env->block_if_vm_exited();
1442        return NULL;
1443     } else {
1444        return thread_from_jni_env;
1445     }
1446   }
1447 
1448   // JNI critical regions. These can nest.
       // _jni_active_critical holds the current nesting depth, so depth 1
       // means the outermost (last remaining) critical region.
1449   bool in_critical()    { return _jni_active_critical > 0; }
1450   bool in_last_critical()  { return _jni_active_critical == 1; }


1709  public:
       // VM-wide default stack size used when creating JavaThreads
       // (backed by the static _stack_size_at_create).
1710   static inline size_t stack_size_at_create(void) {
1711     return _stack_size_at_create;
1712   }
1713   static inline void set_stack_size_at_create(size_t value) {
1714     _stack_size_at_create = value;
1715   }
1716 
1717 #if INCLUDE_ALL_GCS
1718   // SATB marking queue support
       // Per-thread queue plus the shared static queue set it drains into.
1719   ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
1720   static SATBMarkQueueSet& satb_mark_queue_set() {
1721     return _satb_mark_queue_set;
1722   }
1723 
1724   // Dirty card queue support
1725   DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
1726   static DirtyCardQueueSet& dirty_card_queue_set() {
1727     return _dirty_card_queue_set;
1728   }
1729 
       // Per-thread GC state byte.  Writable only through the private
       // setter below or the static all-threads updaters.
1730   inline char gc_state() const;
1731 
1732 private:
1733   void set_gc_state(char in_prog);
1734 
1735 public:
1736   static void set_gc_state_all_threads(char in_prog);
1737   static void set_force_satb_flush_all_threads(bool value);
1738 #endif // INCLUDE_ALL_GCS
1739 
1740   // This method initializes the SATB and dirty card queues before a
1741   // JavaThread is added to the Java thread list. Right now, we don't
1742   // have to do anything to the dirty card queue (it should have been
1743   // activated when the thread was created), but we have to activate
1744   // the SATB queue if the thread is created while a marking cycle is
1745   // in progress. The activation / de-activation of the SATB queues at
1746   // the beginning / end of a marking cycle is done during safepoints
1747   // so we have to make sure this method is called outside one to be
1748   // able to safely read the active field of the SATB queue set. Right
1749   // now, it is called just before the thread is added to the Java
1750   // thread list in the Threads::add() method. That method is holding
1751   // the Threads_lock which ensures we are outside a safepoint. We
1752   // cannot do the obvious and set the active field of the SATB queue
1753   // when the thread is created given that, in some cases, safepoints
1754   // might happen between the JavaThread constructor being called and the
1755   // thread being added to the Java thread list (an example of this is
1756   // when the structure for the DestroyJavaVM thread is created).
1757 #if INCLUDE_ALL_GCS
1758   void initialize_queues();
1759 #else  // INCLUDE_ALL_GCS
       // No GC barrier queues in this configuration; nothing to initialize.
1760   void initialize_queues() { }
1761 #endif // INCLUDE_ALL_GCS
1762 
1763   // Machine dependent stuff
       // Exactly one TARGET_OS_ARCH_* macro is defined by the build, so
       // exactly one of the headers below is included; note these are
       // included *inside* the class body and contribute platform-specific
       // members.
1764 #ifdef TARGET_OS_ARCH_linux_x86
1765 # include "thread_linux_x86.hpp"
1766 #endif
1767 #ifdef TARGET_OS_ARCH_linux_aarch64
1768 # include "thread_linux_aarch64.hpp"
1769 #endif
1770 #ifdef TARGET_OS_ARCH_linux_sparc
1771 # include "thread_linux_sparc.hpp"
1772 #endif
1773 #ifdef TARGET_OS_ARCH_linux_zero
1774 # include "thread_linux_zero.hpp"
1775 #endif
1776 #ifdef TARGET_OS_ARCH_solaris_x86
1777 # include "thread_solaris_x86.hpp"
1778 #endif
1779 #ifdef TARGET_OS_ARCH_solaris_sparc
1780 # include "thread_solaris_sparc.hpp"
1781 #endif
1782 #ifdef TARGET_OS_ARCH_windows_x86
1783 # include "thread_windows_x86.hpp"
1784 #endif
1785 #ifdef TARGET_OS_ARCH_linux_arm
1786 # include "thread_linux_arm.hpp"
1787 #endif
1788 #ifdef TARGET_OS_ARCH_linux_ppc
1789 # include "thread_linux_ppc.hpp"


     // All-static registry of the VM's JavaThreads plus VM lifecycle
     // entry points.  (The class definition continues past this excerpt.)
1959 class Threads: AllStatic {
1960   friend class VMStructs;
1961  private:
       // Head of the thread list; first() below hands it out.
1962   static JavaThread* _thread_list;
1963   static int         _number_of_threads;
1964   static int         _number_of_non_daemon_threads;
       // NOTE(review): presumably the VM exit/return code -- confirm
       // against create_vm()/destroy_vm().
1965   static int         _return_code;
1966 #ifdef ASSERT
       // Debug-only flag; semantics not visible in this excerpt.
1967   static bool        _vm_complete;
1968 #endif
1969 
1970  public:
1971   // Thread management
1972   // force_daemon is a concession to JNI, where we may need to add a
1973   // thread to the thread list before allocating its thread object
1974   static void add(JavaThread* p, bool force_daemon = false);
1975   static void remove(JavaThread* p);
       // Membership test against the thread list.
1976   static bool includes(JavaThread* p);
1977   static JavaThread* first()                     { return _thread_list; }
       // Iteration: threads_do visits threads via the closure; the
       // java_threads_do variant is presumably restricted to JavaThreads
       // (confirm in thread.cpp).
1978   static void threads_do(ThreadClosure* tc);
1979   static void java_threads_do(ThreadClosure* tc);
1980 
1981   // Initializes the vm and creates the vm thread
1982   static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
1983   static void convert_vm_init_libraries_to_agents();
1984   static void create_vm_init_libraries();
1985   static void create_vm_init_agents();
1986   static void shutdown_vm_agents();
1987   static bool destroy_vm();
1988   // Supported VM versions via JNI
1989   // Includes JNI_VERSION_1_1
1990   static jboolean is_supported_jni_version_including_1_1(jint version);
1991   // Does not include JNI_VERSION_1_1
1992   static jboolean is_supported_jni_version(jint version);
1993 
1994   // Garbage collection
1995   static void follow_other_roots(void f(oop*));
1996 
1997   // Apply "f->do_oop" to all root oops in all threads.
1998   // This version may only be called by sequential code.
1999   static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);


< prev index next >