template <class T, MEMFLAGS F> class ChunkedList;
typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;

DEBUG_ONLY(class ResourceMark;)

class WorkerThread;

// Class hierarchy
// - Thread
//   - NamedThread
//     - VMThread
//     - ConcurrentGCThread
//     - WorkerThread
//       - GangWorker
//       - GCTaskThread
//   - JavaThread
//   - WatcherThread

class Thread: public ThreadShadow {
  friend class VMStructs;

#if INCLUDE_ALL_GCS
 protected:
  // Support for Shenandoah barriers. This is only accessible from JavaThread,
  // but we really want to keep this field at a low Thread offset (within the
  // first 128 bytes), because that lets the barrier fastpaths be optimally encoded.
  char _gc_state;
  static char _gc_state_global;
#endif

 private:
  // Exception handling
  // (Note: _pending_exception and friends are in ThreadShadow)
  //oop         _pending_exception;    // pending exception for current thread
  // const char* _exception_file;      // file information for exception (debugging only)
  // int         _exception_line;      // line information for exception (debugging only)
 protected:
  // Support for forcing alignment of thread objects for biased locking
  void*       _real_malloc_address;
 public:
  void* operator new(size_t size) throw() { return allocate(size, true); }
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    return allocate(size, false);
  }
  void  operator delete(void* p);

 protected:
  static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
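  // A minimal sketch of how the aligned allocation above is typically
  // implemented (illustrative only; the real definition lives in thread.cpp,
  // and the use of markOopDesc::biased_lock_alignment is an assumption here):
  //
  //   void* raw = os::malloc(size + markOopDesc::biased_lock_alignment, flags);
  //   Thread* t = (Thread*)align_size_up((intptr_t)raw,
  //                                      markOopDesc::biased_lock_alignment);
  //   t->_real_malloc_address = raw;   // operator delete frees this raw block
  //   return t;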
 private:

  // ***************************************************************
  // ...
  // mutex, or blocking on an object synchronizer (Java locking).
  // If !allow_safepoint(), then an assertion failure will happen in any of the above cases.
  // If !allow_allocation(), then an assertion failure will happen during allocation
  // (hence, !allow_safepoint() => !allow_allocation()).
  //
  // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
  //
  NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, the thread allows a safepoint to happen
  debug_only(int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.
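  // Sketch of the RAII shape such a verifier takes (illustrative only; the
  // real No_Safepoint_Verifier carries more state and checks). A nonzero
  // count forbids safepoints, so the constructor increments and the
  // destructor decrements:
  //
  //   class No_Safepoint_Verifier {
  //    public:
  //     No_Safepoint_Verifier()  { NOT_PRODUCT(Thread::current()->_allow_safepoint_count++;) }
  //     ~No_Safepoint_Verifier() { NOT_PRODUCT(Thread::current()->_allow_safepoint_count--;) }
  //   };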

  // Used by SkipGCALot class.
  NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?

  friend class No_Alloc_Verifier;
  friend class No_Safepoint_Verifier;
  friend class Pause_No_Safepoint_Verifier;
  friend class ThreadLocalStorage;
  friend class GC_locker;

  ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
  ThreadLocalAllocBuffer _gclab;                // Thread-local allocation buffer for GC (e.g. evacuation)
  uint _worker_id;                              // Worker ID
  bool _force_satb_flush;                       // Force SATB flush
  double _paced_time;                           // Accumulated paced time

  jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                // the Java heap
  jlong _allocated_bytes_gclab;                 // Cumulative number of bytes allocated on
                                                // the Java heap, in GCLABs

  // Thread-local buffer used by MetadataOnStackMark.
  MetadataOnStackBuffer* _metadata_on_stack_buffer;

  JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;)      // Thread-local data for jfr

  ThreadExt _ext;

  int _vm_operation_started_count;              // VM_Operation support
  int _vm_operation_completed_count;            // VM_Operation support

  char _oom_during_evac;

  ObjectMonitor* _current_pending_monitor;      // ObjectMonitor this thread
                                                // is waiting to lock
  bool _current_pending_monitor_is_from_java;   // locking is from Java code

  // ObjectMonitor on which this thread called Object.wait()
  ObjectMonitor* _current_waiting_monitor;

  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
 public:
  ObjectMonitor* omFreeList;
  int omFreeCount;                              // length of omFreeList
  int omFreeProvision;                          // reload chunk size
  ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
  int omInUseCount;                             // length of omInUseList
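  // Illustrative only: monitors are donated to and reclaimed from this cache
  // by pushing/popping the singly-linked list (the real logic lives in
  // ObjectSynchronizer; FreeNext is the SLL link field):
  //
  //   m->FreeNext = omFreeList;   // push monitor m onto the free list
  //   omFreeList  = m;
  //   omFreeCount++;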

#ifdef ASSERT
 private:
  bool _visited_for_critical_count;

 public:
  // ...
                           (volatile jint*)&_suspend_flags,
                           (jint)flags) != (jint)flags);
  }

  void set_has_async_exception() {
    set_suspend_flag(_has_async_exception);
  }
  void clear_has_async_exception() {
    clear_suspend_flag(_has_async_exception);
  }

  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }

  void set_critical_native_unlock() {
    set_suspend_flag(_critical_native_unlock);
  }
  void clear_critical_native_unlock() {
    clear_suspend_flag(_critical_native_unlock);
  }

  bool is_oom_during_evac() const;
  void set_oom_during_evac(bool oom);

#ifdef ASSERT
  bool is_evac_allowed() const;
  void set_evac_allowed(bool evac_allowed);
#endif

  // Support for Unhandled Oop detection
#ifdef CHECK_UNHANDLED_OOPS
 private:
  UnhandledOops* _unhandled_oops;
 public:
  UnhandledOops* unhandled_oops() { return _unhandled_oops; }
  // Mark oop safe for gc. It may be stack allocated but won't move.
  void allow_unhandled_oop(oop* op) {
    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
  }
  // Clear oops at safepoint so crashes point to unhandled oop violator.
  void clear_unhandled_oops() {
    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

#ifndef PRODUCT
  bool skip_gcalot()           { return _skip_gcalot; }
  void set_skip_gcalot(bool v) { _skip_gcalot = v; }
#endif
  // ...
  OSThread* osthread() const                     { return _osthread; }
  void set_osthread(OSThread* thread)            { _osthread = thread; }

  // JNI handle support
  JNIHandleBlock* active_handles() const         { return _active_handles; }
  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
  JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }

  // Internal handle support
  HandleArea* handle_area() const                { return _handle_area; }
  void set_handle_area(HandleArea* area)         { _handle_area = area; }

  GrowableArray<Metadata*>* metadata_handles() const           { return _metadata_handles; }
  void set_metadata_handles(GrowableArray<Metadata*>* handles) { _metadata_handles = handles; }

  // Thread-Local Allocation Buffer (TLAB) support
  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
  void initialize_tlab() {
    if (UseTLAB) {
      tlab().initialize(false);
      if (UseShenandoahGC && (is_Java_thread() || is_Worker_thread())) {
        gclab().initialize(true);
      }
    }
  }

  // Thread-Local GC Allocation Buffer (GCLAB) support
  ThreadLocalAllocBuffer& gclab() {
    assert(UseShenandoahGC, "Only for Shenandoah");
    assert(!_gclab.is_initialized() || (is_Java_thread() || is_Worker_thread()),
           "Only Java and GC worker threads are allowed to get GCLABs");
    return _gclab;
  }

  void set_worker_id(uint id)           { _worker_id = id; }
  uint worker_id()                      { return _worker_id; }

  void set_force_satb_flush(bool value) { _force_satb_flush = value; }
  bool is_force_satb_flush()            { return _force_satb_flush; }

  void add_paced_time(double v)         { _paced_time += v; }
  double paced_time()                   { return _paced_time; }
  void reset_paced_time()               { _paced_time = 0; }

  jlong allocated_bytes()               { return _allocated_bytes; }
  void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
  void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
  inline jlong cooked_allocated_bytes();

  jlong allocated_bytes_gclab()               { return _allocated_bytes_gclab; }
  void set_allocated_bytes_gclab(jlong value) { _allocated_bytes_gclab = value; }
  void incr_allocated_bytes_gclab(jlong size) { _allocated_bytes_gclab += size; }

  JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
  JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS)

  const ThreadExt& ext() const          { return _ext; }
        ThreadExt& ext()                { return _ext; }

  // VM operation support
  int vm_operation_ticket()                     { return ++_vm_operation_started_count; }
  int vm_operation_completed_count()            { return _vm_operation_completed_count; }
  void increment_vm_operation_completed_count() { _vm_operation_completed_count++; }

  // For tracking the heavyweight monitor the thread is pending on.
  ObjectMonitor* current_pending_monitor() {
    return _current_pending_monitor;
  }
  void set_current_pending_monitor(ObjectMonitor* monitor) {
    _current_pending_monitor = monitor;
  }
  void set_current_pending_monitor_is_from_java(bool from_java) {
    _current_pending_monitor_is_from_java = from_java;
  }
  // ...
  static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles); }

  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }

#define TLAB_FIELD_OFFSET(name) \
  static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }

  TLAB_FIELD_OFFSET(start)
  TLAB_FIELD_OFFSET(end)
  TLAB_FIELD_OFFSET(top)
  TLAB_FIELD_OFFSET(pf_top)
  TLAB_FIELD_OFFSET(size)                        // desired_size
  TLAB_FIELD_OFFSET(refill_waste_limit)
  TLAB_FIELD_OFFSET(number_of_refills)
  TLAB_FIELD_OFFSET(fast_refill_waste)
  TLAB_FIELD_OFFSET(slow_allocations)

#undef TLAB_FIELD_OFFSET
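  // For example, TLAB_FIELD_OFFSET(start) expands to:
  //
  //   static ByteSize tlab_start_offset() {
  //     return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset();
  //   }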

  static ByteSize gclab_start_offset()           { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::start_offset(); }
  static ByteSize gclab_top_offset()             { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::top_offset(); }
  static ByteSize gclab_end_offset()             { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::end_offset(); }

  static ByteSize allocated_bytes_offset()       { return byte_offset_of(Thread, _allocated_bytes); }

  JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)

 public:
  volatile intptr_t _Stalled;
  volatile int _TypeTag;
  ParkEvent* _ParkEvent;                         // for synchronized()
  ParkEvent* _SleepEvent;                        // for Thread.sleep
  ParkEvent* _MutexEvent;                        // for native internal Mutex/Monitor
  ParkEvent* _MuxEvent;                          // for low-level muxAcquire-muxRelease
  int NativeSyncRecursion;                       // diagnostic

  volatile int _OnTrap;                          // Resume-at IP delta
  jint _hashStateW;                              // Marsaglia Shift-XOR thread-local RNG
  jint _hashStateX;                              // thread-specific hashCode generator state
  jint _hashStateY;
  jint _hashStateZ;
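  // These four words seed Marsaglia's xor-shift generator; the identity-hash
  // step in ObjectSynchronizer is roughly the following (illustrative sketch):
  //
  //   jint t = _hashStateX;
  //   t ^= (t << 11);
  //   _hashStateX = _hashStateY;
  //   _hashStateY = _hashStateZ;
  //   _hashStateZ = _hashStateW;
  //   jint v = _hashStateW;
  //   v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
  //   _hashStateW = v;               // v is the new identity hash value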
  void* _schedctl;

  // ...
  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2); }
  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state); }
  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc); }
  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread); }
  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop); }
  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc); }
  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }
  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  static ByteSize should_post_on_exceptions_flag_offset() {
    return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
  }

#if INCLUDE_ALL_GCS
  static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
  static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }

  static ByteSize gc_state_offset()              { return byte_offset_of(JavaThread, _gc_state); }
#endif // INCLUDE_ALL_GCS

  // Returns the jni environment for this thread
  JNIEnv* jni_environment()                      { return &_jni_environment; }

  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread* thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
      thread_from_jni_env->block_if_vm_exited();
      return NULL;
    } else {
      return thread_from_jni_env;
    }
  }
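  // Typical use from native code (illustrative; Java_Foo_bar is a made-up
  // JNI entry point): the JNIEnv* handed out by the JVM is a field embedded
  // in JavaThread, so subtracting its offset recovers the owning thread.
  //
  //   JNIEXPORT void JNICALL Java_Foo_bar(JNIEnv* env, jobject obj) {
  //     JavaThread* thread = JavaThread::thread_from_jni_environment(env);
  //     ...
  //   }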

  // JNI critical regions. These can nest.
  bool in_critical()      { return _jni_active_critical > 0; }
  bool in_last_critical() { return _jni_active_critical == 1; }
  // ...
 public:
  static inline size_t stack_size_at_create(void) {
    return _stack_size_at_create;
  }
  static inline void set_stack_size_at_create(size_t value) {
    _stack_size_at_create = value;
  }

#if INCLUDE_ALL_GCS
  // SATB marking queue support
  ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; }
  static SATBMarkQueueSet& satb_mark_queue_set() {
    return _satb_mark_queue_set;
  }

  // Dirty card queue support
  DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
  static DirtyCardQueueSet& dirty_card_queue_set() {
    return _dirty_card_queue_set;
  }

  inline char gc_state() const;

 private:
  void set_gc_state(char in_prog);

 public:
  static void set_gc_state_all_threads(char in_prog);
  static void set_force_satb_flush_all_threads(bool value);
#endif // INCLUDE_ALL_GCS

  // This method initializes the SATB and dirty card queues before a
  // JavaThread is added to the Java thread list. Right now, we don't
  // have to do anything to the dirty card queue (it should have been
  // activated when the thread was created), but we have to activate
  // the SATB queue if the thread is created while a marking cycle is
  // in progress. The activation / de-activation of the SATB queues at
  // the beginning / end of a marking cycle is done during safepoints,
  // so we have to make sure this method is called outside one to be
  // able to safely read the active field of the SATB queue set. Right
  // now, it is called just before the thread is added to the Java
  // thread list in the Threads::add() method. That method holds
  // the Threads_lock, which ensures we are outside a safepoint. We
  // cannot do the obvious thing and set the active field of the SATB
  // queue when the thread is created, given that, in some cases,
  // safepoints might happen between the JavaThread constructor being
  // called and the thread being added to the Java thread list (an example
  // of this is when the structure for the DestroyJavaVM thread is created).
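  // In outline, the method described above (initialize_queues() in
  // thread.cpp) amounts to the following (illustrative sketch):
  //
  //   void JavaThread::initialize_queues() {
  //     assert(!SafepointSynchronize::is_at_safepoint(),
  //            "we should not be at a safepoint");
  //     if (satb_mark_queue_set().is_active()) {
  //       satb_mark_queue().set_active(true);   // a marking cycle is in progress
  //     }
  //   }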
#if INCLUDE_ALL_GCS
  // ...
class Threads: AllStatic {
  friend class VMStructs;
 private:
  static JavaThread* _thread_list;
  static int         _number_of_threads;
  static int         _number_of_non_daemon_threads;
  static int         _return_code;
#ifdef ASSERT
  static bool        _vm_complete;
#endif

 public:
  // Thread management
  // force_daemon is a concession to JNI, where we may need to add a
  // thread to the thread list before allocating its thread object
  static void add(JavaThread* p, bool force_daemon = false);
  static void remove(JavaThread* p);
  static bool includes(JavaThread* p);
  static JavaThread* first() { return _thread_list; }
  static void threads_do(ThreadClosure* tc);
  static void java_threads_do(ThreadClosure* tc);

  // Initializes the VM and creates the VM thread
  static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
  static void convert_vm_init_libraries_to_agents();
  static void create_vm_init_libraries();
  static void create_vm_init_agents();
  static void shutdown_vm_agents();
  static bool destroy_vm();
  // Supported VM versions via JNI
  // Includes JNI_VERSION_1_1
  static jboolean is_supported_jni_version_including_1_1(jint version);
  // Does not include JNI_VERSION_1_1
  static jboolean is_supported_jni_version(jint version);

  // Garbage collection
  static void follow_other_roots(void f(oop*));

  // Apply "f->do_oop" to all root oops in all threads.
  // This version may only be called by sequential code.
  static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
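  // Conceptually (illustrative; see thread.cpp for the real loop, which also
  // visits the VMThread and other non-Java threads):
  //
  //   ALL_JAVA_THREADS(p) {
  //     p->oops_do(f, cld_f, cf);
  //   }
  //   VMThread::vm_thread()->oops_do(f, cld_f, cf);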