src/share/vm/runtime/thread.cpp (old version of the changed sections)

  79 #include "services/memTracker.hpp"
  80 #include "services/threadService.hpp"
  81 #include "utilities/defaultStream.hpp"
  82 #include "utilities/dtrace.hpp"
  83 #include "utilities/events.hpp"
  84 #include "utilities/preserveException.hpp"
  85 #include "utilities/macros.hpp"
  86 #ifdef TARGET_OS_FAMILY_linux
  87 # include "os_linux.inline.hpp"
  88 #endif
  89 #ifdef TARGET_OS_FAMILY_solaris
  90 # include "os_solaris.inline.hpp"
  91 #endif
  92 #ifdef TARGET_OS_FAMILY_windows
  93 # include "os_windows.inline.hpp"
  94 #endif
  95 #ifdef TARGET_OS_FAMILY_bsd
  96 # include "os_bsd.inline.hpp"
  97 #endif
  98 #if INCLUDE_ALL_GCS
  99 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 100 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 101 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 102 #endif // INCLUDE_ALL_GCS
 103 #ifdef COMPILER1
 104 #include "c1/c1_Compiler.hpp"
 105 #endif
 106 #ifdef COMPILER2
 107 #include "opto/c2compiler.hpp"
 108 #include "opto/idealGraphPrinter.hpp"
 109 #endif
 110 #if INCLUDE_RTM_OPT
 111 #include "runtime/rtmLocking.hpp"
 112 #endif
 113 #if INCLUDE_JFR
 114 #include "jfr/jfr.hpp"
 115 #endif
 116 
 117 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 118 


 286   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
 287   // and ::Release()
 288   _ParkEvent   = ParkEvent::Allocate (this) ;
 289   _SleepEvent  = ParkEvent::Allocate (this) ;
 290   _MutexEvent  = ParkEvent::Allocate (this) ;
 291   _MuxEvent    = ParkEvent::Allocate (this) ;
 292 
 293 #ifdef CHECK_UNHANDLED_OOPS
 294   if (CheckUnhandledOops) {
 295     _unhandled_oops = new UnhandledOops(this);
 296   }
 297 #endif // CHECK_UNHANDLED_OOPS
 298 #ifdef ASSERT
 299   if (UseBiasedLocking) {
 300     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
 301     assert(this == _real_malloc_address ||
 302            this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
 303            "bug in forced alignment of thread objects");
 304   }
 305 #endif /* ASSERT */
 306 }
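
For context on the asserts above: biased locking stores the owning thread's address in the object mark word, so the low bits of the Thread pointer must stay clear for the bias/epoch/age fields; the Thread object is therefore over-aligned and the raw malloc address rounded up. A minimal sketch of the rounding arithmetic, using an illustrative alignment of 256 in place of markOopDesc::biased_lock_alignment:

  // align_size_up rounds a value up to the next multiple of a power-of-two alignment:
  //   align_size_up(x, a) == (x + a - 1) & ~(a - 1)
  // e.g. align_size_up(0x1001, 256) == (0x1001 + 0xFF) & ~0xFF == 0x1100
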
 307 
 308 void Thread::initialize_thread_local_storage() {
 309   // Note: Make sure this method only calls
 310   // non-blocking operations. Otherwise, it might not work
 311   // with the thread-startup/safepoint interaction.
 312 
 313   // During Java thread startup, safepoint code should allow this
 314   // method to complete because it may need to allocate memory to
 315   // store information for the new thread.
 316 
 317   // initialize structure dependent on thread local storage
 318   ThreadLocalStorage::set_thread(this);
 319 }
 320 
 321 void Thread::record_stack_base_and_size() {
 322   set_stack_base(os::current_stack_base());
 323   set_stack_size(os::current_stack_size());
 324   if (is_Java_thread()) {
 325     ((JavaThread*) this)->set_stack_overflow_limit();
 326   }
 327   // CR 7190089: on Solaris, primordial thread's stack is adjusted


1491     set_thread_profiler(pp);
1492   }
1493 
1494   // Setup safepoint state info for this thread
1495   ThreadSafepointState::create(this);
1496 
1497   debug_only(_java_call_counter = 0);
1498 
1499   // JVMTI PopFrame support
1500   _popframe_condition = popframe_inactive;
1501   _popframe_preserved_args = NULL;
1502   _popframe_preserved_args_size = 0;
1503   _frames_to_pop_failed_realloc = 0;
1504 
1505   pd_initialize();
1506 }
1507 
1508 #if INCLUDE_ALL_GCS
1509 SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
1510 DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
1511 #endif // INCLUDE_ALL_GCS
1512 
1513 JavaThread::JavaThread(bool is_attaching_via_jni) :
1514   Thread()
1515 #if INCLUDE_ALL_GCS
1516   , _satb_mark_queue(&_satb_mark_queue_set),
1517   _dirty_card_queue(&_dirty_card_queue_set)
1518 #endif // INCLUDE_ALL_GCS
1519 {
1520   initialize();
1521   if (is_attaching_via_jni) {
1522     _jni_attach_state = _attaching_via_jni;
1523   } else {
1524     _jni_attach_state = _not_attaching_via_jni;
1525   }
1526   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
1527 }
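
The is_attaching_via_jni flag distinguishes threads the VM launches itself from native threads that attach through the JNI Invocation API. A hedged sketch of the native side that ends up in this constructor with is_attaching_via_jni == true (standard JNI calls; error handling omitted):

  JavaVM* vm;         // obtained earlier from JNI_CreateJavaVM or JNI_GetCreatedJavaVMs
  JNIEnv* env = NULL;
  vm->AttachCurrentThread((void**) &env, NULL);  // VM constructs a JavaThread for this native thread
  // ... call into Java through env ...
  vm->DetachCurrentThread();
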
1528 
1529 bool JavaThread::reguard_stack(address cur_sp) {
1530   if (_stack_guard_state != stack_guard_yellow_disabled) {


1894   remove_stack_guard_pages();
1895 
1896   if (UseTLAB) {
1897     tlab().make_parsable(true);  // retire TLAB
1898   }
1899 
1900   if (JvmtiEnv::environments_might_exist()) {
1901     JvmtiExport::cleanup_thread(this);
1902   }
1903 
1904   // We must flush any deferred card marks before removing a thread from
1905   // the list of active threads.
1906   Universe::heap()->flush_deferred_store_barrier(this);
1907   assert(deferred_card_mark().is_empty(), "Should have been flushed");
1908 
1909 #if INCLUDE_ALL_GCS
1910   // We must flush the G1-related buffers before removing a thread
1911   // from the list of active threads. We must do this after any deferred
1912   // card marks have been flushed (above) so that any entries that are
1913   // added to the thread's dirty card queue as a result are not lost.
1914   if (UseG1GC) {
1915     flush_barrier_queues();
1916   }
1917 #endif // INCLUDE_ALL_GCS
1918 
1919   // Remove from the list of active threads, and notify the VM thread if we are the last non-daemon thread
1920   Threads::remove(this);
1921 }
1922 
1923 #if INCLUDE_ALL_GCS
1924 // Flush G1-related queues.
1925 void JavaThread::flush_barrier_queues() {
1926   satb_mark_queue().flush();
1927   dirty_card_queue().flush();
1928 }
1929 
1930 void JavaThread::initialize_queues() {
1931   assert(!SafepointSynchronize::is_at_safepoint(),
1932          "we should not be at a safepoint");
1933 
1934   ObjPtrQueue& satb_queue = satb_mark_queue();
1935   SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
1936   // The SATB queue should have been constructed with its active
1937   // field set to false.
1938   assert(!satb_queue.is_active(), "SATB queue should not be active");
1939   assert(satb_queue.is_empty(), "SATB queue should be empty");
1940   // If we are creating the thread during a marking cycle, we should
1941   // set the active field of the SATB queue to true.
1942   if (satb_queue_set.is_active()) {
1943     satb_queue.set_active(true);
1944   }
1945 
1946   DirtyCardQueue& dirty_queue = dirty_card_queue();
1947   // The dirty card queue should have been constructed with its
1948   // active field set to true.
1949   assert(dirty_queue.is_active(), "dirty card queue should be active");
1950 }
1951 #endif // INCLUDE_ALL_GCS
1952 
1953 void JavaThread::cleanup_failed_attach_current_thread() {
1954   if (get_thread_profiler() != NULL) {
1955     get_thread_profiler()->disengage();
1956     ResourceMark rm;
1957     get_thread_profiler()->print(get_thread_name());
1958   }
1959 
1960   if (active_handles() != NULL) {
1961     JNIHandleBlock* block = active_handles();
1962     set_active_handles(NULL);
1963     JNIHandleBlock::release_block(block);
1964   }
1965 
1966   if (free_handle_block() != NULL) {
1967     JNIHandleBlock* block = free_handle_block();
1968     set_free_handle_block(NULL);
1969     JNIHandleBlock::release_block(block);
1970   }
1971 
1972   // These have to be removed while this is still a valid thread.
1973   remove_stack_guard_pages();
1974 
1975   if (UseTLAB) {
1976     tlab().make_parsable(true);  // retire TLAB, if any
1977   }
1978 
1979 #if INCLUDE_ALL_GCS
1980   if (UseG1GC) {
1981     flush_barrier_queues();
1982   }
1983 #endif // INCLUDE_ALL_GCS
1984 
1985   Threads::remove(this);
1986   delete this;
1987 }
1988 
1989 
1990 
1991 
1992 JavaThread* JavaThread::active() {
1993   Thread* thread = ThreadLocalStorage::thread();
1994   assert(thread != NULL, "just checking");
1995   if (thread->is_Java_thread()) {
1996     return (JavaThread*) thread;
1997   } else {
1998     assert(thread->is_VM_thread(), "this must be a vm thread");
1999     VM_Operation* op = ((VMThread*) thread)->vm_operation();
2000     JavaThread* ret = (op == NULL) ? NULL : (JavaThread*) op->calling_thread();
2001     assert(ret->is_Java_thread(), "must be a Java thread");
2002     return ret;


3267 // ======= Threads ========
3268 
3269 // The Threads class links together all active threads, and provides
3270 // operations over all threads.  It is protected by its own Mutex
3271 // lock, which is also used in other contexts to keep the thread
3272 // being operated on from exiting and going away unexpectedly
3273 // (e.g., during safepoint synchronization).
3274 
3275 JavaThread* Threads::_thread_list = NULL;
3276 int         Threads::_number_of_threads = 0;
3277 int         Threads::_number_of_non_daemon_threads = 0;
3278 int         Threads::_return_code = 0;
3279 size_t      JavaThread::_stack_size_at_create = 0;
3280 #ifdef ASSERT
3281 bool        Threads::_vm_complete = false;
3282 #endif
3283 
3284 // All JavaThreads
3285 #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
3286 
3287 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
3288 void Threads::threads_do(ThreadClosure* tc) {
3289   assert_locked_or_safepoint(Threads_lock);
3290   // ALL_JAVA_THREADS iterates through all JavaThreads
3291   ALL_JAVA_THREADS(p) {
3292     tc->do_thread(p);
3293   }
3294   // Someday we could have a table or list of all non-JavaThreads.
3295   // For now, just manually iterate through them.
3296   tc->do_thread(VMThread::vm_thread());
3297   Universe::heap()->gc_threads_do(tc);
3298   WatcherThread *wt = WatcherThread::watcher_thread();
3299   // Strictly speaking, the following NULL check isn't sufficient to make sure
3300   // the data for WatcherThread is still valid upon being examined. However,
3301   // considering that WatcherThread terminates when the VM is on the way to
3302   // exit at safepoint, the chance of the above is extremely small. The right
3303   // way to prevent termination of WatcherThread would be to acquire
3304   // Terminator_lock, but we can't do that without violating the lock rank
3305   // checking in some cases.
3306   if (wt != NULL)


3583 
3584   // record VM initialization completion time
3585 #if INCLUDE_MANAGEMENT
3586   Management::record_vm_init_completed();
3587 #endif // INCLUDE_MANAGEMENT
3588 
3589   // Compute system loader. Note that this has to occur after set_init_completed, since
3590   // valid exceptions may be thrown in the process.
3591   // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
3592   // set_init_completed has just been called, causing exceptions not to be shortcut
3593   // anymore. We call vm_exit_during_initialization directly instead.
3594   SystemDictionary::compute_java_system_loader(THREAD);
3595   if (HAS_PENDING_EXCEPTION) {
3596     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3597   }
3598 
3599 #if INCLUDE_ALL_GCS
3600   // Support for ConcurrentMarkSweep. This should be cleaned up
3601   // and better encapsulated. The ugly nested if test would go away
3602   // once things are properly refactored. XXX YSR
3603   if (UseConcMarkSweepGC || UseG1GC) {
3604     if (UseConcMarkSweepGC) {
3605       ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
3606     } else {
3607       ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
3608     }
3609     if (HAS_PENDING_EXCEPTION) {
3610       vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3611     }
3612   }
3613 #endif // INCLUDE_ALL_GCS
3614 
3615   // Always call even when there are no JVMTI environments yet, since environments
3616   // may be attached late and JVMTI must track phases of VM execution
3617   JvmtiExport::enter_live_phase();
3618 
3619   // Signal Dispatcher needs to be started before VMInit event is posted
3620   os::signal_init();
3621 
3622   // Start Attach Listener if +StartAttachListener or it can't be started lazily
3623   if (!DisableAttachMechanism) {
3624     AttachListener::vm_start();
3625     if (StartAttachListener || AttachListener::init_at_startup()) {


4178 void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4179   ALL_JAVA_THREADS(p) {
4180     p->oops_do(f, cld_f, cf);
4181   }
4182   VMThread::vm_thread()->oops_do(f, cld_f, cf);
4183 }
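
Threads::oops_do hands every thread's oop roots to the supplied closure. A minimal sketch of a visiting closure, assuming the standard OopClosure virtuals; the counting class itself is hypothetical:

  class CountOopsClosure : public OopClosure {
   public:
    size_t _count;
    CountOopsClosure() : _count(0) {}
    virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
    virtual void do_oop(narrowOop* p) { if (*p != 0)    _count++; }
  };
  // Run under the Threads_lock or at a safepoint, as the assert in oops_do's callers requires.
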
4184 
4185 void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4186   // Introduce a mechanism allowing parallel threads to claim threads as
4187   // root groups.  Overhead should be small enough to use all the time,
4188   // even in sequential code.
4189   SharedHeap* sh = SharedHeap::heap();
4190   // Cannot yet substitute active_workers for n_par_threads
4191   // because of G1CollectedHeap::verify() use of
4192   // SharedHeap::process_roots().  n_par_threads == 0 will
4193   // turn off parallelism in process_roots while active_workers
4194   // is being used for parallelism elsewhere.
4195   bool is_par = sh->n_par_threads() > 0;
4196   assert(!is_par ||
4197          (SharedHeap::heap()->n_par_threads() ==
4198           SharedHeap::heap()->workers()->active_workers()), "Mismatch");
4199   int cp = SharedHeap::heap()->strong_roots_parity();
4200   ALL_JAVA_THREADS(p) {
4201     if (p->claim_oops_do(is_par, cp)) {
4202       p->oops_do(f, cld_f, cf);
4203     }
4204   }
4205   VMThread* vmt = VMThread::vm_thread();
4206   if (vmt->claim_oops_do(is_par, cp)) {
4207     vmt->oops_do(f, cld_f, cf);
4208   }
4209 }
4210 
4211 #if INCLUDE_ALL_GCS
4212 // Used by ParallelScavenge
4213 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
4214   ALL_JAVA_THREADS(p) {
4215     q->enqueue(new ThreadRootsTask(p));
4216   }
4217   q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
4218 }

src/share/vm/runtime/thread.cpp (new version, with the Shenandoah changes applied)

  79 #include "services/memTracker.hpp"
  80 #include "services/threadService.hpp"
  81 #include "utilities/defaultStream.hpp"
  82 #include "utilities/dtrace.hpp"
  83 #include "utilities/events.hpp"
  84 #include "utilities/preserveException.hpp"
  85 #include "utilities/macros.hpp"
  86 #ifdef TARGET_OS_FAMILY_linux
  87 # include "os_linux.inline.hpp"
  88 #endif
  89 #ifdef TARGET_OS_FAMILY_solaris
  90 # include "os_solaris.inline.hpp"
  91 #endif
  92 #ifdef TARGET_OS_FAMILY_windows
  93 # include "os_windows.inline.hpp"
  94 #endif
  95 #ifdef TARGET_OS_FAMILY_bsd
  96 # include "os_bsd.inline.hpp"
  97 #endif
  98 #if INCLUDE_ALL_GCS
  99 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
 100 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 101 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 102 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 103 #endif // INCLUDE_ALL_GCS
 104 #ifdef COMPILER1
 105 #include "c1/c1_Compiler.hpp"
 106 #endif
 107 #ifdef COMPILER2
 108 #include "opto/c2compiler.hpp"
 109 #include "opto/idealGraphPrinter.hpp"
 110 #endif
 111 #if INCLUDE_RTM_OPT
 112 #include "runtime/rtmLocking.hpp"
 113 #endif
 114 #if INCLUDE_JFR
 115 #include "jfr/jfr.hpp"
 116 #endif
 117 
 118 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 119 


 287   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
 288   // and ::Release()
 289   _ParkEvent   = ParkEvent::Allocate (this) ;
 290   _SleepEvent  = ParkEvent::Allocate (this) ;
 291   _MutexEvent  = ParkEvent::Allocate (this) ;
 292   _MuxEvent    = ParkEvent::Allocate (this) ;
 293 
 294 #ifdef CHECK_UNHANDLED_OOPS
 295   if (CheckUnhandledOops) {
 296     _unhandled_oops = new UnhandledOops(this);
 297   }
 298 #endif // CHECK_UNHANDLED_OOPS
 299 #ifdef ASSERT
 300   if (UseBiasedLocking) {
 301     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
 302     assert(this == _real_malloc_address ||
 303            this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
 304            "bug in forced alignment of thread objects");
 305   }
 306 #endif /* ASSERT */
 307 
 308   _oom_during_evac = 0;
 309 #if INCLUDE_ALL_GCS
 310   _gc_state = _gc_state_global;
 311   _worker_id = (uint)(-1); // Actually, ShenandoahWorkerSession::INVALID_WORKER_ID, but avoid dependencies.
 312   _force_satb_flush = false;
 313   _paced_time = 0;
 314 #endif
 315 }
 316 
 317 void Thread::set_oom_during_evac(bool oom) {
 318   if (oom) {
 319     _oom_during_evac |= 1;
 320   } else {
 321     _oom_during_evac &= ~1;
 322   }
 323 }
 324 
 325 bool Thread::is_oom_during_evac() const {
 326   return (_oom_during_evac & 1) == 1;
 327 }
 328 
 329 #ifdef ASSERT
 330 void Thread::set_evac_allowed(bool evac_allowed) {
 331   if (evac_allowed) {
 332     _oom_during_evac |= 2;
 333   } else {
 334     _oom_during_evac &= ~2;
 335   }
 336 }
 337 
 338 bool Thread::is_evac_allowed() const {
 339   return (_oom_during_evac & 2) == 2;
 340 }
 341 #endif
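
The two flags above share the single _oom_during_evac byte: bit 0 records that an evacuation hit an OOM, and bit 1 (debug builds only) records whether evacuation is currently allowed. A hedged sketch of an RAII guard built on these accessors; the guard class is hypothetical, not part of this change:

  class EvacAllowedScope {
    Thread* _thread;
   public:
    EvacAllowedScope(Thread* t) : _thread(t) { DEBUG_ONLY(_thread->set_evac_allowed(true);) }
    ~EvacAllowedScope()                      { DEBUG_ONLY(_thread->set_evac_allowed(false);) }
  };
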
 342 
 343 void Thread::initialize_thread_local_storage() {
 344   // Note: Make sure this method only calls
 345   // non-blocking operations. Otherwise, it might not work
 346   // with the thread-startup/safepoint interaction.
 347 
 348   // During Java thread startup, safepoint code should allow this
 349   // method to complete because it may need to allocate memory to
 350   // store information for the new thread.
 351 
 352   // initialize structure dependent on thread local storage
 353   ThreadLocalStorage::set_thread(this);
 354 }
 355 
 356 void Thread::record_stack_base_and_size() {
 357   set_stack_base(os::current_stack_base());
 358   set_stack_size(os::current_stack_size());
 359   if (is_Java_thread()) {
 360     ((JavaThread*) this)->set_stack_overflow_limit();
 361   }
 362   // CR 7190089: on Solaris, primordial thread's stack is adjusted


1526     set_thread_profiler(pp);
1527   }
1528 
1529   // Setup safepoint state info for this thread
1530   ThreadSafepointState::create(this);
1531 
1532   debug_only(_java_call_counter = 0);
1533 
1534   // JVMTI PopFrame support
1535   _popframe_condition = popframe_inactive;
1536   _popframe_preserved_args = NULL;
1537   _popframe_preserved_args_size = 0;
1538   _frames_to_pop_failed_realloc = 0;
1539 
1540   pd_initialize();
1541 }
1542 
1543 #if INCLUDE_ALL_GCS
1544 SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
1545 DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
1546 char Thread::_gc_state_global = 0;
1547 #endif // INCLUDE_ALL_GCS
1548 
1549 JavaThread::JavaThread(bool is_attaching_via_jni) :
1550   Thread()
1551 #if INCLUDE_ALL_GCS
1552   , _satb_mark_queue(&_satb_mark_queue_set),
1553   _dirty_card_queue(&_dirty_card_queue_set)
1554 #endif // INCLUDE_ALL_GCS
1555 {
1556   initialize();
1557   if (is_attaching_via_jni) {
1558     _jni_attach_state = _attaching_via_jni;
1559   } else {
1560     _jni_attach_state = _not_attaching_via_jni;
1561   }
1562   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
1563 }
1564 
1565 bool JavaThread::reguard_stack(address cur_sp) {
1566   if (_stack_guard_state != stack_guard_yellow_disabled) {


1930   remove_stack_guard_pages();
1931 
1932   if (UseTLAB) {
1933     tlab().make_parsable(true);  // retire TLAB
1934   }
1935 
1936   if (JvmtiEnv::environments_might_exist()) {
1937     JvmtiExport::cleanup_thread(this);
1938   }
1939 
1940   // We must flush any deferred card marks before removing a thread from
1941   // the list of active threads.
1942   Universe::heap()->flush_deferred_store_barrier(this);
1943   assert(deferred_card_mark().is_empty(), "Should have been flushed");
1944 
1945 #if INCLUDE_ALL_GCS
1946   // We must flush the G1-related buffers before removing a thread
1947   // from the list of active threads. We must do this after any deferred
1948   // card marks have been flushed (above) so that any entries that are
1949   // added to the thread's dirty card queue as a result are not lost.
1950   if (UseG1GC || UseShenandoahGC) {
1951     flush_barrier_queues();
1952   }
1953   if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) {
1954     gclab().make_parsable(true);
1955   }
1956 #endif // INCLUDE_ALL_GCS
1957 
1958   // Remove from the list of active threads, and notify the VM thread if we are the last non-daemon thread
1959   Threads::remove(this);
1960 }
1961 
1962 #if INCLUDE_ALL_GCS
1963 // Flush G1-related queues.
1964 void JavaThread::flush_barrier_queues() {
1965   satb_mark_queue().flush();
1966   dirty_card_queue().flush();
1967 }
1968 
1969 void JavaThread::initialize_queues() {
1970   assert(!SafepointSynchronize::is_at_safepoint(),
1971          "we should not be at a safepoint");
1972 
1973   ObjPtrQueue& satb_queue = satb_mark_queue();
1974   SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
1975   // The SATB queue should have been constructed with its active
1976   // field set to false.
1977   assert(!satb_queue.is_active(), "SATB queue should not be active");
1978   assert(satb_queue.is_empty(), "SATB queue should be empty");
1979   // If we are creating the thread during a marking cycle, we should
1980   // set the active field of the SATB queue to true.
1981   if (satb_queue_set.is_active()) {
1982     satb_queue.set_active(true);
1983   }
1984 
1985   DirtyCardQueue& dirty_queue = dirty_card_queue();
1986   // The dirty card queue should have been constructed with its
1987   // active field set to true.
1988   assert(dirty_queue.is_active(), "dirty card queue should be active");
1989 
1990   _gc_state = _gc_state_global;
1991 }
1992 
1993 void JavaThread::set_gc_state(char in_prog) {
1994   _gc_state = in_prog;
1995 }
1996 
1997 void JavaThread::set_gc_state_all_threads(char in_prog) {
1998   assert_locked_or_safepoint(Threads_lock);
1999   _gc_state_global = in_prog;
2000   for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
2001     t->set_gc_state(in_prog);
2002   }
2003 }
2004 
2005 void JavaThread::set_force_satb_flush_all_threads(bool value) {
2006   assert_locked_or_safepoint(Threads_lock);
2007   for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
2008     t->set_force_satb_flush(value);
2009   }
2010 }
2011 #endif // INCLUDE_ALL_GCS
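
set_gc_state_all_threads publishes a collector phase change to every Java thread's local _gc_state, alongside the global value that newly created threads copy in initialize_queues above. A hedged sketch of a phase flip; the MARKING value is illustrative, not a constant defined by this patch:

  const char MARKING = 1;                        // illustrative state value
  JavaThread::set_gc_state_all_threads(MARKING); // under Threads_lock or at a safepoint
  // ... concurrent marking runs; per-thread barriers consult their local _gc_state ...
  JavaThread::set_gc_state_all_threads(0);       // phase over
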
2012 
2013 void JavaThread::cleanup_failed_attach_current_thread() {
2014   if (get_thread_profiler() != NULL) {
2015     get_thread_profiler()->disengage();
2016     ResourceMark rm;
2017     get_thread_profiler()->print(get_thread_name());
2018   }
2019 
2020   if (active_handles() != NULL) {
2021     JNIHandleBlock* block = active_handles();
2022     set_active_handles(NULL);
2023     JNIHandleBlock::release_block(block);
2024   }
2025 
2026   if (free_handle_block() != NULL) {
2027     JNIHandleBlock* block = free_handle_block();
2028     set_free_handle_block(NULL);
2029     JNIHandleBlock::release_block(block);
2030   }
2031 
2032   // These have to be removed while this is still a valid thread.
2033   remove_stack_guard_pages();
2034 
2035   if (UseTLAB) {
2036     tlab().make_parsable(true);  // retire TLAB, if any
2037   }
2038 
2039 #if INCLUDE_ALL_GCS
2040   if (UseG1GC || UseShenandoahGC) {
2041     flush_barrier_queues();
2042   }
2043   if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) {
2044     gclab().make_parsable(true);
2045   }
2046 #endif // INCLUDE_ALL_GCS
2047 
2048   Threads::remove(this);
2049   delete this;
2050 }
2051 
2052 
2053 
2054 
2055 JavaThread* JavaThread::active() {
2056   Thread* thread = ThreadLocalStorage::thread();
2057   assert(thread != NULL, "just checking");
2058   if (thread->is_Java_thread()) {
2059     return (JavaThread*) thread;
2060   } else {
2061     assert(thread->is_VM_thread(), "this must be a vm thread");
2062     VM_Operation* op = ((VMThread*) thread)->vm_operation();
2063     JavaThread* ret = (op == NULL) ? NULL : (JavaThread*) op->calling_thread();
2064     assert(ret->is_Java_thread(), "must be a Java thread");
2065     return ret;


3330 // ======= Threads ========
3331 
3332 // The Threads class links together all active threads, and provides
3333 // operations over all threads.  It is protected by its own Mutex
3334 // lock, which is also used in other contexts to keep the thread
3335 // being operated on from exiting and going away unexpectedly
3336 // (e.g., during safepoint synchronization).
3337 
3338 JavaThread* Threads::_thread_list = NULL;
3339 int         Threads::_number_of_threads = 0;
3340 int         Threads::_number_of_non_daemon_threads = 0;
3341 int         Threads::_return_code = 0;
3342 size_t      JavaThread::_stack_size_at_create = 0;
3343 #ifdef ASSERT
3344 bool        Threads::_vm_complete = false;
3345 #endif
3346 
3347 // All JavaThreads
3348 #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
3349 
3350 void Threads::java_threads_do(ThreadClosure* tc) {
3351   assert_locked_or_safepoint(Threads_lock);
3352   ALL_JAVA_THREADS(p) {
3353     tc->do_thread(p);
3354   }
3355 }
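
java_threads_do is a helper introduced by this change, presumably so GC code can visit only the Java threads. A minimal usage sketch with the standard ThreadClosure; the counting closure is hypothetical:

  class CountThreadsClosure : public ThreadClosure {
   public:
    int _n;
    CountThreadsClosure() : _n(0) {}
    virtual void do_thread(Thread* t) { _n++; }
  };

  // Caller must hold the Threads_lock or be at a safepoint:
  CountThreadsClosure cl;
  Threads::java_threads_do(&cl);
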
3356 
3357 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
3358 void Threads::threads_do(ThreadClosure* tc) {
3359   assert_locked_or_safepoint(Threads_lock);
3360   // ALL_JAVA_THREADS iterates through all JavaThreads
3361   ALL_JAVA_THREADS(p) {
3362     tc->do_thread(p);
3363   }
3364   // Someday we could have a table or list of all non-JavaThreads.
3365   // For now, just manually iterate through them.
3366   tc->do_thread(VMThread::vm_thread());
3367   Universe::heap()->gc_threads_do(tc);
3368   WatcherThread *wt = WatcherThread::watcher_thread();
3369   // Strictly speaking, the following NULL check isn't sufficient to make sure
3370   // the data for WatcherThread is still valid upon being examined. However,
3371   // considering that WatcherThread terminates when the VM is on the way to
3372   // exit at safepoint, the chance of the above is extremely small. The right
3373   // way to prevent termination of WatcherThread would be to acquire
3374   // Terminator_lock, but we can't do that without violating the lock rank
3375   // checking in some cases.
3376   if (wt != NULL)


3653 
3654   // record VM initialization completion time
3655 #if INCLUDE_MANAGEMENT
3656   Management::record_vm_init_completed();
3657 #endif // INCLUDE_MANAGEMENT
3658 
3659   // Compute system loader. Note that this has to occur after set_init_completed, since
3660   // valid exceptions may be thrown in the process.
3661   // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
3662   // set_init_completed has just been called, causing exceptions not to be shortcut
3663   // anymore. We call vm_exit_during_initialization directly instead.
3664   SystemDictionary::compute_java_system_loader(THREAD);
3665   if (HAS_PENDING_EXCEPTION) {
3666     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3667   }
3668 
3669 #if INCLUDE_ALL_GCS
3670   // Support for ConcurrentMarkSweep. This should be cleaned up
3671   // and better encapsulated. The ugly nested if test would go away
3672   // once things are properly refactored. XXX YSR
3673   if (UseConcMarkSweepGC || UseG1GC || UseShenandoahGC) {
3674     if (UseConcMarkSweepGC) {
3675       ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
3676     } else if (UseShenandoahGC) {
3677       ShenandoahControlThread::makeSurrogateLockerThread(THREAD);
3678     } else {
3679       ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
3680     }
3681     if (HAS_PENDING_EXCEPTION) {
3682       vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3683     }
3684   }
3685 #endif // INCLUDE_ALL_GCS
3686 
3687   // Always call even when there are no JVMTI environments yet, since environments
3688   // may be attached late and JVMTI must track phases of VM execution
3689   JvmtiExport::enter_live_phase();
3690 
3691   // Signal Dispatcher needs to be started before VMInit event is posted
3692   os::signal_init();
3693 
3694   // Start Attach Listener if +StartAttachListener or it can't be started lazily
3695   if (!DisableAttachMechanism) {
3696     AttachListener::vm_start();
3697     if (StartAttachListener || AttachListener::init_at_startup()) {


4250 void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4251   ALL_JAVA_THREADS(p) {
4252     p->oops_do(f, cld_f, cf);
4253   }
4254   VMThread::vm_thread()->oops_do(f, cld_f, cf);
4255 }
4256 
4257 void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4258   // Introduce a mechanism allowing parallel threads to claim threads as
4259   // root groups.  Overhead should be small enough to use all the time,
4260   // even in sequential code.
4261   SharedHeap* sh = SharedHeap::heap();
4262   // Cannot yet substitute active_workers for n_par_threads
4263   // because of G1CollectedHeap::verify() use of
4264   // SharedHeap::process_roots().  n_par_threads == 0 will
4265   // turn off parallelism in process_roots while active_workers
4266   // is being used for parallelism elsewhere.
4267   bool is_par = sh->n_par_threads() > 0;
4268   assert(!is_par ||
4269          (SharedHeap::heap()->n_par_threads() ==
4270           SharedHeap::heap()->workers()->active_workers()
4271           || UseShenandoahGC), "Mismatch");
4272   int cp = SharedHeap::heap()->strong_roots_parity();
4273   ALL_JAVA_THREADS(p) {
4274     if (p->claim_oops_do(is_par, cp)) {
4275       p->oops_do(f, cld_f, cf);
4276     }
4277   }
4278   VMThread* vmt = VMThread::vm_thread();
4279   if (vmt->claim_oops_do(is_par, cp)) {
4280     vmt->oops_do(f, cld_f, cf);
4281   }
4282 }
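
possibly_parallel_oops_do relies on claim_oops_do so that, when several GC workers walk the thread list concurrently, each thread's roots are processed exactly once per strong-roots phase; the assert above is relaxed for Shenandoah, which manages its own worker counts. A hedged, simplified sketch of the parity-claim idea; names are illustrative, not the exact HotSpot code:

  bool claim(volatile jint* thread_parity, jint new_parity) {
    jint old = *thread_parity;
    if (old == new_parity) return false;  // some worker already claimed this thread
    // CAS so that exactly one worker wins the claim for this parity round:
    return Atomic::cmpxchg(new_parity, thread_parity, old) == old;
  }
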
4283 
4284 #if INCLUDE_ALL_GCS
4285 // Used by ParallelScavenge
4286 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
4287   ALL_JAVA_THREADS(p) {
4288     q->enqueue(new ThreadRootsTask(p));
4289   }
4290   q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
4291 }

