src/share/vm/runtime/thread.cpp (old version, before the Shenandoah changes)

  79 #include "services/memTracker.hpp"
  80 #include "services/threadService.hpp"
  81 #include "utilities/defaultStream.hpp"
  82 #include "utilities/dtrace.hpp"
  83 #include "utilities/events.hpp"
  84 #include "utilities/preserveException.hpp"
  85 #include "utilities/macros.hpp"
  86 #ifdef TARGET_OS_FAMILY_linux
  87 # include "os_linux.inline.hpp"
  88 #endif
  89 #ifdef TARGET_OS_FAMILY_solaris
  90 # include "os_solaris.inline.hpp"
  91 #endif
  92 #ifdef TARGET_OS_FAMILY_windows
  93 # include "os_windows.inline.hpp"
  94 #endif
  95 #ifdef TARGET_OS_FAMILY_bsd
  96 # include "os_bsd.inline.hpp"
  97 #endif
  98 #if INCLUDE_ALL_GCS
  99 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 100 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 101 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 102 #endif // INCLUDE_ALL_GCS
 103 #ifdef COMPILER1
 104 #include "c1/c1_Compiler.hpp"
 105 #endif
 106 #ifdef COMPILER2
 107 #include "opto/c2compiler.hpp"
 108 #include "opto/idealGraphPrinter.hpp"
 109 #endif
 110 #if INCLUDE_RTM_OPT
 111 #include "runtime/rtmLocking.hpp"
 112 #endif
 113 #if INCLUDE_JFR
 114 #include "jfr/jfr.hpp"
 115 #endif
 116 
 117 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 118 


 286   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
 287   // and ::Release()
 288   _ParkEvent   = ParkEvent::Allocate (this) ;
 289   _SleepEvent  = ParkEvent::Allocate (this) ;
 290   _MutexEvent  = ParkEvent::Allocate (this) ;
 291   _MuxEvent    = ParkEvent::Allocate (this) ;
 292 
 293 #ifdef CHECK_UNHANDLED_OOPS
 294   if (CheckUnhandledOops) {
 295     _unhandled_oops = new UnhandledOops(this);
 296   }
 297 #endif // CHECK_UNHANDLED_OOPS
 298 #ifdef ASSERT
 299   if (UseBiasedLocking) {
 300     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
 301     assert(this == _real_malloc_address ||
 302            this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
 303            "bug in forced alignment of thread objects");
 304   }
 305 #endif /* ASSERT */
 306 }
 307 
 308 void Thread::initialize_thread_local_storage() {
 309   // Note: Make sure this method only calls
 310   // non-blocking operations. Otherwise, it might not work
 311   // with the thread-startup/safepoint interaction.
 312 
 313   // During Java thread startup, safepoint code should allow this
 314   // method to complete because it may need to allocate memory to
 315   // store information for the new thread.
 316 
 317   // initialize structure dependent on thread local storage
 318   ThreadLocalStorage::set_thread(this);
 319 }
 320 
 321 void Thread::record_stack_base_and_size() {
 322   set_stack_base(os::current_stack_base());
 323   set_stack_size(os::current_stack_size());
 324   if (is_Java_thread()) {
 325     ((JavaThread*) this)->set_stack_overflow_limit();
 326   }
 327   // CR 7190089: on Solaris, primordial thread's stack is adjusted


1491     set_thread_profiler(pp);
1492   }
1493 
1494   // Setup safepoint state info for this thread
1495   ThreadSafepointState::create(this);
1496 
1497   debug_only(_java_call_counter = 0);
1498 
1499   // JVMTI PopFrame support
1500   _popframe_condition = popframe_inactive;
1501   _popframe_preserved_args = NULL;
1502   _popframe_preserved_args_size = 0;
1503   _frames_to_pop_failed_realloc = 0;
1504 
1505   pd_initialize();
1506 }
1507 
1508 #if INCLUDE_ALL_GCS
1509 SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
1510 DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
1511 #endif // INCLUDE_ALL_GCS
1512 
1513 JavaThread::JavaThread(bool is_attaching_via_jni) :
1514   Thread()
1515 #if INCLUDE_ALL_GCS
1516   , _satb_mark_queue(&_satb_mark_queue_set),
1517   _dirty_card_queue(&_dirty_card_queue_set)
1518 #endif // INCLUDE_ALL_GCS
1519 {
1520   initialize();
1521   if (is_attaching_via_jni) {
1522     _jni_attach_state = _attaching_via_jni;
1523   } else {
1524     _jni_attach_state = _not_attaching_via_jni;
1525   }
1526   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
1527 }
1528 
1529 bool JavaThread::reguard_stack(address cur_sp) {
1530   if (_stack_guard_state != stack_guard_yellow_disabled) {
1531     return true; // Stack already guarded or guard pages not needed.
1532   }
1533 
1534   if (register_stack_overflow()) {
1535     // For those architectures which have separate register and
1536     // memory stacks, we must check the register stack to see if
1537     // it has overflowed.


1554 }
1555 
1556 
1557 void JavaThread::block_if_vm_exited() {
1558   if (_terminated == _vm_exited) {
 1559     // _vm_exited is set at a safepoint, and the Threads_lock is never released,
 1560     // so we will block here forever
1561     Threads_lock->lock_without_safepoint_check();
1562     ShouldNotReachHere();
1563   }
1564 }
1565 
1566 
1567 // Remove this ifdef when C1 is ported to the compiler interface.
1568 static void compiler_thread_entry(JavaThread* thread, TRAPS);
1569 
1570 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
1571   Thread()
1572 #if INCLUDE_ALL_GCS
1573   , _satb_mark_queue(&_satb_mark_queue_set),
1574   _dirty_card_queue(&_dirty_card_queue_set)
1575 #endif // INCLUDE_ALL_GCS
1576 {
1577   if (TraceThreadEvents) {
1578     tty->print_cr("creating thread %p", this);
1579   }
1580   initialize();
1581   _jni_attach_state = _not_attaching_via_jni;
1582   set_entry_point(entry_point);
1583   // Create the native thread itself.
1584   // %note runtime_23
1585   os::ThreadType thr_type = os::java_thread;
1586   thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
1587                                                      os::java_thread;
1588   os::create_thread(this, thr_type, stack_sz);
1589   // The _osthread may be NULL here because we ran out of memory (too many threads active).
 1590   // We need to throw an OutOfMemoryError - however we cannot do this here because the caller
1591   // may hold a lock and all locks must be unlocked before throwing the exception (throwing
1592   // the exception consists of creating the exception object & initializing it, initialization
1593   // will leave the VM via a JavaCall and then all locks must be unlocked).
1594   //


1894   remove_stack_guard_pages();
1895 
1896   if (UseTLAB) {
1897     tlab().make_parsable(true);  // retire TLAB
1898   }
1899 
1900   if (JvmtiEnv::environments_might_exist()) {
1901     JvmtiExport::cleanup_thread(this);
1902   }
1903 
1904   // We must flush any deferred card marks before removing a thread from
1905   // the list of active threads.
1906   Universe::heap()->flush_deferred_store_barrier(this);
1907   assert(deferred_card_mark().is_empty(), "Should have been flushed");
1908 
1909 #if INCLUDE_ALL_GCS
1910   // We must flush the G1-related buffers before removing a thread
1911   // from the list of active threads. We must do this after any deferred
1912   // card marks have been flushed (above) so that any entries that are
1913   // added to the thread's dirty card queue as a result are not lost.
1914   if (UseG1GC) {
1915     flush_barrier_queues();
1916   }
1917 #endif // INCLUDE_ALL_GCS
1918 
 1919   // Remove from the list of active threads, and notify the VM thread if we are the last non-daemon thread
1920   Threads::remove(this);
1921 }
1922 
1923 #if INCLUDE_ALL_GCS
1924 // Flush G1-related queues.
1925 void JavaThread::flush_barrier_queues() {
1926   satb_mark_queue().flush();
1927   dirty_card_queue().flush();
1928 }
1929 
1930 void JavaThread::initialize_queues() {
1931   assert(!SafepointSynchronize::is_at_safepoint(),
1932          "we should not be at a safepoint");
1933 
1934   ObjPtrQueue& satb_queue = satb_mark_queue();
1935   SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
1936   // The SATB queue should have been constructed with its active
1937   // field set to false.
1938   assert(!satb_queue.is_active(), "SATB queue should not be active");
1939   assert(satb_queue.is_empty(), "SATB queue should be empty");
1940   // If we are creating the thread during a marking cycle, we should
1941   // set the active field of the SATB queue to true.
1942   if (satb_queue_set.is_active()) {
1943     satb_queue.set_active(true);
1944   }
1945 
1946   DirtyCardQueue& dirty_queue = dirty_card_queue();
1947   // The dirty card queue should have been constructed with its
1948   // active field set to true.
1949   assert(dirty_queue.is_active(), "dirty card queue should be active");
1950 }
1951 #endif // INCLUDE_ALL_GCS
1952 
1953 void JavaThread::cleanup_failed_attach_current_thread() {
1954   if (get_thread_profiler() != NULL) {
1955     get_thread_profiler()->disengage();
1956     ResourceMark rm;
1957     get_thread_profiler()->print(get_thread_name());
1958   }
1959 
1960   if (active_handles() != NULL) {
1961     JNIHandleBlock* block = active_handles();
1962     set_active_handles(NULL);
1963     JNIHandleBlock::release_block(block);
1964   }
1965 
1966   if (free_handle_block() != NULL) {
1967     JNIHandleBlock* block = free_handle_block();
1968     set_free_handle_block(NULL);
1969     JNIHandleBlock::release_block(block);
1970   }
1971 
1972   // These have to be removed while this is still a valid thread.
1973   remove_stack_guard_pages();
1974 
1975   if (UseTLAB) {
1976     tlab().make_parsable(true);  // retire TLAB, if any
1977   }
1978 
1979 #if INCLUDE_ALL_GCS
1980   if (UseG1GC) {
1981     flush_barrier_queues();
1982   }
1983 #endif // INCLUDE_ALL_GCS
1984 
1985   Threads::remove(this);
1986   delete this;
1987 }
1988 
1989 
1990 
1991 
1992 JavaThread* JavaThread::active() {
1993   Thread* thread = ThreadLocalStorage::thread();
1994   assert(thread != NULL, "just checking");
1995   if (thread->is_Java_thread()) {
1996     return (JavaThread*) thread;
1997   } else {
1998     assert(thread->is_VM_thread(), "this must be a vm thread");
1999     VM_Operation* op = ((VMThread*) thread)->vm_operation();
 2000     JavaThread* ret = op == NULL ? NULL : (JavaThread*) op->calling_thread();
2001     assert(ret->is_Java_thread(), "must be a Java thread");
2002     return ret;


3267 // ======= Threads ========
3268 
3269 // The Threads class links together all active threads, and provides
3270 // operations over all threads.  It is protected by its own Mutex
3271 // lock, which is also used in other contexts to protect thread
3272 // operations from having the thread being operated on from exiting
3273 // and going away unexpectedly (e.g., safepoint synchronization)
3274 
3275 JavaThread* Threads::_thread_list = NULL;
3276 int         Threads::_number_of_threads = 0;
3277 int         Threads::_number_of_non_daemon_threads = 0;
3278 int         Threads::_return_code = 0;
3279 size_t      JavaThread::_stack_size_at_create = 0;
3280 #ifdef ASSERT
3281 bool        Threads::_vm_complete = false;
3282 #endif
3283 
3284 // All JavaThreads
3285 #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
3286 
3287 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
3288 void Threads::threads_do(ThreadClosure* tc) {
3289   assert_locked_or_safepoint(Threads_lock);
3290   // ALL_JAVA_THREADS iterates through all JavaThreads
3291   ALL_JAVA_THREADS(p) {
3292     tc->do_thread(p);
3293   }
3294   // Someday we could have a table or list of all non-JavaThreads.
3295   // For now, just manually iterate through them.
3296   tc->do_thread(VMThread::vm_thread());
3297   Universe::heap()->gc_threads_do(tc);
3298   WatcherThread *wt = WatcherThread::watcher_thread();
3299   // Strictly speaking, the following NULL check isn't sufficient to make sure
3300   // the data for WatcherThread is still valid upon being examined. However,
 3301   // considering that the WatcherThread terminates when the VM is on its way to
3302   // exit at safepoint, the chance of the above is extremely small. The right
3303   // way to prevent termination of WatcherThread would be to acquire
3304   // Terminator_lock, but we can't do that without violating the lock rank
3305   // checking in some cases.
3306   if (wt != NULL)


3582 
3583   // record VM initialization completion time
3584 #if INCLUDE_MANAGEMENT
3585   Management::record_vm_init_completed();
3586 #endif // INCLUDE_MANAGEMENT
3587 
3588   // Compute system loader. Note that this has to occur after set_init_completed, since
3589   // valid exceptions may be thrown in the process.
3590   // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
3591   // set_init_completed has just been called, causing exceptions not to be shortcut
3592   // anymore. We call vm_exit_during_initialization directly instead.
3593   SystemDictionary::compute_java_system_loader(THREAD);
3594   if (HAS_PENDING_EXCEPTION) {
3595     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3596   }
3597 
3598 #if INCLUDE_ALL_GCS
3599   // Support for ConcurrentMarkSweep. This should be cleaned up
3600   // and better encapsulated. The ugly nested if test would go away
3601   // once things are properly refactored. XXX YSR
3602   if (UseConcMarkSweepGC || UseG1GC) {
3603     if (UseConcMarkSweepGC) {
3604       ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
3605     } else {
3606       ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
3607     }
3608     if (HAS_PENDING_EXCEPTION) {
3609       vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3610     }
3611   }
3612 #endif // INCLUDE_ALL_GCS
3613 
 3614   // Always call even when there are no JVMTI environments yet, since environments
3615   // may be attached late and JVMTI must track phases of VM execution
3616   JvmtiExport::enter_live_phase();
3617 
3618   // Signal Dispatcher needs to be started before VMInit event is posted
3619   os::signal_init();
3620 
3621   // Start Attach Listener if +StartAttachListener or it can't be started lazily
3622   if (!DisableAttachMechanism) {
3623     AttachListener::vm_start();
3624     if (StartAttachListener || AttachListener::init_at_startup()) {


4177 void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4178   ALL_JAVA_THREADS(p) {
4179     p->oops_do(f, cld_f, cf);
4180   }
4181   VMThread::vm_thread()->oops_do(f, cld_f, cf);
4182 }
4183 
4184 void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4185   // Introduce a mechanism allowing parallel threads to claim threads as
4186   // root groups.  Overhead should be small enough to use all the time,
4187   // even in sequential code.
4188   SharedHeap* sh = SharedHeap::heap();
4189   // Cannot yet substitute active_workers for n_par_threads
4190   // because of G1CollectedHeap::verify() use of
4191   // SharedHeap::process_roots().  n_par_threads == 0 will
4192   // turn off parallelism in process_roots while active_workers
4193   // is being used for parallelism elsewhere.
4194   bool is_par = sh->n_par_threads() > 0;
4195   assert(!is_par ||
4196          (SharedHeap::heap()->n_par_threads() ==
4197           SharedHeap::heap()->workers()->active_workers()), "Mismatch");
4198   int cp = SharedHeap::heap()->strong_roots_parity();
4199   ALL_JAVA_THREADS(p) {
4200     if (p->claim_oops_do(is_par, cp)) {
4201       p->oops_do(f, cld_f, cf);
4202     }
4203   }
4204   VMThread* vmt = VMThread::vm_thread();
4205   if (vmt->claim_oops_do(is_par, cp)) {
4206     vmt->oops_do(f, cld_f, cf);
4207   }
4208 }
4209 
4210 #if INCLUDE_ALL_GCS
4211 // Used by ParallelScavenge
4212 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
4213   ALL_JAVA_THREADS(p) {
4214     q->enqueue(new ThreadRootsTask(p));
4215   }
4216   q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
4217 }

src/share/vm/runtime/thread.cpp (new version, with the Shenandoah changes)

  79 #include "services/memTracker.hpp"
  80 #include "services/threadService.hpp"
  81 #include "utilities/defaultStream.hpp"
  82 #include "utilities/dtrace.hpp"
  83 #include "utilities/events.hpp"
  84 #include "utilities/preserveException.hpp"
  85 #include "utilities/macros.hpp"
  86 #ifdef TARGET_OS_FAMILY_linux
  87 # include "os_linux.inline.hpp"
  88 #endif
  89 #ifdef TARGET_OS_FAMILY_solaris
  90 # include "os_solaris.inline.hpp"
  91 #endif
  92 #ifdef TARGET_OS_FAMILY_windows
  93 # include "os_windows.inline.hpp"
  94 #endif
  95 #ifdef TARGET_OS_FAMILY_bsd
  96 # include "os_bsd.inline.hpp"
  97 #endif
  98 #if INCLUDE_ALL_GCS
  99 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
 100 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 101 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 102 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 103 #endif // INCLUDE_ALL_GCS
 104 #ifdef COMPILER1
 105 #include "c1/c1_Compiler.hpp"
 106 #endif
 107 #ifdef COMPILER2
 108 #include "opto/c2compiler.hpp"
 109 #include "opto/idealGraphPrinter.hpp"
 110 #endif
 111 #if INCLUDE_RTM_OPT
 112 #include "runtime/rtmLocking.hpp"
 113 #endif
 114 #if INCLUDE_JFR
 115 #include "jfr/jfr.hpp"
 116 #endif
 117 
 118 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 119 


 287   // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
 288   // and ::Release()
 289   _ParkEvent   = ParkEvent::Allocate (this) ;
 290   _SleepEvent  = ParkEvent::Allocate (this) ;
 291   _MutexEvent  = ParkEvent::Allocate (this) ;
 292   _MuxEvent    = ParkEvent::Allocate (this) ;
 293 
 294 #ifdef CHECK_UNHANDLED_OOPS
 295   if (CheckUnhandledOops) {
 296     _unhandled_oops = new UnhandledOops(this);
 297   }
 298 #endif // CHECK_UNHANDLED_OOPS
 299 #ifdef ASSERT
 300   if (UseBiasedLocking) {
 301     assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
 302     assert(this == _real_malloc_address ||
 303            this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
 304            "bug in forced alignment of thread objects");
 305   }
 306 #endif /* ASSERT */
 307 
 308   _oom_during_evac = 0;
 309 #if INCLUDE_ALL_GCS
 310   _gc_state = _gc_state_global;
 311   _worker_id = (uint)(-1); // Actually, ShenandoahWorkerSession::INVALID_WORKER_ID, but avoid dependencies.
 312   _force_satb_flush = false;
 313   _paced_time = 0;
 314 #endif
 315 }
 316 
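The comment above the ParkEvent allocations (line 287) alludes to keeping released events on a stack that serves as a cache, so threads can reuse event objects instead of going through the underlying allocator each time. As a rough illustration of that idea only (not HotSpot's actual ParkEvent implementation; the Event and EventCache names are hypothetical), a LIFO free list would look like this:

    // Illustrative LIFO free-list cache for event objects; not HotSpot code.
    #include <mutex>

    struct Event {
      Event* next = nullptr;                 // free-list link
    };

    class EventCache {
      Event*     _free_list = nullptr;
      std::mutex _lock;
    public:
      Event* allocate() {
        std::lock_guard<std::mutex> g(_lock);
        if (_free_list != nullptr) {         // cache hit: pop and reuse
          Event* e = _free_list;
          _free_list = e->next;
          return e;
        }
        return new Event();                  // cache miss: allocate fresh
      }
      void release(Event* e) {
        std::lock_guard<std::mutex> g(_lock);
        e->next = _free_list;                // push back onto the cache
        _free_list = e;
      }
    };
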
 317 void Thread::set_oom_during_evac(bool oom) {
 318   if (oom) {
 319     _oom_during_evac |= 1;
 320   } else {
 321     _oom_during_evac &= ~1;
 322   }
 323 }
 324 
 325 bool Thread::is_oom_during_evac() const {
 326   return (_oom_during_evac & 1) == 1;
 327 }
 328 
 329 #ifdef ASSERT
 330 void Thread::set_evac_allowed(bool evac_allowed) {
 331   if (evac_allowed) {
 332     _oom_during_evac |= 2;
 333   } else {
 334     _oom_during_evac &= ~2;
 335   }
 336 }
 337 
 338 bool Thread::is_evac_allowed() const {
 339   return (_oom_during_evac & 2) == 2;
 340 }
 341 #endif
 342 
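Note how the patch packs two independent flags into the single _oom_during_evac field: bit 0 records an OOM during evacuation (lines 317-327), and bit 1, maintained only in debug builds, records whether evacuation is currently allowed (lines 329-341). A standalone sketch of the same bit-packing idiom (the Flags type is hypothetical, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    struct Flags {
      uint8_t bits = 0;
      void set_oom(bool v)     { bits = v ? (bits | 1) : (bits & ~1); }  // bit 0
      bool oom() const         { return (bits & 1) != 0; }
      void set_allowed(bool v) { bits = v ? (bits | 2) : (bits & ~2); }  // bit 1
      bool allowed() const     { return (bits & 2) != 0; }
    };

    int main() {
      Flags f;
      f.set_oom(true);
      f.set_allowed(true);
      assert(f.oom() && f.allowed());
      f.set_oom(false);                 // clearing one bit leaves the other intact
      assert(!f.oom() && f.allowed());
      return 0;
    }
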
 343 void Thread::initialize_thread_local_storage() {
 344   // Note: Make sure this method only calls
 345   // non-blocking operations. Otherwise, it might not work
 346   // with the thread-startup/safepoint interaction.
 347 
 348   // During Java thread startup, safepoint code should allow this
 349   // method to complete because it may need to allocate memory to
 350   // store information for the new thread.
 351 
 352   // initialize structure dependent on thread local storage
 353   ThreadLocalStorage::set_thread(this);
 354 }
 355 
 356 void Thread::record_stack_base_and_size() {
 357   set_stack_base(os::current_stack_base());
 358   set_stack_size(os::current_stack_size());
 359   if (is_Java_thread()) {
 360     ((JavaThread*) this)->set_stack_overflow_limit();
 361   }
 362   // CR 7190089: on Solaris, primordial thread's stack is adjusted


1526     set_thread_profiler(pp);
1527   }
1528 
1529   // Setup safepoint state info for this thread
1530   ThreadSafepointState::create(this);
1531 
1532   debug_only(_java_call_counter = 0);
1533 
1534   // JVMTI PopFrame support
1535   _popframe_condition = popframe_inactive;
1536   _popframe_preserved_args = NULL;
1537   _popframe_preserved_args_size = 0;
1538   _frames_to_pop_failed_realloc = 0;
1539 
1540   pd_initialize();
1541 }
1542 
1543 #if INCLUDE_ALL_GCS
1544 SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
1545 DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
1546 char Thread::_gc_state_global = 0;
1547 #endif // INCLUDE_ALL_GCS
1548 
1549 JavaThread::JavaThread(bool is_attaching_via_jni) :
1550   Thread()
1551 #if INCLUDE_ALL_GCS
1552   , _satb_mark_queue(&_satb_mark_queue_set),
1553     _dirty_card_queue(&_dirty_card_queue_set)
1554 #endif // INCLUDE_ALL_GCS
1555 {
1556   initialize();
1557   if (is_attaching_via_jni) {
1558     _jni_attach_state = _attaching_via_jni;
1559   } else {
1560     _jni_attach_state = _not_attaching_via_jni;
1561   }
1562   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
1563 }
1564 
1565 bool JavaThread::reguard_stack(address cur_sp) {
1566   if (_stack_guard_state != stack_guard_yellow_disabled) {
1567     return true; // Stack already guarded or guard pages not needed.
1568   }
1569 
1570   if (register_stack_overflow()) {
1571     // For those architectures which have separate register and
1572     // memory stacks, we must check the register stack to see if
1573     // it has overflowed.


1590 }
1591 
1592 
1593 void JavaThread::block_if_vm_exited() {
1594   if (_terminated == _vm_exited) {
 1595     // _vm_exited is set at a safepoint, and the Threads_lock is never released,
 1596     // so we will block here forever
1597     Threads_lock->lock_without_safepoint_check();
1598     ShouldNotReachHere();
1599   }
1600 }
1601 
1602 
1603 // Remove this ifdef when C1 is ported to the compiler interface.
1604 static void compiler_thread_entry(JavaThread* thread, TRAPS);
1605 
1606 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
1607   Thread()
1608 #if INCLUDE_ALL_GCS
1609   , _satb_mark_queue(&_satb_mark_queue_set),
1610     _dirty_card_queue(&_dirty_card_queue_set)
1611 #endif // INCLUDE_ALL_GCS
1612 {
1613   if (TraceThreadEvents) {
1614     tty->print_cr("creating thread %p", this);
1615   }
1616   initialize();
1617   _jni_attach_state = _not_attaching_via_jni;
1618   set_entry_point(entry_point);
1619   // Create the native thread itself.
1620   // %note runtime_23
1621   os::ThreadType thr_type = os::java_thread;
1622   thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
1623                                                      os::java_thread;
1624   os::create_thread(this, thr_type, stack_sz);
1625   // The _osthread may be NULL here because we ran out of memory (too many threads active).
1626   // We need to throw and OutOfMemoryError - however we cannot do this here because the caller
1627   // may hold a lock and all locks must be unlocked before throwing the exception (throwing
1628   // the exception consists of creating the exception object & initializing it, initialization
1629   // will leave the VM via a JavaCall and then all locks must be unlocked).
1630   //


1930   remove_stack_guard_pages();
1931 
1932   if (UseTLAB) {
1933     tlab().make_parsable(true);  // retire TLAB
1934   }
1935 
1936   if (JvmtiEnv::environments_might_exist()) {
1937     JvmtiExport::cleanup_thread(this);
1938   }
1939 
1940   // We must flush any deferred card marks before removing a thread from
1941   // the list of active threads.
1942   Universe::heap()->flush_deferred_store_barrier(this);
1943   assert(deferred_card_mark().is_empty(), "Should have been flushed");
1944 
1945 #if INCLUDE_ALL_GCS
1946   // We must flush the G1-related buffers before removing a thread
1947   // from the list of active threads. We must do this after any deferred
1948   // card marks have been flushed (above) so that any entries that are
1949   // added to the thread's dirty card queue as a result are not lost.
1950   if (UseG1GC || (UseShenandoahGC)) {
1951     flush_barrier_queues();
1952   }
1953   if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) {
1954     gclab().make_parsable(true);
1955   }
1956 #endif // INCLUDE_ALL_GCS
1957 
 1958   // Remove from the list of active threads, and notify the VM thread if we are the last non-daemon thread
1959   Threads::remove(this);
1960 }
1961 
1962 #if INCLUDE_ALL_GCS
1963 // Flush G1-related queues.
1964 void JavaThread::flush_barrier_queues() {
1965   satb_mark_queue().flush();
1966   dirty_card_queue().flush();
1967 }
1968 
1969 void JavaThread::initialize_queues() {
1970   assert(!SafepointSynchronize::is_at_safepoint(),
1971          "we should not be at a safepoint");
1972 
1973   ObjPtrQueue& satb_queue = satb_mark_queue();
1974   SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
1975   // The SATB queue should have been constructed with its active
1976   // field set to false.
1977   assert(!satb_queue.is_active(), "SATB queue should not be active");
1978   assert(satb_queue.is_empty(), "SATB queue should be empty");
1979   // If we are creating the thread during a marking cycle, we should
1980   // set the active field of the SATB queue to true.
1981   if (satb_queue_set.is_active()) {
1982     satb_queue.set_active(true);
1983   }
1984 
1985   DirtyCardQueue& dirty_queue = dirty_card_queue();
1986   // The dirty card queue should have been constructed with its
1987   // active field set to true.
1988   assert(dirty_queue.is_active(), "dirty card queue should be active");
1989 
1990   _gc_state = _gc_state_global;
1991 }
1992 
1993 void JavaThread::set_gc_state(char in_prog) {
1994   _gc_state = in_prog;
1995 }
1996 
1997 void JavaThread::set_gc_state_all_threads(char in_prog) {
1998   assert_locked_or_safepoint(Threads_lock);
1999   _gc_state_global = in_prog;
2000   for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
2001     t->set_gc_state(in_prog);
2002   }
2003 }
2004 
2005 void JavaThread::set_force_satb_flush_all_threads(bool value) {
2006   assert_locked_or_safepoint(Threads_lock);
2007   for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
2008     t->set_force_satb_flush(value);
2009   }
2010 }
2011 #endif // INCLUDE_ALL_GCS
2012 
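The two *_all_threads helpers above implement a broadcast pattern: a global value (_gc_state_global) is updated together with a per-thread cached copy, all under Threads_lock or at a safepoint, so each JavaThread can later read its own copy without synchronization; newly created threads pick the value up in initialize_queues() via _gc_state = _gc_state_global. A simplified standalone model of the pattern (names here are illustrative, not the HotSpot classes):

    #include <mutex>
    #include <vector>

    static char               g_state_global = 0;
    static std::vector<char*> g_thread_states;   // one cached copy per live thread
    static std::mutex         g_threads_lock;    // stands in for Threads_lock

    void set_state_all_threads(char in_prog) {
      std::lock_guard<std::mutex> g(g_threads_lock);
      g_state_global = in_prog;                  // threads created later copy this
      for (char* s : g_thread_states) {          // existing threads updated in place
        *s = in_prog;
      }
    }
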
2013 void JavaThread::cleanup_failed_attach_current_thread() {
2014   if (get_thread_profiler() != NULL) {
2015     get_thread_profiler()->disengage();
2016     ResourceMark rm;
2017     get_thread_profiler()->print(get_thread_name());
2018   }
2019 
2020   if (active_handles() != NULL) {
2021     JNIHandleBlock* block = active_handles();
2022     set_active_handles(NULL);
2023     JNIHandleBlock::release_block(block);
2024   }
2025 
2026   if (free_handle_block() != NULL) {
2027     JNIHandleBlock* block = free_handle_block();
2028     set_free_handle_block(NULL);
2029     JNIHandleBlock::release_block(block);
2030   }
2031 
2032   // These have to be removed while this is still a valid thread.
2033   remove_stack_guard_pages();
2034 
2035   if (UseTLAB) {
2036     tlab().make_parsable(true);  // retire TLAB, if any
2037   }
2038 
2039 #if INCLUDE_ALL_GCS
2040   if (UseG1GC || (UseShenandoahGC)) {
2041     flush_barrier_queues();
2042   }
2043   if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) {
2044     gclab().make_parsable(true);
2045   }
2046 #endif // INCLUDE_ALL_GCS
2047 
2048   Threads::remove(this);
2049   delete this;
2050 }
2051 
2052 
2053 
2054 
2055 JavaThread* JavaThread::active() {
2056   Thread* thread = ThreadLocalStorage::thread();
2057   assert(thread != NULL, "just checking");
2058   if (thread->is_Java_thread()) {
2059     return (JavaThread*) thread;
2060   } else {
2061     assert(thread->is_VM_thread(), "this must be a vm thread");
2062     VM_Operation* op = ((VMThread*) thread)->vm_operation();
 2063     JavaThread* ret = op == NULL ? NULL : (JavaThread*) op->calling_thread();
2064     assert(ret->is_Java_thread(), "must be a Java thread");
2065     return ret;


3330 // ======= Threads ========
3331 
3332 // The Threads class links together all active threads, and provides
3333 // operations over all threads.  It is protected by its own Mutex
3334 // lock, which is also used in other contexts to protect thread
3335 // operations from having the thread being operated on from exiting
3336 // and going away unexpectedly (e.g., safepoint synchronization)
3337 
3338 JavaThread* Threads::_thread_list = NULL;
3339 int         Threads::_number_of_threads = 0;
3340 int         Threads::_number_of_non_daemon_threads = 0;
3341 int         Threads::_return_code = 0;
3342 size_t      JavaThread::_stack_size_at_create = 0;
3343 #ifdef ASSERT
3344 bool        Threads::_vm_complete = false;
3345 #endif
3346 
3347 // All JavaThreads
3348 #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
3349 
3350 void Threads::java_threads_do(ThreadClosure* tc) {
3351   assert_locked_or_safepoint(Threads_lock);
3352   ALL_JAVA_THREADS(p) {
3353     tc->do_thread(p);
3354   }
3355 }
3356 
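java_threads_do, added by this change, applies a ThreadClosure to every JavaThread; the caller must hold Threads_lock or be at a safepoint, as the assert enforces. A typical caller subclasses ThreadClosure and overrides do_thread; for example, a hypothetical closure that counts Java threads might look like this (CountClosure is illustrative only):

    class CountClosure : public ThreadClosure {
    public:
      int count;
      CountClosure() : count(0) {}
      virtual void do_thread(Thread* thread) { count++; }
    };

    // Usage, with Threads_lock held or at a safepoint:
    //   CountClosure cc;
    //   Threads::java_threads_do(&cc);
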
3357 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
3358 void Threads::threads_do(ThreadClosure* tc) {
3359   assert_locked_or_safepoint(Threads_lock);
3360   // ALL_JAVA_THREADS iterates through all JavaThreads
3361   ALL_JAVA_THREADS(p) {
3362     tc->do_thread(p);
3363   }
3364   // Someday we could have a table or list of all non-JavaThreads.
3365   // For now, just manually iterate through them.
3366   tc->do_thread(VMThread::vm_thread());
3367   Universe::heap()->gc_threads_do(tc);
3368   WatcherThread *wt = WatcherThread::watcher_thread();
3369   // Strictly speaking, the following NULL check isn't sufficient to make sure
3370   // the data for WatcherThread is still valid upon being examined. However,
 3371   // considering that the WatcherThread terminates when the VM is on its way to
3372   // exit at safepoint, the chance of the above is extremely small. The right
3373   // way to prevent termination of WatcherThread would be to acquire
3374   // Terminator_lock, but we can't do that without violating the lock rank
3375   // checking in some cases.
3376   if (wt != NULL)


3652 
3653   // record VM initialization completion time
3654 #if INCLUDE_MANAGEMENT
3655   Management::record_vm_init_completed();
3656 #endif // INCLUDE_MANAGEMENT
3657 
3658   // Compute system loader. Note that this has to occur after set_init_completed, since
3659   // valid exceptions may be thrown in the process.
3660   // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
3661   // set_init_completed has just been called, causing exceptions not to be shortcut
3662   // anymore. We call vm_exit_during_initialization directly instead.
3663   SystemDictionary::compute_java_system_loader(THREAD);
3664   if (HAS_PENDING_EXCEPTION) {
3665     vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3666   }
3667 
3668 #if INCLUDE_ALL_GCS
3669   // Support for ConcurrentMarkSweep. This should be cleaned up
3670   // and better encapsulated. The ugly nested if test would go away
3671   // once things are properly refactored. XXX YSR
3672   if (UseConcMarkSweepGC || UseG1GC || UseShenandoahGC) {
3673     if (UseConcMarkSweepGC) {
3674       ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
3675     } else if (UseShenandoahGC) {
3676       ShenandoahControlThread::makeSurrogateLockerThread(THREAD);
3677     } else {
3678       ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
3679     }
3680     if (HAS_PENDING_EXCEPTION) {
3681       vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
3682     }
3683   }
3684 #endif // INCLUDE_ALL_GCS
3685 
 3686   // Always call even when there are no JVMTI environments yet, since environments
3687   // may be attached late and JVMTI must track phases of VM execution
3688   JvmtiExport::enter_live_phase();
3689 
3690   // Signal Dispatcher needs to be started before VMInit event is posted
3691   os::signal_init();
3692 
3693   // Start Attach Listener if +StartAttachListener or it can't be started lazily
3694   if (!DisableAttachMechanism) {
3695     AttachListener::vm_start();
3696     if (StartAttachListener || AttachListener::init_at_startup()) {


4249 void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4250   ALL_JAVA_THREADS(p) {
4251     p->oops_do(f, cld_f, cf);
4252   }
4253   VMThread::vm_thread()->oops_do(f, cld_f, cf);
4254 }
4255 
4256 void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
4257   // Introduce a mechanism allowing parallel threads to claim threads as
4258   // root groups.  Overhead should be small enough to use all the time,
4259   // even in sequential code.
4260   SharedHeap* sh = SharedHeap::heap();
4261   // Cannot yet substitute active_workers for n_par_threads
4262   // because of G1CollectedHeap::verify() use of
4263   // SharedHeap::process_roots().  n_par_threads == 0 will
4264   // turn off parallelism in process_roots while active_workers
4265   // is being used for parallelism elsewhere.
4266   bool is_par = sh->n_par_threads() > 0;
4267   assert(!is_par ||
4268          (SharedHeap::heap()->n_par_threads() ==
4269           SharedHeap::heap()->workers()->active_workers()
4270           || UseShenandoahGC), "Mismatch");
4271   int cp = SharedHeap::heap()->strong_roots_parity();
4272   ALL_JAVA_THREADS(p) {
4273     if (p->claim_oops_do(is_par, cp)) {
4274       p->oops_do(f, cld_f, cf);
4275     }
4276   }
4277   VMThread* vmt = VMThread::vm_thread();
4278   if (vmt->claim_oops_do(is_par, cp)) {
4279     vmt->oops_do(f, cld_f, cf);
4280   }
4281 }
4282 
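The claiming in possibly_parallel_oops_do works on a parity scheme: strong_roots_parity changes for each root-scanning round, and claim_oops_do(is_par, cp) returns true for exactly one worker per thread per round, so no thread's oops are processed twice. A rough standalone model of such a CAS-based claim (illustrative only; the real claim logic lives in Thread::claim_oops_do):

    #include <atomic>

    struct ClaimableThread {
      std::atomic<int> claimed_parity{0};

      // Returns true for exactly one caller per thread per round.
      bool claim(bool is_par, int parity) {
        if (!is_par) {                    // sequential round: no contention
          claimed_parity.store(parity, std::memory_order_relaxed);
          return true;
        }
        int seen = claimed_parity.load(std::memory_order_relaxed);
        while (seen != parity) {          // not yet claimed this round
          if (claimed_parity.compare_exchange_weak(seen, parity)) {
            return true;                  // this caller won the claim
          }                               // else: seen was refreshed; re-check
        }
        return false;                     // another worker claimed it first
      }
    };
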
4283 #if INCLUDE_ALL_GCS
4284 // Used by ParallelScavenge
4285 void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
4286   ALL_JAVA_THREADS(p) {
4287     q->enqueue(new ThreadRootsTask(p));
4288   }
4289   q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
4290 }

