< prev index next >

src/hotspot/share/runtime/thread.cpp

Print this page

126     _unhandled_oops = new UnhandledOops(this);
127   }
128 #endif // CHECK_UNHANDLED_OOPS
129 
130   // Notify the barrier set that a thread is being created. The initial
131   // thread is created before the barrier set is available.  The call to
132   // BarrierSet::on_thread_create() for this thread is therefore deferred
133   // to BarrierSet::set_barrier_set().
134   BarrierSet* const barrier_set = BarrierSet::barrier_set();
135   if (barrier_set != nullptr) {
136     barrier_set->on_thread_create(this);
137   } else {
138     // Only the main thread should be created before the barrier set
139     // and that happens just before Thread::current is set. No other thread
140     // can attach as the VM is not created yet, so they can't execute this code.
141     // If the main thread creates other threads before the barrier set that is an error.
142     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
143   }
144 
145   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));











146 }
147 
148 void Thread::initialize_tlab() {
149   if (UseTLAB) {
150     tlab().initialize();
151   }
152 }
153 
// Publish this Thread* so Thread::current() resolves to the running thread.
// Two mechanisms are kept in sync: the compiled-in thread-local _thr_current
// (skipped when only library-based TLS is available) and the library-based
// ThreadLocalStorage. Both must be unset on entry — a thread's identity is
// installed exactly once.
void Thread::initialize_thread_current() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  assert(_thr_current == nullptr, "Thread::current already initialized");
  _thr_current = this;
#endif
  assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized");
  ThreadLocalStorage::set_thread(this);
  // After installation both lookup paths must agree.
  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
}
163 
164 void Thread::clear_thread_current() {
165   assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");

578     }
579     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
580   }
581 }
582 
// Release a spin lock previously taken with SpinAcquire: fence, then store
// zero into the lock word. The full fence is stronger than the minimal
// release barrier required (see the comment below), which keeps this
// correct on every supported platform.
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}













126     _unhandled_oops = new UnhandledOops(this);
127   }
128 #endif // CHECK_UNHANDLED_OOPS
129 
130   // Notify the barrier set that a thread is being created. The initial
131   // thread is created before the barrier set is available.  The call to
132   // BarrierSet::on_thread_create() for this thread is therefore deferred
133   // to BarrierSet::set_barrier_set().
134   BarrierSet* const barrier_set = BarrierSet::barrier_set();
135   if (barrier_set != nullptr) {
136     barrier_set->on_thread_create(this);
137   } else {
138     // Only the main thread should be created before the barrier set
139     // and that happens just before Thread::current is set. No other thread
140     // can attach as the VM is not created yet, so they can't execute this code.
141     // If the main thread creates other threads before the barrier set that is an error.
142     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
143   }
144 
145   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));
146 
147   _profile_vm_locks = false;
148   _profile_vm_calls = false;
149   _profile_vm_ops   = false;
150   _profile_rt_calls = false;
151   _profile_upcalls  = false;
152 
153   _all_bc_counter_value = 0;
154   _clinit_bc_counter_value = 0;
155 
156   _current_rt_call_timer = nullptr;
157 }
158 
159 void Thread::initialize_tlab() {
160   if (UseTLAB) {
161     tlab().initialize();
162   }
163 }
164 
// Publish this Thread* so Thread::current() resolves to the running thread.
// Two mechanisms are kept in sync: the compiled-in thread-local _thr_current
// (skipped when only library-based TLS is available) and the library-based
// ThreadLocalStorage. Both must be unset on entry — a thread's identity is
// installed exactly once.
void Thread::initialize_thread_current() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  assert(_thr_current == nullptr, "Thread::current already initialized");
  _thr_current = this;
#endif
  assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized");
  ThreadLocalStorage::set_thread(this);
  // After installation both lookup paths must agree.
  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
}
174 
175 void Thread::clear_thread_current() {
176   assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");

589     }
590     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
591   }
592 }
593 
// Release a spin lock previously taken with SpinAcquire: fence, then store
// zero into the lock word. The full fence is stronger than the minimal
// release barrier required (see the comment below), which keeps this
// correct on every supported platform.
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}
609 
610 const char* ProfileVMCallContext::name(PerfTraceTime* t) {
611   return t->name();
612 }
613 
614 int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;
615 
616 void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
617   log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
618   Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
619 }
620 
< prev index next >