< prev index next >

src/hotspot/share/runtime/thread.cpp

Print this page

122     _unhandled_oops = new UnhandledOops(this);
123   }
124 #endif // CHECK_UNHANDLED_OOPS
125 
126   // Notify the barrier set that a thread is being created. The initial
127   // thread is created before the barrier set is available.  The call to
128   // BarrierSet::on_thread_create() for this thread is therefore deferred
129   // to BarrierSet::set_barrier_set().
130   BarrierSet* const barrier_set = BarrierSet::barrier_set();
131   if (barrier_set != nullptr) {
132     barrier_set->on_thread_create(this);
133   } else {
134     // Only the main thread should be created before the barrier set
135     // and that happens just before Thread::current is set. No other thread
136     // can attach as the VM is not created yet, so they can't execute this code.
137     // If the main thread creates other threads before the barrier set that is an error.
138     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
139   }
140 
141   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));











142 }
143 
// Set up this thread's thread-local allocation buffer (TLAB).
// A no-op when TLABs are disabled via -XX:-UseTLAB.
144 void Thread::initialize_tlab() {
145   if (UseTLAB) {
146     tlab().initialize();
147   }
148 }
149 
// Publish this Thread as the current thread: store it both in the
// compiled-TLS variable (_thr_current, when available) and in
// ThreadLocalStorage, then verify the two views agree.
150 void Thread::initialize_thread_current() {
151 #ifndef USE_LIBRARY_BASED_TLS_ONLY
    // Fast path: direct __thread/compiler TLS variable.
152   assert(_thr_current == nullptr, "Thread::current already initialized");
153   _thr_current = this;
154 #endif
    // Library-based TLS (always maintained, used as fallback/cross-check).
155   assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized");
156   ThreadLocalStorage::set_thread(this);
157   assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
158 }
159 
160 void Thread::clear_thread_current() {
161   assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");

582     }
583     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
584   }
585 }
586 
// Release the spin lock word previously acquired with the cmpxchg(adr, 0, 1)
// in SpinAcquire (above): issue a full fence, then store 0 into *adr.
587 void Thread::SpinRelease(volatile int * adr) {
588   assert(*adr != 0, "invariant");
589   OrderAccess::fence();      // guarantee at least release consistency.
590   // Roach-motel semantics.
591   // It's safe if subsequent LDs and STs float "up" into the critical section,
592   // but prior LDs and STs within the critical section can't be allowed
593   // to reorder or float past the ST that releases the lock.
594   // Loads and stores in the critical section - which appear in program
595   // order before the store that releases the lock - must also appear
596   // before the store that releases the lock in memory visibility order.
597   // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
598   // the ST of 0 into the lock-word which releases the lock, so fence
599   // more than covers this on all platforms.
600   *adr = 0;
601 }













122     _unhandled_oops = new UnhandledOops(this);
123   }
124 #endif // CHECK_UNHANDLED_OOPS
125 
126   // Notify the barrier set that a thread is being created. The initial
127   // thread is created before the barrier set is available.  The call to
128   // BarrierSet::on_thread_create() for this thread is therefore deferred
129   // to BarrierSet::set_barrier_set().
130   BarrierSet* const barrier_set = BarrierSet::barrier_set();
131   if (barrier_set != nullptr) {
132     barrier_set->on_thread_create(this);
133   } else {
134     // Only the main thread should be created before the barrier set
135     // and that happens just before Thread::current is set. No other thread
136     // can attach as the VM is not created yet, so they can't execute this code.
137     // If the main thread creates other threads before the barrier set that is an error.
138     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
139   }
140 
141   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));
142 
143   _profile_vm_locks = false;
144   _profile_vm_calls = false;
145   _profile_vm_ops   = false;
146   _profile_rt_calls = false;
147   _profile_upcalls  = false;
148 
149   _all_bc_counter_value = 0;
150   _clinit_bc_counter_value = 0;
151 
152   _current_rt_call_timer = nullptr;
153 }
154 
// Set up this thread's thread-local allocation buffer (TLAB).
// A no-op when TLABs are disabled via -XX:-UseTLAB.
155 void Thread::initialize_tlab() {
156   if (UseTLAB) {
157     tlab().initialize();
158   }
159 }
160 
// Publish this Thread as the current thread: store it both in the
// compiled-TLS variable (_thr_current, when available) and in
// ThreadLocalStorage, then verify the two views agree.
161 void Thread::initialize_thread_current() {
162 #ifndef USE_LIBRARY_BASED_TLS_ONLY
    // Fast path: direct __thread/compiler TLS variable.
163   assert(_thr_current == nullptr, "Thread::current already initialized");
164   _thr_current = this;
165 #endif
    // Library-based TLS (always maintained, used as fallback/cross-check).
166   assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized");
167   ThreadLocalStorage::set_thread(this);
168   assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
169 }
170 
171 void Thread::clear_thread_current() {
172   assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");

593     }
594     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
595   }
596 }
597 
// Release the spin lock word previously acquired with the cmpxchg(adr, 0, 1)
// in SpinAcquire (above): issue a full fence, then store 0 into *adr.
598 void Thread::SpinRelease(volatile int * adr) {
599   assert(*adr != 0, "invariant");
600   OrderAccess::fence();      // guarantee at least release consistency.
601   // Roach-motel semantics.
602   // It's safe if subsequent LDs and STs float "up" into the critical section,
603   // but prior LDs and STs within the critical section can't be allowed
604   // to reorder or float past the ST that releases the lock.
605   // Loads and stores in the critical section - which appear in program
606   // order before the store that releases the lock - must also appear
607   // before the store that releases the lock in memory visibility order.
608   // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
609   // the ST of 0 into the lock-word which releases the lock, so fence
610   // more than covers this on all platforms.
611   *adr = 0;
612 }
613 
// Forward to PerfTraceTime::name(). Defined out-of-line, presumably so the
// header only needs a forward declaration of PerfTraceTime — TODO confirm.
614 const char* ProfileVMCallContext::name(PerfTraceTime* t) {
615   return t->name();
616 }
617 
// Global tally of runtime calls observed while another profiled runtime
// call's timer was already active on the same thread.
618 int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;
619 
// Record one nested runtime call: log the inner/outer timer names and bump
// the shared counter atomically (multiple threads may report concurrently).
// NOTE(review): this logs under the 'init' tag — confirm that tag (rather
// than a profiling-specific one) is intended for nested-runtime-call events.
620 void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
621   log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
622   Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
623 }
624 
< prev index next >