< prev index next >

src/hotspot/share/runtime/thread.cpp

Print this page

125     _unhandled_oops = new UnhandledOops(this);
126   }
127 #endif // CHECK_UNHANDLED_OOPS
128 
129   // Notify the barrier set that a thread is being created. The initial
130   // thread is created before the barrier set is available.  The call to
131   // BarrierSet::on_thread_create() for this thread is therefore deferred
132   // to BarrierSet::set_barrier_set().
133   BarrierSet* const barrier_set = BarrierSet::barrier_set();
134   if (barrier_set != nullptr) {
135     barrier_set->on_thread_create(this);
136   } else {
137     // Only the main thread should be created before the barrier set
138     // and that happens just before Thread::current is set. No other thread
139     // can attach as the VM is not created yet, so they can't execute this code.
140     // If the main thread creates other threads before the barrier set that is an error.
141     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
142   }
143 
144   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));











145 }
146 
#ifdef ASSERT
address Thread::stack_base() const {
  // Deliberately avoids Thread::name() here: producing the name can require a
  // ResourceMark, which is not usable this early in thread initialization.
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() == nullptr ? 0 : osthread()->thread_id());
  return _stack_base;
}
#endif
156 
157 void Thread::initialize_tlab() {
158   if (UseTLAB) {
159     tlab().initialize();
160   }
161 }
162 
163 void Thread::initialize_thread_current() {
164 #ifndef USE_LIBRARY_BASED_TLS_ONLY

587     }
588     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
589   }
590 }
591 
// Releases a simple spin lock by storing 0 into the lock word. A full fence
// is issued first so every load and store performed inside the critical
// section is ordered before the releasing store.
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}













125     _unhandled_oops = new UnhandledOops(this);
126   }
127 #endif // CHECK_UNHANDLED_OOPS
128 
129   // Notify the barrier set that a thread is being created. The initial
130   // thread is created before the barrier set is available.  The call to
131   // BarrierSet::on_thread_create() for this thread is therefore deferred
132   // to BarrierSet::set_barrier_set().
133   BarrierSet* const barrier_set = BarrierSet::barrier_set();
134   if (barrier_set != nullptr) {
135     barrier_set->on_thread_create(this);
136   } else {
137     // Only the main thread should be created before the barrier set
138     // and that happens just before Thread::current is set. No other thread
139     // can attach as the VM is not created yet, so they can't execute this code.
140     // If the main thread creates other threads before the barrier set that is an error.
141     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
142   }
143 
144   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));
145 
146   _profile_vm_locks = false;
147   _profile_vm_calls = false;
148   _profile_vm_ops   = false;
149   _profile_rt_calls = false;
150   _profile_upcalls  = false;
151 
152   _all_bc_counter_value = 0;
153   _clinit_bc_counter_value = 0;
154 
155   _current_rt_call_timer = nullptr;
156 }
157 
#ifdef ASSERT
address Thread::stack_base() const {
  // Deliberately avoids Thread::name() here: producing the name can require a
  // ResourceMark, which is not usable this early in thread initialization.
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() == nullptr ? 0 : osthread()->thread_id());
  return _stack_base;
}
#endif
167 
168 void Thread::initialize_tlab() {
169   if (UseTLAB) {
170     tlab().initialize();
171   }
172 }
173 
174 void Thread::initialize_thread_current() {
175 #ifndef USE_LIBRARY_BASED_TLS_ONLY

598     }
599     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
600   }
601 }
602 
// Releases a simple spin lock by storing 0 into the lock word. A full fence
// is issued first so every load and store performed inside the critical
// section is ordered before the releasing store.
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}
618 
619 const char* ProfileVMCallContext::name(PerfTraceTime* t) {
620   return t->name();
621 }
622 
// Global count of runtime calls observed to nest inside another timed
// runtime call; incremented atomically in notify_nested_rt_call().
int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;
624 
625 void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
626   log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
627   Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
628 }
629 
< prev index next >