< prev index next >

src/hotspot/share/runtime/thread.cpp

Print this page

123     _unhandled_oops = new UnhandledOops(this);
124   }
125 #endif // CHECK_UNHANDLED_OOPS
126 
127   // Notify the barrier set that a thread is being created. The initial
128   // thread is created before the barrier set is available.  The call to
129   // BarrierSet::on_thread_create() for this thread is therefore deferred
130   // to BarrierSet::set_barrier_set().
131   BarrierSet* const barrier_set = BarrierSet::barrier_set();
132   if (barrier_set != nullptr) {
133     barrier_set->on_thread_create(this);
134   } else {
135     // Only the main thread should be created before the barrier set
136     // and that happens just before Thread::current is set. No other thread
137     // can attach as the VM is not created yet, so they can't execute this code.
138     // If the main thread creates other threads before the barrier set that is an error.
139     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
140   }
141 
142   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));











143 }
144 
#ifdef ASSERT
// Debug-only accessor: returns the cached stack base, asserting it has
// already been initialized for this thread.
address Thread::stack_base() const {
  // Note: can't report Thread::name() here as that can require a ResourceMark which we
  // can't use because this gets called too early in the thread initialization.
  // Fall back to the OS thread id (or 0 when even that is not set yet).
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() == nullptr ? 0 : osthread()->thread_id());
  return _stack_base;
}
#endif // ASSERT
154 
155 void Thread::initialize_tlab() {
156   if (UseTLAB) {
157     tlab().initialize();
158   }
159 }
160 
161 void Thread::initialize_thread_current() {
162 #ifndef USE_LIBRARY_BASED_TLS_ONLY

596     }
597     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
598   }
599 }
600 
// Release a spin lock word previously taken via a 0 -> 1 cmpxchg (see
// SpinAcquire above). Uses a full fence rather than a plain release store,
// which conservatively provides the required ordering on all platforms.
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}













123     _unhandled_oops = new UnhandledOops(this);
124   }
125 #endif // CHECK_UNHANDLED_OOPS
126 
127   // Notify the barrier set that a thread is being created. The initial
128   // thread is created before the barrier set is available.  The call to
129   // BarrierSet::on_thread_create() for this thread is therefore deferred
130   // to BarrierSet::set_barrier_set().
131   BarrierSet* const barrier_set = BarrierSet::barrier_set();
132   if (barrier_set != nullptr) {
133     barrier_set->on_thread_create(this);
134   } else {
135     // Only the main thread should be created before the barrier set
136     // and that happens just before Thread::current is set. No other thread
137     // can attach as the VM is not created yet, so they can't execute this code.
138     // If the main thread creates other threads before the barrier set that is an error.
139     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
140   }
141 
142   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));
143 
144   _profile_vm_locks = false;
145   _profile_vm_calls = false;
146   _profile_vm_ops   = false;
147   _profile_rt_calls = false;
148   _profile_upcalls  = false;
149 
150   _all_bc_counter_value = 0;
151   _clinit_bc_counter_value = 0;
152 
153   _current_rt_call_timer = nullptr;
154 }
155 
#ifdef ASSERT
// Debug-only accessor: returns the cached stack base, asserting it has
// already been set for this thread.
address Thread::stack_base() const {
  // Note: can't report Thread::name() here as that can require a ResourceMark which we
  // can't use because this gets called too early in the thread initialization.
  // Fall back to the OS thread id (or 0 if even that is not available yet).
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() != nullptr ? osthread()->thread_id() : 0);
  return _stack_base;
}
#endif // ASSERT
165 
// Set up this thread's thread-local allocation buffer. A no-op when TLABs
// are disabled via the UseTLAB flag.
void Thread::initialize_tlab() {
  if (UseTLAB) {
    tlab().initialize();
  }
}
171 
172 void Thread::initialize_thread_current() {
173 #ifndef USE_LIBRARY_BASED_TLS_ONLY

607     }
608     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
609   }
610 }
611 
// Release a spin lock word previously taken via a 0 -> 1 cmpxchg (see
// SpinAcquire above). Uses a full fence rather than a plain release store,
// which conservatively provides the required ordering on all platforms.
void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}
627 
628 const char* ProfileVMCallContext::name(PerfTraceTime* t) {
629   return t->name();
630 }
631 
632 int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;
633 
634 void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
635   log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
636   Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
637 }
638 
< prev index next >