
src/hotspot/share/runtime/thread.cpp


119     _unhandled_oops = new UnhandledOops(this);
120   }
121 #endif // CHECK_UNHANDLED_OOPS
122 
123   // Notify the barrier set that a thread is being created. The initial
124   // thread is created before the barrier set is available.  The call to
125   // BarrierSet::on_thread_create() for this thread is therefore deferred
126   // to BarrierSet::set_barrier_set().
127   BarrierSet* const barrier_set = BarrierSet::barrier_set();
128   if (barrier_set != nullptr) {
129     barrier_set->on_thread_create(this);
130   } else {
131     // Only the main thread should be created before the barrier set,
132     // and that happens just before Thread::current is set. No other thread
133     // can attach, as the VM is not created yet, so none can execute this code.
134     // If the main thread creates other threads before the barrier set exists, that is an error.
135     assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
136   }
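
The deferred-notification idea in the comment above can be sketched in isolation. The following is a minimal illustration of the pattern, not HotSpot's implementation; MiniBarrierSet, MiniThread, and the single-slot deferral are hypothetical names invented for the sketch.

    #include <cassert>

    struct MiniThread;

    struct MiniBarrierSet {
      void on_thread_create(MiniThread* /*t*/) { /* record/track the new thread */ }
    };

    static MiniBarrierSet* g_barrier_set = nullptr;  // not installed at startup
    static MiniThread*     g_deferred    = nullptr;  // the one pre-barrier-set thread

    struct MiniThread {
      MiniThread() {
        if (g_barrier_set != nullptr) {
          g_barrier_set->on_thread_create(this);     // normal path
        } else {
          // Mirrors the assert above: only one thread may exist this early.
          assert(g_deferred == nullptr && "creating thread before barrier set");
          g_deferred = this;                         // notification is deferred
        }
      }
    };

    void set_barrier_set(MiniBarrierSet* bs) {
      g_barrier_set = bs;
      if (g_deferred != nullptr) {
        bs->on_thread_create(g_deferred);            // replay the deferred call
        g_deferred = nullptr;
      }
    }
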
137 
138   MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));
139 
140   _profile_vm_locks = false;
141   _profile_vm_calls = false;
142   _profile_vm_ops   = false;
143   _profile_rt_calls = false;
144   _profile_upcalls  = false;
145 
146   _all_bc_counter_value = 0;
147   _clinit_bc_counter_value = 0;
148 
149   _current_rt_call_timer = nullptr;
150 }
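
The fields initialized just above carry per-thread profiling state. As a hedged guess at how a field like _current_rt_call_timer is typically driven, here is a minimal RAII sketch; RtCallTimer, MiniThreadState, and RtCallScope are hypothetical names, not this patch's PerfTraceTime API.

    struct RtCallTimer {
      const char* _name;
      explicit RtCallTimer(const char* name) : _name(name) {}
      const char* name() const { return _name; }
    };

    struct MiniThreadState {
      RtCallTimer* _current_rt_call_timer = nullptr;
    };

    class RtCallScope {
      MiniThreadState& _t;
      RtCallTimer*     _outer;   // timer active when this scope was entered
      RtCallTimer      _timer;
    public:
      RtCallScope(MiniThreadState& t, const char* name)
        : _t(t), _outer(t._current_rt_call_timer), _timer(name) {
        _t._current_rt_call_timer = &_timer;  // nested scopes will see us as outer
      }
      ~RtCallScope() {
        _t._current_rt_call_timer = _outer;   // restore previous timer on exit
      }
    };

When _outer is non-null on entry, the call is nested; that is the condition under which something like notify_nested_rt_call() further down would fire.
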
151 
152 #ifdef ASSERT
153 address Thread::stack_base() const {
154   // Note: we can't report Thread::name() here, as that can require a ResourceMark,
155   // which we can't use because this is called too early in thread initialization.
156   assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
157          osthread() != nullptr ? osthread()->thread_id() : 0);
158   return _stack_base;
159 }
160 #endif
161 
162 void Thread::initialize_tlab() {
163   if (UseTLAB) {
164     tlab().initialize();
165   }
166 }
167 
168 void Thread::retire_tlab(ThreadLocalAllocStats* stats) {
169   // Sampling and serviceability support

609     }
610     if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
611   }
612 }
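
The tail above shows the acquire loop ending in Atomic::cmpxchg(adr, 0, 1). As a standalone illustration using standard C++ atomics rather than HotSpot's Atomic class, a test-and-test-and-set version of the same loop looks like this:

    #include <atomic>

    void spin_acquire(std::atomic<int>* adr) {
      for (;;) {
        int expected = 0;
        // Attempt 0 -> 1; a successful CAS gives acquire ordering.
        if (adr->compare_exchange_weak(expected, 1,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
          return;  // we hold the lock
        }
        // Lock appears held: spin on plain loads until it looks free,
        // avoiding write traffic on the contended cache line.
        while (adr->load(std::memory_order_relaxed) != 0) { /* spin */ }
      }
    }
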
613 
614 void Thread::SpinRelease(volatile int * adr) {
615   assert(*adr != 0, "invariant");
616   OrderAccess::fence();      // guarantee at least release consistency.
617   // Roach-motel semantics.
618   // It's safe if subsequent LDs and STs float "up" into the critical section,
619   // but prior LDs and STs within the critical section can't be allowed
620   // to reorder or float past the ST that releases the lock.
621   // Loads and stores in the critical section - which appear in program
622   // order before the store that releases the lock - must also appear
623   // before the store that releases the lock in memory visibility order.
624   // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
625   // the ST of 0 into the lock-word which releases the lock, so fence
626   // more than covers this on all platforms.
627   *adr = 0;
628 }
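
In standard C++ terms, the #loadstore|#storestore barrier the comment asks for before the releasing store is exactly a release store; the full fence() used here is strictly stronger. A minimal equivalent sketch:

    #include <atomic>
    #include <cassert>

    void spin_release(std::atomic<int>* adr) {
      assert(adr->load(std::memory_order_relaxed) != 0 && "invariant");
      // Release ordering: critical-section loads/stores may not sink below
      // this store; later accesses may still float up (roach-motel).
      adr->store(0, std::memory_order_release);
    }
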
629 
630 const char* ProfileVMCallContext::name(PerfTraceTime* t) {
631   return t->name();
632 }
633 
634 int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;
635 
636 void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
637   log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
638   Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
639 }
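
For comparison, a sketch of the same statistics-counter pattern with standard C++ atomics; names are illustrative. Relaxed ordering suffices here because the counter publishes no other data and is only read for reporting.

    #include <atomic>

    static std::atomic<int> g_nested_rt_calls{0};

    void note_nested_rt_call() {
      // A pure statistics counter: no ordering is implied by the increment.
      g_nested_rt_calls.fetch_add(1, std::memory_order_relaxed);
    }
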
640 