    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS

  // Notify the barrier set that a thread is being created. The initial
  // thread is created before the barrier set is available. The call to
  // BarrierSet::on_thread_create() for this thread is therefore deferred
  // to BarrierSet::set_barrier_set().
  BarrierSet* const barrier_set = BarrierSet::barrier_set();
  if (barrier_set != nullptr) {
    barrier_set->on_thread_create(this);
  } else {
    // Only the main thread should be created before the barrier set,
    // and that happens just before Thread::current is set. No other thread
    // can attach yet, as the VM has not been created, so no other thread
    // can reach this code. It is an error if the main thread creates
    // other threads before the barrier set exists.
    assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
  }
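
  // For reference, the deferred notification is delivered when the barrier set
  // is installed. A minimal sketch of that hand-off, assuming the conventional
  // shape of BarrierSet::set_barrier_set() (illustrative, not a verbatim copy
  // of barrierSet.cpp):
  //
  //   void BarrierSet::set_barrier_set(BarrierSet* barrier_set) {
  //     assert(_barrier_set == nullptr, "Already initialized");
  //     _barrier_set = barrier_set;
  //     // The main thread was constructed before any barrier set existed, so
  //     // deliver its deferred on_thread_create() notification now.
  //     _barrier_set->on_thread_create(Thread::current());
  //   }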

  MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));

  _profile_vm_locks = false;
  _profile_vm_calls = false;
  _profile_vm_ops = false;
  _profile_rt_calls = false;
  _profile_upcalls = false;

  _all_bc_counter_value = 0;
  _clinit_bc_counter_value = 0;

  _current_rt_call_timer = nullptr;
}

#ifdef ASSERT
address Thread::stack_base() const {
  // Note: we can't report Thread::name() here, as that can require a ResourceMark,
  // which we can't use because this gets called too early in thread initialization.
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() != nullptr ? osthread()->thread_id() : 0);
  return _stack_base;
}
#endif

void Thread::initialize_tlab() {
  if (UseTLAB) {
    tlab().initialize();
  }
}

void Thread::initialize_thread_current() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY

// ...

    }
    // Retry the CAS after spinning; a successful 0 -> 1 exchange acquires the lock.
    if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
  }
}
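
// Usage sketch for the SpinAcquire/SpinRelease pair. The lock word and the
// wrapper function below are hypothetical, and the two-argument SpinAcquire
// signature (lock word plus a name used for diagnostics) is assumed:
//
//   static volatile int _example_lock = 0;   // 0 = free, 1 = held
//
//   void with_example_lock() {
//     Thread::SpinAcquire(&_example_lock, "ExampleLock");
//     // ... short critical section: all guarded loads/stores go here ...
//     Thread::SpinRelease(&_example_lock);
//   }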

void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but LDs and STs that appear within the critical section in program order
  // can't be allowed to reorder or float past the ST that releases the lock:
  // they must be visible, in memory order, before the releasing store.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so the fence
  // above more than covers this on all platforms.
  *adr = 0;
}
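
// The comment above notes that a release barrier is conceptually sufficient.
// A sketch of that weaker formulation using HotSpot's Atomic::release_store
// (an illustrative alternative under that reasoning, not what this function
// actually does):
//
//   void spin_release_alternative(volatile int* adr) {
//     assert(*adr != 0, "invariant");
//     // release_store = #loadstore|#storestore barrier + store: everything in
//     // the critical section becomes visible before the lock word is cleared.
//     Atomic::release_store(adr, 0);
//   }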

const char* ProfileVMCallContext::name(PerfTraceTime* t) {
  return t->name();
}

int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;

void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
  log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
  Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
}
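
// Shape of the nesting check that would reach notify_nested_rt_call(): when a
// runtime-call timer starts while another is already active on the thread, the
// pair is reported. A hypothetical sketch (the accessor name and constructor
// shape below are assumptions, not the actual ProfileVMCallContext code):
//
//   ProfileVMCallContext::ProfileVMCallContext(Thread* thread, PerfTraceTime* timer) {
//     PerfTraceTime* outer = thread->current_rt_call_timer();  // hypothetical accessor
//     if (outer != nullptr) {
//       notify_nested_rt_call(outer, timer);  // inner call while outer is active
//     }
//     // ... install `timer` as the thread's current runtime-call timer ...
//   }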