    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS

  // Notify the barrier set that a thread is being created. The initial
  // thread is created before the barrier set is available. The call to
  // BarrierSet::on_thread_create() for this thread is therefore deferred
  // to BarrierSet::set_barrier_set().
  BarrierSet* const barrier_set = BarrierSet::barrier_set();
  if (barrier_set != nullptr) {
    barrier_set->on_thread_create(this);
  } else {
    // Only the main thread should be created before the barrier set, and that
    // happens just before Thread::current is set. No other thread can attach,
    // because the VM has not been created yet, so no other thread can run this
    // code. If the main thread creates any other thread before the barrier set
    // is in place, that is an error.
    assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
  }
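
  // Illustrative sketch only (not code from this file): the deferred notification
  // mentioned above would be delivered once the barrier set is installed, along
  // the lines of:
  //
  //   void BarrierSet::set_barrier_set(BarrierSet* barrier_set) {
  //     _barrier_set = barrier_set;
  //     // The main thread already exists at this point, so hand it the
  //     // on_thread_create() callback that its constructor had to skip.
  //     _barrier_set->on_thread_create(Thread::current());
  //   }
  //
  // The exact body of BarrierSet::set_barrier_set() is assumed here; the comment
  // above only states that the deferred on_thread_create() call happens there.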

  MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));

  // Profiling support: all profiling flags default to off, the counters to zero,
  // and no runtime-call timer is active yet.
  _profile_vm_locks = false;
  _profile_vm_calls = false;
  _profile_vm_ops = false;
  _profile_rt_calls = false;
  _profile_upcalls = false;

  _all_bc_counter_value = 0;
  _clinit_bc_counter_value = 0;

  _current_rt_call_timer = nullptr;
}

#ifdef ASSERT
address Thread::stack_base() const {
  // Note: can't report Thread::name() here, as that can require a ResourceMark,
  // which we can't use because this gets called too early in thread initialization.
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() != nullptr ? osthread()->thread_id() : 0);
  return _stack_base;
}
#endif

void Thread::initialize_tlab() {
  if (UseTLAB) {
    tlab().initialize();
  }
}

void Thread::retire_tlab(ThreadLocalAllocStats* stats) {
  // Sampling and serviceability support

// ...

        SpinPause();
      }
    }
    if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
  }
}

void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // So we need a #loadstore|#storestore "release" memory barrier before
  // the ST of 0 into the lock-word which releases the lock.
  Atomic::release_store(adr, 0);
}
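
// Illustrative sketch only, not part of this file: SpinAcquire/SpinRelease pair
// up as a minimal spin lock over a lock word (0 = free, 1 = held). The names
// below (_example_lock, _guarded_value, bump_guarded_value) are hypothetical,
// and SpinAcquire is assumed to take the same single pointer argument as
// SpinRelease; its signature is elided from this excerpt.
//
//   static volatile int _example_lock  = 0;
//   static int          _guarded_value = 0;
//
//   static void bump_guarded_value() {
//     Thread::SpinAcquire(&_example_lock);   // cmpxchg 0 -> 1, spins on contention
//     _guarded_value++;                      // critical section
//     Thread::SpinRelease(&_example_lock);   // release_store(adr, 0) publishes the update
//   }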

const char* ProfileVMCallContext::name(PerfTraceTime* t) {
  return t->name();
}

int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;

void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
  log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
  Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
}
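
// Illustrative sketch only, an assumption rather than the class's actual shape:
// a scoped ProfileVMCallContext could detect nesting by checking whether the
// current thread already has an active runtime-call timer before installing its
// own, roughly:
//
//   // hypothetical constructor body
//   ProfileVMCallContext(Thread* thread, PerfTraceTime* timer) {
//     PerfTraceTime* outer = thread->_current_rt_call_timer;
//     if (outer != nullptr) {
//       notify_nested_rt_call(outer, timer);  // count and log the nested call
//     }
//     thread->_current_rt_call_timer = timer; // restored when the scope exits
//   }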