    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS

  // Notify the barrier set that a thread is being created. The initial
  // thread is created before the barrier set is available. The call to
  // BarrierSet::on_thread_create() for this thread is therefore deferred
  // to BarrierSet::set_barrier_set().
  BarrierSet* const barrier_set = BarrierSet::barrier_set();
  if (barrier_set != nullptr) {
    barrier_set->on_thread_create(this);
  } else {
    // Only the main thread should be created before the barrier set, and
    // that happens just before Thread::current is set. No other thread can
    // attach, as the VM has not been created yet, so no other thread can
    // execute this code. If the main thread creates other threads before
    // the barrier set exists, that is an error.
    assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
  }

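  // On macOS/AArch64, threads toggle per-thread write-xor-execute (W^X)
  // protection when writing to the code cache; in debug builds, note that
  // this thread's W^X mode has not been initialized yet.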
  MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));

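  // Runtime-call profiling state: all profiling modes start off, the
  // bytecode counters start at zero, and no runtime-call timer is active.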
  _profile_vm_locks = false;
  _profile_vm_calls = false;
  _profile_vm_ops = false;
  _profile_rt_calls = false;
  _profile_upcalls = false;

  _all_bc_counter_value = 0;
  _clinit_bc_counter_value = 0;

  _current_rt_call_timer = nullptr;
}

#ifdef ASSERT
address Thread::stack_base() const {
  // Note: we can't report Thread::name() here, as that may require a
  // ResourceMark, which we can't use because this is called too early in
  // thread initialization.
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() != nullptr ? osthread()->thread_id() : 0);
  return _stack_base;
}
#endif

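// A TLAB (thread-local allocation buffer) lets a thread allocate from its own
// chunk of the heap without synchronization; it is only set up when the
// UseTLAB flag is enabled.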
void Thread::initialize_tlab() {
  if (UseTLAB) {
    tlab().initialize();
  }
}

void Thread::retire_tlab(ThreadLocalAllocStats* stats) {
  // Sampling and serviceability support

// ...

        SpinPause();
      }
    }
    if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return;
  }
}

void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed to
  // reorder or float past the ST that releases the lock. That is, loads and
  // stores that appear in program order before the releasing store must also
  // become visible before it. So we need a #loadstore|#storestore "release"
  // barrier before the ST of 0 into the lock-word.
  AtomicAccess::release_store(adr, 0);
}
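
// Illustrative usage (a sketch, not part of this file): a short critical
// section guarded by a zero-initialized lock word. The SpinAcquire signature
// is assumed from the spin loop above.
//
//   static volatile int _mux = 0;
//
//   void with_lock() {
//     Thread::SpinAcquire(&_mux);  // the 0 -> 1 transition takes the lock
//     // ... critical section ...
//     Thread::SpinRelease(&_mux);  // release_store(adr, 0) publishes all
//                                  // prior stores before dropping the lock
//   }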

const char* ProfileVMCallContext::name(PerfTraceTime* t) {
  return t->name();
}

int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;

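// Records a nested runtime call: logs the inner/outer timer pair and bumps
// the global nested-call counter atomically.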
void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
  log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
  AtomicAccess::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
}