    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS

  // Notify the barrier set that a thread is being created. The initial
  // thread is created before the barrier set is available. The call to
  // BarrierSet::on_thread_create() for this thread is therefore deferred
  // to BarrierSet::set_barrier_set().
  BarrierSet* const barrier_set = BarrierSet::barrier_set();
  if (barrier_set != nullptr) {
    barrier_set->on_thread_create(this);
  } else {
    // Only the main thread should be created before the barrier set,
    // and that happens just before Thread::current is set. No other thread
    // can attach, as the VM is not created yet, so no other thread can
    // execute this code. If the main thread creates other threads before
    // the barrier set, that is an error.
    assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
  }
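
  // Illustrative sketch, not from this file: the deferred notification is
  // presumably delivered when the barrier set is installed. The exact body of
  // BarrierSet::set_barrier_set() below is an assumption, shown only to make
  // the deferral protocol concrete:
  //
  //   void BarrierSet::set_barrier_set(BarrierSet* barrier_set) {
  //     assert(_barrier_set == nullptr, "already initialized");
  //     _barrier_set = barrier_set;
  //     // Deliver the on_thread_create() call deferred above, now that
  //     // both the main thread and the barrier set exist.
  //     _barrier_set->on_thread_create(Thread::current());
  //   }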

  MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));

  _profile_vm_locks = false;
  _profile_vm_calls = false;
  _profile_vm_ops = false;
  _profile_rt_calls = false;
  _profile_upcalls = false;

  _all_bc_counter_value = 0;
  _clinit_bc_counter_value = 0;

  _current_rt_call_timer = nullptr;
}
155
#ifdef ASSERT
address Thread::stack_base() const {
  // Note: can't report Thread::name() here, as that can require a ResourceMark,
  // which we can't use because this gets called too early in thread initialization.
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() != nullptr ? osthread()->thread_id() : 0);
  return _stack_base;
}
#endif

void Thread::initialize_tlab() {
  if (UseTLAB) {
    tlab().initialize();
  }
}

void Thread::initialize_thread_current() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
// ...

    }
    // Contended path: CAS the lock word from 0 to 1; seeing 0 means we now own the lock.
    if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
  }
}
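
// Illustrative sketch, not from this file: the fragment above is the tail of a
// CAS-based spin acquire (spin until the lock word looks free, then retry the
// CAS). A minimal standalone C++11 analogue of the same idea:
//
//   #include <atomic>
//
//   void spin_acquire(std::atomic<int>& lock_word) {
//     int expected = 0;
//     while (!lock_word.compare_exchange_strong(expected, 1,
//                                               std::memory_order_acquire)) {
//       expected = 0;  // compare_exchange overwrote it; reset for the retry
//       while (lock_word.load(std::memory_order_relaxed) != 0) {
//         // spin (optionally pause/yield) until the lock looks free
//       }
//     }
//   }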

void Thread::SpinRelease(volatile int* adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}
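
// Illustrative sketch, not from this file: the comment above describes release
// semantics, for which a release-store is sufficient. A minimal standalone
// C++11 analogue (std::atomic is an illustrative stand-in for the VM's lock
// word):
//
//   #include <atomic>
//
//   void spin_release(std::atomic<int>& lock_word) {
//     // memory_order_release keeps prior loads/stores from moving below
//     // this store -- the #loadstore|#storestore requirement above. The
//     // full fence() used by SpinRelease() is strictly stronger.
//     lock_word.store(0, std::memory_order_release);
//   }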

const char* ProfileVMCallContext::name(PerfTraceTime* t) {
  return t->name();
}

int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;

void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
  log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
  Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
}