/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaThreadStatus.hpp"
#include "gc/shared/barrierSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/nonJavaThread.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/spinYield.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif

#ifndef USE_LIBRARY_BASED_TLS_ONLY
// Current thread is maintained as a thread-local variable
THREAD_LOCAL Thread* Thread::_thr_current = nullptr;
#endif

// ======= Thread ========
// Base class for all threads: VMThread, WatcherThread, JavaThread, and the
// other NonJavaThread subclasses.

Thread::Thread(MemTag mem_tag) {

  DEBUG_ONLY(_run_state = PRE_CALL_RUN;)

  // stack and get_thread
  set_stack_base(nullptr);
  set_stack_size(0);
  set_lgrp_id(-1);
  DEBUG_ONLY(clear_suspendible_thread();)
  DEBUG_ONLY(clear_indirectly_suspendible_thread();)
  DEBUG_ONLY(clear_indirectly_safepoint_thread();)

  // allocated data structures
  set_osthread(nullptr);
  set_resource_area(new (mem_tag) ResourceArea(mem_tag));
  DEBUG_ONLY(_current_resource_mark = nullptr;)
  set_handle_area(new (mem_tag) HandleArea(mem_tag, nullptr));
  set_metadata_handles(new (mtClass) GrowableArray<Metadata*>(30, mtClass));
  set_last_handle_mark(nullptr);

  // Initial value of zero ==> never claimed.
  _threads_do_token = 0;
  _threads_hazard_ptr = nullptr;
  _threads_list_ptr = nullptr;
  _nested_threads_hazard_ptr_cnt = 0;
  _rcu_counter = 0;

  // the handle mark links itself to last_handle_mark
  new HandleMark(this);

  // plain initialization
  debug_only(_owned_locks = nullptr;)
  NOT_PRODUCT(_skip_gcalot = false;)
  _jvmti_env_iteration_count = 0;
  set_allocated_bytes(0);
  _current_pending_raw_monitor = nullptr;
  _vm_error_callbacks = nullptr;

  // thread-specific hashCode stream generator state - Marsaglia shift-xor form
  // If we are dumping, keep ihashes constant. Note that during dumping we only
  // ever run one java thread, and no other thread should generate ihashes either,
  // so using a constant seed should work fine.
  _hashStateX = CDSConfig::is_dumping_static_archive() ? 0x12345678 : os::random();
  _hashStateY = 842502087;
  _hashStateZ = 0x8767;    // (int)(3579807591LL & 0xffff) ;
  _hashStateW = 273326509;
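
  // For reference, a consumer of this state (e.g. ObjectSynchronizer's
  // get_next_hash() in synchronizer.cpp) advances the shift-xor stream
  // roughly as follows:
  //   unsigned t = _hashStateX;
  //   t ^= (t << 11);
  //   _hashStateX = _hashStateY;
  //   _hashStateY = _hashStateZ;
  //   _hashStateZ = _hashStateW;
  //   unsigned v = _hashStateW;
  //   v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
  //   _hashStateW = v;   // v is the next hash value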

  // Many of the following fields are effectively final - immutable
  // Note that nascent threads can't use the Native Monitor-Mutex
  // construct until the _MutexEvent is initialized ...
  // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
  // we might instead use a stack of ParkEvents that we could provision on-demand.
  // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
  // and ::Release()
  _ParkEvent   = ParkEvent::Allocate(this);

#ifdef CHECK_UNHANDLED_OOPS
  if (CheckUnhandledOops) {
    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS

  // Notify the barrier set that a thread is being created. The initial
  // thread is created before the barrier set is available.  The call to
  // BarrierSet::on_thread_create() for this thread is therefore deferred
  // to BarrierSet::set_barrier_set().
  BarrierSet* const barrier_set = BarrierSet::barrier_set();
  if (barrier_set != nullptr) {
    barrier_set->on_thread_create(this);
  } else {
    // Only the main thread should be created before the barrier set
    // and that happens just before Thread::current is set. No other thread
    // can attach as the VM is not created yet, so they can't execute this code.
    // If the main thread creates other threads before the barrier set that is an error.
    assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
  }

  MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));

  _profile_vm_locks = false;
  _profile_vm_calls = false;
  _profile_vm_ops   = false;
  _profile_rt_calls = false;
  _profile_upcalls  = false;

  _all_bc_counter_value = 0;
  _clinit_bc_counter_value = 0;

  _current_rt_call_timer = nullptr;
}

#ifdef ASSERT
address Thread::stack_base() const {
  // Note: can't report Thread::name() here as that can require a ResourceMark which we
  // can't use because this gets called too early in the thread initialization.
  assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)",
         osthread() != nullptr ? osthread()->thread_id() : 0);
  return _stack_base;
}
#endif

void Thread::initialize_tlab() {
  if (UseTLAB) {
    tlab().initialize();
  }
}

void Thread::initialize_thread_current() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  assert(_thr_current == nullptr, "Thread::current already initialized");
  _thr_current = this;
#endif
  assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized");
  ThreadLocalStorage::set_thread(this);
  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
}

void Thread::clear_thread_current() {
  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
#ifndef USE_LIBRARY_BASED_TLS_ONLY
  _thr_current = nullptr;
#endif
  ThreadLocalStorage::set_thread(nullptr);
}

void Thread::record_stack_base_and_size() {
  // Note: at this point, Thread object is not yet initialized. Do not rely on
  // any members being initialized. Do not rely on Thread::current() being set.
  // If possible, refrain from doing anything which may crash or assert since
  // quite probably those crash dumps will be useless.
  address base;
  size_t size;
  os::current_stack_base_and_size(&base, &size);
  set_stack_base(base);
  set_stack_size(size);

  // Set stack limits after thread is initialized.
  if (is_Java_thread()) {
    JavaThread::cast(this)->stack_overflow_state()->initialize(stack_base(), stack_end());
  }
}

void Thread::register_thread_stack_with_NMT() {
  MemTracker::record_thread_stack(stack_end(), stack_size());
}

void Thread::unregister_thread_stack_with_NMT() {
  MemTracker::release_thread_stack(stack_end(), stack_size());
}

void Thread::call_run() {
  DEBUG_ONLY(_run_state = CALL_RUN;)

  // At this point, Thread object should be fully initialized and
  // Thread::current() should be set.

  assert(Thread::current_or_null() != nullptr, "current thread is unset");
  assert(Thread::current_or_null() == this, "current thread is wrong");

  // Perform common initialization actions

  MACOS_AARCH64_ONLY(this->init_wx());

  register_thread_stack_with_NMT();

  JFR_ONLY(Jfr::on_thread_start(this);)

  log_debug(os, thread)("Thread %zu stack dimensions: "
    PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "k).",
    os::current_thread_id(), p2i(stack_end()),
    p2i(stack_base()), stack_size()/1024);

  // Perform <ChildClass> initialization actions
  DEBUG_ONLY(_run_state = PRE_RUN;)
  this->pre_run();

  // Invoke <ChildClass>::run()
  DEBUG_ONLY(_run_state = RUN;)
  this->run();
  // Returned from <ChildClass>::run(). Thread finished.

  // Perform common tear-down actions

  assert(Thread::current_or_null() != nullptr, "current thread is unset");
  assert(Thread::current_or_null() == this, "current thread is wrong");

  // Perform <ChildClass> tear-down actions
  DEBUG_ONLY(_run_state = POST_RUN;)
  this->post_run();

  // Note: at this point the thread object may already have deleted itself,
  // so from here on do not dereference *this*. Not all thread types currently
  // delete themselves when they terminate. But no thread should ever be deleted
  // asynchronously with respect to its termination - that is what _run_state can
  // be used to check.

  // Logically we should do this->unregister_thread_stack_with_NMT() here, but we
  // had to move that into post_run() because of the `this` deletion issue.

  assert(Thread::current_or_null() == nullptr, "current thread still present");
}
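
// For reference, the life cycle of a started thread in terms of the debug-only
// _run_state above (the platform bootstrap that invokes call_run() is typically
// thread_native_entry() in the os-specific code):
//
//   Thread()        -> PRE_CALL_RUN
//   call_run()      -> CALL_RUN
//     pre_run()     -> PRE_RUN
//     run()         -> RUN
//     post_run()    -> POST_RUN, after which the Thread may delete itself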

Thread::~Thread() {

  // Attached threads will remain in PRE_CALL_RUN, as will threads that don't actually
  // get started due to errors etc. Any active thread should at least reach post_run
  // before it is deleted (usually in post_run()).
  assert(_run_state == PRE_CALL_RUN ||
         _run_state == POST_RUN, "Active Thread deleted before post_run(): "
         "_run_state=%d", (int)_run_state);

  // Notify the barrier set that a thread is being destroyed. Note that a barrier
  // set might not be available if we encountered errors during bootstrapping.
  BarrierSet* const barrier_set = BarrierSet::barrier_set();
  if (barrier_set != nullptr) {
    barrier_set->on_thread_destroy(this);
  }

  // deallocate data structures
  delete resource_area();
  // Since the handle marks are using the handle area, we have to deallocate the root
  // handle mark before deallocating the thread's handle area.
  assert(last_handle_mark() != nullptr, "check we have an element");
  delete last_handle_mark();
  assert(last_handle_mark() == nullptr, "check we have reached the end");

  ParkEvent::Release(_ParkEvent);
  // Set to null as a termination indicator for has_terminated().
  Atomic::store(&_ParkEvent, (ParkEvent*)nullptr);

  delete handle_area();
  delete metadata_handles();

  // osthread() can be null, if creation of thread failed.
  if (osthread() != nullptr) os::free_thread(osthread());

  // Clear Thread::current if thread is deleting itself and it has not
  // already been done. This must be done before the memory is deallocated.
  // Needed to ensure JNI correctly detects non-attached threads.
  if (this == Thread::current_or_null()) {
    Thread::clear_thread_current();
  }

  CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
}

#ifdef ASSERT
// A JavaThread pointer is potentially dangling unless the thread is
// handshake-safe for the current thread, is not on a ThreadsList, we are at a
// safepoint, or the thread is protected via ThreadsSMRSupport.
void Thread::check_for_dangling_thread_pointer(Thread *thread) {
  assert(!thread->is_Java_thread() ||
         JavaThread::cast(thread)->is_handshake_safe_for(Thread::current()) ||
         !JavaThread::cast(thread)->on_thread_list() ||
         SafepointSynchronize::is_at_safepoint() ||
         ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread::cast(thread)),
         "possibility of dangling Thread pointer");
}
#endif

// Is the target JavaThread protected by the calling Thread or by some other
// mechanism?
//
bool Thread::is_JavaThread_protected(const JavaThread* target) {
  Thread* current_thread = Thread::current();

  // Do the simplest check first:
  if (SafepointSynchronize::is_at_safepoint()) {
    // The target is protected since JavaThreads cannot exit
    // while we're at a safepoint.
    return true;
  }

  // If the target hasn't been started yet then it is trivially
  // "protected". We assume the caller is the thread that will do
  // the starting.
  if (target->osthread() == nullptr || target->osthread()->get_state() <= INITIALIZED) {
    return true;
  }

  // Now make the simple checks based on who the caller is:
  if (current_thread == target || Threads_lock->owner() == current_thread) {
    // Target JavaThread is self or calling thread owns the Threads_lock.
    // Second check is the same as Threads_lock->owner_is_self(),
    // but we already have the current thread so check directly.
    return true;
  }

  // Check the ThreadsLists associated with the calling thread (if any)
  // to see if one of them protects the target JavaThread:
  if (is_JavaThread_protected_by_TLH(target)) {
    return true;
  }

  // Use this debug code with -XX:+UseNewCode to diagnose locations that
  // are missing a ThreadsListHandle or other protection mechanism:
  // guarantee(!UseNewCode, "current_thread=" INTPTR_FORMAT " is not protecting target="
  //           INTPTR_FORMAT, p2i(current_thread), p2i(target));

  // Note: Since 'target' isn't protected by a TLH, the call to
  // target->is_handshake_safe_for() may crash, but we have debug bits so
  // we'll be able to figure out what protection mechanism is missing.
  assert(target->is_handshake_safe_for(current_thread), "JavaThread=" INTPTR_FORMAT
         " is not protected and not handshake safe.", p2i(target));

  // The target JavaThread is not protected so it is not safe to query:
  return false;
}

// Is the target JavaThread protected by a ThreadsListHandle (TLH) associated
// with the calling Thread?
//
bool Thread::is_JavaThread_protected_by_TLH(const JavaThread* target) {
  Thread* current_thread = Thread::current();

  // Check the ThreadsLists associated with the calling thread (if any)
  // to see if one of them protects the target JavaThread:
  for (SafeThreadsListPtr* stlp = current_thread->_threads_list_ptr;
       stlp != nullptr; stlp = stlp->previous()) {
    if (stlp->list()->includes(target)) {
      // The target JavaThread is protected by this ThreadsList:
      return true;
    }
  }

  // The target JavaThread is not protected by a TLH so it is not safe to query:
  return false;
}
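
// A minimal sketch of how a caller typically establishes such protection:
//
//   ThreadsListHandle tlh;                 // snapshots the current ThreadsList
//   if (tlh.list()->includes(target)) {
//     // target cannot exit and be freed until tlh goes out of scope
//   }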

void Thread::set_priority(Thread* thread, ThreadPriority priority) {
  debug_only(check_for_dangling_thread_pointer(thread);)
  // Can return an error!
  (void)os::set_priority(thread, priority);
}


void Thread::start(Thread* thread) {
  // Start is different from resume in that its safety is guaranteed by context or
  // being called from a Java method synchronized on the Thread object.
  if (thread->is_Java_thread()) {
    // Initialize the thread state to RUNNABLE before starting this thread.
    // We cannot set it after the thread has started because we do not know
    // the exact thread state at that time. It could be in MONITOR_WAIT or
    // in SLEEPING or some other state.
    java_lang_Thread::set_thread_status(JavaThread::cast(thread)->threadObj(),
                                        JavaThreadStatus::RUNNABLE);
  }
  os::start_thread(thread);
}

// GC Support
bool Thread::claim_par_threads_do(uintx claim_token) {
  uintx token = _threads_do_token;
  if (token != claim_token) {
    uintx res = Atomic::cmpxchg(&_threads_do_token, token, claim_token);
    if (res == token) {
      return true;
    }
    guarantee(res == claim_token, "invariant");
  }
  return false;
}
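
// Sketch of the intended parallel pattern (hypothetical worker loop): each
// round uses a fresh claim_token, shared by all workers, and the one worker
// whose cmpxchg installs the token processes the thread; every other worker
// sees the token already set and skips it.
//
//   uintx token = ...;  // new token for this round
//   for each Thread* t in the threads list:
//     if (t->claim_par_threads_do(token)) {
//       process(t);     // exactly one worker gets here per thread
//     }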

void Thread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) {
  // Do oop for ThreadShadow
  f->do_oop((oop*)&_pending_exception);
  handle_area()->oops_do(f);
}

// If the current thread is a NamedThread, then remember, in the current scope,
// the given Thread in its _processed_thread field.
class RememberProcessedThread: public StackObj {
  NamedThread* _cur_thr;
public:
  RememberProcessedThread(Thread* thread) {
    Thread* self = Thread::current();
    if (self->is_Named_thread()) {
      _cur_thr = (NamedThread *)self;
      assert(_cur_thr->processed_thread() == nullptr, "nesting not supported");
      _cur_thr->set_processed_thread(thread);
    } else {
      _cur_thr = nullptr;
    }
  }

  ~RememberProcessedThread() {
    if (_cur_thr) {
      assert(_cur_thr->processed_thread() != nullptr, "nesting not supported");
      _cur_thr->set_processed_thread(nullptr);
    }
  }
};

void Thread::oops_do(OopClosure* f, NMethodClosure* cf) {
  // Record, in the current NamedThread (if any), which thread is being processed
  RememberProcessedThread rpt(this);
  oops_do_no_frames(f, cf);
  oops_do_frames(f, cf);
}

void Thread::metadata_handles_do(void f(Metadata*)) {
  // Only walk the Handles in Thread.
  if (metadata_handles() != nullptr) {
    for (int i = 0; i < metadata_handles()->length(); i++) {
      f(metadata_handles()->at(i));
    }
  }
}

void Thread::print_on(outputStream* st, bool print_extended_info) const {
  // get_priority assumes osthread initialized
  if (osthread() != nullptr) {
    int os_prio;
    if (os::get_native_priority(this, &os_prio) == OS_OK) {
      st->print("os_prio=%d ", os_prio);
    }

    st->print("cpu=%.2fms ",
              (double)os::thread_cpu_time(const_cast<Thread*>(this), true) / 1000000.0
              );
    st->print("elapsed=%.2fs ",
              (double)_statistical_info.getElapsedTime() / 1000.0
              );
    if (is_Java_thread() && (PrintExtendedThreadInfo || print_extended_info)) {
      size_t allocated_bytes = (size_t) const_cast<Thread*>(this)->cooked_allocated_bytes();
      st->print("allocated=" SIZE_FORMAT "%s ",
                byte_size_in_proper_unit(allocated_bytes),
                proper_unit_for_byte_size(allocated_bytes)
                );
      st->print("defined_classes=" INT64_FORMAT " ", _statistical_info.getDefineClassCount());
    }

    st->print("tid=" INTPTR_FORMAT " ", p2i(this));
    if (!is_Java_thread() || !JavaThread::cast(this)->is_vthread_mounted()) {
      osthread()->print_on(st);
    }
  }
  ThreadsSMRSupport::print_info_on(this, st);
  st->print(" ");
  debug_only(if (WizardMode) print_owned_locks_on(st);)
}

void Thread::print() const { print_on(tty); }

// Thread::print_on_error() is called by the fatal error handler. Don't take
// any lock or allocate memory.
void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
  assert(!(is_Compiler_thread() || is_Java_thread()), "Can't call name() here if it allocates");

  st->print("%s \"%s\"", type_name(), name());

  OSThread* os_thr = osthread();
  if (os_thr != nullptr) {
    st->fill_to(67);
    if (os_thr->get_state() != ZOMBIE) {
      // Use raw field members for stack base/size as this could be
      // called before a thread has run enough to initialize them.
      st->print(" [id=%d, stack(" PTR_FORMAT "," PTR_FORMAT ") (" PROPERFMT ")]",
                osthread()->thread_id(), p2i(_stack_base - _stack_size), p2i(_stack_base),
                PROPERFMTARGS(_stack_size));
    } else {
      st->print(" terminated");
    }
  } else {
    st->print(" unknown state (no osThread)");
  }
  ThreadsSMRSupport::print_info_on(this, st);
}

void Thread::print_value_on(outputStream* st) const {
  if (is_Named_thread()) {
    st->print(" \"%s\" ", name());
  }
  st->print(INTPTR_FORMAT, p2i(this));   // print address
}

#ifdef ASSERT
void Thread::print_owned_locks_on(outputStream* st) const {
  Mutex* cur = _owned_locks;
  if (cur == nullptr) {
    st->print(" (no locks) ");
  } else {
    st->print_cr(" Locks owned:");
    while (cur) {
      cur->print_on(st);
      cur = cur->next();
    }
  }
}

Thread* Thread::_starting_thread = nullptr;

bool Thread::is_starting_thread(const Thread* t) {
  assert(_starting_thread != nullptr, "invariant");
  return t == _starting_thread;
}
#endif // ASSERT

bool Thread::set_as_starting_thread(JavaThread* jt) {
  assert(jt != nullptr, "invariant");
  assert(_starting_thread == nullptr, "already initialized: "
         "_starting_thread=" INTPTR_FORMAT, p2i(_starting_thread));
  // NOTE: this must be called from Threads::create_vm().
  DEBUG_ONLY(_starting_thread = jt;)
  return os::create_main_thread(jt);
}

// Ad-hoc mutual exclusion primitives: SpinLock
//
// We employ SpinLocks _only for low-contention, fixed-length
// short-duration critical sections where we're concerned
// about native mutex_t or HotSpot Mutex:: latency.
//
// TODO-FIXME: ListLock should be of type SpinLock.
// We should make this a 1st-class type, integrated into the lock
// hierarchy as leaf-locks.  Critically, the SpinLock structure
// should have sufficient padding to avoid false-sharing and excessive
// cache-coherency traffic.
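//
// Usage sketch (assuming a lock word statically initialized to 0):
//
//   static volatile int ListLock = 0;
//   Thread::SpinAcquire(&ListLock, "ListLock");
//   // ... short, fixed-length critical section ...
//   Thread::SpinRelease(&ListLock);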


typedef volatile int SpinLockT;

void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
  if (Atomic::cmpxchg(adr, 0, 1) == 0) {
    return;   // normal fast-path return
  }

  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
  int ctr = 0;
  int Yields = 0;
  for (;;) {
    while (*adr != 0) {
      ++ctr;
      if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
        if (Yields > 5) {
          os::naked_short_sleep(1);
        } else {
          os::naked_yield();
          ++Yields;
        }
      } else {
        SpinPause();
      }
    }
    if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
  }
}

void Thread::SpinRelease(volatile int * adr) {
  assert(*adr != 0, "invariant");
  OrderAccess::fence();      // guarantee at least release consistency.
  // Roach-motel semantics.
  // It's safe if subsequent LDs and STs float "up" into the critical section,
  // but prior LDs and STs within the critical section can't be allowed
  // to reorder or float past the ST that releases the lock.
  // Loads and stores in the critical section - which appear in program
  // order before the store that releases the lock - must also appear
  // before the store that releases the lock in memory visibility order.
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into the lock-word which releases the lock, so fence
  // more than covers this on all platforms.
  *adr = 0;
}

const char* ProfileVMCallContext::name(PerfTraceTime* t) {
  return t->name();
}

int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0;

void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) {
  log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name());
  Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count);
}
638