1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "cds/cdsConfig.hpp" 27 #include "classfile/javaClasses.hpp" 28 #include "classfile/javaThreadStatus.hpp" 29 #include "gc/shared/barrierSet.hpp" 30 #include "jfr/jfrEvents.hpp" 31 #include "jvm.h" 32 #include "jvmtifiles/jvmtiEnv.hpp" 33 #include "logging/log.hpp" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/iterator.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "nmt/memTracker.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "runtime/atomic.hpp" 40 #include "runtime/handles.inline.hpp" 41 #include "runtime/javaThread.inline.hpp" 42 #include "runtime/nonJavaThread.hpp" 43 #include "runtime/orderAccess.hpp" 44 #include "runtime/osThread.hpp" 45 #include "runtime/safepoint.hpp" 46 #include "runtime/safepointMechanism.inline.hpp" 47 #include "runtime/thread.inline.hpp" 48 #include "runtime/threadSMR.inline.hpp" 49 #include "utilities/macros.hpp" 50 #include "utilities/spinYield.hpp" 51 #if INCLUDE_JFR 52 #include "jfr/jfr.hpp" 53 #endif 54 55 THREAD_LOCAL Thread* Thread::_thr_current = nullptr; 56 57 // ======= Thread ======== 58 // Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread, 59 // JavaThread 60 61 Thread::Thread(MemTag mem_tag) { 62 63 DEBUG_ONLY(_run_state = PRE_CALL_RUN;) 64 65 // stack and get_thread 66 set_stack_base(nullptr); 67 set_stack_size(0); 68 set_lgrp_id(-1); 69 DEBUG_ONLY(clear_suspendible_thread();) 70 DEBUG_ONLY(clear_indirectly_suspendible_thread();) 71 DEBUG_ONLY(clear_indirectly_safepoint_thread();) 72 73 // allocated data structures 74 set_osthread(nullptr); 75 set_resource_area(new (mem_tag) ResourceArea(mem_tag)); 76 DEBUG_ONLY(_current_resource_mark = nullptr;) 77 set_handle_area(new (mem_tag) HandleArea(mem_tag, nullptr)); 78 set_metadata_handles(new (mtClass) GrowableArray<Metadata*>(30, mtClass)); 79 set_last_handle_mark(nullptr); 80 81 // Initial value of zero ==> never claimed. 82 _threads_do_token = 0; 83 _threads_hazard_ptr = nullptr; 84 _threads_list_ptr = nullptr; 85 _nested_threads_hazard_ptr_cnt = 0; 86 _rcu_counter = 0; 87 88 // the handle mark links itself to last_handle_mark 89 new HandleMark(this); 90 91 // plain initialization 92 DEBUG_ONLY(_owned_locks = nullptr;) 93 NOT_PRODUCT(_skip_gcalot = false;) 94 _jvmti_env_iteration_count = 0; 95 set_allocated_bytes(0); 96 _current_pending_raw_monitor = nullptr; 97 _vm_error_callbacks = nullptr; 98 99 // thread-specific hashCode stream generator state - Marsaglia shift-xor form 100 // If we are dumping, keep ihashes constant. Note that during dumping we only 101 // ever run one java thread, and no other thread should generate ihashes either, 102 // so using a constant seed should work fine. 103 _hashStateX = CDSConfig::is_dumping_static_archive() ? 0x12345678 : os::random(); 104 _hashStateY = 842502087; 105 _hashStateZ = 0x8767; // (int)(3579807591LL & 0xffff) ; 106 _hashStateW = 273326509; 107 108 // Many of the following fields are effectively final - immutable 109 // Note that nascent threads can't use the Native Monitor-Mutex 110 // construct until the _MutexEvent is initialized ... 111 // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents 112 // we might instead use a stack of ParkEvents that we could provision on-demand. 113 // The stack would act as a cache to avoid calls to ParkEvent::Allocate() 114 // and ::Release() 115 _ParkEvent = ParkEvent::Allocate(this); 116 117 #ifdef CHECK_UNHANDLED_OOPS 118 if (CheckUnhandledOops) { 119 _unhandled_oops = new UnhandledOops(this); 120 } 121 #endif // CHECK_UNHANDLED_OOPS 122 123 // Notify the barrier set that a thread is being created. The initial 124 // thread is created before the barrier set is available. The call to 125 // BarrierSet::on_thread_create() for this thread is therefore deferred 126 // to BarrierSet::set_barrier_set(). 127 BarrierSet* const barrier_set = BarrierSet::barrier_set(); 128 if (barrier_set != nullptr) { 129 barrier_set->on_thread_create(this); 130 } else { 131 // Only the main thread should be created before the barrier set 132 // and that happens just before Thread::current is set. No other thread 133 // can attach as the VM is not created yet, so they can't execute this code. 134 // If the main thread creates other threads before the barrier set that is an error. 135 assert(Thread::current_or_null() == nullptr, "creating thread before barrier set"); 136 } 137 138 MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false)); 139 140 _profile_vm_locks = false; 141 _profile_vm_calls = false; 142 _profile_vm_ops = false; 143 _profile_rt_calls = false; 144 _profile_upcalls = false; 145 146 _all_bc_counter_value = 0; 147 _clinit_bc_counter_value = 0; 148 149 _current_rt_call_timer = nullptr; 150 } 151 152 #ifdef ASSERT 153 address Thread::stack_base() const { 154 // Note: can't report Thread::name() here as that can require a ResourceMark which we 155 // can't use because this gets called too early in the thread initialization. 156 assert(_stack_base != nullptr, "Stack base not yet set for thread id:%d (0 if not set)", 157 osthread() != nullptr ? osthread()->thread_id() : 0); 158 return _stack_base; 159 } 160 #endif 161 162 void Thread::initialize_tlab() { 163 if (UseTLAB) { 164 tlab().initialize(); 165 } 166 } 167 168 void Thread::retire_tlab(ThreadLocalAllocStats* stats) { 169 // Sampling and serviceability support 170 if (tlab().end() != nullptr) { 171 incr_allocated_bytes(tlab().used_bytes()); 172 heap_sampler().retire_tlab(tlab().top()); 173 } 174 175 // Retire the TLAB 176 tlab().retire(stats); 177 } 178 179 void Thread::fill_tlab(HeapWord* start, size_t pre_reserved, size_t new_size) { 180 // Thread allocation sampling support 181 heap_sampler().set_tlab_top_at_sample_start(start); 182 183 // Fill the TLAB 184 tlab().fill(start, start + pre_reserved, new_size); 185 } 186 187 void Thread::initialize_thread_current() { 188 assert(_thr_current == nullptr, "Thread::current already initialized"); 189 _thr_current = this; 190 assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized"); 191 ThreadLocalStorage::set_thread(this); 192 assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!"); 193 } 194 195 void Thread::clear_thread_current() { 196 assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!"); 197 _thr_current = nullptr; 198 ThreadLocalStorage::set_thread(nullptr); 199 } 200 201 void Thread::record_stack_base_and_size() { 202 // Note: at this point, Thread object is not yet initialized. Do not rely on 203 // any members being initialized. Do not rely on Thread::current() being set. 204 // If possible, refrain from doing anything which may crash or assert since 205 // quite probably those crash dumps will be useless. 206 address base; 207 size_t size; 208 os::current_stack_base_and_size(&base, &size); 209 set_stack_base(base); 210 set_stack_size(size); 211 212 // Set stack limits after thread is initialized. 213 if (is_Java_thread()) { 214 JavaThread::cast(this)->stack_overflow_state()->initialize(stack_base(), stack_end()); 215 } 216 } 217 218 void Thread::register_thread_stack_with_NMT() { 219 MemTracker::record_thread_stack(stack_end(), stack_size()); 220 } 221 222 void Thread::unregister_thread_stack_with_NMT() { 223 MemTracker::release_thread_stack(stack_end(), stack_size()); 224 } 225 226 void Thread::call_run() { 227 DEBUG_ONLY(_run_state = CALL_RUN;) 228 229 // At this point, Thread object should be fully initialized and 230 // Thread::current() should be set. 231 232 assert(Thread::current_or_null() != nullptr, "current thread is unset"); 233 assert(Thread::current_or_null() == this, "current thread is wrong"); 234 235 // Perform common initialization actions 236 237 MACOS_AARCH64_ONLY(this->init_wx()); 238 239 register_thread_stack_with_NMT(); 240 241 JFR_ONLY(Jfr::on_thread_start(this);) 242 243 log_debug(os, thread)("Thread %zu stack dimensions: " 244 PTR_FORMAT "-" PTR_FORMAT " (%zuk).", 245 os::current_thread_id(), p2i(stack_end()), 246 p2i(stack_base()), stack_size()/1024); 247 248 // Perform <ChildClass> initialization actions 249 DEBUG_ONLY(_run_state = PRE_RUN;) 250 this->pre_run(); 251 252 // Invoke <ChildClass>::run() 253 DEBUG_ONLY(_run_state = RUN;) 254 this->run(); 255 // Returned from <ChildClass>::run(). Thread finished. 256 257 // Perform common tear-down actions 258 259 assert(Thread::current_or_null() != nullptr, "current thread is unset"); 260 assert(Thread::current_or_null() == this, "current thread is wrong"); 261 262 // Perform <ChildClass> tear-down actions 263 DEBUG_ONLY(_run_state = POST_RUN;) 264 this->post_run(); 265 266 // Note: at this point the thread object may already have deleted itself, 267 // so from here on do not dereference *this*. Not all thread types currently 268 // delete themselves when they terminate. But no thread should ever be deleted 269 // asynchronously with respect to its termination - that is what _run_state can 270 // be used to check. 271 272 // Logically we should do this->unregister_thread_stack_with_NMT() here, but we 273 // had to move that into post_run() because of the `this` deletion issue. 274 275 assert(Thread::current_or_null() == nullptr, "current thread still present"); 276 } 277 278 Thread::~Thread() { 279 280 // Attached threads will remain in PRE_CALL_RUN, as will threads that don't actually 281 // get started due to errors etc. Any active thread should at least reach post_run 282 // before it is deleted (usually in post_run()). 283 assert(_run_state == PRE_CALL_RUN || 284 _run_state == POST_RUN, "Active Thread deleted before post_run(): " 285 "_run_state=%d", (int)_run_state); 286 287 // Notify the barrier set that a thread is being destroyed. Note that a barrier 288 // set might not be available if we encountered errors during bootstrapping. 289 BarrierSet* const barrier_set = BarrierSet::barrier_set(); 290 if (barrier_set != nullptr) { 291 barrier_set->on_thread_destroy(this); 292 } 293 294 // deallocate data structures 295 delete resource_area(); 296 // since the handle marks are using the handle area, we have to deallocated the root 297 // handle mark before deallocating the thread's handle area, 298 assert(last_handle_mark() != nullptr, "check we have an element"); 299 delete last_handle_mark(); 300 assert(last_handle_mark() == nullptr, "check we have reached the end"); 301 302 ParkEvent::Release(_ParkEvent); 303 // Set to null as a termination indicator for has_terminated(). 304 Atomic::store(&_ParkEvent, (ParkEvent*)nullptr); 305 306 delete handle_area(); 307 delete metadata_handles(); 308 309 // osthread() can be null, if creation of thread failed. 310 if (osthread() != nullptr) os::free_thread(osthread()); 311 312 // Clear Thread::current if thread is deleting itself and it has not 313 // already been done. This must be done before the memory is deallocated. 314 // Needed to ensure JNI correctly detects non-attached threads. 315 if (this == Thread::current_or_null()) { 316 Thread::clear_thread_current(); 317 } 318 319 CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();) 320 } 321 322 #ifdef ASSERT 323 // A JavaThread is considered dangling if it not handshake-safe with respect to 324 // the current thread, it is not on a ThreadsList, or not at safepoint. 325 void Thread::check_for_dangling_thread_pointer(Thread *thread) { 326 assert(!thread->is_Java_thread() || 327 JavaThread::cast(thread)->is_handshake_safe_for(Thread::current()) || 328 !JavaThread::cast(thread)->on_thread_list() || 329 SafepointSynchronize::is_at_safepoint() || 330 ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread::cast(thread)), 331 "possibility of dangling Thread pointer"); 332 } 333 #endif 334 335 // Is the target JavaThread protected by the calling Thread or by some other 336 // mechanism? 337 // 338 bool Thread::is_JavaThread_protected(const JavaThread* target) { 339 Thread* current_thread = Thread::current(); 340 341 // Do the simplest check first: 342 if (SafepointSynchronize::is_at_safepoint()) { 343 // The target is protected since JavaThreads cannot exit 344 // while we're at a safepoint. 345 return true; 346 } 347 348 // If the target hasn't been started yet then it is trivially 349 // "protected". We assume the caller is the thread that will do 350 // the starting. 351 if (target->osthread() == nullptr || target->osthread()->get_state() <= INITIALIZED) { 352 return true; 353 } 354 355 // Now make the simple checks based on who the caller is: 356 if (current_thread == target || Threads_lock->owner() == current_thread) { 357 // Target JavaThread is self or calling thread owns the Threads_lock. 358 // Second check is the same as Threads_lock->owner_is_self(), 359 // but we already have the current thread so check directly. 360 return true; 361 } 362 363 // Check the ThreadsLists associated with the calling thread (if any) 364 // to see if one of them protects the target JavaThread: 365 if (is_JavaThread_protected_by_TLH(target)) { 366 return true; 367 } 368 369 // Use this debug code with -XX:+UseNewCode to diagnose locations that 370 // are missing a ThreadsListHandle or other protection mechanism: 371 // guarantee(!UseNewCode, "current_thread=" INTPTR_FORMAT " is not protecting target=" 372 // INTPTR_FORMAT, p2i(current_thread), p2i(target)); 373 374 // Note: Since 'target' isn't protected by a TLH, the call to 375 // target->is_handshake_safe_for() may crash, but we have debug bits so 376 // we'll be able to figure out what protection mechanism is missing. 377 assert(target->is_handshake_safe_for(current_thread), "JavaThread=" INTPTR_FORMAT 378 " is not protected and not handshake safe.", p2i(target)); 379 380 // The target JavaThread is not protected so it is not safe to query: 381 return false; 382 } 383 384 // Is the target JavaThread protected by a ThreadsListHandle (TLH) associated 385 // with the calling Thread? 386 // 387 bool Thread::is_JavaThread_protected_by_TLH(const JavaThread* target) { 388 Thread* current_thread = Thread::current(); 389 390 // Check the ThreadsLists associated with the calling thread (if any) 391 // to see if one of them protects the target JavaThread: 392 for (SafeThreadsListPtr* stlp = current_thread->_threads_list_ptr; 393 stlp != nullptr; stlp = stlp->previous()) { 394 if (stlp->list()->includes(target)) { 395 // The target JavaThread is protected by this ThreadsList: 396 return true; 397 } 398 } 399 400 // The target JavaThread is not protected by a TLH so it is not safe to query: 401 return false; 402 } 403 404 void Thread::set_priority(Thread* thread, ThreadPriority priority) { 405 DEBUG_ONLY(check_for_dangling_thread_pointer(thread);) 406 // Can return an error! 407 (void)os::set_priority(thread, priority); 408 } 409 410 411 void Thread::start(Thread* thread) { 412 // Start is different from resume in that its safety is guaranteed by context or 413 // being called from a Java method synchronized on the Thread object. 414 if (thread->is_Java_thread()) { 415 // Initialize the thread state to RUNNABLE before starting this thread. 416 // Can not set it after the thread started because we do not know the 417 // exact thread state at that time. It could be in MONITOR_WAIT or 418 // in SLEEPING or some other state. 419 java_lang_Thread::set_thread_status(JavaThread::cast(thread)->threadObj(), 420 JavaThreadStatus::RUNNABLE); 421 } 422 os::start_thread(thread); 423 } 424 425 // GC Support 426 bool Thread::claim_par_threads_do(uintx claim_token) { 427 uintx token = _threads_do_token; 428 if (token != claim_token) { 429 uintx res = Atomic::cmpxchg(&_threads_do_token, token, claim_token); 430 if (res == token) { 431 return true; 432 } 433 guarantee(res == claim_token, "invariant"); 434 } 435 return false; 436 } 437 438 void Thread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) { 439 // Do oop for ThreadShadow 440 f->do_oop((oop*)&_pending_exception); 441 handle_area()->oops_do(f); 442 } 443 444 // If the caller is a NamedThread, then remember, in the current scope, 445 // the given JavaThread in its _processed_thread field. 446 class RememberProcessedThread: public StackObj { 447 NamedThread* _cur_thr; 448 public: 449 RememberProcessedThread(Thread* thread) { 450 Thread* self = Thread::current(); 451 if (self->is_Named_thread()) { 452 _cur_thr = (NamedThread *)self; 453 assert(_cur_thr->processed_thread() == nullptr, "nesting not supported"); 454 _cur_thr->set_processed_thread(thread); 455 } else { 456 _cur_thr = nullptr; 457 } 458 } 459 460 ~RememberProcessedThread() { 461 if (_cur_thr) { 462 assert(_cur_thr->processed_thread() != nullptr, "nesting not supported"); 463 _cur_thr->set_processed_thread(nullptr); 464 } 465 } 466 }; 467 468 void Thread::oops_do(OopClosure* f, NMethodClosure* cf) { 469 // Record JavaThread to GC thread 470 RememberProcessedThread rpt(this); 471 oops_do_no_frames(f, cf); 472 oops_do_frames(f, cf); 473 } 474 475 void Thread::metadata_handles_do(void f(Metadata*)) { 476 // Only walk the Handles in Thread. 477 if (metadata_handles() != nullptr) { 478 for (int i = 0; i< metadata_handles()->length(); i++) { 479 f(metadata_handles()->at(i)); 480 } 481 } 482 } 483 484 void Thread::print_on(outputStream* st, bool print_extended_info) const { 485 // get_priority assumes osthread initialized 486 if (osthread() != nullptr) { 487 int os_prio; 488 if (os::get_native_priority(this, &os_prio) == OS_OK) { 489 st->print("os_prio=%d ", os_prio); 490 } 491 492 st->print("cpu=%.2fms ", 493 (double)os::thread_cpu_time(const_cast<Thread*>(this), true) / 1000000.0 494 ); 495 st->print("elapsed=%.2fs ", 496 (double)_statistical_info.getElapsedTime() / 1000.0 497 ); 498 if (is_Java_thread() && (PrintExtendedThreadInfo || print_extended_info)) { 499 size_t allocated_bytes = (size_t) const_cast<Thread*>(this)->cooked_allocated_bytes(); 500 st->print("allocated=%zu%s ", 501 byte_size_in_proper_unit(allocated_bytes), 502 proper_unit_for_byte_size(allocated_bytes) 503 ); 504 st->print("defined_classes=" INT64_FORMAT " ", _statistical_info.getDefineClassCount()); 505 } 506 507 st->print("tid=" INTPTR_FORMAT " ", p2i(this)); 508 if (!is_Java_thread() || !JavaThread::cast(this)->is_vthread_mounted()) { 509 osthread()->print_on(st); 510 } 511 } 512 ThreadsSMRSupport::print_info_on(this, st); 513 st->print(" "); 514 DEBUG_ONLY(if (WizardMode) print_owned_locks_on(st);) 515 } 516 517 void Thread::print() const { print_on(tty); } 518 519 // Thread::print_on_error() is called by fatal error handler. Don't use 520 // any lock or allocate memory. 521 void Thread::print_on_error(outputStream* st, char* buf, int buflen) const { 522 assert(!(is_Compiler_thread() || is_Java_thread()), "Can't call name() here if it allocates"); 523 524 st->print("%s \"%s\"", type_name(), name()); 525 526 OSThread* os_thr = osthread(); 527 if (os_thr != nullptr) { 528 st->fill_to(67); 529 if (os_thr->get_state() != ZOMBIE) { 530 // Use raw field members for stack base/size as this could be 531 // called before a thread has run enough to initialize them. 532 st->print(" [id=%d, stack(" PTR_FORMAT "," PTR_FORMAT ") (" PROPERFMT ")]", 533 osthread()->thread_id(), p2i(_stack_base - _stack_size), p2i(_stack_base), 534 PROPERFMTARGS(_stack_size)); 535 } else { 536 st->print(" terminated"); 537 } 538 } else { 539 st->print(" unknown state (no osThread)"); 540 } 541 ThreadsSMRSupport::print_info_on(this, st); 542 } 543 544 void Thread::print_value_on(outputStream* st) const { 545 if (is_Named_thread()) { 546 st->print(" \"%s\" ", name()); 547 } 548 st->print(INTPTR_FORMAT, p2i(this)); // print address 549 } 550 551 #ifdef ASSERT 552 void Thread::print_owned_locks_on(outputStream* st) const { 553 Mutex* cur = _owned_locks; 554 if (cur == nullptr) { 555 st->print(" (no locks) "); 556 } else { 557 st->print_cr(" Locks owned:"); 558 while (cur) { 559 cur->print_on(st); 560 cur = cur->next(); 561 } 562 } 563 } 564 565 Thread* Thread::_starting_thread = nullptr; 566 567 bool Thread::is_starting_thread(const Thread* t) { 568 assert(_starting_thread != nullptr, "invariant"); 569 return t == _starting_thread; 570 } 571 #endif // ASSERT 572 573 bool Thread::set_as_starting_thread(JavaThread* jt) { 574 assert(jt != nullptr, "invariant"); 575 assert(_starting_thread == nullptr, "already initialized: " 576 "_starting_thread=" INTPTR_FORMAT, p2i(_starting_thread)); 577 // NOTE: this must be called from Threads::create_vm(). 578 DEBUG_ONLY(_starting_thread = jt;) 579 return os::create_main_thread(jt); 580 } 581 582 // Ad-hoc mutual exclusion primitive: spin lock 583 // 584 // We employ a spin lock _only for low-contention, fixed-length 585 // short-duration critical sections where we're concerned 586 // about native mutex_t or HotSpot Mutex:: latency. 587 588 void Thread::SpinAcquire(volatile int * adr) { 589 if (Atomic::cmpxchg(adr, 0, 1) == 0) { 590 return; // normal fast-path return 591 } 592 593 // Slow-path : We've encountered contention -- Spin/Yield/Block strategy. 594 int ctr = 0; 595 int Yields = 0; 596 for (;;) { 597 while (*adr != 0) { 598 ++ctr; 599 if ((ctr & 0xFFF) == 0 || !os::is_MP()) { 600 if (Yields > 5) { 601 os::naked_short_sleep(1); 602 } else { 603 os::naked_yield(); 604 ++Yields; 605 } 606 } else { 607 SpinPause(); 608 } 609 } 610 if (Atomic::cmpxchg(adr, 0, 1) == 0) return; 611 } 612 } 613 614 void Thread::SpinRelease(volatile int * adr) { 615 assert(*adr != 0, "invariant"); 616 OrderAccess::fence(); // guarantee at least release consistency. 617 // Roach-motel semantics. 618 // It's safe if subsequent LDs and STs float "up" into the critical section, 619 // but prior LDs and STs within the critical section can't be allowed 620 // to reorder or float past the ST that releases the lock. 621 // Loads and stores in the critical section - which appear in program 622 // order before the store that releases the lock - must also appear 623 // before the store that releases the lock in memory visibility order. 624 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 625 // the ST of 0 into the lock-word which releases the lock, so fence 626 // more than covers this on all platforms. 627 *adr = 0; 628 } 629 630 const char* ProfileVMCallContext::name(PerfTraceTime* t) { 631 return t->name(); 632 } 633 634 int ProfileVMCallContext::_perf_nested_runtime_calls_count = 0; 635 636 void ProfileVMCallContext::notify_nested_rt_call(PerfTraceTime* outer_timer, PerfTraceTime* inner_timer) { 637 log_debug(init)("Nested runtime call: inner=%s outer=%s", inner_timer->name(), outer_timer->name()); 638 Atomic::inc(&ProfileVMCallContext::_perf_nested_runtime_calls_count); 639 } 640