
src/hotspot/share/runtime/threadSMR.cpp

*** 1,7 ****
  /*
!  * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 588,600 ****
    return new_list;
  }
  
  void ThreadsList::dec_nested_handle_cnt() {
!   // The decrement only needs to be MO_ACQ_REL since the reference
!   // counter is volatile (and the hazard ptr is already NULL).
!   Atomic::dec(&_nested_handle_cnt);
  }
  
  int ThreadsList::find_index_of_JavaThread(JavaThread *target) {
    if (target == NULL) {
      return -1;
--- 588,603 ----
    return new_list;
  }
  
  void ThreadsList::dec_nested_handle_cnt() {
!   // The decrement needs to be MO_ACQ_REL. At the moment, the Atomic::dec
!   // backend on PPC does not yet conform to these requirements. Therefore
!   // the decrement is simulated with an Atomic::sub(1, &addr).
!   // Without this MO_ACQ_REL Atomic::dec simulation, the nested SMR mechanism
!   // is not generally safe to use.
!   Atomic::sub(1, &_nested_handle_cnt);
  }
  
  int ThreadsList::find_index_of_JavaThread(JavaThread *target) {
    if (target == NULL) {
      return -1;
--- 955,966 ----
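
For reference, a minimal standalone sketch of the memory ordering this hunk relies on, written with std::atomic instead of HotSpot's Atomic class. The names refcount and release_reference are illustrative only, not HotSpot identifiers:

    #include <atomic>
    #include <cstdint>

    std::atomic<intptr_t> refcount{0};

    // An acq_rel decrement: writes made by this thread before the
    // decrement are published to other threads, and this thread sees
    // writes published by earlier decrements of the same counter.
    void release_reference() {
      refcount.fetch_sub(1, std::memory_order_acq_rel);
    }

This is the ordering that Atomic::sub(1, &addr) is being trusted to provide on the PPC backend, and that a plain Atomic::dec did not yet guarantee there.
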
*** 621,633 ****
    }
    return NULL;
  }
  
  void ThreadsList::inc_nested_handle_cnt() {
!   // The increment needs to be MO_SEQ_CST so that the reference counter
!   // update is seen before the subsequent hazard ptr update.
!   Atomic::inc(&_nested_handle_cnt);
  }
  
  bool ThreadsList::includes(const JavaThread * const p) const {
    if (p == NULL) {
      return false;
--- 624,646 ----
    }
    return NULL;
  }
  
  void ThreadsList::inc_nested_handle_cnt() {
!   // The increment needs to be MO_SEQ_CST. At the moment, the Atomic::inc
!   // backend on PPC does not yet conform to these requirements. Therefore
!   // the increment is simulated with a load phi; cas phi + 1; loop.
!   // Without this MO_SEQ_CST Atomic::inc simulation, the nested SMR mechanism
!   // is not generally safe to use.
!   intx sample = OrderAccess::load_acquire(&_nested_handle_cnt);
!   for (;;) {
!     if (Atomic::cmpxchg(sample + 1, &_nested_handle_cnt, sample) == sample) {
!       return;
!     } else {
!       sample = OrderAccess::load_acquire(&_nested_handle_cnt);
!     }
!   }
  }
  
  bool ThreadsList::includes(const JavaThread * const p) const {
    if (p == NULL) {
      return false;
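
For reference, the same "load phi; cas phi + 1; loop" pattern as a minimal standalone sketch, written with std::atomic rather than HotSpot's Atomic/OrderAccess. The names counter and seq_cst_increment are illustrative only:

    #include <atomic>
    #include <cstdint>

    std::atomic<intptr_t> counter{0};

    void seq_cst_increment() {
      intptr_t sample = counter.load(std::memory_order_acquire);
      // Retry until no other thread updated the counter between the
      // load and the compare-and-swap. On failure, compare_exchange_weak
      // refreshes 'sample' with the current value (with acquire ordering),
      // mirroring the load_acquire re-sample in the patch.
      while (!counter.compare_exchange_weak(sample, sample + 1,
                                            std::memory_order_seq_cst,
                                            std::memory_order_acquire)) {
        // empty: 'sample' already holds the freshly observed value
      }
    }

The cmpxchg succeeds only while the counter still holds the sampled value, so the successful update is a single sequentially consistent read-modify-write, which keeps the reference counter update ordered before the subsequent hazard ptr update.
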
*** 897,907 ****
    // Threads_lock (which was grabbed before delete_lock) so that
    // threads_do() can be called. This means the system can't start a
    // safepoint which means this thread can't take too long to get to
    // a safepoint because of being blocked on delete_lock.
    //
!   MonitorLocker ml(ThreadsSMRSupport::delete_lock(), Monitor::_no_safepoint_check_flag);
    if (ThreadsSMRSupport::delete_notify()) {
      // Notify any exiting JavaThreads that are waiting in smr_delete()
      // that we've released a ThreadsList.
      ml.notify_all();
      log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list notified %s", os::current_thread_id(), log_str);
--- 910,920 ----
    // Threads_lock (which was grabbed before delete_lock) so that
    // threads_do() can be called. This means the system can't start a
    // safepoint which means this thread can't take too long to get to
    // a safepoint because of being blocked on delete_lock.
    //
!   MonitorLockerEx ml(ThreadsSMRSupport::delete_lock(), Monitor::_no_safepoint_check_flag);
    if (ThreadsSMRSupport::delete_notify()) {
      // Notify any exiting JavaThreads that are waiting in smr_delete()
      // that we've released a ThreadsList.
      ml.notify_all();
      log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::release_stable_list notified %s", os::current_thread_id(), log_str);
*** 942,953 ****
    while (true) {
      {
        // No safepoint check because this JavaThread is not on the
        // Threads list.
!       MutexLocker ml(Threads_lock, Mutex::_no_safepoint_check_flag);
!       // Cannot use a MonitorLocker helper here because we have
        // to drop the Threads_lock first if we wait.
        ThreadsSMRSupport::delete_lock()->lock_without_safepoint_check();
        // Set the delete_notify flag after we grab delete_lock
        // and before we scan hazard ptrs because we're doing
        // double-check locking in release_stable_list().
--- 955,966 ----
    while (true) {
      {
        // No safepoint check because this JavaThread is not on the
        // Threads list.
!       MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
!       // Cannot use a MonitorLockerEx helper here because we have
        // to drop the Threads_lock first if we wait.
        ThreadsSMRSupport::delete_lock()->lock_without_safepoint_check();
        // Set the delete_notify flag after we grab delete_lock
        // and before we scan hazard ptrs because we're doing
        // double-check locking in release_stable_list().
*** 983,993 ****
        }
      }
  
      // Wait for a release_stable_list() call before we check again. No
      // safepoint check, no timeout, and not as suspend equivalent flag
      // because this JavaThread is not on the Threads list.
!     ThreadsSMRSupport::delete_lock()->wait_without_safepoint_check();
      if (EnableThreadSMRStatistics) {
        _delete_lock_wait_cnt--;
      }
      ThreadsSMRSupport::clear_delete_notify();
--- 996,1007 ----
        }
      }
  
      // Wait for a release_stable_list() call before we check again. No
      // safepoint check, no timeout, and not as suspend equivalent flag
      // because this JavaThread is not on the Threads list.
!     ThreadsSMRSupport::delete_lock()->wait(Mutex::_no_safepoint_check_flag, 0,
!                                            !Mutex::_as_suspend_equivalent_flag);
      if (EnableThreadSMRStatistics) {
        _delete_lock_wait_cnt--;
      }
      ThreadsSMRSupport::clear_delete_notify();
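
For reference, the delete_lock handshake that the two hunks above take part in, as a minimal standalone sketch with std::mutex and std::condition_variable. All names are illustrative stand-ins for the HotSpot originals, the hazard pointer scan is reduced to a placeholder, and the Threads_lock interplay is elided:

    #include <condition_variable>
    #include <mutex>

    std::mutex delete_lock_;
    std::condition_variable delete_cv_;
    bool delete_notify_ = false;

    // Placeholder for the hazard pointer scan done under the lock.
    bool is_still_referenced() { return false; }

    // smr_delete() side: set the notify flag after grabbing the lock and
    // before the scan (the double-check), then wait for the release side.
    void wait_until_unreferenced() {
      std::unique_lock<std::mutex> ml(delete_lock_);
      for (;;) {
        delete_notify_ = true;
        if (!is_still_referenced()) {
          delete_notify_ = false;
          return;
        }
        delete_cv_.wait(ml);   // no timeout, per the comment above
        delete_notify_ = false;
      }
    }

    // release_stable_list() side: only notify when a deleter asked for it.
    void notify_released() {
      std::lock_guard<std::mutex> ml(delete_lock_);
      if (delete_notify_) {
        delete_cv_.notify_all();
      }
    }
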
*** 1075,1085 ****
    // freed concurrently. However, grabbing the Threads_lock during
    // error reporting can be equally dangerous since this thread might
    // block during error reporting or a nested error could leave the
    // Threads_lock held. The classic no win scenario.
    //
!   MutexLocker ml((Threads_lock->owned_by_self() || VMError::is_error_reported()) ? NULL : Threads_lock);
  
    st->print_cr("Threads class SMR info:");
    st->print_cr("_java_thread_list=" INTPTR_FORMAT ", length=%u, "
                 "elements={", p2i(_java_thread_list),
                 _java_thread_list->length());
--- 1089,1099 ----
    // freed concurrently. However, grabbing the Threads_lock during
    // error reporting can be equally dangerous since this thread might
    // block during error reporting or a nested error could leave the
    // Threads_lock held. The classic no win scenario.
    //
!   MutexLockerEx ml((Threads_lock->owned_by_self() || VMError::is_error_reported()) ? NULL : Threads_lock);
  
    st->print_cr("Threads class SMR info:");
    st->print_cr("_java_thread_list=" INTPTR_FORMAT ", length=%u, "
                 "elements={", p2i(_java_thread_list),
                 _java_thread_list->length());
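
For reference, the `... ? NULL : Threads_lock` expression works because MutexLockerEx treats a NULL mutex as "do not lock". A minimal standalone sketch of that guard pattern with std::mutex (ConditionalLocker, threads_lock, and print_info are illustrative names, not HotSpot identifiers):

    #include <mutex>

    // Locks only when given a non-null mutex; a null mutex means
    // "proceed without locking", as when the lock is already owned
    // by this thread or an error is being reported.
    class ConditionalLocker {
      std::mutex* _m;
     public:
      explicit ConditionalLocker(std::mutex* m) : _m(m) {
        if (_m != nullptr) _m->lock();
      }
      ~ConditionalLocker() { if (_m != nullptr) _m->unlock(); }
    };

    std::mutex threads_lock;  // illustrative stand-in for Threads_lock

    void print_info(bool owned_by_self, bool error_reported) {
      ConditionalLocker ml((owned_by_self || error_reported) ? nullptr
                                                             : &threads_lock);
      // ... print while (conditionally) holding the lock ...
    }
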