< prev index next >

src/hotspot/share/code/nmethod.cpp

Print this page

// Attempt a monotone state transition to new_state_int. Returns true and
// stores the new state only if it is strictly greater than the current
// _state; returns false (leaving _state unchanged) otherwise, so states
// can never move backwards. Caller must hold NMethodState_lock (asserted
// below); the store itself is atomic so racing lock-free readers of
// _state observe a whole value.
1953 bool nmethod::try_transition(signed char new_state_int) {
1954   signed char new_state = new_state_int;
1955   assert_lock_strong(NMethodState_lock);
1956   signed char old_state = _state;
1957   if (old_state >= new_state) {
1958     // Ensure monotonicity of transitions.
1959     return false;
1960   }
1961   Atomic::store(&_state, new_state);
1962   return true;
1963 }
1964 
// Remove this OSR nmethod from its holder klass' list of OSR nmethods.
// Only valid for OSR nmethods (asserted via _entry_bci below).
1965 void nmethod::invalidate_osr_method() {
1966   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1967   // Remove from list of active nmethods
     // method() can be null here — presumably when the Method has already
     // been cleared/unloaded; guard before touching the holder. TODO confirm.
1968   if (method() != nullptr) {
1969     method()->method_holder()->remove_osr_nmethod(this);
1970   }
1971 }
1972 
// Record the make_not_entrant transition for the given reason in all
// relevant logs: the LogCompilation XML log (xtty), unified logging via
// CompileTask::print_ul, and tty when PrintCompilation is enabled.
1973 void nmethod::log_state_change(const char* reason) const {
1974   assert(reason != nullptr, "Must provide a reason");
1975 
1976   if (LogCompilation) {
1977     if (xtty != nullptr) {
1978       ttyLocker ttyl;  // keep the following output all in one block
1979       xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
1980                        os::current_thread_id(), reason);
1981       log_identity(xtty);
1982       xtty->stamp();
1983       xtty->end_elem();
1984     }
1985   }
1986 
     // Build the message once in a 256-byte resource-area buffer and reuse
     // it for both UL and PrintCompilation output; freeze() returns the
     // accumulated C string.
1987   ResourceMark rm;
1988   stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
1989   ss.print("made not entrant: %s", reason);
1990 
1991   CompileTask::print_ul(this, ss.freeze());
1992   if (PrintCompilation) {
1993     print_on_with_msg(tty, ss.freeze());
1994   }
1995 }
1996 
// Clear the containing Method's reference(s) to this nmethod via
// Method::unlink_code. No-op when method() is null.
1997 void nmethod::unlink_from_method() {
1998   if (method() != nullptr) {
1999     method()->unlink_code(this);
2000   }
2001 }
2002 
2003 // Invalidate code
     // Transition this nmethod to not_entrant so no new activations enter
     // it. Returns true if this call performed the transition; false if the
     // nmethod is unloading (GC will handle it) or is already not_entrant.
2004 bool nmethod::make_not_entrant(const char* reason) {
2005   assert(reason != nullptr, "Must provide a reason");
2006 
2007   // This can be called while the system is already at a safepoint which is ok
2008   NoSafepointVerifier nsv;
2009 
2010   if (is_unloading()) {
2011     // If the nmethod is unloading, then it is already not entrant through
2012     // the nmethod entry barriers. No need to do anything; GC will unload it.
2013     return false;
2014   }
2015 
2016   if (Atomic::load(&_state) == not_entrant) {
2017     // Avoid taking the lock if already in required state.
2018     // This is safe from races because the state is an end-state,
2019     // which the nmethod cannot back out of once entered.
2020     // No need for fencing either.
2021     return false;
2022   }
2023 
2024   {
2025     // Enter critical section.  Does not block for safepoint.
     // The lock may already be held by this thread, hence the conditional
     // locker (re-entrant callers own NMethodState_lock).
2026     ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

     // NOTE(review): revision lines 2027-2043 are elided in this diff view;
     // the code between the locker above and the closing brace below is not
     // visible here.
2044     }
2045 
2046     if (update_recompile_counts()) {
2047       // Mark the method as decompiled.
2048       inc_decompile_count();
2049     }
2050 
2051     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2052     if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2053       // If nmethod entry barriers are not supported, we won't mark
2054       // nmethods as on-stack when they become on-stack. So we
2055       // degrade to a less accurate flushing strategy, for now.
2056       mark_as_maybe_on_stack();
2057     }
2058 
2059     // Change state
     // Expected to succeed under NMethodState_lock — presumably the elided
     // code above re-checks the state under the lock; TODO confirm.
2060     bool success = try_transition(not_entrant);
2061     assert(success, "Transition can't fail");
2062 
2063     // Log the transition once
2064     log_state_change(reason);
2065 
2066     // Remove nmethod from method.
2067     unlink_from_method();
2068 
2069   } // leave critical region under NMethodState_lock
2070 
2071 #if INCLUDE_JVMCI
2072   // Invalidate can't occur while holding the NMethodState_lock
2073   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2074   if (nmethod_data != nullptr) {
2075     nmethod_data->invalidate_nmethod_mirror(this);
2076   }
2077 #endif
2078 
2079 #ifdef ASSERT
2080   if (is_osr_method() && method() != nullptr) {
2081     // Make sure osr nmethod is invalidated, i.e. not on the list
2082     bool found = method()->method_holder()->remove_osr_nmethod(this);
2083     assert(!found, "osr nmethod should have been invalidated");
2084   }
2085 #endif
2086 
2087   return true;
2088 }
2089 
2090 // For concurrent GCs, there must be a handshake between unlink and flush
     // Detach this nmethod from everything that references it (dependencies,
     // Method, OSR list, JVMCI mirror), post the unload event, and hand the
     // nmethod to ClassUnloadingContext for deferred flushing. Idempotent:
     // returns immediately if already unlinked.
2091 void nmethod::unlink() {
2092   if (is_unlinked()) {
2093     // Already unlinked.
2094     return;
2095   }
2096 
2097   flush_dependencies();
2098 
2099   // unlink_from_method will take the NMethodState_lock.
2100   // In this case we don't strictly need it when unlinking nmethods from
2101   // the Method, because it is only concurrently unlinked by
2102   // the entry barrier, which acquires the per nmethod lock.
2103   unlink_from_method();
2104 
2105   if (is_osr_method()) {
2106     invalidate_osr_method();
2107   }
2108 
2109 #if INCLUDE_JVMCI
2110   // Clear the link between this nmethod and a HotSpotNmethod mirror
2111   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2112   if (nmethod_data != nullptr) {
2113     nmethod_data->invalidate_nmethod_mirror(this);


2114   }
2115 #endif
2116 
2117   // Post before flushing as jmethodID is being used
2118   post_compiled_method_unload();
2119 
2120   // Register for flushing when it is safe. For concurrent class unloading,
2121   // that would be after the unloading handshake, and for STW class unloading
2122   // that would be when getting back to the VM thread.
2123   ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2124 }
2125 
2126 void nmethod::purge(bool unregister_nmethod) {
2127 
2128   MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2129 
2130   // completely deallocate this method
2131   Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
2132 
2133   LogTarget(Debug, codecache) lt;

// Attempt a monotone state transition to new_state_int. Returns true and
// stores the new state only if it is strictly greater than the current
// _state; returns false (leaving _state unchanged) otherwise, so states
// can never move backwards. Caller must hold NMethodState_lock (asserted
// below); the store itself is atomic so racing lock-free readers of
// _state observe a whole value.
1953 bool nmethod::try_transition(signed char new_state_int) {
1954   signed char new_state = new_state_int;
1955   assert_lock_strong(NMethodState_lock);
1956   signed char old_state = _state;
1957   if (old_state >= new_state) {
1958     // Ensure monotonicity of transitions.
1959     return false;
1960   }
1961   Atomic::store(&_state, new_state);
1962   return true;
1963 }
1964 
// Remove this OSR nmethod from its holder klass' list of OSR nmethods.
// Only valid for OSR nmethods (asserted via _entry_bci below).
1965 void nmethod::invalidate_osr_method() {
1966   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1967   // Remove from list of active nmethods
     // method() can be null here — presumably when the Method has already
     // been cleared/unloaded; guard before touching the holder. TODO confirm.
1968   if (method() != nullptr) {
1969     method()->method_holder()->remove_osr_nmethod(this);
1970   }
1971 }
1972 
// Record the make_not_entrant transition in all relevant logs: the
// LogCompilation XML log (xtty), unified logging via CompileTask::print_ul,
// and tty when PrintCompilation is enabled. The human-readable reason
// string is derived from the InvalidationReason enum.
1973 void nmethod::log_state_change(InvalidationReason invalidation_reason) const {


1974   if (LogCompilation) {
1975     if (xtty != nullptr) {
1976       ttyLocker ttyl;  // keep the following output all in one block
1977       xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
1978                        os::current_thread_id(), invalidation_reason_to_string(invalidation_reason));
1979       log_identity(xtty);
1980       xtty->stamp();
1981       xtty->end_elem();
1982     }
1983   }
1984 
     // Build the message once in a 256-byte resource-area buffer and reuse
     // it for both UL and PrintCompilation output; freeze() returns the
     // accumulated C string.
1985   ResourceMark rm;
1986   stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
1987   ss.print("made not entrant: %s", invalidation_reason_to_string(invalidation_reason));
1988 
1989   CompileTask::print_ul(this, ss.freeze());
1990   if (PrintCompilation) {
1991     print_on_with_msg(tty, ss.freeze());
1992   }
1993 }
1994 
// Clear the containing Method's reference(s) to this nmethod via
// Method::unlink_code. No-op when method() is null.
1995 void nmethod::unlink_from_method() {
1996   if (method() != nullptr) {
1997     method()->unlink_code(this);
1998   }
1999 }
2000 
2001 // Invalidate code
     // Transition this nmethod to not_entrant so no new activations enter
     // it. Returns true if this call performed the transition; false if the
     // nmethod is unloading (GC will handle it) or is already not_entrant.
     // The InvalidationReason is recorded in the logs and forwarded to the
     // JVMCI mirror.
2002 bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) {


2003   // This can be called while the system is already at a safepoint which is ok
2004   NoSafepointVerifier nsv;
2005 
2006   if (is_unloading()) {
2007     // If the nmethod is unloading, then it is already not entrant through
2008     // the nmethod entry barriers. No need to do anything; GC will unload it.
2009     return false;
2010   }
2011 
2012   if (Atomic::load(&_state) == not_entrant) {
2013     // Avoid taking the lock if already in required state.
2014     // This is safe from races because the state is an end-state,
2015     // which the nmethod cannot back out of once entered.
2016     // No need for fencing either.
2017     return false;
2018   }
2019 
2020   {
2021     // Enter critical section.  Does not block for safepoint.
     // The lock may already be held by this thread, hence the conditional
     // locker (re-entrant callers own NMethodState_lock).
2022     ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

     // NOTE(review): revision lines 2023-2039 are elided in this diff view;
     // the code between the locker above and the closing brace below is not
     // visible here.
2040     }
2041 
2042     if (update_recompile_counts()) {
2043       // Mark the method as decompiled.
2044       inc_decompile_count();
2045     }
2046 
2047     BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
2048     if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
2049       // If nmethod entry barriers are not supported, we won't mark
2050       // nmethods as on-stack when they become on-stack. So we
2051       // degrade to a less accurate flushing strategy, for now.
2052       mark_as_maybe_on_stack();
2053     }
2054 
2055     // Change state
     // Expected to succeed under NMethodState_lock — presumably the elided
     // code above re-checks the state under the lock; TODO confirm.
2056     bool success = try_transition(not_entrant);
2057     assert(success, "Transition can't fail");
2058 
2059     // Log the transition once
2060     log_state_change(invalidation_reason);
2061 
2062     // Remove nmethod from method.
2063     unlink_from_method();
2064 
2065   } // leave critical region under NMethodState_lock
2066 
2067 #if INCLUDE_JVMCI
2068   // Invalidate can't occur while holding the NMethodState_lock
2069   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2070   if (nmethod_data != nullptr) {
2071     nmethod_data->invalidate_nmethod_mirror(this, invalidation_reason);
2072   }
2073 #endif
2074 
2075 #ifdef ASSERT
2076   if (is_osr_method() && method() != nullptr) {
2077     // Make sure osr nmethod is invalidated, i.e. not on the list
2078     bool found = method()->method_holder()->remove_osr_nmethod(this);
2079     assert(!found, "osr nmethod should have been invalidated");
2080   }
2081 #endif
2082 
2083   return true;
2084 }
2085 
2086 // For concurrent GCs, there must be a handshake between unlink and flush
     // Detach this nmethod from everything that references it (dependencies,
     // Method, OSR list, JVMCI mirror), post the unload event, and hand the
     // nmethod to ClassUnloadingContext for deferred flushing. Idempotent:
     // returns immediately if already unlinked.
2087 void nmethod::unlink() {
2088   if (is_unlinked()) {
2089     // Already unlinked.
2090     return;
2091   }
2092 
2093   flush_dependencies();
2094 
2095   // unlink_from_method will take the NMethodState_lock.
2096   // In this case we don't strictly need it when unlinking nmethods from
2097   // the Method, because it is only concurrently unlinked by
2098   // the entry barrier, which acquires the per nmethod lock.
2099   unlink_from_method();
2100 
2101   if (is_osr_method()) {
2102     invalidate_osr_method();
2103   }
2104 
2105 #if INCLUDE_JVMCI
2106   // Clear the link between this nmethod and a HotSpotNmethod mirror
2107   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
2108   if (nmethod_data != nullptr) {
     // Tell the mirror why it is invalidated: is_cold() presumably means
     // the nmethod is being flushed for inactivity — TODO confirm.
2109     nmethod_data->invalidate_nmethod_mirror(this, is_cold() ?
2110             nmethod::InvalidationReason::UNLOADING_COLD :
2111             nmethod::InvalidationReason::UNLOADING);
2112   }
2113 #endif
2114 
2115   // Post before flushing as jmethodID is being used
2116   post_compiled_method_unload();
2117 
2118   // Register for flushing when it is safe. For concurrent class unloading,
2119   // that would be after the unloading handshake, and for STW class unloading
2120   // that would be when getting back to the VM thread.
2121   ClassUnloadingContext::context()->register_unlinked_nmethod(this);
2122 }
2123 
2124 void nmethod::purge(bool unregister_nmethod) {
2125 
2126   MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2127 
2128   // completely deallocate this method
2129   Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
2130 
2131   LogTarget(Debug, codecache) lt;
< prev index next >