/*
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = nullptr;
nmethodBucket* volatile DependencyContext::_purge_list = nullptr;
volatile uint64_t DependencyContext::_cleaning_epoch = 0;
uint64_t DependencyContext::_cleaning_epoch_monotonic = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.
//
void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    if (nm->is_marked_for_deoptimization()) {
      deopt_scope->dependent(nm);
    } else if (nm->check_dependency_on(changes)) {
      LogTarget(Info, dependencies) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        ls.print_cr("Marked for deoptimization");
        changes.print_on(&ls);
        nm->print_on(&ls);
        nm->print_dependencies_on(&ls);
      }
      deopt_scope->mark(nm, !changes.is_call_site_change());
    }
  }
}

//
// Add an nmethod to the dependency context.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, nullptr);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}
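
// The insertion above is a lock-free prepend: allocate the bucket, point it at the
// current head, and publish it with a single CAS, retrying if another thread won the
// race. The same idiom is used below when pushing buckets onto _purge_list. A minimal,
// self-contained sketch of the idiom (hypothetical Node type and push() helper, for
// illustration only, not part of this file):
//
//   struct Node { Node* volatile _next; };
//   Node* volatile _head = nullptr;
//
//   void push(Node* n) {
//     for (;;) {
//       Node* head = Atomic::load(&_head);
//       n->_next = head;                               // link before publishing
//       if (Atomic::cmpxchg(&_head, head, n) == head) {
//         break;                                       // published atomically
//       }
//       // CAS failed: another thread changed _head; retry against the new head.
//     }
//   }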

void DependencyContext::release(nmethodBucket* b) {
  if (delete_on_release()) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Reclaim all unused buckets.
//
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != nullptr;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = nullptr;
}

//
// Cleanup a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries on the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
  nmethodBucket* b = dependencies_not_unloading();
  while (b != nullptr) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

nmethodBucket* DependencyContext::release_and_get_next_not_unloading(nmethodBucket* b) {
  nmethodBucket* next = b->next_not_unloading();
  release(b);
  return next;
}

//
// Invalidate all dependencies in the context
void DependencyContext::remove_all_dependents() {
  // Assume that the dependency is not deleted immediately but moved into the
  // purge list when calling this.
  assert(!delete_on_release(), "should not delete on release");

  nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr);
  if (first == nullptr) {
    return;
  }

  nmethodBucket* cur = first;
  nmethodBucket* last = cur;
  jlong count = 0;
  for (; cur != nullptr; cur = cur->next()) {
    assert(cur->get_nmethod()->is_unloading(), "must be");
    last = cur;
    count++;
  }

  // Add the whole list to the purge list at once.
  nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list);
  for (;;) {
    last->set_purge_list_next(old_purge_list_head);
    nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first);
    if (old_purge_list_head == next_purge_list_head) {
      break;
    }
    old_purge_list_head = next_purge_list_head;
  }

  if (UsePerfData) {
    _perf_total_buckets_stale_count->inc(count);
    _perf_total_buckets_stale_acc_count->inc(count);
  }

  set_dependencies(nullptr);
}

void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope) {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(nullptr);
  while (b != nullptr) {
    nmethod* nm = b->get_nmethod();
    // Also count already (concurrently) marked nmethods to make sure
    // deoptimization is triggered before execution in this thread continues.
    deopt_scope->mark(nm);
    b = release_and_get_next_not_unloading(b);
  }
}

#ifndef PRODUCT
bool DependencyContext::is_empty() {
  return dependencies() == nullptr;
}

void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] { ", idx++);
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies_on(tty);
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return true;
    }
  }
  return false;
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

bool DependencyContext::delete_on_release() {
  return Atomic::load(&_cleaning_epoch) == 0;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == nullptr || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling of cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}
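
// Taken together, the cleaning functions are intended to be driven by the GC roughly
// as outlined below. This is an illustrative sketch only; the exact call sites and
// ordering depend on the collector, and the caller shown here is hypothetical.
//
//   DependencyContext::cleaning_start();            // at a safepoint: defer bucket deletion
//
//   // concurrently, for each context that may reference is_unloading nmethods:
//   context.clean_unloading_dependents();           // moves dead buckets to _purge_list
//
//   DependencyContext::purge_dependency_contexts(); // once unlinked buckets are unreachable
//   DependencyContext::cleaning_end();              // releases delete immediately again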

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == nullptr || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}
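
// For illustration only (hypothetical caller, not part of this file): code that wants
// the "as-if already cleaned" view of a context iterates with the *_not_unloading
// accessors, which unlink dead buckets onto the purge list as a side effect:
//
//   for (nmethodBucket* b = context.dependencies_not_unloading();
//        b != nullptr;
//        b = b->next_not_unloading()) {
//     nmethod* nm = b->get_nmethod();   // never an is_unloading nmethod here
//     // ... use nm ...
//   }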