/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count   = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count       = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count   = nullptr;
nmethodBucket* volatile DependencyContext::_purge_list                = nullptr;
volatile uint64_t DependencyContext::_cleaning_epoch                  = 0;
uint64_t  DependencyContext::_cleaning_epoch_monotonic                = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.
//
void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes, InstanceKlass* context) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    {
      LogStreamHandle(Trace, dependencies) log;
      if (log.is_enabled()) {
        log.print("Processing ");
        if (context != nullptr) {
          log.print(" ctx=");
          context->name()->print_value_on(&log);
          log.print(" ");
        }
        nm->print_value_on(&log);
      }
    }
    if (nm->is_marked_for_deoptimization()) {
      deopt_scope->dependent(nm);
    } else if (nm->check_dependency_on(changes)) {
      LogTarget(Info, dependencies) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(&lt);
        ls.print_cr("Marked for deoptimization");
        changes.print_on(&ls);
        nm->print_on(&ls);
        nm->print_dependencies_on(&ls);
      }
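      // Mark nm for deoptimization. The second argument distinguishes call site
      // target changes, which are not counted against the method's recompile counters.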
      deopt_scope->mark(nm, !changes.is_call_site_change());
    }
  }
}

//
// Add an nmethod to the dependency context.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, nullptr);
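  // Lock-free push of the new bucket onto the list head. The CAS below can fail
  // if a concurrent GC thread unlinks an is_unloading entry from the head, so
  // retry until the head is swapped successfully.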
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
  if (delete_on_release()) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
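    // Instead, push the bucket onto the global purge list with a CAS loop;
    // purge_dependency_contexts() deletes it later, once it is safe to do so.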
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Reclaim all unused buckets.
//
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != nullptr;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = nullptr;
}

//
// Clean up a dependency context by unlinking all dependents corresponding to
// is_unloading nmethods and placing them on a purge list, which will be deleted
// later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
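  // The walk itself performs the cleanup: dependencies_not_unloading() and
  // next_not_unloading() unlink entries whose nmethod is_unloading() and pass
  // them to release(), which puts them on the purge list.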
  nmethodBucket* b = dependencies_not_unloading();
  while (b != nullptr) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
void DependencyContext::remove_all_dependents() {
  // Assume that the dependency is not deleted immediately but moved into the
  // purge list when calling this.
  assert(!delete_on_release(), "should not delete on release");

  nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr);
  if (first == nullptr) {
    return;
  }

  nmethodBucket* cur = first;
  nmethodBucket* last = cur;
  jlong count = 0;
  for (; cur != nullptr; cur = cur->next()) {
    assert(cur->get_nmethod()->is_unloading(), "must be");
    last = cur;
    count++;
  }

  // Add the whole list to the purge list at once.
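  // 'last' is the tail of this context's list, so linking it to the current
  // purge list head splices the whole chain [first, last] in a single CAS.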
  nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list);
  for (;;) {
    last->set_purge_list_next(old_purge_list_head);
    nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first);
    if (old_purge_list_head == next_purge_list_head) {
      break;
    }
    old_purge_list_head = next_purge_list_head;
  }

  if (UsePerfData) {
    _perf_total_buckets_stale_count->inc(count);
    _perf_total_buckets_stale_acc_count->inc(count);
  }

  set_dependencies(nullptr);
}

#ifndef PRODUCT
bool DependencyContext::is_empty() {
  return dependencies() == nullptr;
}

void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] { ", idx++);
    if (!verbose) {
      nm->print_on_with_msg(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies_on(tty);
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return true;
    }
  }
  return false;
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
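  // Try to advance this context's last-cleanup epoch to the current cleaning
  // epoch; only the thread whose CAS succeeds claims the cleanup.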
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

bool DependencyContext::delete_on_release() {
  return Atomic::load(&_cleaning_epoch) == 0;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == nullptr || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
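    // Attempt to unlink the unloading head. If the CAS fails, the head changed
    // concurrently (either a new dependent was pushed or another thread already
    // unlinked this one); retry with the new head.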
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
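// Setting a non-zero _cleaning_epoch makes delete_on_release() return false,
// which is what routes released nmethodBuckets to the purge list.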
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == nullptr || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
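    // Attempt to unlink the unloading entry. Inserts only happen at the list
    // head, so a failed CAS means another thread already unlinked it; retry.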
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}