/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count   = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count       = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count   = nullptr;
nmethodBucket* volatile DependencyContext::_purge_list                = nullptr;
volatile uint64_t DependencyContext::_cleaning_epoch                  = 0;
uint64_t  DependencyContext::_cleaning_epoch_monotonic                = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.
//
void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    if (nm->is_marked_for_deoptimization()) {
      deopt_scope->dependent(nm);
    } else if (nm->check_dependency_on(changes)) {
      LogTarget(Info, dependencies) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(&lt);
        ls.print_cr("Marked for deoptimization");
        changes.print_on(&ls);
        nm->print_on(&ls);
        nm->print_dependencies_on(&ls);
      }
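      // The second argument to mark() controls whether recompile counts are
      // incremented; call site changes are presumably exempted so that frequent
      // CallSite target updates do not push methods toward their recompilation
      // limits.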
      deopt_scope->mark(nm, !changes.is_call_site_change());
    }
  }
}

//
// Add an nmethod to the dependency context.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  assert(nm->is_not_installed(), "Precondition: new nmethod");

  // This method tries to add a never-before-seen nmethod, holding the
  // CodeCache_lock until all dependencies are added. The caller may call
  // multiple times with the same nmethod, but always under the same lock hold.
  //
  // This means the bucket list is guaranteed to be in one of two states with
  // regard to the newly added nmethod:
  //   1. The nmethod is not in the list, and can simply be added at the head.
  //   2. The nmethod is in the list, and is already at the head.
  //
  // This path is the only one that adds to the list. There can be concurrent
  // removals from the list, but they do not break this invariant. The invariant
  // allows us to skip list scans: the individual checks are cheap, but walking
  // a large dependency list gets expensive.

  nmethodBucket* head = Atomic::load(_dependency_context_addr);
  if (head != nullptr && nm == head->get_nmethod()) {
    return;
  }

#ifdef ASSERT
  for (nmethodBucket* b = head; b != nullptr; b = b->next()) {
    assert(nm != b->get_nmethod(), "Invariant: should not be in the list yet");
  }
#endif

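  // Publish the new bucket at the head with a CAS loop. Concurrent unlinking
  // (e.g. GC threads removing is_unloading entries) can change the head, so
  // on failure we reload the head and retry.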
  nmethodBucket* new_head = new nmethodBucket(nm, nullptr);
  for (;;) {
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
    head = Atomic::load(_dependency_context_addr);
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
  if (delete_on_release()) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
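    // Push the bucket onto the global purge list with a CAS loop; several
    // threads may release buckets concurrently, so retry until the push wins.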
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Reclaim all unused buckets.
//
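// Note: this walks and resets _purge_list without atomics, so it relies on
// running when no other thread can concurrently push onto the purge list
// (deferred buckets are deleted during ClassLoaderDataGraph::purge()).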
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != nullptr;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = nullptr;
}

//
// Cleanup a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge(). The loop body is empty on
  // purpose: next_not_unloading() unlinks is_unloading entries and releases them
  // onto the purge list as a side effect of the traversal.
  nmethodBucket* b = dependencies_not_unloading();
  while (b != nullptr) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
void DependencyContext::remove_all_dependents() {
  // Assume that the dependency is not deleted immediately but moved into the
  // purge list when calling this.
  assert(!delete_on_release(), "should not delete on release");

  nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr);
  if (first == nullptr) {
    return;
  }

  nmethodBucket* cur = first;
  nmethodBucket* last = cur;
  jlong count = 0;
  for (; cur != nullptr; cur = cur->next()) {
    assert(cur->get_nmethod()->is_unloading(), "must be");
    // Thread the purge list links through the buckets, so that the whole
    // chain can be traversed via purge_list_next() once it is published.
    cur->set_purge_list_next(cur->next());
    last = cur;
    count++;
  }

  // Add the whole list to the purge list at once.
  nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list);
  for (;;) {
    last->set_purge_list_next(old_purge_list_head);
    nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first);
    if (old_purge_list_head == next_purge_list_head) {
      break;
    }
    old_purge_list_head = next_purge_list_head;
  }

  if (UsePerfData) {
    _perf_total_buckets_stale_count->inc(count);
    _perf_total_buckets_stale_acc_count->inc(count);
  }

  set_dependencies(nullptr);
}

#ifndef PRODUCT
bool DependencyContext::is_empty() {
  return dependencies() == nullptr;
}

void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] { ", idx++);
    if (!verbose) {
      nm->print_on_with_msg(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies_on(tty);
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return true;
    }
  }
  return false;
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  // If the CAS fails, another thread has already claimed this context for
  // the current cleaning epoch.
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

// A zero cleaning epoch means no cleanup is in progress (see cleaning_end()),
// so released buckets can be deleted immediately instead of being deferred.
bool DependencyContext::delete_on_release() {
  return Atomic::load(&_cleaning_epoch) == 0;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any is_unloading() entries observed from the original
// head will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == nullptr || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
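    // Re-check the head after reading head->next: if the head changed in the
    // meantime, head_next may be stale, so restart instead of unlinking with it.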
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling of cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}

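// A rough sketch of the intended flow, pieced together from the comments in
// this file (the exact ordering and call sites live in the GC and class
// unloading code, and vary by collector):
//   cleaning_start();                    // at a safepoint: defer releases
//   ... clean_unloading_dependents() ... // per context, possibly concurrent
//   cleaning_end();                      // resume delete-on-release
//   purge_dependency_contexts();         // reclaim the deferred buckets
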
// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
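// The unlink-and-retry protocol mirrors dependencies_not_unloading() above.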
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == nullptr || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}