/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = nullptr;
nmethodBucket* volatile DependencyContext::_purge_list = nullptr;
volatile uint64_t DependencyContext::_cleaning_epoch = 0;
uint64_t DependencyContext::_cleaning_epoch_monotonic = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.
//
void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes, InstanceKlass* context) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    {
      LogStreamHandle(Trace, dependencies) log;
      if (log.is_enabled()) {
        log.print("Processing ");
        if (context != nullptr) {
          log.print(" ctx=");
          context->name()->print_value_on(&log);
          log.print(" ");
        }
        nm->print_value_on(&log);
      }
    }
    if (nm->is_marked_for_deoptimization()) {
      deopt_scope->dependent(nm);
    } else if (nm->check_dependency_on(changes)) {
      LogTarget(Info, dependencies) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        ls.print_cr("Marked for deoptimization");
        changes.print_on(&ls);
        nm->print_on(&ls);
        nm->print_dependencies_on(&ls);
      }
      deopt_scope->mark(nm, !changes.is_call_site_change());
    }
  }
}

//
// Add an nmethod to the dependency context.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  assert(nm->is_not_installed(), "Precondition: new nmethod");

  // This method tries to add a never-before-seen nmethod, holding the CodeCache_lock
  // until all dependencies are added. The caller may call this multiple times with
  // the same nmethod, but always while holding the same lock.
  //
  // This means the bucket list is guaranteed to be in one of two states with
  // regard to the newly added nmethod:
  //   1. The nmethod is not in the list, and can simply be added at the head of the list.
  //   2. The nmethod is in the list, and it is already at the head of the list.
  //
  // This path is the only path that adds to the list. There can be concurrent removals
  // from the list, but they do not break this invariant. This invariant allows us
  // to skip list scans. The individual checks are cheap, but walking a large
  // list of dependencies gets expensive.

  nmethodBucket* head = AtomicAccess::load(_dependency_context_addr);
  if (head != nullptr && nm == head->get_nmethod()) {
    return;
  }

#ifdef ASSERT
  for (nmethodBucket* b = head; b != nullptr; b = b->next()) {
    assert(nm != b->get_nmethod(), "Invariant: should not be in the list yet");
  }
#endif

  nmethodBucket* new_head = new nmethodBucket(nm, nullptr);
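  // Lock-free push: link the new bucket in front of the current head and publish it with
  // a CAS; if a concurrent unlink changed the head in the meantime, reload it and retry.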
  for (;;) {
    new_head->set_next(head);
    if (AtomicAccess::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
    head = AtomicAccess::load(_dependency_context_addr);
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
  if (delete_on_release()) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
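    // Push the bucket onto the shared purge list with a CAS retry loop; the buckets are
    // reclaimed later by purge_dependency_contexts(), once it is safe to delete them.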
    for (;;) {
      nmethodBucket* purge_list_head = AtomicAccess::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (AtomicAccess::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Reclaim all unused buckets.
//
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != nullptr;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = nullptr;
}

//
// Cleanup a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
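  // Note that next_not_unloading() unlinks is_unloading entries and releases them onto
  // the purge list as a side effect, so simply walking the list performs the cleanup.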
  nmethodBucket* b = dependencies_not_unloading();
  while (b != nullptr) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
void DependencyContext::remove_all_dependents() {
  // Assume that dependents are not deleted immediately but moved onto the purge
  // list when this is called.
  assert(!delete_on_release(), "should not delete on release");

  nmethodBucket* first = AtomicAccess::load_acquire(_dependency_context_addr);
  if (first == nullptr) {
    return;
  }

  nmethodBucket* cur = first;
  nmethodBucket* last = cur;
  jlong count = 0;
  for (; cur != nullptr; cur = cur->next()) {
    assert(cur->get_nmethod()->is_unloading(), "must be");
    last = cur;
    count++;
  }

  // Add the whole list to the purge list at once.
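  // The chain [first..last] is spliced in front of the current purge list head; on CAS
  // failure, re-link last to the newly observed head and retry.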
  nmethodBucket* old_purge_list_head = AtomicAccess::load(&_purge_list);
  for (;;) {
    last->set_purge_list_next(old_purge_list_head);
    nmethodBucket* next_purge_list_head = AtomicAccess::cmpxchg(&_purge_list, old_purge_list_head, first);
    if (old_purge_list_head == next_purge_list_head) {
      break;
    }
    old_purge_list_head = next_purge_list_head;
  }

  if (UsePerfData) {
    _perf_total_buckets_stale_count->inc(count);
    _perf_total_buckets_stale_acc_count->inc(count);
  }

  set_dependencies(nullptr);
}

#ifndef PRODUCT
bool DependencyContext::is_empty() {
  return dependencies() == nullptr;
}

void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] { ", idx++);
    if (!verbose) {
      nm->print_on_with_msg(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies_on(tty);
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return true;
    }
  }
  return false;
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
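// Only one thread can advance _last_cleanup to the current cleaning epoch; that thread
// becomes the cleaner for this context in this cycle, and all other threads back off.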
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = AtomicAccess::load(&_cleaning_epoch);
  uint64_t last_cleanup = AtomicAccess::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return AtomicAccess::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

bool DependencyContext::delete_on_release() {
  return AtomicAccess::load(&_cleaning_epoch) == 0;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = AtomicAccess::load_acquire(_dependency_context_addr);
    if (head == nullptr || !head->get_nmethod()->is_unloading()) {
      return head;
    }
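    // The head is is_unloading, so try to unlink it: read head->next, then re-check that
    // the head is unchanged so the (head, next) pair is consistent before the unlinking CAS.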
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (AtomicAccess::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (AtomicAccess::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  AtomicAccess::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return AtomicAccess::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
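  // _cleaning_epoch_monotonic is only updated here, inside a safepoint (asserted above),
  // so a plain, non-atomic increment is sufficient.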
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  AtomicAccess::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  AtomicAccess::store(&_cleaning_epoch, epoch);
}

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = AtomicAccess::load(&_next);
    if (next == nullptr || !next->get_nmethod()->is_unloading()) {
      return next;
    }
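    // As in dependencies_not_unloading(): verify that _next still refers to the bucket
    // whose next pointer was just read before attempting to unlink it.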
    nmethodBucket* next_next = AtomicAccess::load(&next->_next);
    OrderAccess::loadload();
    if (AtomicAccess::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (AtomicAccess::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return AtomicAccess::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  AtomicAccess::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return AtomicAccess::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  AtomicAccess::store(&_purge_list_next, b);
}