/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = nullptr;
nmethodBucket* volatile DependencyContext::_purge_list = nullptr;
volatile uint64_t DependencyContext::_cleaning_epoch = 0;
uint64_t DependencyContext::_cleaning_epoch_monotonic = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods, searching for nmethods that are
// dependent on the changes that were passed in, and mark them for
// deoptimization.
//
void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    if (nm->is_marked_for_deoptimization()) {
      deopt_scope->dependent(nm);
    } else if (nm->check_dependency_on(changes)) {
      LogTarget(Info, dependencies) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        ls.print_cr("Marked for deoptimization");
        changes.print_on(&ls);
        nm->print_on(&ls);
        nm->print_dependencies_on(&ls);
      }
      deopt_scope->mark(nm, !changes.is_call_site_change());
    }
  }
}

//
// Add an nmethod to the dependency context.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, nullptr);
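  // Lock-free push onto the list head: point the new bucket at the currently
  // observed head and retry the CAS until no concurrent insert has changed
  // the head underneath us.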
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
  if (delete_on_release()) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
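    // Defer deletion: push the bucket onto the global purge list with the
    // same lock-free CAS loop used when adding a dependent nmethod.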
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Reclaim all unused buckets.
//
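// Buckets arrive here via release() when delete_on_release() is false; as
// noted in clean_unloading_dependents(), they are deleted later (during
// ClassLoaderDataGraph::purge()), when it is safe to do so.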
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != nullptr;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = nullptr;
}

//
// Clean up a dependency context by unlinking and placing all dependents that
// correspond to is_unloading nmethods on a purge list, to be deleted later
// when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, which
  // will be deleted during ClassLoaderDataGraph::purge().
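  // The loop body is intentionally empty: dependencies_not_unloading() and
  // next_not_unloading() unlink is_unloading entries and release them onto
  // the purge list as a side effect of the traversal (see below).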
  nmethodBucket* b = dependencies_not_unloading();
  while (b != nullptr) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context.
void DependencyContext::remove_all_dependents() {
  // Assume that dependents are not deleted immediately, but moved onto the
  // purge list, when this is called.
  assert(!delete_on_release(), "should not delete on release");

  nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr);
  if (first == nullptr) {
    return;
  }

  nmethodBucket* cur = first;
  nmethodBucket* last = cur;
  jlong count = 0;
  for (; cur != nullptr; cur = cur->next()) {
    assert(cur->get_nmethod()->is_unloading(), "must be");
    last = cur;
    count++;
  }

  // Add the whole list to the purge list at once.
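  // Splice the whole chain [first, last] in front of the purge list head.
  // The tail link must be re-set on every retry, because a failed CAS means
  // another thread changed the head we were about to link to.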
  nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list);
  for (;;) {
    last->set_purge_list_next(old_purge_list_head);
    nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first);
    if (old_purge_list_head == next_purge_list_head) {
      break;
    }
    old_purge_list_head = next_purge_list_head;
  }

  if (UsePerfData) {
    _perf_total_buckets_stale_count->inc(count);
    _perf_total_buckets_stale_acc_count->inc(count);
  }

  set_dependencies(nullptr);
}

#ifndef PRODUCT
bool DependencyContext::is_empty() {
  return dependencies() == nullptr;
}

void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] { ", idx++);
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies_on(tty);
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return true;
    }
  }
  return false;
}

// We use a monotonically increasing epoch counter to track the last epoch in
// which a given dependency context was cleaned. GC threads claim cleanup tasks
// by performing a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
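  // At most one thread can successfully CAS _last_cleanup up to the current
  // epoch; the others fail the CAS and return false.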
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

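// When no cleaning is in progress (_cleaning_epoch == 0), released buckets can
// be deleted immediately; during cleaning, concurrent readers may still be
// traversing unlinked buckets, so release() defers them to the purge list.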
bool DependencyContext::delete_on_release() {
  return Atomic::load(&_cleaning_epoch) == 0;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond
// to an is_unloading nmethod. Any entries observed from the original head whose
// nmethod is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == nullptr || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
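    // If the CAS failed, another thread changed the head first (and, if it
    // unlinked this bucket, is responsible for releasing it). Either way,
    // loop around and re-examine the current head.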
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
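  // _cleaning_epoch_monotonic is only updated here, inside a safepoint, so the
  // plain increment is safe; the new value is published to concurrent readers
  // via the atomic store to _cleaning_epoch below.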
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == nullptr || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
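    // As above: if the CAS failed, the unlink was claimed by another thread,
    // so loop around and retry from the (possibly updated) _next.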
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}
--- EOF ---