/*
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count   = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count       = nullptr;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count   = nullptr;
nmethodBucket* volatile DependencyContext::_purge_list                = nullptr;
volatile uint64_t DependencyContext::_cleaning_epoch                  = 0;
uint64_t  DependencyContext::_cleaning_epoch_monotonic                = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.
//
void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes, InstanceKlass* context) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    {
      LogStreamHandle(Trace, dependencies) log;
      if (log.is_enabled()) {
        log.print("Processing ");
        if (context != nullptr) {
          log.print(" ctx=");
          context->name()->print_value_on(&log);
          log.print(" ");
        }
        nm->print_value_on(&log);
      }
    }
    if (nm->is_marked_for_deoptimization()) {
      deopt_scope->dependent(nm);
    } else if (nm->check_dependency_on(changes)) {
      LogTarget(Info, dependencies) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(&lt);
        ls.print_cr("Marked for deoptimization");
        changes.print_on(&ls);
        nm->print_on(&ls);
        nm->print_dependencies_on(&ls);
      }
      deopt_scope->mark(nm, !changes.is_call_site_change());
    }
  }
}

//
// Add an nmethod to the dependency context.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, nullptr);
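  // Lock-free prepend: concurrent cleaning may unlink the current head, so
  // retry the CAS until the new bucket is successfully installed as the head.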
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
  if (delete_on_release()) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
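    // Defer deletion: push the bucket onto the global purge list with a
    // lock-free prepend, retrying until the CAS installs it as the new head.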
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Reclaim all unused buckets.
//
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != nullptr;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = nullptr;
}
//
// Clean up a dependency context by unlinking all dependents that correspond to
// is_unloading nmethods and placing them on a purge list, to be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
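  // The walk itself does the unlinking: dependencies_not_unloading() and
  // next_not_unloading() move is_unloading entries onto the purge list as a
  // side effect, so the loop body needs no explicit release.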
  nmethodBucket* b = dependencies_not_unloading();
  while (b != nullptr) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

nmethodBucket* DependencyContext::release_and_get_next_not_unloading(nmethodBucket* b) {
  nmethodBucket* next = b->next_not_unloading();
  release(b);
  return next;
}

//
// Invalidate all dependencies in the context.
void DependencyContext::remove_all_dependents() {
  // When this is called, dependents must not be deleted immediately; they are
  // instead moved onto the purge list.
  assert(!delete_on_release(), "should not delete on release");

  nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr);
  if (first == nullptr) {
    return;
  }

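  // Verify that every dependent is unloading, count the buckets, and find the
  // tail so the whole chain can be spliced onto the purge list in one go.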
  nmethodBucket* cur = first;
  nmethodBucket* last = cur;
  jlong count = 0;
  for (; cur != nullptr; cur = cur->next()) {
    assert(cur->get_nmethod()->is_unloading(), "must be");
    last = cur;
    count++;
  }

  // Add the whole list to the purge list at once.
  nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list);
  for (;;) {
    last->set_purge_list_next(old_purge_list_head);
    nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first);
    if (old_purge_list_head == next_purge_list_head) {
      break;
    }
    old_purge_list_head = next_purge_list_head;
  }

  if (UsePerfData) {
    _perf_total_buckets_stale_count->inc(count);
    _perf_total_buckets_stale_acc_count->inc(count);
  }

  set_dependencies(nullptr);
}

void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope) {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(nullptr);
  while (b != nullptr) {
    nmethod* nm = b->get_nmethod();
    // Also count already (concurrently) marked nmethods to make sure
    // deoptimization is triggered before execution in this thread continues.
    deopt_scope->mark(nm);
    b = release_and_get_next_not_unloading(b);
  }
}

#ifndef PRODUCT
bool DependencyContext::is_empty() {
  return dependencies() == nullptr;
}

void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] { ", idx++);
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies_on(tty);
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      return true;
    }
  }
  return false;
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
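  // Only the thread whose CAS advances _last_cleanup to the current epoch
  // wins the claim; everyone else sees the context as already claimed.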
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

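// Buckets may be deleted eagerly only while no cleaning epoch is active,
// i.e. between cleaning_end() and the next cleaning_start().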
bool DependencyContext::delete_on_release() {
  return Atomic::load(&_cleaning_epoch) == 0;
}

// Retrieve the first nmethodBucket whose dependent nmethod is not is_unloading.
// Any nmethodBucket entries observed from the original head whose nmethod
// is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == nullptr || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC, and
// releasing of nmethodBucket entries will then be deferred: they are placed on
// a purge list and deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
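  // The monotonic counter is only bumped here, at a safepoint, so a plain
  // increment suffices; the Atomic::store publishes the new epoch to
  // concurrent readers of _cleaning_epoch.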
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling of cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == nullptr || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}