1 /*
2 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2019, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26
27 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
29 #include "gc/shenandoah/shenandoahNMethod.inline.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "runtime/continuation.hpp"
32 #include "runtime/safepointVerifiers.hpp"
33
34 ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
35 _nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false), _lock(), _ic_lock() {
36
37 if (!oops.is_empty()) {
38 _oops_count = oops.length();
39 _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
40 for (int c = 0; c < _oops_count; c++) {
41 _oops[c] = oops.at(c);
42 }
43 }
44 _has_non_immed_oops = non_immediate_oops;
45
46 assert_same_oops();
47 }
48
49 ShenandoahNMethod::~ShenandoahNMethod() {
50 if (_oops != nullptr) {
51 FREE_C_HEAP_ARRAY(oop*, _oops);
52 }
53 }
54
55 void ShenandoahNMethod::update() {
56 ResourceMark rm;
57 bool non_immediate_oops = false;
58 GrowableArray<oop*> oops;
59
60 detect_reloc_oops(nm(), oops, non_immediate_oops);
61 if (oops.length() != _oops_count) {
62 if (_oops != nullptr) {
63 FREE_C_HEAP_ARRAY(oop*, _oops);
64 _oops = nullptr;
65 }
66
67 _oops_count = oops.length();
68 if (_oops_count > 0) {
69 _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
70 }
71 }
72
73 for (int index = 0; index < _oops_count; index ++) {
74 _oops[index] = oops.at(index);
75 }
76 _has_non_immed_oops = non_immediate_oops;
77
78 assert_same_oops();
79 }
80
81 void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops) {
82 has_non_immed_oops = false;
83 // Find all oops relocations
84 RelocIterator iter(nm);
85 while (iter.next()) {
86 if (iter.type() != relocInfo::oop_type) {
87 // Not an oop
88 continue;
89 }
90
91 oop_Relocation* r = iter.oop_reloc();
92 if (!r->oop_is_immediate()) {
93 // Non-immediate oop found
94 has_non_immed_oops = true;
95 continue;
96 }
97
98 oop value = r->oop_value();
99 if (value != nullptr) {
100 oop* addr = r->oop_addr();
101 shenandoah_assert_correct(addr, value);
102 shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
103 shenandoah_assert_not_forwarded(addr, value);
104 // Non-null immediate oop found. null oops can safely be
105 // ignored since the method will be re-registered if they
106 // are later patched to be non-null.
107 oops.push(addr);
108 }
109 }
110 }
111
112 ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
113 ResourceMark rm;
114 bool non_immediate_oops = false;
115 GrowableArray<oop*> oops;
116
117 detect_reloc_oops(nm, oops, non_immediate_oops);
118 return new ShenandoahNMethod(nm, oops, non_immediate_oops);
119 }
120
// Fix up nm's embedded oops appropriate to the current GC phase. Caller must
// hold the per-nmethod lock (asserted below).
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != nullptr, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_weak_root_in_progress() ||
      heap->is_concurrent_strong_root_in_progress()) {
    // Concurrent root processing: heal the nmethod's metadata. Done inside an
    // evac-OOM scope, presumably because healing may evacuate objects and the
    // evacuation OOM protocol must be coordinated — confirm against
    // heal_nmethod_metadata in the inline header.
    ShenandoahEvacOOMScope evac_scope;
    heal_nmethod_metadata(data);
  } else if (heap->is_concurrent_mark_in_progress()) {
    // Concurrent marking: visit the recorded oops with the keep-alive closure
    // so the marker sees them.
    ShenandoahKeepAliveClosure cl;
    data->oops_do(&cl);
  } else {
    // There is possibility that GC is cancelled when it arrives final mark.
    // In this case, concurrent root phase is skipped and degenerated GC should be
    // followed, where nmethods are disarmed.
  }
}
140
141 #ifdef ASSERT
142 void ShenandoahNMethod::assert_correct() {
143 ShenandoahHeap* heap = ShenandoahHeap::heap();
144 for (int c = 0; c < _oops_count; c++) {
145 oop *loc = _oops[c];
146 assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
147 oop o = RawAccess<>::oop_load(loc);
148 shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
149 }
150
151 oop* const begin = _nm->oops_begin();
152 oop* const end = _nm->oops_end();
153 for (oop* p = begin; p < end; p++) {
154 if (*p != Universe::non_oop_word()) {
155 oop o = RawAccess<>::oop_load(p);
156 shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
157 }
158 }
159 }
160
// OopClosure that records the address of every oop* slot it is shown.
// Used by assert_same_oops() to collect what nmethod::oops_do reports.
class ShenandoahNMethodOopDetector : public OopClosure {
private:
  ResourceMark rm; // For growable array allocation below.
  GrowableArray<oop*> _oops;

public:
  ShenandoahNMethodOopDetector() : _oops(10) {};

  void do_oop(oop* o) {
    _oops.append(o);
  }
  void do_oop(narrowOop* o) {
    // Shenandoah does not use compressed oops in code; seeing one is a bug.
    fatal("NMethods should not have compressed oops embedded.");
  }

  // Resource-allocated; valid only while this detector is live.
  GrowableArray<oop*>* oops() {
    return &_oops;
  }
};
180
// Debug-only: cross-check the recorded oop locations against what the
// nmethod itself reports via oops_do(). On mismatch, dump detected vs.
// recorded vs. freshly re-detected locations, then abort.
void ShenandoahNMethod::assert_same_oops() {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector);

  GrowableArray<oop*>* oops = detector.oops();

  // 'count' accumulates how many locations oops_do is expected to have
  // reported: the recorded immediate oops plus non-sentinel oop-section slots.
  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  if (oops->length() < count) {
    // Mismatch: build a full diagnostic listing before failing, including a
    // fresh relocation scan to show what detection finds right now.
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    bool non_immed;
    detect_reloc_oops(nm(), check, non_immed);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.freeze());
  }
}
219 #endif
220
ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  // Start with a minimally-sized backing list; append() doubles it on demand.
  _list = new ShenandoahNMethodList(minSize);
}
227
ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != nullptr, "Sanity");
  // Drop our reference; the list deletes itself when the ref-count reaches
  // zero (outstanding snapshots may still hold references).
  _list->release();
}
232
// Register nm with the table, or refresh its recorded oops if it is already
// registered, then disarm it. Caller must hold CodeCache_lock.
void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);

  if (data != nullptr) {
    // Already registered: re-scan and update its recorded oop locations,
    // under the per-nmethod lock.
    assert(contain(nm), "Must have been registered");
    assert(nm == data->nm(), "Must be same nmethod");
    // Prevent updating a nmethod while concurrent iteration is in progress.
    wait_until_concurrent_iteration_done();
    ShenandoahNMethodLocker data_locker(data->lock());
    data->update();
  } else {
    // For a new nmethod, we can safely append it to the list, because
    // concurrent iteration will not touch it.
    data = ShenandoahNMethod::for_nmethod(nm);
    assert(data != nullptr, "Sanity");
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}
259
// Remove nm from the table and detach its GC data. Caller holds
// CodeCache_lock or is at a safepoint.
void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != nullptr, "Sanity");
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  // Find the slot, detach the GC data, and remove (swap-with-last) under
  // the table lock.
  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, nullptr);
  remove(idx);
}
274
275 bool ShenandoahNMethodTable::contain(nmethod* nm) const {
276 return index_of(nm) != -1;
277 }
278
// Return the entry at the given slot; index must be within the live prefix.
ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _list->at(index);
}
283
284 int ShenandoahNMethodTable::index_of(nmethod* nm) const {
285 for (int index = 0; index < length(); index ++) {
286 if (at(index)->nm() == nm) {
287 return index;
288 }
289 }
290 return -1;
291 }
292
293 void ShenandoahNMethodTable::remove(int idx) {
294 shenandoah_assert_locked_or_safepoint(CodeCache_lock);
295 assert(_index >= 0 && _index <= _list->size(), "Sanity");
296
297 assert(idx >= 0 && idx < _index, "Out of bound");
298 ShenandoahNMethod* snm = _list->at(idx);
299 ShenandoahNMethod* tmp = _list->at(_index - 1);
300 _list->set(idx, tmp);
301 _index --;
302
303 delete snm;
304 }
305
// Block until no snapshot iteration is in flight. The last finishing
// iterator notifies CodeCache_lock (see ShenandoahConcurrentNMethodIterator).
void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}
312
313 void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
314 if (is_full()) {
315 int new_size = 2 * _list->size();
316 // Rebuild table and replace current one
317 rebuild(new_size);
318 }
319
320 _list->set(_index++, snm);
321 assert(_index >= 0 && _index <= _list->size(), "Sanity");
322 }
323
324 void ShenandoahNMethodTable::rebuild(int size) {
325 ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
326 new_list->transfer(_list, _index);
327
328 // Release old list
329 _list->release();
330 _list = new_list;
331 }
332
// Begin an iteration: bump the iteration count (which blocks nmethod updates
// in register_nmethod) and hand out a snapshot pinning the current list.
ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  _itr_cnt++;
  return new ShenandoahNMethodTableSnapshot(this);
}
338
// End an iteration started by snapshot_for_iteration(): drop the iteration
// count and destroy the snapshot (which releases its list reference).
void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(iteration_in_progress(), "Why we here?");
  assert(snapshot != nullptr, "No snapshot");
  _itr_cnt--;

  delete snapshot;
}
347
348 void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
349 LogTarget(Debug, gc, nmethod) log;
350 if (!log.is_enabled()) {
351 return;
352 }
353
354 ResourceMark rm;
355 log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
356 nm->method()->method_holder()->external_name(),
357 nm->method()->name()->as_C_string(),
358 p2i(nm),
359 nm->compiler_name());
360 }
361
362 void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
363 LogTarget(Debug, gc, nmethod) log;
364 if (!log.is_enabled()) {
365 return;
366 }
367
368 ResourceMark rm;
369 log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
370 nm->method()->method_holder()->external_name(),
371 nm->method()->name()->as_C_string(),
372 p2i(nm));
373 }
374
375 #ifdef ASSERT
376 void ShenandoahNMethodTable::assert_nmethods_correct() {
377 assert_locked_or_safepoint(CodeCache_lock);
378
379 for (int index = 0; index < length(); index ++) {
380 ShenandoahNMethod* m = _list->at(index);
381 // Concurrent unloading may have dead nmethods to be cleaned by sweeper
382 if (m->is_unregistered()) continue;
383 m->assert_correct();
384 }
385 }
386 #endif
387
388
ShenandoahNMethodList::ShenandoahNMethodList(int size) :
  _size(size), _ref_count(1) {
  // Creator holds the initial reference; see acquire()/release().
  _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
}
393
ShenandoahNMethodList::~ShenandoahNMethodList() {
  // Only reached via release() dropping the last reference.
  assert(_list != nullptr, "Sanity");
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}
399
400 void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
401 assert(limit <= size(), "Sanity");
402 ShenandoahNMethod** old_list = list->list();
403 for (int index = 0; index < limit; index++) {
404 _list[index] = old_list[index];
405 }
406 }
407
// Take an additional reference; the ref-count is guarded by CodeCache_lock.
ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count++;
  return this;
}
413
// Drop a reference; the last release deletes the list.
void ShenandoahNMethodList::release() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count--;
  if (_ref_count == 0) {
    delete this;
  }
}
421
// A snapshot pins the table's current backing list (via its ref-count) and
// captures the current live length, so iteration stays stable even if the
// table rebuilds its list concurrently.
ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}
425
ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  // Unpin the backing list; it is deleted here if the table has already
  // replaced it and dropped its own reference.
  _list->release();
}
429
430 void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
431 size_t stride = 256; // educated guess
432
433 ShenandoahNMethod** const list = _list->list();
434
435 size_t max = (size_t)_limit;
436 while (_claimed.load_relaxed() < max) {
437 size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
438 size_t start = cur;
439 size_t end = MIN2(cur + stride, max);
440 if (start >= max) break;
441
442 for (size_t idx = start; idx < end; idx++) {
443 ShenandoahNMethod* nmr = list[idx];
444 assert(nmr != nullptr, "Sanity");
445 if (nmr->is_unregistered()) {
446 continue;
447 }
448
449 nmr->assert_correct();
450 f->do_nmethod(nmr->nm());
451 }
452 }
453 }
454
455 void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
456 size_t stride = 256; // educated guess
457
458 ShenandoahNMethod** list = _list->list();
459 size_t max = (size_t)_limit;
460 while (_claimed.load_relaxed() < max) {
461 size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
462 size_t start = cur;
463 size_t end = MIN2(cur + stride, max);
464 if (start >= max) break;
465
466 for (size_t idx = start; idx < end; idx++) {
467 ShenandoahNMethod* data = list[idx];
468 assert(data != nullptr, "Should not be null");
469 if (!data->is_unregistered()) {
470 cl->do_nmethod(data->nm());
471 }
472 }
473 }
474 }
475
// Worker counters coordinate snapshot creation/teardown across the threads
// that call nmethods_do(); see that method for the protocol.
ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table),
  _table_snapshot(nullptr),
  _started_workers(0),
  _finished_workers(0) {}
481
// Apply cl to the nmethods in the table's snapshot. Callable by multiple
// worker threads: the first arriving worker creates the snapshot, the last
// finishing worker tears it down and notifies threads blocked on
// CodeCache_lock (e.g. in wait_until_concurrent_iteration_done()).
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  // Cannot safepoint when iteration is running, because this can cause deadlocks
  // with other threads waiting on iteration to be over.
  NoSafepointVerifier nsv;

  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  if (_finished_workers > 0) {
    // Some threads have already finished. We are now in rampdown: we are now
    // waiting for all currently recorded workers to finish. No new workers
    // should start.
    return;
  }

  // Record a new worker and initialize the snapshot if it is a first visitor.
  if (_started_workers++ == 0) {
    _table_snapshot = _table->snapshot_for_iteration();
  }

  // All set, relinquish the lock and go concurrent.
  {
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _table_snapshot->concurrent_nmethods_do(cl);
  }

  // Record completion. Last worker shuts down the iterator and notifies any waiters.
  uint count = ++_finished_workers;
  if (count == _started_workers) {
    _table->finish_iteration(_table_snapshot);
    CodeCache_lock->notify_all();
  }
}