1 /*
2 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2019, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26
27 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
28 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
30 #include "gc/shenandoah/shenandoahNMethod.inline.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "runtime/continuation.hpp"
33 #include "runtime/safepointVerifiers.hpp"
34
35 ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops, GrowableArray<ShenandoahNMethodBarrier>& barriers) :
36 _nm(nm), _oops(nullptr), _oops_count(0), _barriers(nullptr), _barriers_count(0), _unregistered(false), _lock(), _ic_lock() {
37
38 if (!oops.is_empty()) {
39 _oops_count = oops.length();
40 _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
41 for (int c = 0; c < _oops_count; c++) {
42 _oops[c] = oops.at(c);
43 }
44 }
45 _has_non_immed_oops = non_immediate_oops;
46
47 assert_same_oops();
48
49 if (!barriers.is_empty()) {
50 _barriers_count = barriers.length();
51 _barriers = NEW_C_HEAP_ARRAY(ShenandoahNMethodBarrier, _barriers_count, mtGC);
52 for (int c = 0; c < _barriers_count; c++) {
53 _barriers[c] = barriers.at(c);
54 }
55 }
56 }
57
ShenandoahNMethod::~ShenandoahNMethod() {
  // Release the C-heap copies of the oop and barrier tables, if allocated.
  if (_oops != nullptr) {
    FREE_C_HEAP_ARRAY(oop*, _oops);
  }
  if (_barriers != nullptr) {
    FREE_C_HEAP_ARRAY(ShenandoahNMethodBarrier, _barriers);
  }
}
66
// Re-parse the nmethod and refresh the cached immediate-oop locations, e.g.
// after the nmethod has been patched or relinked. The C-heap array is only
// reallocated when the oop count changed; otherwise it is overwritten in place.
// NOTE(review): the barrier list parsed here is discarded and _barriers is left
// untouched — presumably barrier sites cannot change for a live nmethod;
// verify against callers.
void ShenandoahNMethod::update() {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;
  GrowableArray<ShenandoahNMethodBarrier> barriers;

  parse(nm(), oops, non_immediate_oops, barriers);
  // Reallocate only when the number of immediate oops changed.
  if (oops.length() != _oops_count) {
    if (_oops != nullptr) {
      FREE_C_HEAP_ARRAY(oop*, _oops);
      _oops = nullptr;
    }

    _oops_count = oops.length();
    if (_oops_count > 0) {
      _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    }
  }

  // Copy the freshly discovered oop locations into the cached array.
  for (int index = 0; index < _oops_count; index ++) {
    _oops[index] = oops.at(index);
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}
93
// Walk the relocations of the nmethod, collecting the addresses of all
// non-null immediate oops into 'oops' and (under C2) the barrier fastpath
// sites into 'barriers'. Sets 'has_non_immed_oops' when any oop is referenced
// through the oop section rather than embedded directly in the code.
void ShenandoahNMethod::parse(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops, GrowableArray<ShenandoahNMethodBarrier>& barriers) {
  has_non_immed_oops = false;
  RelocIterator iter(nm);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::oop_type: {
        oop_Relocation* r = iter.oop_reloc();
        if (!r->oop_is_immediate()) {
          // Non-immediate oop found
          has_non_immed_oops = true;
          break;
        }

        oop value = r->oop_value();
        if (value != nullptr) {
          oop* addr = r->oop_addr();
          shenandoah_assert_correct(addr, value);
          shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
          shenandoah_assert_not_forwarded(addr, value);
          // Non-null immediate oop found. null oops can safely be
          // ignored since the method will be re-registered if they
          // are later patched to be non-null.
          oops.push(addr);
        }
        break;
      }
#ifdef COMPILER2
      case relocInfo::barrier_type: {
        barrier_Relocation* r = iter.barrier_reloc();

        // Record the barrier check site and the stub it branches to, so the
        // branch can later be nop-ped out or restored by update_barriers().
        ShenandoahNMethodBarrier b;
        b._pc = r->addr();
        // TODO: Parsing the stub address from generated code is kludgy. It also does not work
        // with nmethod relocation, that can copy the nmethod body with barriers already nop-ped out.
        b._stub_addr = ShenandoahBarrierSetAssembler::parse_stub_address(b._pc);
        // TODO next step: Figure out which GC state we care about in at this fastpath check:
        // b._gc_state_fast_bit = r->format();
        barriers.push(b);
        break;
      }
#endif
      default:
        // We do not care about other relocations.
        break;
    }
  }
}
141
142 ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
143 ResourceMark rm;
144 bool non_immediate_oops = false;
145 GrowableArray<oop*> oops;
146 GrowableArray<ShenandoahNMethodBarrier> barriers;
147
148 parse(nm, oops, non_immediate_oops, barriers);
149 return new ShenandoahNMethod(nm, oops, non_immediate_oops, barriers);
150 }
151
// Heal the oops embedded in the nmethod according to the current GC phase.
// Caller must hold the per-nmethod lock, which serializes healing against
// concurrent re-registration.
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != nullptr, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_weak_root_in_progress() ||
      heap->is_concurrent_strong_root_in_progress()) {
    // Concurrent root processing: update the embedded oops inside an
    // evac-OOM scope, since healing may need to evacuate objects.
    ShenandoahEvacOOMScope evac_scope;
    heal_nmethod_metadata(data);
  } else if (heap->is_concurrent_mark_in_progress()) {
    // Concurrent marking: keep the embedded oops alive for this cycle.
    ShenandoahKeepAliveClosure cl;
    data->oops_do(&cl);
  } else {
    // There is possibility that GC is cancelled when it arrives final mark.
    // In this case, concurrent root phase is skipped and degenerated GC should be
    // followed, where nmethods are disarmed.
  }
}
171
172 void ShenandoahNMethod::update_barriers() {
173 #ifdef COMPILER2
174 ShenandoahHeap* heap = ShenandoahHeap::heap();
175
176 for (int c = 0; c < _barriers_count; c++) {
177 address pc = _barriers[c]._pc;
178 address stub_addr = _barriers[c]._stub_addr;
179 if (heap->is_idle()) {
180 ShenandoahBarrierSetAssembler::patch_branch_to_nop(pc);
181 } else {
182 ShenandoahBarrierSetAssembler::patch_nop_to_branch(pc, stub_addr);
183 }
184 }
185 #endif
186 }
187
188 #ifdef ASSERT
// Debug-only check: every recorded immediate-oop location and every live slot
// of the nmethod's oop section must hold a well-formed, non-forwarded oop.
// Null oops and full-GC move phases are tolerated.
void ShenandoahNMethod::assert_correct() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    // Every cached location must live inside this nmethod's code or oop section.
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
  }

  // Also check the nmethod's own oop section, skipping the non-oop sentinel.
  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oop o = RawAccess<>::oop_load(p);
      shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
    }
  }
}
207
208 class ShenandoahNMethodOopDetector : public OopClosure {
209 private:
210 ResourceMark rm; // For growable array allocation below.
211 GrowableArray<oop*> _oops;
212
213 public:
214 ShenandoahNMethodOopDetector() : _oops(10) {};
215
216 void do_oop(oop* o) {
217 _oops.append(o);
218 }
219 void do_oop(narrowOop* o) {
220 fatal("NMethods should not have compressed oops embedded.");
221 }
222
223 GrowableArray<oop*>* oops() {
224 return &_oops;
225 }
226 };
227
// Debug-only cross-check of the cached oop locations against what
// nm()->oops_do() reports. On mismatch, dumps detected, recorded, and
// freshly re-parsed locations, then aborts.
void ShenandoahNMethod::assert_same_oops() {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector);

  GrowableArray<oop*>* oops = detector.oops();

  // 'count' accumulates how many locations the detector must have seen:
  // the cached immediate oops plus the live oop-section slots.
  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  if (oops->length() < count) {
    // Mismatch: build a diagnostic dump of detected vs. recorded vs.
    // re-parsed oop locations before failing.
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    GrowableArray<ShenandoahNMethodBarrier> barriers;
    bool non_immed;
    parse(nm(), check, non_immed, barriers);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.freeze());
  }
}
267 #endif
268
ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  // Start with a minimally-sized ref-counted list; append() grows it by
  // rebuilding into a larger list.
  _list = new ShenandoahNMethodList(minSize);
}
275
ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != nullptr, "Sanity");
  // Drop the table's reference; the list deletes itself when the last
  // holder (possibly an outstanding snapshot) releases it.
  _list->release();
}
280
// Register (or re-register) a nmethod with the table. Re-registration refreshes
// the cached oop data under the per-nmethod lock; a new nmethod is appended
// under the table lock. Caller must hold the CodeCache_lock.
void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);

  if (data != nullptr) {
    assert(contain(nm), "Must have been registered");
    assert(nm == data->nm(), "Must be same nmethod");
    // Prevent updating a nmethod while concurrent iteration is in progress.
    wait_until_concurrent_iteration_done();
    ShenandoahNMethodLocker data_locker(data->lock());
    data->update();
    data->update_barriers();
  } else {
    // For a new nmethod, we can safely append it to the list, because
    // concurrent iteration will not touch it.
    data = ShenandoahNMethod::for_nmethod(nm);
    assert(data != nullptr, "Sanity");
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
    data->update_barriers();
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}
309
// Remove a nmethod from the table, e.g. on unloading. Runs either at a
// safepoint or with the CodeCache_lock held.
void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != nullptr, "Sanity");
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  // Detach the GC data first so gc_data(nm) readers do not observe the
  // entry after remove() has deleted it.
  ShenandoahNMethod::attach_gc_data(nm, nullptr);
  remove(idx);
}
324
325 bool ShenandoahNMethodTable::contain(nmethod* nm) const {
326 return index_of(nm) != -1;
327 }
328
// Return the entry at the given index; index must be within the live range.
ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _list->at(index);
}
333
334 int ShenandoahNMethodTable::index_of(nmethod* nm) const {
335 for (int index = 0; index < length(); index ++) {
336 if (at(index)->nm() == nm) {
337 return index;
338 }
339 }
340 return -1;
341 }
342
343 void ShenandoahNMethodTable::remove(int idx) {
344 shenandoah_assert_locked_or_safepoint(CodeCache_lock);
345 assert(_index >= 0 && _index <= _list->size(), "Sanity");
346
347 assert(idx >= 0 && idx < _index, "Out of bound");
348 ShenandoahNMethod* snm = _list->at(idx);
349 ShenandoahNMethod* tmp = _list->at(_index - 1);
350 _list->set(idx, tmp);
351 _index --;
352
353 delete snm;
354 }
355
356 void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
357 assert(CodeCache_lock->owned_by_self(), "Lock must be held");
358 while (iteration_in_progress()) {
359 CodeCache_lock->wait_without_safepoint_check();
360 }
361 }
362
363 void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
364 if (is_full()) {
365 int new_size = 2 * _list->size();
366 // Rebuild table and replace current one
367 rebuild(new_size);
368 }
369
370 _list->set(_index++, snm);
371 assert(_index >= 0 && _index <= _list->size(), "Sanity");
372 }
373
374 void ShenandoahNMethodTable::rebuild(int size) {
375 ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
376 new_list->transfer(_list, _index);
377
378 // Release old list
379 _list->release();
380 _list = new_list;
381 }
382
383 ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
384 assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
385 _itr_cnt++;
386 return new ShenandoahNMethodTableSnapshot(this);
387 }
388
389 void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
390 assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
391 assert(iteration_in_progress(), "Why we here?");
392 assert(snapshot != nullptr, "No snapshot");
393 _itr_cnt--;
394
395 delete snapshot;
396 }
397
398 void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
399 LogTarget(Debug, gc, nmethod) log;
400 if (!log.is_enabled()) {
401 return;
402 }
403
404 ResourceMark rm;
405 log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
406 nm->method()->method_holder()->external_name(),
407 nm->method()->name()->as_C_string(),
408 p2i(nm),
409 nm->compiler_name());
410 }
411
412 void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
413 LogTarget(Debug, gc, nmethod) log;
414 if (!log.is_enabled()) {
415 return;
416 }
417
418 ResourceMark rm;
419 log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
420 nm->method()->method_holder()->external_name(),
421 nm->method()->name()->as_C_string(),
422 p2i(nm));
423 }
424
425 #ifdef ASSERT
426 void ShenandoahNMethodTable::assert_nmethods_correct() {
427 assert_locked_or_safepoint(CodeCache_lock);
428
429 for (int index = 0; index < length(); index ++) {
430 ShenandoahNMethod* m = _list->at(index);
431 // Concurrent unloading may have dead nmethods to be cleaned by sweeper
432 if (m->is_unregistered()) continue;
433 m->assert_correct();
434 }
435 }
436 #endif
437
438
ShenandoahNMethodList::ShenandoahNMethodList(int size) :
  _size(size), _ref_count(1) {
  // C-heap backing array; freed when the last reference is released.
  _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
}
443
ShenandoahNMethodList::~ShenandoahNMethodList() {
  assert(_list != nullptr, "Sanity");
  // Only release() may delete the list, and only once all refs are gone.
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}
449
450 void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
451 assert(limit <= size(), "Sanity");
452 ShenandoahNMethod** old_list = list->list();
453 for (int index = 0; index < limit; index++) {
454 _list[index] = old_list[index];
455 }
456 }
457
// Take an additional reference. The count is protected by the CodeCache_lock
// (or a safepoint), so a plain increment suffices.
ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count++;
  return this;
}
463
464 void ShenandoahNMethodList::release() {
465 assert_locked_or_safepoint(CodeCache_lock);
466 _ref_count--;
467 if (_ref_count == 0) {
468 delete this;
469 }
470 }
471
// Pin the table's current backing list (via a reference) and remember the
// live-entry count; the snapshot remains valid even if the table is rebuilt.
ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}
475
ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  // Drop the reference taken at construction; this frees the list if the
  // table has since been rebuilt and we were the last holder.
  _list->release();
}
479
// Apply the closure to every registered nmethod in the snapshot, with work
// split among workers: each worker atomically claims [start, end) chunks by
// advancing the shared _claimed cursor.
void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** const list = _list->list();

  size_t max = (size_t)_limit;
  while (_claimed.load_relaxed() < max) {
    size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    // Another worker may have claimed past the limit between the loop check
    // and our fetch-add; nothing left for us in that case.
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* nmr = list[idx];
      assert(nmr != nullptr, "Sanity");
      // Entries unregistered after the snapshot was taken are skipped.
      if (nmr->is_unregistered()) {
        continue;
      }

      nmr->assert_correct();
      f->do_nmethod(nmr->nm());
    }
  }
}
504
// Concurrent variant of the parallel iteration: same atomic chunk-claiming
// scheme, but without per-entry verification.
void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** list = _list->list();
  size_t max = (size_t)_limit;
  while (_claimed.load_relaxed() < max) {
    size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    // Another worker may have claimed the remaining chunks already.
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* data = list[idx];
      assert(data != nullptr, "Should not be null");
      // Skip entries unregistered after the snapshot was taken.
      if (!data->is_unregistered()) {
        cl->do_nmethod(data->nm());
      }
    }
  }
}
525
// The iterator starts without a snapshot; the first worker entering
// nmethods_do() materializes one under the CodeCache_lock.
ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table),
  _table_snapshot(nullptr),
  _started_workers(0),
  _finished_workers(0) {}
531
// Visit all nmethods in the snapshot concurrently. Multiple workers may call
// this; the first caller creates the snapshot, the last finisher tears it
// down and wakes any thread waiting for the iteration to complete. Worker
// bookkeeping (_started_workers/_finished_workers) is guarded by the
// CodeCache_lock.
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  // Cannot safepoint when iteration is running, because this can cause deadlocks
  // with other threads waiting on iteration to be over.
  NoSafepointVerifier nsv;

  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  if (_finished_workers > 0) {
    // Some threads have already finished. We are now in rampdown: we are now
    // waiting for all currently recorded workers to finish. No new workers
    // should start.
    return;
  }

  // Record a new worker and initialize the snapshot if it is a first visitor.
  if (_started_workers++ == 0) {
    _table_snapshot = _table->snapshot_for_iteration();
  }

  // All set, relinquish the lock and go concurrent.
  {
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _table_snapshot->concurrent_nmethods_do(cl);
  }

  // Record completion. Last worker shuts down the iterator and notifies any waiters.
  uint count = ++_finished_workers;
  if (count == _started_workers) {
    _table->finish_iteration(_table_snapshot);
    CodeCache_lock->notify_all();
  }
}