1 /*
  2  * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2019, 2022, Red Hat, Inc. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 
 27 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 28 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 30 #include "gc/shenandoah/shenandoahNMethod.inline.hpp"
 31 #include "memory/resourceArea.hpp"
 32 #include "runtime/continuation.hpp"
 33 #include "runtime/safepointVerifiers.hpp"
 34 
 35 ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops, GrowableArray<ShenandoahNMethodBarrier>& barriers) :
 36   _nm(nm), _oops(nullptr), _oops_count(0), _barriers(nullptr), _barriers_count(0), _unregistered(false), _lock(), _ic_lock() {
 37 
 38   if (!oops.is_empty()) {
 39     _oops_count = oops.length();
 40     _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
 41     for (int c = 0; c < _oops_count; c++) {
 42       _oops[c] = oops.at(c);
 43     }
 44   }
 45   _has_non_immed_oops = non_immediate_oops;
 46 
 47   assert_same_oops();
 48 
 49   if (!barriers.is_empty()) {
 50     _barriers_count = barriers.length();
 51     _barriers = NEW_C_HEAP_ARRAY(ShenandoahNMethodBarrier, _barriers_count, mtGC);
 52     for (int c = 0; c < _barriers_count; c++) {
 53       _barriers[c] = barriers.at(c);
 54     }
 55   }
 56 }
 57 
 58 ShenandoahNMethod::~ShenandoahNMethod() {
 59   if (_oops != nullptr) {
 60     FREE_C_HEAP_ARRAY(oop*, _oops);
 61   }
 62   if (_barriers != nullptr) {
 63     FREE_C_HEAP_ARRAY(ShenandoahNMethodBarrier, _barriers);
 64   }
 65 }
 66 
 67 void ShenandoahNMethod::update() {
 68   ResourceMark rm;
 69   bool non_immediate_oops = false;
 70   GrowableArray<oop*> oops;
 71   GrowableArray<ShenandoahNMethodBarrier> barriers;
 72 
 73   parse(nm(), oops, non_immediate_oops, barriers);
 74   if (oops.length() != _oops_count) {
 75     if (_oops != nullptr) {
 76       FREE_C_HEAP_ARRAY(oop*, _oops);
 77       _oops = nullptr;
 78     }
 79 
 80     _oops_count = oops.length();
 81     if (_oops_count > 0) {
 82       _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
 83     }
 84   }
 85 
 86   for (int index = 0; index < _oops_count; index ++) {
 87     _oops[index] = oops.at(index);
 88   }
 89   _has_non_immed_oops = non_immediate_oops;
 90 
 91   assert_same_oops();
 92 }
 93 
// Scan nm's relocations and collect:
//  - oops:               addresses of all non-null immediate oops,
//  - has_non_immed_oops: whether any non-immediate (indirect) oop exists,
//  - barriers:           (C2 with ShenandoahGCStateCheckHotpatch only) the
//                        GC-state barrier patch sites.
void ShenandoahNMethod::parse(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops, GrowableArray<ShenandoahNMethodBarrier>& barriers) {
  has_non_immed_oops = false;
  RelocIterator iter(nm);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::oop_type: {
        oop_Relocation* r = iter.oop_reloc();
        if (!r->oop_is_immediate()) {
          // Non-immediate oop found
          has_non_immed_oops = true;
          break;
        }

        oop value = r->oop_value();
        if (value != nullptr) {
          oop* addr = r->oop_addr();
          shenandoah_assert_correct(addr, value);
          shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
          shenandoah_assert_not_forwarded(addr, value);
          // Non-null immediate oop found. null oops can safely be
          // ignored since the method will be re-registered if they
          // are later patched to be non-null.
          oops.push(addr);
        }
        break;
      }
#ifdef COMPILER2
      case relocInfo::barrier_type: {
        assert(ShenandoahGCStateCheckHotpatch, "Who emits these?");
        barrier_Relocation* r = iter.barrier_reloc();

        // Record the patchable instruction address and its slow-path stub.
        ShenandoahNMethodBarrier b;
        b._pc = r->addr();
        b._stub_addr = ShenandoahBarrierSetAssembler::parse_stub_address(b._pc);
        // TODO: Can technically figure out which GC state we care about in this reloc.
        // b._gc_state_fast_bit = r->format();
        barriers.push(b);
        break;
      }
#endif
      default:
        // We do not care about other relocations.
        break;
    }
  }
}
140 
141 ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
142   ResourceMark rm;
143   bool non_immediate_oops = false;
144   GrowableArray<oop*> oops;
145   GrowableArray<ShenandoahNMethodBarrier> barriers;
146 
147   parse(nm, oops, non_immediate_oops, barriers);
148   return new ShenandoahNMethod(nm, oops, non_immediate_oops, barriers);
149 }
150 
// Bring the nmethod's embedded oops to a state consistent with the current
// GC phase. Caller must hold the per-nmethod lock.
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != nullptr, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_weak_root_in_progress() ||
      heap->is_concurrent_strong_root_in_progress()) {
    // Concurrent root processing: heal metadata under the evac-OOM protocol,
    // since evacuation may be required.
    ShenandoahEvacOOMScope evac_scope;
    heal_nmethod_metadata(data);
  } else if (heap->is_concurrent_mark_in_progress()) {
    // Concurrent marking: keep the embedded oops alive for the marker.
    ShenandoahKeepAliveClosure cl;
    data->oops_do(&cl);
  } else {
    // There is possibility that GC is cancelled when it arrives final mark.
    // In this case, concurrent root phase is skipped and degenerated GC should be
    // followed, where nmethods are disarmed.
  }
}
170 
171 void ShenandoahNMethod::update_barriers() {
172 #ifdef COMPILER2
173   if (!ShenandoahGCStateCheckHotpatch) {
174     return;
175   }
176 
177   ShenandoahHeap* heap = ShenandoahHeap::heap();
178 
179   for (int c = 0; c < _barriers_count; c++) {
180     address pc = _barriers[c]._pc;
181     address stub_addr = _barriers[c]._stub_addr;
182     if (heap->is_idle()) {
183       ShenandoahBarrierSetAssembler::patch_branch_to_nop(pc);
184     } else {
185       ShenandoahBarrierSetAssembler::patch_nop_to_branch(pc, stub_addr);
186     }
187   }
188 #endif
189 }
190 
191 #ifdef ASSERT
192 void ShenandoahNMethod::assert_correct() {
193   ShenandoahHeap* heap = ShenandoahHeap::heap();
194   for (int c = 0; c < _oops_count; c++) {
195     oop *loc = _oops[c];
196     assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
197     oop o = RawAccess<>::oop_load(loc);
198     shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
199   }
200 
201   oop* const begin = _nm->oops_begin();
202   oop* const end = _nm->oops_end();
203   for (oop* p = begin; p < end; p++) {
204     if (*p != Universe::non_oop_word()) {
205       oop o = RawAccess<>::oop_load(p);
206       shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
207     }
208   }
209 }
210 
211 class ShenandoahNMethodOopDetector : public OopClosure {
212 private:
213   ResourceMark rm; // For growable array allocation below.
214   GrowableArray<oop*> _oops;
215 
216 public:
217   ShenandoahNMethodOopDetector() : _oops(10) {};
218 
219   void do_oop(oop* o) {
220     _oops.append(o);
221   }
222   void do_oop(narrowOop* o) {
223     fatal("NMethods should not have compressed oops embedded.");
224   }
225 
226   GrowableArray<oop*>* oops() {
227     return &_oops;
228   }
229 };
230 
// Debug check: the oop locations found by nmethod::oops_do must cover both the
// recorded immediate-oop locations and the nmethod's oop-table entries, and
// the totals must agree. On mismatch, dump all three views and abort.
void ShenandoahNMethod::assert_same_oops() {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector);

  GrowableArray<oop*>* oops = detector.oops();

  // Expected total: recorded locations, plus oop-table entries counted below.
  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  if (oops->length() < count) {
    // Mismatch: dump detected vs. recorded vs. freshly re-parsed locations.
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    GrowableArray<ShenandoahNMethodBarrier> barriers;
    bool non_immed;
    parse(nm(), check, non_immed, barriers);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.freeze());
  }
}
270 #endif
271 
// Table starts empty with no iterators in flight; the backing list begins at
// the minimum size and is doubled on demand by append().
ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  _list = new ShenandoahNMethodList(minSize);
}
278 
ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != nullptr, "Sanity");
  // Drop the table's reference; the list deletes itself when unreferenced.
  _list->release();
}
283 
// Register nm with the table, or refresh its recorded data if it is already
// registered. In both cases the nmethod's barrier sites are (re)patched and
// the nmethod is disarmed. Caller holds CodeCache_lock.
void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);

  if (data != nullptr) {
    // Re-registration: update the existing record in place.
    assert(contain(nm), "Must have been registered");
    assert(nm == data->nm(), "Must be same nmethod");
    // Prevent updating a nmethod while concurrent iteration is in progress.
    wait_until_concurrent_iteration_done();
    ShenandoahNMethodLocker data_locker(data->lock());
    data->update();
    data->update_barriers();
  } else {
    // For a new nmethod, we can safely append it to the list, because
    // concurrent iteration will not touch it.
    data = ShenandoahNMethod::for_nmethod(nm);
    assert(data != nullptr, "Sanity");
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
    data->update_barriers();
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}
312 
// Remove nm from the table: detach its gc-data and delete the table entry
// (remove() also destroys the ShenandoahNMethod record).
void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != nullptr, "Sanity");
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, nullptr);
  remove(idx);
}
327 
328 bool ShenandoahNMethodTable::contain(nmethod* nm) const {
329   return index_of(nm) != -1;
330 }
331 
// Entry at index; index must be within the live prefix [0, _index).
ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _list->at(index);
}
336 
337 int ShenandoahNMethodTable::index_of(nmethod* nm) const {
338   for (int index = 0; index < length(); index ++) {
339     if (at(index)->nm() == nm) {
340       return index;
341     }
342   }
343   return -1;
344 }
345 
346 void ShenandoahNMethodTable::remove(int idx) {
347   shenandoah_assert_locked_or_safepoint(CodeCache_lock);
348   assert(_index >= 0 && _index <= _list->size(), "Sanity");
349 
350   assert(idx >= 0 && idx < _index, "Out of bound");
351   ShenandoahNMethod* snm = _list->at(idx);
352   ShenandoahNMethod* tmp = _list->at(_index - 1);
353   _list->set(idx, tmp);
354   _index --;
355 
356   delete snm;
357 }
358 
// Block until no concurrent iteration over the table is in progress.
// The last finishing iteration worker notifies on CodeCache_lock.
void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}
365 
366 void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
367   if (is_full()) {
368     int new_size = 2 * _list->size();
369     // Rebuild table and replace current one
370     rebuild(new_size);
371   }
372 
373   _list->set(_index++,  snm);
374   assert(_index >= 0 && _index <= _list->size(), "Sanity");
375 }
376 
377 void ShenandoahNMethodTable::rebuild(int size) {
378   ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
379   new_list->transfer(_list, _index);
380 
381   // Release old list
382   _list->release();
383   _list = new_list;
384 }
385 
// Begin an iteration: bump the iterator count (register_nmethod() waits for
// it to drain) and hand out a snapshot of the current list state.
ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  _itr_cnt++;
  return new ShenandoahNMethodTableSnapshot(this);
}
391 
// End an iteration: drop the iterator count and destroy the snapshot
// (which releases its reference to the list).
void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(iteration_in_progress(), "Why we here?");
  assert(snapshot != nullptr, "No snapshot");
  _itr_cnt--;

  delete snapshot;
}
400 
401 void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
402   LogTarget(Debug, gc, nmethod) log;
403   if (!log.is_enabled()) {
404     return;
405   }
406 
407   ResourceMark rm;
408   log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
409             nm->method()->method_holder()->external_name(),
410             nm->method()->name()->as_C_string(),
411             p2i(nm),
412             nm->compiler_name());
413 }
414 
415 void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
416   LogTarget(Debug, gc, nmethod) log;
417   if (!log.is_enabled()) {
418     return;
419   }
420 
421   ResourceMark rm;
422   log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
423             nm->method()->method_holder()->external_name(),
424             nm->method()->name()->as_C_string(),
425             p2i(nm));
426 }
427 
428 #ifdef ASSERT
429 void ShenandoahNMethodTable::assert_nmethods_correct() {
430   assert_locked_or_safepoint(CodeCache_lock);
431 
432   for (int index = 0; index < length(); index ++) {
433     ShenandoahNMethod* m = _list->at(index);
434     // Concurrent unloading may have dead nmethods to be cleaned by sweeper
435     if (m->is_unregistered()) continue;
436     m->assert_correct();
437   }
438 }
439 #endif
440 
441 
442 ShenandoahNMethodList::ShenandoahNMethodList(int size) :
443   _size(size), _ref_count(1) {
444   _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
445 }
446 
// Only reachable via release() when the refcount hits zero.
ShenandoahNMethodList::~ShenandoahNMethodList() {
  assert(_list != nullptr, "Sanity");
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}
452 
453 void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
454   assert(limit <= size(), "Sanity");
455   ShenandoahNMethod** old_list = list->list();
456   for (int index = 0; index < limit; index++) {
457     _list[index] = old_list[index];
458   }
459 }
460 
461 ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
462   assert_locked_or_safepoint(CodeCache_lock);
463   _ref_count++;
464   return this;
465 }
466 
467 void ShenandoahNMethodList::release() {
468   assert_locked_or_safepoint(CodeCache_lock);
469   _ref_count--;
470   if (_ref_count == 0) {
471     delete this;
472   }
473 }
474 
// Pin the table's current list (via refcount) and capture the live-prefix
// length; entries appended after this point are not visited.
ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}
478 
ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  // Drop our reference; frees the list if the table has since rebuilt it.
  _list->release();
}
482 
483 void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
484   size_t stride = 256; // educated guess
485 
486   ShenandoahNMethod** const list = _list->list();
487 
488   size_t max = (size_t)_limit;
489   while (_claimed.load_relaxed() < max) {
490     size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
491     size_t start = cur;
492     size_t end = MIN2(cur + stride, max);
493     if (start >= max) break;
494 
495     for (size_t idx = start; idx < end; idx++) {
496       ShenandoahNMethod* nmr = list[idx];
497       assert(nmr != nullptr, "Sanity");
498       if (nmr->is_unregistered()) {
499         continue;
500       }
501 
502       nmr->assert_correct();
503       f->do_nmethod(nmr->nm());
504     }
505   }
506 }
507 
508 void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
509   size_t stride = 256; // educated guess
510 
511   ShenandoahNMethod** list = _list->list();
512   size_t max = (size_t)_limit;
513   while (_claimed.load_relaxed() < max) {
514     size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
515     size_t start = cur;
516     size_t end = MIN2(cur + stride, max);
517     if (start >= max) break;
518 
519     for (size_t idx = start; idx < end; idx++) {
520       ShenandoahNMethod* data = list[idx];
521       assert(data != nullptr, "Should not be null");
522       if (!data->is_unregistered()) {
523         cl->do_nmethod(data->nm());
524       }
525     }
526   }
527 }
528 
// Iterator starts with no snapshot and no workers recorded; the first worker
// entering nmethods_do() creates the snapshot lazily.
ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table),
  _table_snapshot(nullptr),
  _started_workers(0),
  _finished_workers(0) {}
534 
// Worker entry point for concurrent nmethod iteration. Multiple workers may
// call this concurrently; the first arrival creates the table snapshot and
// the last to finish tears it down and wakes any waiters on CodeCache_lock.
// Worker bookkeeping is done under CodeCache_lock; the iteration itself runs
// with the lock released.
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  // Cannot safepoint when iteration is running, because this can cause deadlocks
  // with other threads waiting on iteration to be over.
  NoSafepointVerifier nsv;

  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  if (_finished_workers > 0) {
    // Some threads have already finished. We are now in rampdown: we are now
    // waiting for all currently recorded workers to finish. No new workers
    // should start.
    return;
  }

  // Record a new worker and initialize the snapshot if it is a first visitor.
  if (_started_workers++ == 0) {
    _table_snapshot = _table->snapshot_for_iteration();
  }

  // All set, relinquish the lock and go concurrent.
  {
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _table_snapshot->concurrent_nmethods_do(cl);
  }

  // Record completion. Last worker shuts down the iterator and notifies any waiters.
  uint count = ++_finished_workers;
  if (count == _started_workers) {
    _table->finish_iteration(_table_snapshot);
    CodeCache_lock->notify_all();
  }
}