1 /*
  2  * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2019, 2022, Red Hat, Inc. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 
 27 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 29 #include "gc/shenandoah/shenandoahNMethod.inline.hpp"
 30 #include "memory/resourceArea.hpp"
 31 #include "runtime/continuation.hpp"
 32 #include "runtime/safepointVerifiers.hpp"
 33 
 34 ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops, GrowableArray<ShenandoahNMethodBarrier>& barriers) :
 35   _nm(nm), _oops(nullptr), _oops_count(0), _barriers(nullptr), _unregistered(false), _lock(), _ic_lock() {
 36 
 37   if (!oops.is_empty()) {
 38     _oops_count = oops.length();
 39     _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
 40     for (int c = 0; c < _oops_count; c++) {
 41       _oops[c] = oops.at(c);
 42     }
 43   }
 44   _has_non_immed_oops = non_immediate_oops;
 45 
 46   assert_same_oops();
 47 
 48   if (!barriers.is_empty()) {
 49     _barriers_count = barriers.length();
 50     _barriers = NEW_C_HEAP_ARRAY(ShenandoahNMethodBarrier, _barriers_count, mtGC);
 51     for (int c = 0; c < _barriers_count; c++) {
 52       _barriers[c] = barriers.at(c);
 53     }
 54   }
 55 }
 56 
ShenandoahNMethod::~ShenandoahNMethod() {
  // Release the C-heap copies of the oop locations and barrier descriptors
  // allocated by the constructor / update().
  if (_oops != nullptr) {
    FREE_C_HEAP_ARRAY(oop*, _oops);
  }
  if (_barriers != nullptr) {
    FREE_C_HEAP_ARRAY(ShenandoahNMethodBarrier, _barriers);
  }
}
 65 
// Re-parse the nmethod and refresh the recorded immediate-oop locations.
// The oop array is reallocated only when the number of oops changed.
// NOTE(review): the freshly-parsed 'barriers' are discarded here — presumably
// barrier relocations cannot change after the nmethod is created; confirm.
void ShenandoahNMethod::update() {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;
  GrowableArray<ShenandoahNMethodBarrier> barriers;

  parse(nm(), oops, non_immediate_oops, barriers);
  if (oops.length() != _oops_count) {
    if (_oops != nullptr) {
      FREE_C_HEAP_ARRAY(oop*, _oops);
      _oops = nullptr;
    }

    _oops_count = oops.length();
    if (_oops_count > 0) {
      _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    }
  }

  // Copy the freshly parsed locations into the (possibly reused) array.
  for (int index = 0; index < _oops_count; index ++) {
    _oops[index] = oops.at(index);
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}
 92 
// Walk the relocation records of 'nm', collecting:
//  - addresses of non-null immediate oops into 'oops',
//  - whether any non-immediate oop relocations exist ('has_non_immed_oops'),
//  - GC-state barrier patch sites into 'barriers'.
void ShenandoahNMethod::parse(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops, GrowableArray<ShenandoahNMethodBarrier>& barriers) {
  has_non_immed_oops = false;
  RelocIterator iter(nm);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::oop_type: {
        oop_Relocation* r = iter.oop_reloc();
        if (!r->oop_is_immediate()) {
          // Non-immediate oop found
          has_non_immed_oops = true;
          break;
        }

        oop value = r->oop_value();
        if (value != nullptr) {
          oop* addr = r->oop_addr();
          shenandoah_assert_correct(addr, value);
          shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
          shenandoah_assert_not_forwarded(addr, value);
          // Non-null immediate oop found. null oops can safely be
          // ignored since the method will be re-registered if they
          // are later patched to be non-null.
          oops.push(addr);
        }
        break;
      }
      case relocInfo::barrier_type: {
        // Barrier relocations are only expected when hot-patching is enabled.
        assert(ShenandoahGCStateCheckHotpatch, "Who emits these?");
        barrier_Relocation* r = iter.barrier_reloc();

        // TODO: Move to assembler?
#ifdef AMD64
        // The initial code at the patch site is a jump to the barrier stub;
        // record both the site and the stub target for later re-patching.
        NativeInstruction* ni = nativeInstruction_at(r->addr());
        assert(ni->is_jump(), "Initial code version: GC barrier fastpath must be a jump");
        NativeJump* jmp = nativeJump_at(r->addr());

        ShenandoahNMethodBarrier b;
        b._barrier_pc = r->addr();
        b._stub_addr = jmp->jump_destination();
        // TODO: Can technically figure out which GC state we care about in this reloc.
        // b._gc_state_fast_bit = r->format();
        barriers.push(b);
#else
        Unimplemented();
#endif

        break;
      }
      default:
        // We do not care about other relocations.
        break;
    }
  }
}
147 
148 ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
149   ResourceMark rm;
150   bool non_immediate_oops = false;
151   GrowableArray<oop*> oops;
152   GrowableArray<ShenandoahNMethodBarrier> barriers;
153 
154   parse(nm, oops, non_immediate_oops, barriers);
155   return new ShenandoahNMethod(nm, oops, non_immediate_oops, barriers);
156 }
157 
// Fix up the GC-related state of 'nm' according to the current GC phase:
// evacuate/update embedded oops during concurrent root processing, or keep
// them alive during concurrent mark. Caller must hold the per-nmethod lock.
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != nullptr, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_weak_root_in_progress() ||
      heap->is_concurrent_strong_root_in_progress()) {
    // Healing may trigger evacuation; guard against evacuation OOM.
    ShenandoahEvacOOMScope evac_scope;
    heal_nmethod_metadata(data);
  } else if (heap->is_concurrent_mark_in_progress()) {
    // Keep the embedded oops alive so concurrent mark sees them.
    ShenandoahKeepAliveClosure cl;
    data->oops_do(&cl);
  } else {
    // There is possibility that GC is cancelled when it arrives final mark.
    // In this case, concurrent root phase is skipped and degenerated GC should be
    // followed, where nmethods are disarmed.
  }

  // Update all barriers
  data->update_barriers();
}
180 
181 #ifdef AMD64
182 void insert_5_byte_nop(address pc) {
183   *(pc + 0) = 0x0F;
184   *(pc + 1) = 0x1F;
185   *(pc + 2) = 0x44;
186   *(pc + 3) = 0x00;
187   *(pc + 4) = 0x00;
188   ICache::invalidate_range(pc, 5);
189 }
190 
191 bool is_5_byte_nop(address pc) {
192   if (*(pc + 0) != 0x0F) return false;
193   if (*(pc + 1) != 0x1F) return false;
194   if (*(pc + 2) != 0x44) return false;
195   if (*(pc + 3) != 0x00) return false;
196   if (*(pc + 4) != 0x00) return false;
197   return true;
198 }
199 #endif
200 
201 void ShenandoahNMethod::update_barriers() {
202   if (!ShenandoahGCStateCheckHotpatch) return;
203 
204   ShenandoahHeap* heap = ShenandoahHeap::heap();
205 
206   for (int c = 0; c < _barriers_count; c++) {
207     ShenandoahNMethodBarrier& barrier = _barriers[c];
208 
209     address pc = barrier._barrier_pc;
210     NativeInstruction* ni = nativeInstruction_at(pc);
211 
212     // TODO: This should really be in assembler?
213  #ifdef AMD64
214     if (heap->is_idle()) {
215       // Heap is idle: insert nops
216       if (ni->is_jump()) {
217         insert_5_byte_nop(pc);
218       } else {
219         assert(is_5_byte_nop(pc), "Sanity: should already be nop at PC " PTR_FORMAT ": %02x%02x%02x%02x%02x",
220                p2i(pc), *(pc + 0), *(pc + 1), *(pc + 2), *(pc + 3), *(pc + 4));
221       }
222     } else {
223       // Heap is active: insert jumps to barrier stubs
224       if (is_5_byte_nop(pc)) {
225         NativeJump::insert(pc, barrier._stub_addr);
226       } else {
227         assert(ni->is_jump(), "Sanity: should already be jump at PC " PTR_FORMAT ": %02x%02x%02x%02x%02x",
228               p2i(pc), *(pc + 0), *(pc + 1), *(pc + 2), *(pc + 3), *(pc + 4));
229       }
230     }
231 #else
232     Unimplemented();
233 #endif
234   }
235 }
236 
237 #ifdef ASSERT
// Debug-only: verify that every recorded immediate-oop location lies inside
// the nmethod and holds a well-formed oop, then check the nmethod's oop
// section the same way.
void ShenandoahNMethod::assert_correct() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    // Oops may legitimately be stale while full-GC moves objects.
    shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
  }

  // Scan the oop section, skipping the non-oop sentinel slots.
  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oop o = RawAccess<>::oop_load(p);
      shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
    }
  }
}
256 
257 class ShenandoahNMethodOopDetector : public OopClosure {
258 private:
259   ResourceMark rm; // For growable array allocation below.
260   GrowableArray<oop*> _oops;
261 
262 public:
263   ShenandoahNMethodOopDetector() : _oops(10) {};
264 
265   void do_oop(oop* o) {
266     _oops.append(o);
267   }
268   void do_oop(narrowOop* o) {
269     fatal("NMethods should not have compressed oops embedded.");
270   }
271 
272   GrowableArray<oop*>* oops() {
273     return &_oops;
274   }
275 };
276 
// Debug-only: cross-check that the oop locations recorded in this structure
// agree with what nmethod::oops_do reports right now.
void ShenandoahNMethod::assert_same_oops() {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector);

  GrowableArray<oop*>* oops = detector.oops();

  // Every recorded immediate-oop location must be rediscovered.
  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  // Every oop-section entry (minus sentinels) must be rediscovered too.
  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  // If the detector reports fewer locations than we account for, dump the
  // detected set, the recorded set, and a fresh parse, then fail.
  if (oops->length() < count) {
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    GrowableArray<ShenandoahNMethodBarrier> barriers;
    bool non_immed;
    parse(nm(), check, non_immed, barriers);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.freeze());
  }
}
316 #endif
317 
ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  // Start with a minimally sized list; append() grows it on demand.
  _list = new ShenandoahNMethodList(minSize);
}
324 
ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != nullptr, "Sanity");
  // Drop our reference; the list deletes itself when the count reaches zero
  // (snapshots may still hold references).
  _list->release();
}
329 
330 void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
331   assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
332   assert(_index >= 0 && _index <= _list->size(), "Sanity");
333 
334   ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
335 
336   if (data != nullptr) {
337     assert(contain(nm), "Must have been registered");
338     assert(nm == data->nm(), "Must be same nmethod");
339     // Prevent updating a nmethod while concurrent iteration is in progress.
340     wait_until_concurrent_iteration_done();
341     ShenandoahNMethodLocker data_locker(data->lock());
342     data->update();
343     data->update_barriers();
344   } else {
345     // For a new nmethod, we can safely append it to the list, because
346     // concurrent iteration will not touch it.
347     data = ShenandoahNMethod::for_nmethod(nm);
348     data->update_barriers();
349     assert(data != nullptr, "Sanity");
350     ShenandoahNMethod::attach_gc_data(nm, data);
351     ShenandoahLocker locker(&_lock);
352     log_register_nmethod(nm);
353     append(data);
354   }
355   // Disarm new nmethod
356   ShenandoahNMethod::disarm_nmethod(nm);
357 }
358 
// Remove 'nm' from the table, detach its GC data and delete it.
void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != nullptr, "Sanity");
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  // Detach the GC data first, then remove (and delete) the table entry.
  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, nullptr);
  remove(idx);
}
373 
374 bool ShenandoahNMethodTable::contain(nmethod* nm) const {
375   return index_of(nm) != -1;
376 }
377 
// Bounds-checked access into the backing list.
ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _list->at(index);
}
382 
383 int ShenandoahNMethodTable::index_of(nmethod* nm) const {
384   for (int index = 0; index < length(); index ++) {
385     if (at(index)->nm() == nm) {
386       return index;
387     }
388   }
389   return -1;
390 }
391 
// Unordered removal: the last entry is moved into the vacated slot and the
// removed entry is deleted.
void ShenandoahNMethodTable::remove(int idx) {
  shenandoah_assert_locked_or_safepoint(CodeCache_lock);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  assert(idx >= 0 && idx < _index, "Out of bound");
  ShenandoahNMethod* snm = _list->at(idx);
  ShenandoahNMethod* tmp = _list->at(_index - 1);
  _list->set(idx, tmp);
  _index --;

  delete snm;
}
404 
// Block until no concurrent iteration is running. Woken by the notify_all
// that the last iterator worker performs on CodeCache_lock; the loop guards
// against spurious wakeups.
void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}
411 
412 void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
413   if (is_full()) {
414     int new_size = 2 * _list->size();
415     // Rebuild table and replace current one
416     rebuild(new_size);
417   }
418 
419   _list->set(_index++,  snm);
420   assert(_index >= 0 && _index <= _list->size(), "Sanity");
421 }
422 
423 void ShenandoahNMethodTable::rebuild(int size) {
424   ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
425   new_list->transfer(_list, _index);
426 
427   // Release old list
428   _list->release();
429   _list = new_list;
430 }
431 
// Create a snapshot of the current table for iteration, and count the
// outstanding iteration; balanced by finish_iteration().
ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  _itr_cnt++;
  return new ShenandoahNMethodTableSnapshot(this);
}
437 
// Retire a snapshot created by snapshot_for_iteration() and decrement the
// outstanding-iteration count.
void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(iteration_in_progress(), "Why we here?");
  assert(snapshot != nullptr, "No snapshot");
  _itr_cnt--;

  delete snapshot;
}
446 
447 void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
448   LogTarget(Debug, gc, nmethod) log;
449   if (!log.is_enabled()) {
450     return;
451   }
452 
453   ResourceMark rm;
454   log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
455             nm->method()->method_holder()->external_name(),
456             nm->method()->name()->as_C_string(),
457             p2i(nm),
458             nm->compiler_name());
459 }
460 
461 void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
462   LogTarget(Debug, gc, nmethod) log;
463   if (!log.is_enabled()) {
464     return;
465   }
466 
467   ResourceMark rm;
468   log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
469             nm->method()->method_holder()->external_name(),
470             nm->method()->name()->as_C_string(),
471             p2i(nm));
472 }
473 
474 #ifdef ASSERT
// Debug-only: run assert_correct() on every live entry in the table.
void ShenandoahNMethodTable::assert_nmethods_correct() {
  assert_locked_or_safepoint(CodeCache_lock);

  for (int index = 0; index < length(); index ++) {
    ShenandoahNMethod* m = _list->at(index);
    // Concurrent unloading may have dead nmethods to be cleaned by sweeper
    if (m->is_unregistered()) continue;
    m->assert_correct();
  }
}
485 #endif
486 
487 
ShenandoahNMethodList::ShenandoahNMethodList(int size) :
  _size(size), _ref_count(1) {
  // Backing array lives in C-heap; the creator holds the initial reference.
  _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
}
492 
// Only reached via release() when the reference count drops to zero.
ShenandoahNMethodList::~ShenandoahNMethodList() {
  assert(_list != nullptr, "Sanity");
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}
498 
499 void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
500   assert(limit <= size(), "Sanity");
501   ShenandoahNMethod** old_list = list->list();
502   for (int index = 0; index < limit; index++) {
503     _list[index] = old_list[index];
504   }
505 }
506 
// Take a reference on this list. The count is protected by CodeCache_lock,
// not by atomics.
ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count++;
  return this;
}
512 
// Drop a reference; the last release deletes the list.
void ShenandoahNMethodList::release() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count--;
  if (_ref_count == 0) {
    delete this;
  }
}
520 
// Pin the table's current backing list (via acquire) and record the current
// length, so iteration sees a stable view even if the table grows afterwards.
ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}
524 
ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  // Drop the list reference taken in the constructor.
  _list->release();
}
528 
529 void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
530   size_t stride = 256; // educated guess
531 
532   ShenandoahNMethod** const list = _list->list();
533 
534   size_t max = (size_t)_limit;
535   while (_claimed.load_relaxed() < max) {
536     size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
537     size_t start = cur;
538     size_t end = MIN2(cur + stride, max);
539     if (start >= max) break;
540 
541     for (size_t idx = start; idx < end; idx++) {
542       ShenandoahNMethod* nmr = list[idx];
543       assert(nmr != nullptr, "Sanity");
544       if (nmr->is_unregistered()) {
545         continue;
546       }
547 
548       nmr->assert_correct();
549       f->do_nmethod(nmr->nm());
550     }
551   }
552 }
553 
554 void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
555   size_t stride = 256; // educated guess
556 
557   ShenandoahNMethod** list = _list->list();
558   size_t max = (size_t)_limit;
559   while (_claimed.load_relaxed() < max) {
560     size_t cur = _claimed.fetch_then_add(stride, memory_order_relaxed);
561     size_t start = cur;
562     size_t end = MIN2(cur + stride, max);
563     if (start >= max) break;
564 
565     for (size_t idx = start; idx < end; idx++) {
566       ShenandoahNMethod* data = list[idx];
567       assert(data != nullptr, "Should not be null");
568       if (!data->is_unregistered()) {
569         cl->do_nmethod(data->nm());
570       }
571     }
572   }
573 }
574 
// The snapshot is created lazily by the first worker entering nmethods_do().
ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table),
  _table_snapshot(nullptr),
  _started_workers(0),
  _finished_workers(0) {}
580 
// Entry point for each worker participating in the concurrent iteration.
// The first worker to arrive creates the table snapshot; the last worker to
// finish retires it and notifies waiters on CodeCache_lock. All bookkeeping
// happens under CodeCache_lock; the iteration itself runs with it released.
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  // Cannot safepoint when iteration is running, because this can cause deadlocks
  // with other threads waiting on iteration to be over.
  NoSafepointVerifier nsv;

  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  if (_finished_workers > 0) {
    // Some threads have already finished. We are now in rampdown: we are now
    // waiting for all currently recorded workers to finish. No new workers
    // should start.
    return;
  }

  // Record a new worker and initialize the snapshot if it is a first visitor.
  if (_started_workers++ == 0) {
    _table_snapshot = _table->snapshot_for_iteration();
  }

  // All set, relinquish the lock and go concurrent.
  {
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _table_snapshot->concurrent_nmethods_do(cl);
  }

  // Record completion. Last worker shuts down the iterator and notifies any waiters.
  uint count = ++_finished_workers;
  if (count == _started_workers) {
    _table->finish_iteration(_table_snapshot);
    CodeCache_lock->notify_all();
  }
}