/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/continuation.hpp"
#include "runtime/safepointVerifiers.hpp"

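// Snapshot the detected immediate oop* locations into a C-heap array owned by
// this ShenandoahNMethod, and remember whether any non-immediate oops exist.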
ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
  _nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false) {

  if (!oops.is_empty()) {
    _oops_count = oops.length();
    _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    for (int c = 0; c < _oops_count; c++) {
      _oops[c] = oops.at(c);
    }
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}

ShenandoahNMethod::~ShenandoahNMethod() {
  if (_oops != nullptr) {
    FREE_C_HEAP_ARRAY(oop*, _oops);
  }
}

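// Re-scan the nmethod's relocations and refresh the recorded oop* locations.
// Called under the per-nmethod ShenandoahReentrantLock when an already
// registered nmethod is registered again (e.g. after its oops were patched).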
void ShenandoahNMethod::update() {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm(), oops, non_immediate_oops);
  if (oops.length() != _oops_count) {
    if (_oops != nullptr) {
      FREE_C_HEAP_ARRAY(oop*, _oops);
      _oops = nullptr;
    }

    _oops_count = oops.length();
    if (_oops_count > 0) {
      _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    }
  }

  for (int index = 0; index < _oops_count; index ++) {
    _oops[index] = oops.at(index);
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}

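// Walk the nmethod's oop relocations, collecting the addresses of non-null
// immediate oops, and note whether any non-immediate oops were seen.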
void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops) {
  has_non_immed_oops = false;
  // Find all oop relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();
    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      has_non_immed_oops = true;
      continue;
    }

    oop value = r->oop_value();
    if (value != nullptr) {
      oop* addr = r->oop_addr();
      shenandoah_assert_correct(addr, value);
      shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
      shenandoah_assert_not_forwarded(addr, value);
      // Non-null immediate oop found. null oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-null.
      oops.push(addr);
    }
  }
}

ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm, oops, non_immediate_oops);
  return new ShenandoahNMethod(nm, oops, non_immediate_oops);
}

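// Heal the oops embedded in an nmethod according to the current GC phase:
// during concurrent root processing the nmethod metadata is healed under an
// evac-OOM scope; during concurrent marking the oops are kept alive instead.
// Caller must hold the per-nmethod ShenandoahReentrantLock.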
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != nullptr, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_weak_root_in_progress() ||
      heap->is_concurrent_strong_root_in_progress()) {
    ShenandoahEvacOOMScope evac_scope;
    heal_nmethod_metadata(data);
  } else if (heap->is_concurrent_mark_in_progress()) {
    ShenandoahKeepAliveClosure cl;
    data->oops_do(&cl);
  } else {
    // There is a possibility that GC is cancelled when it arrives at final mark.
    // In this case, the concurrent root phase is skipped and a degenerated GC
    // follows, where nmethods are disarmed.
  }
}

#ifdef ASSERT
void ShenandoahNMethod::assert_correct() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
  }

  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oop o = RawAccess<>::oop_load(p);
      shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
    }
  }
}

class ShenandoahNMethodOopDetector : public OopClosure {
private:
  ResourceMark rm; // For growable array allocation below.
  GrowableArray<oop*> _oops;

public:
  ShenandoahNMethodOopDetector() : _oops(10) {};

  void do_oop(oop* o) {
    _oops.append(o);
  }
  void do_oop(narrowOop* o) {
    fatal("NMethods should not have compressed oops embedded.");
  }

  GrowableArray<oop*>* oops() {
    return &_oops;
  }
};

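// Cross-check the recorded oop* locations against what the nmethod itself
// reports via oops_do() and a fresh relocation scan; fail with a detailed
// dump if they diverge.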
void ShenandoahNMethod::assert_same_oops(bool allow_dead) {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector, allow_dead);

  GrowableArray<oop*>* oops = detector.oops();

  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  if (oops->length() < count) {
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    bool non_immed;
    detect_reloc_oops(nm(), check, non_immed);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.freeze());
  }
}
#endif

ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  _list = new ShenandoahNMethodList(minSize);
}

ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != nullptr, "Sanity");
  _list->release();
}

void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);

  if (data != nullptr) {
    assert(contain(nm), "Must have been registered");
    assert(nm == data->nm(), "Must be same nmethod");
    // Prevent updating a nmethod while concurrent iteration is in progress.
    wait_until_concurrent_iteration_done();
    ShenandoahReentrantLocker data_locker(data->lock());
    data->update();
  } else {
    // For a new nmethod, we can safely append it to the list, because
    // concurrent iteration will not touch it.
    data = ShenandoahNMethod::for_nmethod(nm);
    assert(data != nullptr, "Sanity");
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}

void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != nullptr, "Sanity");
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, nullptr);
  remove(idx);
}

bool ShenandoahNMethodTable::contain(nmethod* nm) const {
  return index_of(nm) != -1;
}

ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _list->at(index);
}

int ShenandoahNMethodTable::index_of(nmethod* nm) const {
  for (int index = 0; index < length(); index ++) {
    if (at(index)->nm() == nm) {
      return index;
    }
  }
  return -1;
}

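// Remove the entry at idx by swapping in the last live entry, then shrink the
// table by one. Order is not preserved; lookups are linear anyway.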
void ShenandoahNMethodTable::remove(int idx) {
  shenandoah_assert_locked_or_safepoint(CodeCache_lock);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  assert(idx >= 0 && idx < _index, "Out of bound");
  ShenandoahNMethod* snm = _list->at(idx);
  ShenandoahNMethod* tmp = _list->at(_index - 1);
  _list->set(idx, tmp);
  _index --;

  delete snm;
}

void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}

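// Append a new entry, doubling the backing list via rebuild() when it is full.
// The old list stays alive until concurrent iterators release their references.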
void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
  if (is_full()) {
    int new_size = 2 * _list->size();
    // Rebuild table and replace current one
    rebuild(new_size);
  }

  _list->set(_index++, snm);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");
}

void ShenandoahNMethodTable::rebuild(int size) {
  ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
  new_list->transfer(_list, _index);

  // Release old list
  _list->release();
  _list = new_list;
}

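// Hand out a snapshot for concurrent iteration. The snapshot takes a reference
// on the current backing list, so a later rebuild() cannot free it underneath
// the iterating threads.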
ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  _itr_cnt++;
  return new ShenandoahNMethodTableSnapshot(this);
}

void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(iteration_in_progress(), "Why are we here?");
  assert(snapshot != nullptr, "No snapshot");
  _itr_cnt--;

  delete snapshot;
}

void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name());
}

void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}

#ifdef ASSERT
void ShenandoahNMethodTable::assert_nmethods_correct() {
  assert_locked_or_safepoint(CodeCache_lock);

  for (int index = 0; index < length(); index ++) {
    ShenandoahNMethod* m = _list->at(index);
    // Concurrent unloading may have dead nmethods to be cleaned by sweeper
    if (m->is_unregistered()) continue;
    m->assert_correct();
  }
}
#endif

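// ShenandoahNMethodList is a reference-counted array of ShenandoahNMethod*.
// The table holds one reference; each iteration snapshot holds another, so the
// array outlives a concurrent table rebuild that swaps in a larger list.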
ShenandoahNMethodList::ShenandoahNMethodList(int size) :
  _size(size), _ref_count(1) {
  _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
}

ShenandoahNMethodList::~ShenandoahNMethodList() {
  assert(_list != nullptr, "Sanity");
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}

void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
  assert(limit <= size(), "Sanity");
  ShenandoahNMethod** old_list = list->list();
  for (int index = 0; index < limit; index++) {
    _list[index] = old_list[index];
  }
}

ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count++;
  return this;
}

void ShenandoahNMethodList::release() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count--;
  if (_ref_count == 0) {
    delete this;
  }
}

ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}

ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  _list->release();
}

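// Workers claim disjoint strides of the snapshot with an atomic fetch-and-add
// on _claimed, then apply the closure to every registered nmethod in their
// stride.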
void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** const list = _list->list();

  size_t max = (size_t)_limit;
  while (_claimed < max) {
    size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* nmr = list[idx];
      assert(nmr != nullptr, "Sanity");
      if (nmr->is_unregistered()) {
        continue;
      }

      nmr->assert_correct();
      f->do_code_blob(nmr->nm());
    }
  }
}

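// Same stride-claiming scheme as parallel_blobs_do(), but hands each still
// registered nmethod to an NMethodClosure.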
void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** list = _list->list();
  size_t max = (size_t)_limit;
  while (_claimed < max) {
    size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* data = list[idx];
      assert(data != nullptr, "Should not be null");
      if (!data->is_unregistered()) {
        cl->do_nmethod(data->nm());
      }
    }
  }
}

ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table),
  _table_snapshot(nullptr),
  _started_workers(0),
  _finished_workers(0) {}

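// Each worker calls nmethods_do() once. The first worker to arrive creates the
// table snapshot; the last one to finish tears it down and notifies threads
// waiting on CodeCache_lock. Once any worker has finished, late arrivals bail
// out so the iteration can ramp down.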
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  // Cannot safepoint while iteration is running, because that can deadlock
  // with other threads waiting for the iteration to finish.
  NoSafepointVerifier nsv;

  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  if (_finished_workers > 0) {
    // Some threads have already finished. We are in rampdown: waiting for all
    // currently recorded workers to finish. No new workers should start.
    return;
  }

  // Record a new worker and initialize the snapshot if it is the first visitor.
  if (_started_workers++ == 0) {
    _table_snapshot = _table->snapshot_for_iteration();
  }

  // All set, relinquish the lock and go concurrent.
  {
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _table_snapshot->concurrent_nmethods_do(cl);
  }

  // Record completion. Last worker shuts down the iterator and notifies any waiters.
  uint count = ++_finished_workers;
  if (count == _started_workers) {
    _table->finish_iteration(_table_snapshot);
    CodeCache_lock->notify_all();
  }
}