1 /* 2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2019, 2022, Red Hat, Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/continuation.hpp"
#include "runtime/safepointVerifiers.hpp"

// Per-nmethod GC data: caches the locations (oop*) of all non-null immediate
// oops embedded in the nmethod, so GC can visit them without re-walking the
// relocations every time. 'oops' holds the locations found by
// detect_reloc_oops(); 'non_immediate_oops' records whether the nmethod also
// contains oop relocations that are not immediates (and so cannot be cached).
ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
  _nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false), _lock(), _ic_lock() {

  if (!oops.is_empty()) {
    // Copy the detected locations into a C-heap array owned by this object.
    _oops_count = oops.length();
    _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    for (int c = 0; c < _oops_count; c++) {
      _oops[c] = oops.at(c);
    }
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}

ShenandoahNMethod::~ShenandoahNMethod() {
  if (_oops != nullptr) {
    FREE_C_HEAP_ARRAY(oop*, _oops);
  }
}

// Re-scans the nmethod's relocations and refreshes the cached oop locations,
// e.g. after the nmethod has been patched. The backing array is reallocated
// only when the number of cached oops changed.
void ShenandoahNMethod::update() {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm(), oops, non_immediate_oops);
  if (oops.length() != _oops_count) {
    // Count changed: drop the old array and allocate one of the right size.
    if (_oops != nullptr) {
      FREE_C_HEAP_ARRAY(oop*, _oops);
      _oops = nullptr;
    }

    _oops_count = oops.length();
    if (_oops_count > 0) {
      _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    }
  }

  for (int index = 0; index < _oops_count; index ++) {
    _oops[index] = oops.at(index);
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}

// Walks the nmethod's relocation entries and pushes the address of every
// non-null immediate oop onto 'oops'. Sets 'has_non_immed_oops' if at least
// one oop relocation is not an immediate.
void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops) {
  has_non_immed_oops = false;
  // Find all oops relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();
    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      has_non_immed_oops = true;
      continue;
    }

    oop value = r->oop_value();
    if (value != nullptr) {
      oop* addr = r->oop_addr();
      shenandoah_assert_correct(addr, value);
      shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
      shenandoah_assert_not_forwarded(addr, value);
      // Non-null immediate oop found. null oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-null.
      oops.push(addr);
    }
  }
}

// Factory: scans 'nm' for embedded oops and builds its GC data record.
ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm, oops, non_immediate_oops);
  return new ShenandoahNMethod(nm, oops, non_immediate_oops);
}

// Fixes up the oops embedded in 'nm' according to the current GC phase.
// Precondition: the calling thread holds the per-nmethod reentrant lock
// (asserted below).
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != nullptr, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_weak_root_in_progress() ||
      heap->is_concurrent_strong_root_in_progress()) {
    // Healing during concurrent root processing may need to evacuate objects;
    // guard against evacuation OOM while doing so.
    ShenandoahEvacOOMScope evac_scope;
    heal_nmethod_metadata(data);
  } else if (heap->is_concurrent_mark_in_progress()) {
    // During concurrent mark, just keep the embedded oops alive.
    ShenandoahKeepAliveClosure cl;
    data->oops_do(&cl);
  } else {
    // There is possibility that GC is cancelled when it arrives final mark.
    // In this case, concurrent root phase is skipped and degenerated GC should be
    // followed, where nmethods are disarmed.
  }
}

#ifdef ASSERT
// Debug check: every cached oop location must lie inside the nmethod and hold
// a well-formed oop (null and full-GC-move windows excepted).
void ShenandoahNMethod::assert_correct() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
  }

  // Also check the nmethod's own oop table (non-immediate oops).
  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oop o = RawAccess<>::oop_load(p);
      shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
    }
  }
}

// Debug-only closure that records every oop* location visited by
// nmethod::oops_do, for cross-checking against the cached array.
class ShenandoahNMethodOopDetector : public OopClosure {
private:
  ResourceMark rm; // For growable array allocation below.
  GrowableArray<oop*> _oops;

public:
  ShenandoahNMethodOopDetector() : _oops(10) {};

  void do_oop(oop* o) {
    _oops.append(o);
  }
  void do_oop(narrowOop* o) {
    fatal("NMethods should not have compressed oops embedded.");
  }

  GrowableArray<oop*>* oops() {
    return &_oops;
  }
};

// Debug check: the set of oop locations reported by nmethod::oops_do must
// cover both the cached immediate-oop locations and the nmethod's oop table;
// on mismatch, dumps all three views and aborts.
void ShenandoahNMethod::assert_same_oops(bool allow_dead) {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector, allow_dead);

  GrowableArray<oop*>* oops = detector.oops();

  // 'count' accumulates cached locations plus oop-table entries.
  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  if (oops->length() < count) {
    // Mismatch: dump detected vs. recorded vs. freshly re-detected locations
    // before failing, to aid debugging.
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    bool non_immed;
    detect_reloc_oops(nm(), check, non_immed);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.freeze());
  }
}
#endif

ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  _list = new ShenandoahNMethodList(minSize);
}

ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != nullptr, "Sanity");
  // Ref-counted release: the list is deleted once concurrent iterators that
  // still hold a reference are done with it.
  _list->release();
}

// Registers 'nm' with the table: creates (or refreshes) its GC data and
// disarms the nmethod. Precondition: CodeCache_lock held (asserted).
void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);

  if (data != nullptr) {
    assert(contain(nm), "Must have been registered");
    assert(nm == data->nm(), "Must be same nmethod");
    // Prevent updating a nmethod while concurrent iteration is in progress.
    wait_until_concurrent_iteration_done();
    ShenandoahReentrantLocker data_locker(data->lock());
    data->update();
  } else {
    // For a new nmethod, we can safely append it to the list, because
    // concurrent iteration will not touch it.
    data = ShenandoahNMethod::for_nmethod(nm);
    assert(data != nullptr, "Sanity");
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}

// Removes 'nm' from the table and detaches/deletes its GC data.
void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != nullptr, "Sanity");
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, nullptr);
  remove(idx);
}

bool ShenandoahNMethodTable::contain(nmethod* nm) const {
  return index_of(nm) != -1;
}

ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _list->at(index);
}

// Linear scan for the entry holding 'nm'; returns -1 if not registered.
int ShenandoahNMethodTable::index_of(nmethod* nm) const {
  for (int index = 0; index < length(); index ++) {
    if (at(index)->nm() == nm) {
      return index;
    }
  }
  return -1;
}

// O(1) unordered removal: overwrite slot 'idx' with the last entry, shrink,
// and delete the removed record.
void ShenandoahNMethodTable::remove(int idx) {
  shenandoah_assert_locked_or_safepoint(CodeCache_lock);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  assert(idx >= 0 && idx < _index, "Out of bound");
  ShenandoahNMethod* snm = _list->at(idx);
  ShenandoahNMethod* tmp = _list->at(_index - 1);
  _list->set(idx, tmp);
  _index --;

  delete snm;
}

// Blocks on CodeCache_lock until all in-flight snapshot iterations finish.
void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}

// Appends 'snm', growing the backing list (doubling) when full.
void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
  if (is_full()) {
    int new_size = 2 * _list->size();
    // Rebuild table and replace current one
    rebuild(new_size);
  }

  _list->set(_index++, snm);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");
}

// Replaces the backing list with a larger copy. The old list is released, not
// deleted outright: concurrent snapshot iterators may still hold a reference.
void ShenandoahNMethodTable::rebuild(int size) {
  ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
  new_list->transfer(_list, _index);

  // Release old list
  _list->release();
  _list = new_list;
}

// Starts an iteration: bumps the iteration count (blocks nmethod updates) and
// hands out a snapshot that pins the current list via its ref count.
ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  _itr_cnt++;
  return new ShenandoahNMethodTableSnapshot(this);
}

// Ends an iteration started by snapshot_for_iteration().
void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(iteration_in_progress(), "Why we here?");
  assert(snapshot != nullptr, "No snapshot");
  _itr_cnt--;

  delete snapshot;
}

void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name());
}

void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}

#ifdef ASSERT
// Debug check over the whole table; skips entries pending unregistration.
void ShenandoahNMethodTable::assert_nmethods_correct() {
  assert_locked_or_safepoint(CodeCache_lock);

  for (int index = 0; index < length(); index ++) {
    ShenandoahNMethod* m = _list->at(index);
    // Concurrent unloading may have dead nmethods to be cleaned by sweeper
    if (m->is_unregistered()) continue;
    m->assert_correct();
  }
}
#endif


// Ref-counted backing array for the table. Created with one reference (the
// table's); snapshots add references so the array outlives table rebuilds.
ShenandoahNMethodList::ShenandoahNMethodList(int size) :
  _size(size), _ref_count(1) {
  _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
}

ShenandoahNMethodList::~ShenandoahNMethodList() {
  assert(_list != nullptr, "Sanity");
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}

// Copies the first 'limit' entries from 'list' into this (larger) list.
void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
  assert(limit <= size(), "Sanity");
  ShenandoahNMethod** old_list = list->list();
  for (int index = 0; index < limit; index++) {
    _list[index] = old_list[index];
  }
}

// Ref count is manipulated only under CodeCache_lock (or at a safepoint), so
// plain increments/decrements suffice.
ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count++;
  return this;
}

void ShenandoahNMethodList::release() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count--;
  if (_ref_count == 0) {
    delete this;
  }
}

// Pins the table's current list (ref count) and remembers the entry count at
// snapshot time; entries appended later are deliberately not visited.
ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}

ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  _list->release();
}

// Parallel iteration: workers claim strides of entries via atomic
// fetch-then-add on _claimed, so each entry is visited exactly once.
void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** const list = _list->list();

  size_t max = (size_t)_limit;
  // The unsynchronized '_claimed < max' read is only a fast-path check; the
  // atomic fetch_then_add below is what actually hands out work.
  while (_claimed < max) {
    size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* nmr = list[idx];
      assert(nmr != nullptr, "Sanity");
      if (nmr->is_unregistered()) {
        continue;
      }

      nmr->assert_correct();
      f->do_nmethod(nmr->nm());
    }
  }
}

// Same stride-claiming scheme as parallel_nmethods_do(), but without the
// per-entry assert_correct() check.
void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** list = _list->list();
  size_t max = (size_t)_limit;
  while (_claimed < max) {
    size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* data = list[idx];
      assert(data != nullptr, "Should not be null");
      if (!data->is_unregistered()) {
        cl->do_nmethod(data->nm());
      }
    }
  }
}

ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table),
  _table_snapshot(nullptr),
  _started_workers(0),
  _finished_workers(0) {}

// Concurrent iteration entry point, callable by multiple workers. The first
// worker creates the shared snapshot; the last worker to finish tears it down
// and notifies threads waiting on CodeCache_lock. All bookkeeping happens
// under CodeCache_lock; only the snapshot walk itself runs with it released.
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  // Cannot safepoint when iteration is running, because this can cause deadlocks
  // with other threads waiting on iteration to be over.
  NoSafepointVerifier nsv;

  MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  if (_finished_workers > 0) {
    // Some threads have already finished. We are now in rampdown: we are now
    // waiting for all currently recorded workers to finish. No new workers
    // should start.
    return;
  }

  // Record a new worker and initialize the snapshot if it is a first visitor.
  if (_started_workers++ == 0) {
    _table_snapshot = _table->snapshot_for_iteration();
  }

  // All set, relinquish the lock and go concurrent.
  {
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _table_snapshot->concurrent_nmethods_do(cl);
  }

  // Record completion. Last worker shuts down the iterator and notifies any waiters.
  uint count = ++_finished_workers;
  if (count == _started_workers) {
    _table->finish_iteration(_table_snapshot);
    CodeCache_lock->notify_all();
  }
}