/*
 * Copyright (c) 2019, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/continuation.hpp"

ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
  _nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false) {

  if (!oops.is_empty()) {
    _oops_count = oops.length();
    _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    for (int c = 0; c < _oops_count; c++) {
      _oops[c] = oops.at(c);
    }
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}

ShenandoahNMethod::~ShenandoahNMethod() {
  if (_oops != nullptr) {
    FREE_C_HEAP_ARRAY(oop*, _oops);
  }
}

class ShenandoahHasCSetOopClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;
  bool _has_cset_oops;

public:
  ShenandoahHasCSetOopClosure(ShenandoahHeap *heap) :
    _heap(heap),
    _has_cset_oops(false) {
  }

  bool has_cset_oops() const {
    return _has_cset_oops;
  }

  void do_oop(oop* p) {
    oop value = RawAccess<>::oop_load(p);
    if (!_has_cset_oops && _heap->in_collection_set(value)) {
      _has_cset_oops = true;
    }
  }

  void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
  ShenandoahHasCSetOopClosure cl(heap);
  oops_do(&cl);
  return cl.has_cset_oops();
}

void ShenandoahNMethod::update() {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm(), oops, non_immediate_oops);
  if (oops.length() != _oops_count) {
    if (_oops != nullptr) {
      FREE_C_HEAP_ARRAY(oop*, _oops);
      _oops = nullptr;
    }

    _oops_count = oops.length();
    if (_oops_count > 0) {
      _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    }
  }

  for (int index = 0; index < _oops_count; index ++) {
    _oops[index] = oops.at(index);
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}
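
// Walk the nmethod's oop relocations: record the addresses of non-null
// immediate oops and note whether any non-immediate oops are present.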
void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops) {
  has_non_immed_oops = false;
  // Find all oop relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();
    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      has_non_immed_oops = true;
      continue;
    }

    oop value = r->oop_value();
    if (value != nullptr) {
      oop* addr = r->oop_addr();
      shenandoah_assert_correct(addr, value);
      shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
      shenandoah_assert_not_forwarded(addr, value);
      // Non-null immediate oop found. Null oops can safely be
      // ignored, since the method will be re-registered if they
      // are later patched to be non-null.
      oops.push(addr);
    }
  }
}

ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm, oops, non_immediate_oops);
  return new ShenandoahNMethod(nm, oops, non_immediate_oops);
}

void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != nullptr, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_weak_root_in_progress() ||
      heap->is_concurrent_strong_root_in_progress()) {
    ShenandoahEvacOOMScope evac_scope;
    heal_nmethod_metadata(data);
  } else if (heap->is_concurrent_mark_in_progress()) {
    ShenandoahKeepAliveClosure cl;
    data->oops_do(&cl);
  } else {
    // The GC may have been cancelled by the time it reaches final mark.
    // In that case the concurrent root phase is skipped and a degenerated GC
    // follows, which disarms nmethods itself, so there is nothing to do here.
  }
}

#ifdef ASSERT
void ShenandoahNMethod::assert_correct() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
  }

  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oop o = RawAccess<>::oop_load(p);
      shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
    }
  }
}
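
// Collects the oop* locations reported by nmethod::oops_do, so they can be
// cross-checked against the oops recorded in the ShenandoahNMethod data.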
class ShenandoahNMethodOopDetector : public OopClosure {
private:
  ResourceMark rm; // For growable array allocation below.
  GrowableArray<oop*> _oops;

public:
  ShenandoahNMethodOopDetector() : _oops(10) {};

  void do_oop(oop* o) {
    _oops.append(o);
  }
  void do_oop(narrowOop* o) {
    fatal("NMethods should not have compressed oops embedded.");
  }

  GrowableArray<oop*>* oops() {
    return &_oops;
  }

  bool has_oops() {
    return !_oops.is_empty();
  }
};

void ShenandoahNMethod::assert_same_oops(bool allow_dead) {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector, allow_dead);

  GrowableArray<oop*>* oops = detector.oops();

  int count = _oops_count;
  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    if (*p == Universe::non_oop_word()) continue;
    count++;
    assert(oops->contains(p), "Must contain this oop");
  }

  if (oops->length() < count) {
    stringStream debug_stream;
    debug_stream.print_cr("detected locs: %d", oops->length());
    for (int i = 0; i < oops->length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(oops->at(i)));
    }
    debug_stream.print_cr("recorded oops: %d", _oops_count);
    for (int i = 0; i < _oops_count; i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(_oops[i]));
    }
    GrowableArray<oop*> check;
    bool non_immed;
    detect_reloc_oops(nm(), check, non_immed);
    debug_stream.print_cr("check oops: %d", check.length());
    for (int i = 0; i < check.length(); i++) {
      debug_stream.print_cr("-> " PTR_FORMAT, p2i(check.at(i)));
    }
    fatal("Must match #detected: %d, #recorded: %d, #total: %d, begin: " PTR_FORMAT ", end: " PTR_FORMAT "\n%s",
          oops->length(), _oops_count, count, p2i(nm()->oops_begin()), p2i(nm()->oops_end()), debug_stream.freeze());
  }
}
#endif

ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _index(0),
  _itr_cnt(0) {
  _list = new ShenandoahNMethodList(minSize);
}

ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_list != nullptr, "Sanity");
  _list->release();
}
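
// Registers a newly installed or recompiled nmethod with the table.
// Called with the CodeCache_lock held; an update of an already registered
// nmethod waits for any concurrent iteration to finish first.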
void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);

  if (data != nullptr) {
    assert(contain(nm), "Must have been registered");
    assert(nm == data->nm(), "Must be same nmethod");
    // Prevent updating a nmethod while concurrent iteration is in progress.
    wait_until_concurrent_iteration_done();
    ShenandoahReentrantLocker data_locker(data->lock());
    data->update();
  } else {
    // For a new nmethod, we can safely append it to the list, because
    // concurrent iteration will not touch it.
    data = ShenandoahNMethod::for_nmethod(nm);
    assert(data != nullptr, "Sanity");
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}

void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != nullptr, "Sanity");
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, nullptr);
  remove(idx);
}

bool ShenandoahNMethodTable::contain(nmethod* nm) const {
  return index_of(nm) != -1;
}

ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _list->at(index);
}

int ShenandoahNMethodTable::index_of(nmethod* nm) const {
  for (int index = 0; index < length(); index ++) {
    if (at(index)->nm() == nm) {
      return index;
    }
  }
  return -1;
}

void ShenandoahNMethodTable::remove(int idx) {
  shenandoah_assert_locked_or_safepoint(CodeCache_lock);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");

  assert(idx >= 0 && idx < _index, "Out of bound");
  ShenandoahNMethod* snm = _list->at(idx);
  ShenandoahNMethod* tmp = _list->at(_index - 1);
  _list->set(idx, tmp);
  _index --;

  delete snm;
}

void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}

void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
  if (is_full()) {
    int new_size = 2 * _list->size();
    // Rebuild table and replace current one
    rebuild(new_size);
  }

  _list->set(_index++, snm);
  assert(_index >= 0 && _index <= _list->size(), "Sanity");
}

void ShenandoahNMethodTable::rebuild(int size) {
  ShenandoahNMethodList* new_list = new ShenandoahNMethodList(size);
  new_list->transfer(_list, _index);

  // Release old list
  _list->release();
  _list = new_list;
}

ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  _itr_cnt++;
  return new ShenandoahNMethodTableSnapshot(this);
}

void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(iteration_in_progress(), "Why we here?");
  assert(snapshot != nullptr, "No snapshot");
  _itr_cnt--;

  delete snapshot;
}
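
// Logging helpers for nmethod (un)registration; enabled with
// -Xlog:gc+nmethod=debug.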
void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name());
}

void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}

#ifdef ASSERT
void ShenandoahNMethodTable::assert_nmethods_correct() {
  assert_locked_or_safepoint(CodeCache_lock);

  for (int index = 0; index < length(); index ++) {
    ShenandoahNMethod* m = _list->at(index);
    // Concurrent unloading may have dead nmethods to be cleaned by sweeper
    if (m->is_unregistered()) continue;
    m->assert_correct();
  }
}
#endif

ShenandoahNMethodList::ShenandoahNMethodList(int size) :
  _size(size), _ref_count(1) {
  _list = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
}

ShenandoahNMethodList::~ShenandoahNMethodList() {
  assert(_list != nullptr, "Sanity");
  assert(_ref_count == 0, "Must be");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}

void ShenandoahNMethodList::transfer(ShenandoahNMethodList* const list, int limit) {
  assert(limit <= size(), "Sanity");
  ShenandoahNMethod** old_list = list->list();
  for (int index = 0; index < limit; index++) {
    _list[index] = old_list[index];
  }
}

ShenandoahNMethodList* ShenandoahNMethodList::acquire() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count++;
  return this;
}

void ShenandoahNMethodList::release() {
  assert_locked_or_safepoint(CodeCache_lock);
  _ref_count--;
  if (_ref_count == 0) {
    delete this;
  }
}

ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _list(table->_list->acquire()), _limit(table->_index), _claimed(0) {
}

ShenandoahNMethodTableSnapshot::~ShenandoahNMethodTableSnapshot() {
  _list->release();
}

void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** const list = _list->list();

  size_t max = (size_t)_limit;
  while (_claimed < max) {
    size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* nmr = list[idx];
      assert(nmr != nullptr, "Sanity");
      if (nmr->is_unregistered()) {
        continue;
      }

      nmr->assert_correct();
      f->do_code_blob(nmr->nm());
    }
  }
}

void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** list = _list->list();
  size_t max = (size_t)_limit;
  while (_claimed < max) {
    size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed);
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* data = list[idx];
      assert(data != nullptr, "Should not be null");
      if (!data->is_unregistered()) {
        cl->do_nmethod(data->nm());
      }
    }
  }
}
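
// Drives concurrent iteration over the nmethod table: begin/end are called
// with the CodeCache_lock held and create/release a snapshot, while
// nmethods_do can be invoked by multiple GC workers against that snapshot.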
ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table), _table_snapshot(nullptr) {
}

void ShenandoahConcurrentNMethodIterator::nmethods_do_begin() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  _table_snapshot = _table->snapshot_for_iteration();
}

void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  assert(_table_snapshot != nullptr, "Must first call nmethods_do_begin()");
  _table_snapshot->concurrent_nmethods_do(cl);
}

void ShenandoahConcurrentNMethodIterator::nmethods_do_end() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  _table->finish_iteration(_table_snapshot);
  CodeCache_lock->notify_all();
}