/*
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _scopes_data_begin      = NULL;
    _deopt_handler_begin    = NULL;
    _deopt_mh_handler_begin = NULL;
    _exception_cache        = NULL;
  }
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _has_wide_vectors          = 0;
  _has_monitors              = 0;
}

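// Returns true if return_pc is the return address of a call site that invokes
// a method handle (as recorded in the PcDesc); such frames need special
// treatment during stack walking.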
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes()) return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------
void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
  // assert(can_be_deoptimized(), ""); // in some places we check before marking, in others not.
  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                 Mutex::_no_safepoint_check_flag);
  if (_mark_for_deoptimization_status != deoptimize_done) { // can't go backwards
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

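// Illustration (informational only, not product code): the exception cache is
// a singly linked list hanging off _exception_cache:
//
//   _exception_cache --> EC1 --> EC2 --> NULL
//
// Inserters CAS a new entry onto the head, and may first roll a dead head
// forward (as above) so that new next pointers never reference entries with
// dead klasses. The cleanup pass below runs single-threaded per nmethod and
// may unlink interior entries; only the head is contended, hence the CAS
// retry loops on both sides.
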
void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible during cleanup to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

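// Usage sketch (illustrative only, not product code): a caller, typically the
// shared runtime's compiled-exception dispatch, tries the cache first and
// records the computed handler on a miss:
//
//   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);
//   if (handler == NULL) {
//     handler = ...;  // compute via the exception handler table / scope walk
//     cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
//   }
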
// Private method used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  // assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

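// Sketch (illustrative only, not product code): a GC-side oop walk would
// typically bound its relocation scan with oops_reloc_begin() so that the
// bytes overwritten by the not-entrant jump are never interpreted as oops:
//
//   RelocIterator iter(nm, nm->oops_reloc_begin());
//   while (iter.next()) {
//     if (iter.type() == relocInfo::oop_type) {
//       oop_Relocation* r = iter.oop_reloc();
//       if (!r->oop_is_immediate()) {
//         // visit the oop slot the relocation refers to
//       }
//     }
//   }
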
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() == NULL) {
    return;
  }

  // Handle the case of an anchor explicitly set in continuation code that doesn't have a callee.
  JavaThread* thread = reg_map->thread();
  if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
    return;
  }

  if (!method()->is_native()) {
    address pc = fr.pc();
    bool has_receiver, has_appendix;
    Symbol* signature;

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature    = callee->signature();

      // If inline types are passed as fields, use the extended signature
      // which contains the types of all (oop) fields of the inline type.
      if (is_compiled_by_c2() && callee->has_scalarized_args()) {
        const GrowableArray<SigEntry>* sig = callee->adapter()->get_sig_cc();
        assert(sig != NULL, "sig should never be null");
        TempNewSymbol tmp_sig = SigEntry::create_symbol(sig);
        has_receiver = false; // The extended signature contains the receiver type
        fr.oops_compiled_arguments_do(tmp_sig, has_receiver, has_appendix, reg_map, f);
        return;
      }
    } else {
      SimpleScopeDesc ssd(this, pc);

      Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
      has_receiver = call.has_receiver();
      has_appendix = call.has_appendix();
      signature    = call.signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  } else if (method()->is_continuation_enter_intrinsic()) {
    // This method only calls Continuation.enter()
    Symbol* signature = vmSymbols::continuationEnter_signature();
    fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
  }
}

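// JIT compilers may attach the resolved Method* directly to a call site's
// relocation; the accessors below retrieve it. preserve_callee_argument_oops()
// above prefers the attached method so that the exact outgoing signature is
// used, e.g. when the calling convention cannot be derived from the bytecode
// alone.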
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "clearing of IC's only allowed at safepoint");
  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    } else {
      // This inline cache is a megamorphic vtable call. Those ICs never hold
      // any Metadata and should therefore never be cleaned by this function.
      return true;
    }
  }

  return ic->set_to_clean();
}

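// In the helpers below, an inline cache or static call destination is
// considered stale when the target nmethod is no longer in use, is unloading,
// or is no longer the method's current code (it has been replaced by a newer
// compilation); with clean_all set, every compiled target is cleaned
// regardless.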
// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  CodeBlob *cb = CodeCache::find_blob(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to bad nmethods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(!from->is_unloading())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // Exception cache only needs to be called if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != NULL) {
    // We want to keep an invariant that nmethods found through iteration of a
    // Thread's nmethods in safepoints have gone through an entry barrier and are not armed.
    // By calling this nmethod entry barrier, it plays along and acts
    // like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != NULL && bs_nm->is_armed(nm)) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}

// Only called by whitebox test
void CompiledMethod::cleanup_inline_caches_whitebox() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledICLocker ic_locker(this);
  guarantee(cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */),
            "Inline cache cleaning in a safepoint can't fail");
}

address* CompiledMethod::orig_pc_addr(const frame* fr) {
  return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}

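// Note on cleanup_inline_caches_impl (below): it walks all relocations
// starting at oops_reloc_begin() and, depending on relocation type, cleans
// inline caches, static call sites, and the Method* metadata embedded in
// static stubs. A false return means cleaning could not complete (for example
// when set_to_clean() cannot transition an inline cache because IC stub space
// is currently exhausted); callers outside a safepoint are expected to retry.
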
// Called to clean up after class unloading for live nmethods
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to bad nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)NULL);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

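// Maps the pc at which an implicit exception (a null check, or a division by
// zero when for_div0_check is true) was raised to the continuation address
// recorded by the compiler in the ImplicitExceptionTable. Returns NULL when
// there is no recorded continuation, in which case the caller falls back to
// normal error reporting. For JVMCI-compiled code, a continuation equal to
// the faulting pc is turned into an uncommon trap / deoptimization.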
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC and also deoptimize
  // any nmethod that has a reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}