/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _deoptimization_status(not_marked),
    _deoptimization_generation(0),
    _method(method),
    _gc_data(nullptr)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _deoptimization_status(not_marked),
    _deoptimization_generation(0),
    _method(method),
    _gc_data(nullptr)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _scopes_data_begin          = nullptr;
    _deopt_handler_begin        = nullptr;
    _deopt_mh_handler_begin     = nullptr;
    _exception_cache            = nullptr;
  }
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _has_wide_vectors           = 0;
  _has_monitors               = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == nullptr)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  default:
    fatal("unexpected method state: %d", state);
    return nullptr;
  }
}

//-----------------------------------------------------------------------------
void CompiledMethod::set_deoptimized_done() {
  ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  if (_deoptimization_status != deoptimize_done) { // can't go backwards
    Atomic::store(&_deoptimization_status, deoptimize_done);
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != nullptr, "Must be non null");
  assert(new_entry->next() == nullptr, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != nullptr) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != nullptr) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = nullptr;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != nullptr) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == nullptr) {
        // Try to clean the head; this is contended by concurrent inserts that
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = nullptr;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != nullptr) {
      return ret_val;
    }
    ec = ec->next();
  }
  return nullptr;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == nullptr || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
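
// Typical flow (for reference): compiled-code exception dispatch consults
// handler_for_exception_and_pc() first and, on a miss, computes the handler
// (see SharedRuntime::compute_compiled_exc_handler()) and then publishes it with
// add_handler_for_exception_and_pc(), so repeated throws at the same pc avoid
// recomputing the handler.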

// Private method used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return nullptr;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, nullptr, nullptr);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers if their
  // location depends on whether the nmethod is entrant or not.
  // assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != nullptr, "must be non-nullptr");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() == nullptr) {
    return;
  }

  // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
  JavaThread* thread = reg_map->thread();
  if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
    return;
  }

  if (!method()->is_native()) {
    address pc = fr.pc();
    bool has_receiver, has_appendix;
    Symbol* signature;

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != nullptr) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature    = callee->signature();
    } else {
      SimpleScopeDesc ssd(this, pc);

      Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
      has_receiver = call.has_receiver();
      has_appendix = call.has_appendix();
      signature    = call.signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  } else if (method()->is_continuation_enter_intrinsic()) {
    // This method only calls Continuation.enter()
    Symbol* signature = vmSymbols::continuationEnter_signature();
    fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
  }
}
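// The compiler may attach the resolved callee Method* to a call site via the call
// relocation. When present, it is preferred over re-decoding the invoke bytecode
// (see preserve_callee_argument_oops() above), since it reflects the exact callee
// signature the compiler emitted the call for.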
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return nullptr; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return nullptr; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "clearing of ICs only allowed at safepoint");
  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = nullptr;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != nullptr) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    } else {
      // This inline cache is a megamorphic vtable call. Those ICs never hold
      // any Metadata and should therefore never be cleaned by this function.
      return true;
    }
  }

  return ic->set_to_clean();
}
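
// Note: ic->set_to_clean() can report failure (e.g. when a transitional ICStub cannot
// be allocated during concurrent cleaning); callers propagate the false result so the
// cleaning pass can be retried later rather than leaving a stale IC behind.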

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  CodeBlob *cb = CodeCache::find_blob(addr);
  CompiledMethod* nm = (cb != nullptr) ? cb->as_compiled_method_or_null() : nullptr;
  if (nm != nullptr) {
    // Clean inline caches pointing to bad nmethods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(!from->is_unloading())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}
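
// The overloads above forward to the template with the call site's current destination
// (ic_destination() for inline caches, destination() for static calls). When clean_all
// is set (see cleanup_inline_caches_whitebox()), call sites are cleaned even if the
// target nmethod is still usable.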

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded.  Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // Exception cache cleaning only needs to be done if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != nullptr) {
    // We want to keep an invariant that nmethods found by iterating a thread's
    // frames at a safepoint have gone through an entry barrier and are not armed.
    // By calling this nmethod entry barrier, it plays along and acts
    // like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != nullptr && bs_nm->is_armed(nm)) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}

// Only called by whitebox test
void CompiledMethod::cleanup_inline_caches_whitebox() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledICLocker ic_locker(this);
  guarantee(cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */),
            "Inline cache cleaning in a safepoint can't fail");
}

address* CompiledMethod::orig_pc_addr(const frame* fr) {
  return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}

// Called to clean up after class unloading for live nmethods
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to bad nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != nullptr && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)nullptr);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = int(pc - code_begin());
  int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != nullptr && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return nullptr;
  }
  if (cont_offset == exception_offset) {
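    // A continuation offset equal to the exception offset marks an implicit exception
    // that should be handled by deoptimizing at the faulting pc rather than by jumping
    // to a local continuation (this case is only expected for JVMCI-compiled code, as
    // the JVMCI-only branch below suggests).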
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC and also deoptimize
  // any nmethod that has a reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}