/*
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _deoptimization_status(not_marked),
    _deoptimization_generation(0),
    _method(method),
    _gc_data(nullptr)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
    _deoptimization_status(not_marked),
    _deoptimization_generation(0),
    _method(method),
    _gc_data(nullptr)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _scopes_data_begin          = nullptr;
    _deopt_handler_begin        = nullptr;
    _deopt_mh_handler_begin     = nullptr;
    _exception_cache            = nullptr;
  }
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _has_wide_vectors           = 0;
  _has_monitors               = 0;
}

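// Query whether return_pc is the return site of a method handle invoke in this
// method. The answer is recorded as a flag on the PcDesc at code emission time;
// stack walking consults it (e.g. when computing the unextended sp of a sender
// frame) to treat such call sites specially.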
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == nullptr)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  default:
    fatal("unexpected method state: %d", state);
    return nullptr;
  }
}

//-----------------------------------------------------------------------------
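// Mark deoptimization of this nmethod as completed. The status is monotonic:
// once deoptimize_done is reached it never moves back to an earlier state,
// hence the guard below.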
void CompiledMethod::set_deoptimized_done() {
  ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  if (_deoptimization_status != deoptimize_done) { // can't go backwards
    Atomic::store(&_deoptimization_status, deoptimize_done);
  }
}

//-----------------------------------------------------------------------------

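// Load-acquire the head of the exception cache list; this pairs with the CAS
// (a full fence in HotSpot's Atomic) that publishes new entries, so a reader
// that observes an entry also observes its initialized fields.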
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != nullptr, "Must be non null");
  assert(new_entry->next() == nullptr, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != nullptr) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != nullptr) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = nullptr;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != nullptr) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == nullptr) {
        // Try to clean the head; this is contended by concurrent inserts that
        // both lazily clean the head and insert entries at it. If the CAS
        // fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = nullptr;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // preceding the cleanup. Therefore, a releasing store is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != nullptr) {
      return ret_val;
    }
    ec = ec->next();
  }
  return nullptr;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == nullptr || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// Private method used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != nullptr) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return nullptr;
}

//-------------end of code for ExceptionCache--------------

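// The two queries below scan the relocation info covering pc to decide whether
// it is a safepoint poll site, distinguishing polls at returns from other polls.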
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, nullptr, nullptr);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}

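// Create a ScopeDesc for the (possibly inlined) Java scopes at pc.
// scope_desc_at() requires a PcDesc matching pc exactly, whereas
// scope_desc_near() accepts a nearby PcDesc (see pc_desc_near()).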
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != nullptr, "scope must be present");
  return new ScopeDesc(this, pd);
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // locations depend on whether the nmethod is entrant or not.
  // assert(BarrierSet::barrier_set()->barrier_set_nmethod() == nullptr, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}
335 
336 // Method that knows how to preserve outgoing arguments at call. This method must be
337 // called with a frame corresponding to a Java invoke
338 void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
339   if (method() == nullptr) {
340     return;
341   }
342 
343   // handle the case of an anchor explicitly set in continuation code that doesn't have a callee
344   JavaThread* thread = reg_map->thread();
345   if (thread->has_last_Java_frame() && fr.sp() == thread->last_Java_sp()) {
346     return;
347   }
348 
349   if (!method()->is_native()) {
350     address pc = fr.pc();
351     bool has_receiver, has_appendix;
352     Symbol* signature;
353 
354     // The method attached by JIT-compilers should be used, if present.
355     // Bytecode can be inaccurate in such case.
356     Method* callee = attached_method_before_pc(pc);
357     if (callee != nullptr) {
358       has_receiver = !(callee->access_flags().is_static());
359       has_appendix = false;
360       signature = callee->signature();
361 
362       // If inline types are passed as fields, use the extended signature
363       // which contains the types of all (oop) fields of the inline type.
364       if (is_compiled_by_c2() && callee->has_scalarized_args()) {
365         const GrowableArray<SigEntry>* sig = callee->adapter()->get_sig_cc();
366         assert(sig != nullptr, "sig should never be null");
367         TempNewSymbol tmp_sig = SigEntry::create_symbol(sig);
368         has_receiver = false; // The extended signature contains the receiver type
369         fr.oops_compiled_arguments_do(tmp_sig, has_receiver, has_appendix, reg_map, f);
370         return;
371       }
372     } else {
373       SimpleScopeDesc ssd(this, pc);
374 
375       Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
376       has_receiver = call.has_receiver();
377       has_appendix = call.has_appendix();
378       signature    = call.signature();
379     }
380 
381     fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
382   } else if (method()->is_continuation_enter_intrinsic()) {
383     // This method only calls Continuation.enter()
384     Symbol* signature = vmSymbols::continuationEnter_signature();
385     fr.oops_compiled_arguments_do(signature, false, false, reg_map, f);
386   }
387 }
388 
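// Retrieve the Method* that the JIT attached to the call relocation at
// call_instr, if any. As noted in preserve_callee_argument_oops() above, an
// attached method is more reliable than re-parsing the call site bytecode.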
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return nullptr; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return nullptr; // not a call
}

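// Reset every inline cache in this method to its clean (unresolved) state,
// forcing the next invocation through each site to re-resolve its target.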
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "clearing of ICs only allowed at safepoint");
  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = nullptr;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT

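// Note: the actual liveness check for the cached metadata happens inside
// CompiledIC::clean_metadata(); this wrapper mainly names the intent at the
// call site below.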
static void clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  ic->clean_metadata();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <typename CallsiteT>
static void clean_if_nmethod_is_unloaded(CallsiteT* callsite, CompiledMethod* from,
                                         bool clean_all) {
  CodeBlob* cb = CodeCache::find_blob(callsite->destination());
  if (!cb->is_compiled()) {
    return;
  }
  CompiledMethod* cm = cb->as_compiled_method();
  if (clean_all || !cm->is_in_use() || cm->is_unloading() || cm->method()->code() != cm) {
    callsite->set_to_clean();
  }
}

// Cleans caches in nmethods that point either to classes that are unloaded
// or to nmethods that are unloaded.
//
// Can be called either in parallel (currently by G1) or after all
// nmethods have been unloaded.
void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  cleanup_inline_caches_impl(unloading_occurred, false);

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
}

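// Run the nmethod entry barrier by hand, for code paths that are about to
// use this nmethod without entering it through a normal call.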
void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != nullptr) {
    // We want to keep the invariant that nmethods found on a thread's stack at a
    // safepoint have gone through an entry barrier and are not armed. By running
    // the entry barrier here, this nmethod plays along and acts like any other
    // nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != nullptr && bs_nm->is_armed(nm)) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}

// Only called by whitebox test
void CompiledMethod::cleanup_inline_caches_whitebox() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledICLocker ic_locker(this);
  cleanup_inline_caches_impl(false /* unloading_occurred */, true /* clean_all */);
}

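// Address of the frame slot holding the original pc that was saved when this
// frame was patched for deoptimization; the slot sits at a fixed offset from
// the unextended sp (see orig_pc_offset()).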
address* CompiledMethod::orig_pc_addr(const frame* fr) {
  return (address*) ((address)fr->unextended_sp() + orig_pc_offset());
}

// Called to clean up after class unloading for live nmethods
void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to bad nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(CompiledDirectCall::at(iter.reloc()), this, clean_all);
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially freed memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != nullptr && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)nullptr);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }
}

address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use pc to determine a return address
  int exception_offset = int(pc - code_begin());
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != nullptr && cb == this, "");

    // Keep tty output consistent. To avoid ttyLocker, we buffer in stream, and print all at once.
    stringStream ss;
    ss.print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print_on(&ss);
    method()->print_codes_on(&ss);
    print_code_on(&ss);
    print_pcs_on(&ss);
    tty->print("%s", ss.as_string()); // print all at once
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return nullptr;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

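// Closure detecting references from this nmethod's metadata to "old" methods,
// i.e. methods made obsolete by class redefinition.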
class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata found via the relocations and CompiledICs, so that any
  // nmethod with a reference to old (redefined) methods can be deoptimized.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}
--- EOF ---