/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/icBuffer.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _scopes_data_begin          = NULL;
    _deopt_handler_begin        = NULL;
    _deopt_mh_handler_begin     = NULL;
    _exception_cache            = NULL;
  }
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _has_wide_vectors           = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------
void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                 Mutex::_no_safepoint_check_flag);
  _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}
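
// The conditional expression above makes the locking re-entrant for callers
// that already hold CompiledMethod_lock: passing NULL to MutexLocker skips
// acquisition. A minimal sketch of the same idiom, using a hypothetical lock
// and function (not part of this file):
//
//   void do_work_locked() {
//     MutexLocker ml(Some_lock->owned_by_self() ? NULL : Some_lock,
//                    Mutex::_no_safepoint_check_flag);
//     // ... mutate state guarded by Some_lock; correct whether or not the
//     // caller already held the lock.
//   }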

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache* ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that new
        // next pointers always point at live ExceptionCaches that cannot be
        // removed by concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}
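
// Insertion itself is serialized by ExceptionCache_lock; the CAS is still
// required because a concurrent GC cleaner may simultaneously CAS dead
// entries off the head. A minimal sketch of the underlying CAS publish idiom
// (hypothetical node type, illustrative only):
//
//   struct Node { Node* volatile _next; };
//   Node* volatile _head;
//
//   void push(Node* n) {
//     for (;;) {
//       Node* h = Atomic::load(&_head);
//       n->_next = h;                               // link before publishing
//       if (Atomic::cmpxchg(&_head, h, n) == h) {   // publish atomically
//         return;                                   // success
//       }                                           // lost a race: retry
//     }
//   }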

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}
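
// To summarize the concurrency contract above with a hypothetical timeline
// (illustrative only, not additional behavior):
//
//   cleaner (single):   head -> A(dead) -> B(live) -> C(dead) -> D(live)
//                       unlink A with a CAS on the head (contended by
//                       inserts); unlink C with a plain B->set_next(D)
//   inserter (locked):  may concurrently CAS the head, so only the head
//                       unlink needs a CAS; interior unlinks are
//                       single-writer plain stores.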

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}
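
// A hedged usage sketch: exception dispatch code might consult this cache
// before computing a handler the slow way, then record the result for next
// time. compute_handler_slowly is a hypothetical stand-in for the real
// slow-path lookup:
//
//   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);
//   if (handler == NULL) {
//     handler = compute_handler_slowly(cm, exception, ret_pc);  // hypothetical
//     cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
//   }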

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc + 1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc + 1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}
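
// Both queries above restrict the RelocIterator to the half-open window
// [pc, pc + 1), which visits only relocations bound to that exact
// instruction. A minimal sketch of the same pattern, generalized to an
// arbitrary relocation type (illustrative helper, not part of this file):
//
//   static bool has_reloc_at(CompiledMethod* cm, address pc, relocInfo::relocType t) {
//     RelocIterator iter(cm, pc, pc + 1);
//     while (iter.next()) {
//       if (iter.type() == t) return true;
//     }
//     return false;
//   }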

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC* ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    if (ssd.is_optimized_linkToNative()) return; // call was replaced
    Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}
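
// A hedged summary of the decision above: the signature used to locate
// argument oops comes from the invoke bytecode, unless the compiler attached
// a concrete callee to the call site, which is more accurate for late-bound
// calls (e.g. method handle intrinsics):
//
//   bytecode info:    has_receiver/has_appendix/signature from Bytecode_invoke
//   attached Method*: has_receiver = !callee->is_static(), no appendix,
//                     signature = callee->signature()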

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}
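
// Usage sketch (hypothetical caller): given a return address taken from a
// frame, recover the compiler-attached callee of the call that precedes it:
//
//   address return_pc = fr.pc();                           // frame's return pc
//   Method* callee = cm->attached_method_before_pc(return_pc);
//   if (callee != NULL) {
//     // trust the attached callee over bytecode-derived call info
//   }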

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check that the class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC* ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall* ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // It is OK to look up references to zombies here.
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      // Inline cache cleaning should only be initiated on CompiledMethods that have been
      // observed to be is_alive(). However, with concurrent code cache unloading, it is
      // possible that by now, the state has become !is_alive. This can happen in two ways:
      // 1) It can be racingly flipped to unloaded if the nmethod being cleaned (from the
      // sweeper) is_unloading(). This is fine, because if that happens, then the inline
      // caches have already been cleaned under the same CompiledICLocker that we now hold during
      // inline cache cleaning, and we will simply walk the inline caches again, and likely not
      // find much of interest to clean. However, this race prevents us from asserting that the
      // nmethod is_alive(). The is_unloading() function is completely monotonic; once set due
      // to an oop dying, it remains set forever until freed. Because of that, all unloaded
      // nmethods are is_unloading(), but notably, an unloaded nmethod may also subsequently
      // become zombie (when the sweeper converts it to zombie).
      // 2) It can be racingly flipped to zombie if the concurrent GC is cleaning an nmethod
      // that is concurrently being made zombie by the sweeper. In this
      // scenario, the sweeper will first transition the nmethod to zombie, and then when
      // unregistering from the GC, it will wait until the GC is done. The GC will then clean
      // the inline caches *with IC stubs*, even though no IC stubs are needed. This is fine,
      // as long as the IC stubs are guaranteed to be released before the next safepoint, where
      // IC finalization requires live IC stubs to not be associated with zombie nmethods.
      // This is guaranteed, because the sweeper does not have a single safepoint check until
      // after it completes the whole transition function; it will wake up after the GC is
      // done with concurrent code cache cleaning (which blocks out safepoints using the
      // suspendible threads set), and then call clear_ic_callsites, which will release the
      // associated IC stubs, before a subsequent safepoint poll can be reached. This
      // guarantees that the spuriously created IC stubs are released appropriately before
      // IC finalization in a safepoint gets to run. Therefore, this race is fine. This is also
      // valid in a scenario where an inline cache of a zombie nmethod gets a spurious IC stub,
      // and then when cleaning another inline cache, fails to request an IC stub because we
      // exhausted the IC stub buffer. In this scenario, the GC will request a safepoint after
      // yielding the suspendible thread set, effectively unblocking safepoints. Before such
      // a safepoint can be reached, the sweeper similarly has to wake up, clear the IC stubs,
      // and reach the next safepoint poll, after the whole transition function has completed.
      // Due to the various races that can cause an nmethod to first be is_alive() and then
      // racingly become !is_alive(), it is unfortunately not possible to assert the nmethod
      // is_alive(), !is_unloaded() or !is_zombie() here.
      if (!ic->set_to_clean(!from->is_unloading())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC* ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall* csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Returns false when cleaning had to be aborted,
// for instance because an inline cache transition could not allocate an
// IC stub; the caller is then expected to refill IC stubs and retry.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // Exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != NULL) {
    // We want to keep the invariant that nmethods found through iteration of a
    // Thread's nmethods during safepoints have gone through an entry barrier
    // and are not armed. By calling this nmethod entry barrier, it plays along
    // and acts like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != NULL) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}
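
// A hedged usage sketch: GC or sweeper code that touches this nmethod outside
// the normal execution path can disarm it up front, so that subsequent stack
// walks see it like any other executed nmethod:
//
//   cm->run_nmethod_entry_barrier();  // harmless no-op if no nmethod barrier set
//   // ... proceed to patch or inspect the nmethod's oops and inline caches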

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    // Call this nmethod entry barrier from the sweeper.
    run_nmethod_entry_barrier();
    InlineCacheBuffer::refill_ic_stubs();
  }
}
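
// The loop above is the IC-stub refill-retry idiom used throughout this file:
// attempt cleaning under CompiledICLocker, and if an IC transition could not
// allocate a stub, drop the locker, refill the shared stub buffer, and retry.
// A condensed sketch of the shape (same calls as above, no extra behavior):
//
//   for (;;) {
//     ICRefillVerifier verifier;             // asserts refills happen when needed
//     { CompiledICLocker locker(cm);
//       if (cm->cleanup_inline_caches_impl(false, clean_all)) break;  // done
//     }                                      // locker released before refilling
//     InlineCacheBuffer::refill_ic_stubs();
//   }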

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)NULL);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use the continuation pc to determine a return address.
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread* thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}
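
// A hedged sketch of the table semantics used above: the implicit exception
// table maps the offset of a faulting instruction to the offset of its
// continuation, with 0 meaning "no entry":
//
//   int fault_off = pc - code_begin();
//   int cont_off  = ImplicitExceptionTable(this).continuation_offset(fault_off);
//   // cont_off == 0         -> not an expected implicit exception site
//   // cont_off == fault_off -> deoptimize instead of continuing (JVMCI)
//   // otherwise             -> resume execution at code_begin() + cont_off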

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata (found via the RelocIterator and in CompiledICs) so that
  // any nmethod that has a reference to old methods can be deoptimized.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}