/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/continuationEntry.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "sanitizers/leak.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be set or we must be at a safe point.
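//
// An inline cache moves between three stable states: clean (the call goes to
// a runtime resolve stub), monomorphic (the call goes directly to a single
// compiled or interpreted target), and megamorphic (the call dispatches
// through a vtable or itable stub). Transitions that are not MT-safe to
// patch in place are staged through ICStubs in the InlineCacheBuffer and
// take effect at a safepoint.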

CompiledICLocker::CompiledICLocker(CompiledMethod* method)
  : _method(method),
    _behaviour(CompiledICProtectionBehaviour::current()),
    _locked(_behaviour->lock(_method)) {
}

CompiledICLocker::~CompiledICLocker() {
  if (_locked) {
    _behaviour->unlock(_method);
  }
}

bool CompiledICLocker::is_safe(CompiledMethod* method) {
  return CompiledICProtectionBehaviour::current()->is_safe(method);
}

bool CompiledICLocker::is_safe(address code) {
  CodeBlob* cb = CodeCache::find_blob(code);
  assert(cb != nullptr && cb->is_compiled(), "must be compiled");
  CompiledMethod* cm = cb->as_compiled_method();
  return CompiledICProtectionBehaviour::current()->is_safe(cm);
}

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized virtual call does not have cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != nullptr || Universe::non_oop_word() == nullptr,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? nullptr : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


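// Patch the destination of the inline cache (and, for non-optimized ICs, its
// cached value). When 'is_icstub' is set, the new destination is a transition
// stub in the InlineCacheBuffer; only the entry point is changed then, and
// the cached value is published later, when the stub contents are copied back
// into the IC at a safepoint.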
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != nullptr, "must set legal entry point");
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized() || cache == nullptr, "an optimized virtual call does not have cached metadata");
  assert(cache == nullptr || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case, the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint.  Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

#ifdef ASSERT
  {
    CodeBlob* cb = CodeCache::find_blob(_call->instruction_address());
    assert(cb != nullptr && cb->is_compiled(), "must be compiled");
  }
#endif
  _call->set_destination_mt_safe(entry_point);

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point.  Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == nullptr, "must be null");
    return;
  }

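  // Encode a null cached value as the non_oop_word() sentinel so that a
  // concurrent reader never observes a raw null while patching is in
  // progress (see the matching check in cached_value()).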
  if (cache == nullptr)  cache = Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, nullptr, false);
}


address CompiledIC::ic_destination() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in the inline cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub::from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = nullptr;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != nullptr, "ic_call address must be set");
  assert(cm != nullptr, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != nullptr, "ic_call address must be set");
  assert(nm != nullptr, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

// This function may fail for two reasons: either due to running out of vtable
// stubs, or due to running out of IC stubs in an attempted transition to a
// transitional state. The needs_ic_stub_refill value will be set if the failure
// was due to running out of IC stubs, in which case the caller will refill IC
// stubs and retry.
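//
// A caller is expected to retry roughly like this (a minimal sketch; the
// actual retry loop lives in the inline-cache miss handling in SharedRuntime,
// and the exact surrounding names may differ):
//
//   for (;;) {
//     ICRefillVerifier refill_verifier;
//     bool needs_ic_stub_refill = false;
//     if (ic->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, THREAD)) {
//       break;  // transition succeeded
//     }
//     if (needs_ic_stub_refill) {
//       InlineCacheBuffer::refill_ic_stubs();  // free up ICStub space, then retry
//     }
//   }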
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
                                    bool& needs_ic_stub_refill, bool caller_is_c1, TRAPS) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index, caller_is_c1);
    if (entry == nullptr) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
      delete holder;
      needs_ic_stub_refill = true;
      return false;
    }
    // LSan appears unable to follow malloc-based memory consistently when it is embedded
    // as an immediate in generated machine code, so we have to ignore the object here.
    LSAN_IGNORE_OBJECT(holder);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index, caller_is_c1);
    if (entry == nullptr) {
      return false;
    }
    if (!InlineCacheBuffer::create_transition_stub(this, nullptr, entry)) {
      needs_ic_stub_refill = true;
      return false;
    }
  }

  {
    ResourceMark rm;
    assert(call_info->selected_method() != nullptr, "Unexpected null selected method");
    log_trace(inlinecache)("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// true if the destination is the megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::entry_point(ic_destination()) != nullptr;
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");

  CodeBlob* cb = CodeCache::find_blob(ic_destination());
  bool is_monomorphic = (cb != nullptr && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to the verified entry point, without using the inline cache (i.e., cached_value == nullptr).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could be optimized
  // virtual calls (because there are no currently loaded subclasses of a type) are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_metadata() != nullptr && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // It is a call to the interpreter if the destination is either a static
  // call stub (if the call is optimized) or an I2C adapter blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    CodeBlob* cb = CodeCache::find_blob(ic_destination());
    is_call_to_interpreted = (cb != nullptr && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != nullptr), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

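// Transitions the IC back to the clean state, i.e. pointing at the resolve
// stub. Returns false only when an unsafe transition needed an ICStub and
// the InlineCacheBuffer was out of space; the caller must then refill IC
// stubs and retry.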
bool CompiledIC::set_to_clean(bool in_use) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (TraceInlineCacheClearing) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }
  log_trace(inlinecache)("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));

  address entry = _call->get_resolve_call_stub(is_optimized());

  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)nullptr);
    }
  } else {
    // Unsafe transition - create stub.
    if (!InlineCacheBuffer::create_transition_stub(this, nullptr, entry)) {
      return false;
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
  return true;
}

bool CompiledIC::is_clean() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == nullptr, "sanity check");
  return is_clean;
}

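// Transitions the IC to a monomorphic state described by 'info': either a
// direct call to compiled code, or a call into the interpreter (via a static
// stub or a CompiledICHolder). Returns false when the transition needed an
// ICStub and the InlineCacheBuffer was out of space; the caller must refill
// IC stubs and retry.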
bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target, and these
  // transitions are mt_safe.

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != nullptr && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method(thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      {
        ResourceMark rm(thread);
        log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
           p2i(instruction_address()),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      CompiledICHolder* holder = info.claim_cached_icholder();
      if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
        delete holder;
        return false;
      }
      // LSan appears unable to follow malloc-based memory consistently when it is embedded
      // as an immediate in generated machine code, so we have to ignore the object here.
      LSAN_IGNORE_OBJECT(holder);
      {
        ResourceMark rm(thread);
        log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == nullptr);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob(info.entry());
    assert(cb != nullptr && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
        return false;
      }
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == nullptr || info.cached_metadata()->is_klass(), "must be");
      log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass = %s) %s",
        p2i(instruction_address()),
        (info.cached_metadata() != nullptr) ? ((Klass*)info.cached_metadata())->print_value_string() : "nullptr",
        (safe) ? "" : " via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
  return true;
}


// is_optimized: the compiler has generated an optimized call (i.e., fixed, no inline cache)
// static_bound: the call can be statically bound. If it isn't also optimized, the property
// wasn't provable at time of compilation. An optimized call will have any necessary
// null check, while a static_bound call won't. A static_bound (but not optimized) call must
// therefore use the unverified entry point.
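//
// Entry point selection below, in brief (C1 callers pick the *_inline_*
// entry variants; the table shows the non-C1 case):
//
//   callee state              is_optimized  entry used
//   ------------------------  ------------  ----------------------------------------
//   compiled and in use       true          verified entry point
//   compiled and in use       false         unverified entry point (IC klass check)
//   not compiled / unloading  true          c2i entry, with a Method*
//   not compiled / unloading  false         unverified c2i entry via CompiledICHolder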
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           bool caller_is_c1,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = nullptr;
  if (method_code != nullptr && method_code->is_in_use() && !method_code->is_unloading()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - In the case that we here notice the call is static bound we
    //     convert the call into what looks to be an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry      = caller_is_c1 ? method_code->verified_inline_entry_point() : method_code->verified_entry_point();
    } else {
      entry      = caller_is_c1 ? method_code->inline_entry_point() : method_code->entry_point();
    }
  }
  if (entry != nullptr) {
    // Call to near compiled code.
    info.set_compiled_entry(entry, is_optimized ? nullptr : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      // Use stub entry
      entry = caller_is_c1 ? method()->get_c2i_inline_entry() : method()->get_c2i_entry();
      info.set_interpreter_entry(entry, method());
    } else {
      // Use icholder entry
      assert(method_code == nullptr || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      entry = caller_is_c1 ? method()->get_c2i_unverified_inline_entry() : method()->get_c2i_unverified_entry();
      info.set_icholder_entry(entry, holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}

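// An entry is an icholder entry if the code it targets expects a
// CompiledICHolder* rather than a Klass* or Method* as the cached value:
// either a c2i adapter (a call into the interpreter) or a vtable/itable stub
// generated for icholder dispatch.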
bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob(entry);
  if (cb == nullptr) {
    return false;
  }
  if (cb->is_adapter_blob()) {
    return true;
  } else if (cb->is_vtable_blob()) {
    return VtableStubs::is_icholder_entry(entry);
  }
  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale, so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// ----------------------------------------------------------------------------

bool CompiledStaticCall::set_to_clean(bool in_use) {
  // in_use is unused but needed to match the template function in CompiledMethod
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Reset call site
  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
  return true;
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  {
    ResourceMark rm;
    log_trace(inlinecache)("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
        name(),
        p2i(instruction_address()),
        p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    set_to_compiled(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, CompiledMethod* caller_nm, StaticCallInfo& info) {
  assert(!m->mismatch(), "Mismatch for static call");
  bool caller_is_nmethod = caller_nm->is_nmethod();
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != nullptr && m_code->is_in_use() && !m_code->is_unloading()) {
    info._to_interpreter = false;
    if (caller_nm->is_compiled_by_c1()) {
      info._entry = m_code->verified_inline_entry_point();
    } else {
      info._entry = m_code->verified_entry_point();
    }
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    if (caller_nm->is_compiled_by_c1()) {
      // C1 -> interp: values passed as oops
      info._entry = m()->get_c2i_inline_entry();
    } else {
      // C2 -> interp: values passed as fields
      info._entry = m()->get_c2i_entry();
    }
  }
}

void CompiledStaticCall::compute_entry_for_continuation_entry(const methodHandle& m, StaticCallInfo& info) {
  if (ContinuationEntry::is_interpreted_call(instruction_address())) {
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

address CompiledDirectStaticCall::find_stub_for(address instruction) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)nullptr, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return nullptr;
}

address CompiledDirectStaticCall::find_stub() {
  return CompiledDirectStaticCall::find_stub_for(instruction_address());
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
          || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? nullptr : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, address entry,
                                              NativeMovConstReg* method_holder,
                                              NativeJump*        jump) {
  // A generated lambda form might be deleted from the LambdaForm
  // cache in MethodTypeForm.  If a JIT-compiled LambdaForm method
  // becomes non-entrant and the cache access returns null, the new
  // resolve will lead to a new generated LambdaForm.
  Method* old_method = reinterpret_cast<Method*>(method_holder->data());
  assert(old_method == nullptr || old_method == callee() ||
         callee->is_compiled_lambda_form() ||
         !old_method->method_holder()->is_loader_alive() ||
         old_method->is_old(),  // may be race patching deoptimized nmethod due to redefinition.
         "a) MT-unsafe modification of inline cache");

  address destination = jump->jump_destination();
  assert(destination == (address)-1 || destination == entry
         || old_method == nullptr || !old_method->method_holder()->is_loader_alive() // may have a race due to class unloading.
         || old_method->is_old(),  // may be race patching deoptimized nmethod due to redefinition.
         "b) MT-unsafe modification of inline cache");
}
#endif // !PRODUCT