/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.

// RAII guard: the constructor acquires the current IC protection behaviour's
// lock for the method; the destructor releases it only if it was actually taken.
CompiledICLocker::CompiledICLocker(CompiledMethod* method)
  : _method(method),
    _behaviour(CompiledICProtectionBehaviour::current()),
    _locked(_behaviour->lock(_method)) {
}

CompiledICLocker::~CompiledICLocker() {
  if (_locked) {
    _behaviour->unlock(_method);
  }
}

bool CompiledICLocker::is_safe(CompiledMethod* method) {
  return CompiledICProtectionBehaviour::current()->is_safe(method);
}

bool CompiledICLocker::is_safe(address code) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(code);
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
  CompiledMethod* cm = cb->as_compiled_method();
  return CompiledICProtectionBehaviour::current()->is_safe(cm);
}

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert(cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL) cache = Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}



address CompiledIC::ic_destination() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in the inline cache.
// Used by the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

// This function may fail for two reasons: either due to running out of vtable
// stubs, or due to running out of IC stubs in an attempted transition to a
// transitional state. The needs_ic_stub_refill value will be set if the failure
// was due to running out of IC stubs, in which case the caller will refill IC
// stubs and retry.
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
                                    bool& needs_ic_stub_refill, bool caller_is_c1, TRAPS) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index, caller_is_c1);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
      delete holder;
      needs_ic_stub_refill = true;
      return false;
    }
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index, caller_is_c1);
    if (entry == NULL) {
      return false;
    }
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      needs_ic_stub_refill = true;
      return false;
    }
  }

  if (TraceICs) {
    ResourceMark rm;
    assert(call_info->selected_method() != NULL, "Unexpected null selected method");
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::entry_point(ic_destination()) != NULL;
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could be optimized
  // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert(is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // The call goes to the interpreter if the destination is either a stub (if
  // the call is optimized) or an I2C adapter blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is a call to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

bool CompiledIC::set_to_clean(bool in_use) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination.
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      return false;
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
  return true;
}

bool CompiledIC::is_clean() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe.

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      // The call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or because the static target is final).
      // At code generation time, this call has been emitted as a static call.
      // Call via stub.
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method(thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                      p2i(instruction_address()),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      CompiledICHolder* holder = info.claim_cached_icholder();
      if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
        delete holder;
        return false;
      }
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
        return false;
      }
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass = %s) %s",
                    p2i(instruction_address()),
                    (info.cached_metadata() != NULL) ? ((Klass*)info.cached_metadata())->print_value_string() : "NULL",
                    (safe) ? "" : " via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
  return true;
}


// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: The call can be statically bound. If it isn't also optimized, the property
//               wasn't provable at time of compilation. An optimized call will have any necessary
//               null check, while a static_bound won't. A static_bound (but not optimized) must
//               therefore use the unverified entry point.
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           bool caller_is_c1,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use() && !method_code->is_unloading()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - in the case that we notice here that the call is statically bound, we
    //     convert the call into what looks to be an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry = caller_is_c1 ? method_code->verified_inline_entry_point() : method_code->verified_entry_point();
    } else {
      entry = caller_is_c1 ? method_code->inline_entry_point() : method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to near compiled code.
    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      // Use stub entry
      address entry = caller_is_c1 ? method()->get_c2i_inline_entry() : method()->get_c2i_entry();
      info.set_interpreter_entry(entry, method());
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      entry = caller_is_c1 ? method()->get_c2i_unverified_inline_entry() : method()->get_c2i_unverified_entry();
      info.set_icholder_entry(entry, holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (cb != NULL && cb->is_vtable_blob()) {
    VtableStub* s = VtableStubs::entry_point(entry);
    return (s != NULL) && s->is_itable_stub();
  }

  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// ----------------------------------------------------------------------------

bool CompiledStaticCall::set_to_clean(bool in_use) {
  // in_use is unused but needed to match template function in CompiledMethod
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Reset call site
  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
  return true;
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                  name(),
                  p2i(instruction_address()),
                  p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    set_to_compiled(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, CompiledMethod* caller_nm, StaticCallInfo& info) {
  bool caller_is_nmethod = caller_nm->is_nmethod();
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use() && !m_code->is_unloading()) {
    info._to_interpreter = false;
    if (caller_nm->is_compiled_by_c1()) {
      info._entry = m_code->verified_inline_entry_point();
    } else {
      info._entry = m_code->verified_entry_point();
    }
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;

    if (caller_nm->is_compiled_by_c1()) {
      // C1 -> interp: values passed as oops
      info._entry = m()->get_c2i_inline_entry();
    } else {
      // C2 -> interp: values passed as fields
      info._entry = m()->get_c2i_entry();
    }
  }
}

address CompiledDirectStaticCall::find_stub_for(address instruction) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch (iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub() {
  return CompiledDirectStaticCall::find_stub_for(instruction_address());
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, address entry,
                                              NativeMovConstReg* method_holder,
                                              NativeJump* jump) {
  // A generated lambda form might be deleted from the LambdaForm
  // cache in MethodTypeForm. If a JIT-compiled LambdaForm method
  // becomes not entrant and the cache access returns null, the new
  // resolve will lead to a new generated LambdaForm.
  Method* old_method = reinterpret_cast<Method*>(method_holder->data());
  assert(old_method == NULL || old_method == callee() ||
         callee->is_compiled_lambda_form() ||
         !old_method->method_holder()->is_loader_alive() ||
         old_method->is_old(),  // may be race patching deoptimized nmethod due to redefinition.
         "a) MT-unsafe modification of inline cache");

  address destination = jump->jump_destination();
  assert(destination == (address)-1 || destination == entry
         || old_method == NULL || !old_method->method_holder()->is_loader_alive()  // may have a race due to class unloading.
         || old_method->is_old(),  // may be race patching deoptimized nmethod due to redefinition.
         "b) MT-unsafe modification of inline cache");
}
#endif // !PRODUCT