/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciSymbols.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logLevel.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif

static void print_trace_type_profile(outputStream* out, int depth, ciKlass* prof_klass, int site_count, int receiver_count,
                                     bool with_deco) {
  if (with_deco) {
    CompileTask::print_inline_indent(depth, out);
  }
  out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  prof_klass->name()->print_symbol_on(out);
  if (with_deco) {
    out->cr();
  }
}

static void trace_type_profile(Compile* C, ciMethod* method, JVMState* jvms,
                               ciMethod* prof_method, ciKlass* prof_klass, int site_count, int receiver_count) {
  int depth = jvms->depth() - 1;
  int bci = jvms->bci();
  if (TraceTypeProfile || C->print_inlining()) {
    if (!C->print_inlining()) {
      if (!PrintOpto && !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining_tty(prof_method, depth, bci, InliningResult::SUCCESS);
      print_trace_type_profile(tty, depth, prof_klass, site_count, receiver_count, true);
    } else {
      auto stream = C->inline_printer()->record(method, jvms, InliningResult::SUCCESS);
      print_trace_type_profile(stream, depth, prof_klass, site_count, receiver_count, false);
    }
  }

  LogTarget(Debug, jit, inlining) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    print_trace_type_profile(&ls, depth, prof_klass, site_count, receiver_count, true);
  }
}

static bool arg_can_be_larval(ciMethod* callee, int arg_idx) {
  if (callee->is_object_constructor() && arg_idx == 0) {
    return true;
  }
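
  // Apart from constructor receivers (handled above), the remaining candidates for a
  // larval argument are the Unsafe put* intrinsics and finishPrivateBuffer listed below,
  // which all take the (possibly larval) value object as argument 1.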
  if (arg_idx != 1 || callee->intrinsic_id() == vmIntrinsicID::_none) {
    return false;
  }

  switch (callee->intrinsic_id()) {
    case vmIntrinsicID::_finishPrivateBuffer:
    case vmIntrinsicID::_putBoolean:
    case vmIntrinsicID::_putBooleanOpaque:
    case vmIntrinsicID::_putBooleanRelease:
    case vmIntrinsicID::_putBooleanVolatile:
    case vmIntrinsicID::_putByte:
    case vmIntrinsicID::_putByteOpaque:
    case vmIntrinsicID::_putByteRelease:
    case vmIntrinsicID::_putByteVolatile:
    case vmIntrinsicID::_putChar:
    case vmIntrinsicID::_putCharOpaque:
    case vmIntrinsicID::_putCharRelease:
    case vmIntrinsicID::_putCharUnaligned:
    case vmIntrinsicID::_putCharVolatile:
    case vmIntrinsicID::_putShort:
    case vmIntrinsicID::_putShortOpaque:
    case vmIntrinsicID::_putShortRelease:
    case vmIntrinsicID::_putShortUnaligned:
    case vmIntrinsicID::_putShortVolatile:
    case vmIntrinsicID::_putInt:
    case vmIntrinsicID::_putIntOpaque:
    case vmIntrinsicID::_putIntRelease:
    case vmIntrinsicID::_putIntUnaligned:
    case vmIntrinsicID::_putIntVolatile:
    case vmIntrinsicID::_putLong:
    case vmIntrinsicID::_putLongOpaque:
    case vmIntrinsicID::_putLongRelease:
    case vmIntrinsicID::_putLongUnaligned:
    case vmIntrinsicID::_putLongVolatile:
    case vmIntrinsicID::_putFloat:
    case vmIntrinsicID::_putFloatOpaque:
    case vmIntrinsicID::_putFloatRelease:
    case vmIntrinsicID::_putFloatVolatile:
    case vmIntrinsicID::_putDouble:
    case vmIntrinsicID::_putDoubleOpaque:
    case vmIntrinsicID::_putDoubleRelease:
    case vmIntrinsicID::_putDoubleVolatile:
    case vmIntrinsicID::_putReference:
    case vmIntrinsicID::_putReferenceOpaque:
    case vmIntrinsicID::_putReferenceRelease:
    case vmIntrinsicID::_putReferenceVolatile:
    case vmIntrinsicID::_putValue:
      return true;
    default:
      return false;
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, ciKlass* speculative_receiver_type,
                                       bool allow_intrinsics) {
  assert(callee != nullptr, "failed method resolution");

  ciMethod* caller = jvms->method();
  int bci = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  ciMethod* orig_callee = caller->get_method_at_bci(bci);

  const bool is_virtual_or_interface = (bytecode == Bytecodes::_invokevirtual) ||
                                       (bytecode == Bytecodes::_invokeinterface) ||
                                       (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual) ||
                                       (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != nullptr) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch) log->print(" virtual='1'");
    if (allow_inline) log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    if (callee->is_method_handle_intrinsic()) {
      log->print(" method_handle_intrinsic='1'");
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods. If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  CallGenerator* cg_intrinsic = nullptr;
  if (callee->intrinsic_id() == vmIntrinsics::_makePrivateBuffer || callee->intrinsic_id() == vmIntrinsics::_finishPrivateBuffer) {
    // These methods must be inlined so that we don't have larval value objects crossing method
    // boundaries
    assert(!call_does_dispatch, "callee should not be virtual %s", callee->name()->as_utf8());
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);

    if (cg == nullptr) {
      // This is probably because the intrinsic is disabled from the command line
      char reason[256];
      jio_snprintf(reason, sizeof(reason), "cannot find an intrinsic for %s", callee->name()->as_utf8());
      C->record_method_not_compilable(reason);
      return nullptr;
    }
    return cg;
  } else if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != nullptr) {
      if (cg->is_predicated()) {
        // Code without intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
            vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
        if (inline_cg != nullptr) {
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
        }
      }

      // If intrinsic does the virtual dispatch, we try to use the type profile
      // first, and hopefully inline it as the regular virtual call below.
      // We will retry the intrinsic if nothing had claimed it afterwards.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = nullptr;
      } else if (IncrementalInline && should_delay_vector_inlining(callee, jvms)) {
        return CallGenerator::for_late_inline(callee, cg);
      } else {
        return cg;
      }
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, allow_inline);
    return cg;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      bool should_delay = C->should_delay_inlining();
      if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
        // For optimized virtual calls assert at runtime that receiver object
        // is a subtype of the inlined method holder. CHA can report a method
        // as a unique target under an abstract method, but receiver type
        // sometimes has a broader type. Similar scenario is possible with
        // default methods when type system loses information about implemented
        // interfaces.
        if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
          CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
              Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);

          cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
        }
        if (cg != nullptr) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay) {
            return CallGenerator::for_late_inline(callee, cg);
          } else if (should_delay_string_inlining(callee, jvms)) {
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if (should_delay_vector_reboxing_inlining(callee, jvms)) {
            return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
          } else {
            return cg;
          }
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && UseTypeProfile) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = profile.has_receiver(0) && (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = nullptr;

      int morphism = profile.morphism();
      if (speculative_receiver_type != nullptr) {
        if (!too_many_traps_or_recompiles(caller, bci, Deoptimization::Reason_speculate_class_check)) {
          // We have a speculative type, we should be able to resolve
          // the call. We do that before looking at the profiling at
          // this invoke because it may lead to bimorphic inlining which
          // a speculative type should help us avoid.
          receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                   speculative_receiver_type);
          if (receiver_method == nullptr) {
            speculative_receiver_type = nullptr;
          } else {
            morphism = 1;
          }
        } else {
          // speculation failed before. Use profiling at the call
          // (could allow bimorphic inlining for instance).
          speculative_receiver_type = nullptr;
        }
      }
      if (receiver_method == nullptr &&
          (have_major_receiver || morphism == 1 ||
           (morphism == 2 && UseBimorphicInlining))) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now. Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != nullptr) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
            vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != nullptr) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = nullptr;
          ciMethod* next_receiver_method = nullptr;
          if (morphism == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != nullptr) {
              next_hit_cg = this->call_generator(next_receiver_method,
                  vtable_index, !call_does_dispatch, jvms,
                  allow_inline, prof_factor);
              if (next_hit_cg != nullptr && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline second receiver's method
                next_hit_cg = nullptr;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (morphism == 2
                                                ? Deoptimization::Reason_bimorphic
                                                : Deoptimization::reason_class_check(speculative_receiver_type != nullptr));
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != nullptr)) &&
              !too_many_traps_or_recompiles(caller, bci, reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = (IncrementalInlineVirtual ? CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor)
                                                : CallGenerator::for_virtual_call(callee, vtable_index));
          }
          if (miss_cg != nullptr) {
            if (next_hit_cg != nullptr) {
              assert(speculative_receiver_type == nullptr, "shouldn't end up here if we used speculation");
              trace_type_profile(C, jvms->method(), jvms, next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != nullptr) {
              ciKlass* k = speculative_receiver_type != nullptr ? speculative_receiver_type : profile.receiver(0);
              trace_type_profile(C, jvms->method(), jvms, receiver_method, k, site_count, receiver_count);
              float hit_prob = speculative_receiver_type != nullptr ? 1.0 : profile.receiver_prob(0);
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
              if (cg != nullptr) {
                return cg;
              }
            }
          }
        }
      }
    }

    // If there is only one implementor of this interface then we
    // may be able to bind this invoke directly to the implementing
    // klass but we need both a dependence on the single interface
    // and on the method we bind to. Additionally since all we know
    // about the receiver type is that it's supposed to implement the
    // interface we have to insert a check that it's the class we
    // expect. Interface types are not checked by the verifier so
    // they are roughly equivalent to Object.
    // The number of implementors for declared_interface is less or
    // equal to the number of implementors for target->holder() so
    // if number of implementors of target->holder() == 1 then
    // number of implementors for decl_interface is 0 or 1. If
    // it's 0 then no class implements decl_interface and there's
    // no point in inlining.
    if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
      ciInstanceKlass* declared_interface =
          caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
      ciInstanceKlass* singleton = declared_interface->unique_implementor();

      if (singleton != nullptr) {
        assert(singleton != declared_interface, "not a unique implementor");

        ciMethod* cha_monomorphic_target =
            callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);

        if (cha_monomorphic_target != nullptr &&
            cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
          ciKlass* holder = cha_monomorphic_target->holder();

          // Try to inline the method found by CHA. Inlined method is guarded by the type check.
          CallGenerator* hit_cg = call_generator(cha_monomorphic_target,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);

          // Deoptimize on type check fail. The interpreter will throw ICCE for us.
          CallGenerator* miss_cg = CallGenerator::for_uncommon_trap(callee,
              Deoptimization::Reason_class_check, Deoptimization::Action_none);

          ciKlass* constraint = (holder->is_subclass_of(singleton) ? holder : singleton); // avoid upcasts
          CallGenerator* cg = CallGenerator::for_guarded_call(constraint, miss_cg, hit_cg);
          if (hit_cg != nullptr && cg != nullptr) {
            dependencies()->assert_unique_implementor(declared_interface, singleton);
            dependencies()->assert_unique_concrete_method(declared_interface, cha_monomorphic_target, declared_interface, callee);
            return cg;
          }
        }
      }
    } // call_does_dispatch && bytecode == Bytecodes::_invokeinterface

    // Nothing claimed the intrinsic, so we go with straightforward inlining
    // for the already discovered intrinsic.
    if (allow_intrinsics && cg_intrinsic != nullptr) {
      assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
      return cg_intrinsic;
    }
  } // allow_inline

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    const char* msg = "virtual call";
    C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg);
    C->log_inline_failure(msg);
    if (IncrementalInlineVirtual && allow_inline) {
      return CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor); // attempt to inline through virtual call later
    } else {
      return CallGenerator::for_virtual_call(callee, vtable_index);
    }
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target, or it is a static or special call.
    CallGenerator* cg = CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
    // For optimized virtual calls assert at runtime that receiver object
    // is a subtype of the method holder.
    if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
      CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
          Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);
      cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
    }
    return cg;
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
      {
        Node* receiver = jvms->map()->in(jvms->argoff() + 1);
        if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
          CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
          ciMethod* m = csj->method();
          if (m != nullptr &&
              (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
               m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
            // Delay String.<init>(new SB())
            return true;
        }
        return false;
      }

      default:
        return false;
    }
  }
  return false;
}

bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return aggressive_unboxing();
  }
  return false;
}

bool Compile::should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && call_method->is_vector_method();
}

bool Compile::should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && (call_method->intrinsic_id() == vmIntrinsics::_VectorRebox);
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc = bc()
  // caller = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized. Uncommon-trap for not-initialized static or
  // v-calls. Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}

#ifdef ASSERT
static bool check_call_consistency(JVMState* jvms, CallGenerator* cg) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  ciMethod* resolved_method = cg->method();
  if (!ciMethod::is_consistent_info(symbolic_info, resolved_method)) {
    tty->print_cr("JVMS:");
    jvms->dump();
    tty->print_cr("Bytecode info:");
    jvms->method()->get_method_at_bci(jvms->bci())->print(); tty->cr();
    tty->print_cr("Resolved method:");
    cg->method()->print(); tty->cr();
    return false;
  }
  return true;
}
#endif // ASSERT

//------------------------------do_call----------------------------------------
// Handle your basic call. Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool will_link;
  ciSignature* declared_signature = nullptr;
  ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != nullptr, "cannot be null");
  JFR_ONLY(Jfr::on_resolution(this, holder, orig_callee);)

  // Bump max node limit for JSR292 users
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
    C->set_max_node_limit(3*MaxNodeLimit);
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == nullptr || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note: In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg, /* require_const= */ true);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee = orig_callee;
  int vtable_index = Method::invalid_vtable_index;
  bool call_does_dispatch = false;

  // Speculative type of the receiver if any
  ciKlass* speculative_receiver_type = nullptr;
  if (is_virtual_or_interface) {
    Node* receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters. They might be changed.
    // For arrays, klass below is Object. When vtable calls are used,
    // resolving the call with Object would allow an illegal call to
    // finalize() on an array. We use holder instead: illegal calls to
    // finalize() won't be compiled as vtable calls (IC call
    // resolution will catch the illegal call) and the few legal calls
    // on array types won't be either.
    callee = C->optimize_virtual_call(method(), klass, holder, orig_callee,
                                      receiver_type, is_virtual,
                                      call_does_dispatch, vtable_index); // out-parameters
    speculative_receiver_type = receiver_type != nullptr ? receiver_type->speculative_type() : nullptr;
  }

  // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
  ciKlass* receiver_constraint = nullptr;
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_constructor()) {
    ciInstanceKlass* calling_klass = method()->holder();
    ciInstanceKlass* sender_klass = calling_klass;
    if (sender_klass->is_interface()) {
      receiver_constraint = sender_klass;
    }
  } else if (iter().cur_bc_raw() == Bytecodes::_invokeinterface && orig_callee->is_private()) {
    assert(holder->is_interface(), "How did we get a non-interface method here!");
    receiver_constraint = holder;
  }

  if (receiver_constraint != nullptr) {
    Node* receiver_node = stack(sp() - nargs);
    Node* cls_node = makecon(TypeKlassPtr::make(receiver_constraint, Type::trust_interfaces));
    Node* bad_type_ctrl = nullptr;
    Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
    if (bad_type_ctrl != nullptr) {
      PreserveJVMState pjvms(this);
      set_control(bad_type_ctrl);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_none);
    }
    if (stopped()) {
      return; // MUST uncommon-trap?
    }
    set_stack(sp() - nargs, casted_receiver);
  }

  // Scalarize value objects passed into this invocation if we know that they are not larval
  for (int arg_idx = 0; arg_idx < nargs; arg_idx++) {
    if (arg_can_be_larval(callee, arg_idx)) {
      continue;
    }

    cast_to_non_larval(peek(nargs - 1 - arg_idx));
  }

  // Note: It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs); // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
  if (failing()) {
    return;
  }
  assert(cg != nullptr, "must find a CallGenerator for callee %s", callee->name()->as_utf8());

  // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
  orig_callee = callee = nullptr;

  // ---------------------

  // Feed profiling data for arguments to the type system so it can
  // propagate it as speculative types
  record_profiled_arguments_for_speculation(cg->method(), bc());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(), "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : nullptr;

  // The extra CheckCastPPs for speculative types mess with PhaseStringOpts
  if (receiver != nullptr && !call_does_dispatch && !cg->is_string_late_inline()) {
    // Feed profiling data for a single receiver to the type system so
    // it can propagate it as a speculative type
    receiver = record_profiled_receiver_for_speculation(receiver);
  }

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == nullptr) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again. Once we call
    // cg->generate(), we are committed. If it fails, the whole
    // compilation task is compromised.
    if (failing()) return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. Should always be possible to
    // get a normal java call that may inline in that case
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    new_jvms = cg->generate(jvms);
    if (new_jvms == nullptr) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  assert(check_call_consistency(jvms, cg), "inconsistent info");

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != nullptr && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt); // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing. These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct));
        } else if (is_reference_type(rt)) {
          assert(is_reference_type(ct), "rt=%s, ct=%s", type2name(rt), type2name(ct));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             "mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name());
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null. Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
      if (C->log() != nullptr) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
    BasicType ct = ctype->basic_type();
    if (is_reference_type(ct)) {
      record_profiled_return_for_speculation();
    }

    if (!rtype->is_void() && cg->method()->intrinsic_id() != vmIntrinsicID::_makePrivateBuffer) {
      Node* retnode = peek();
      const Type* rettype = gvn().type(retnode);
      if (rettype->is_inlinetypeptr() && !retnode->is_InlineType()) {
        retnode = InlineTypeNode::make_from_oop(this, retnode, rettype->inline_klass());
        dec_sp(1);
        push(retnode);
      }
    }

    if (cg->method()->is_object_constructor() && receiver != nullptr && gvn().type(receiver)->is_inlinetypeptr()) {
      InlineTypeNode* non_larval = InlineTypeNode::make_from_oop(this, receiver, gvn().type(receiver)->inline_klass());
      // Relinquish the oop input; we will delay the allocation to the point it is needed, see the
      // comments in InlineTypeNode::Ideal for more details
      non_larval = non_larval->clone_if_required(&gvn(), nullptr);
      non_larval->set_oop(gvn(), null());
      non_larval->set_is_buffered(gvn(), false);
      non_larval = gvn().transform(non_larval)->as_InlineType();
      map()->replace_edge(receiver, non_larval);
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  Arena tmp_mem{mtCompiler};
  GrowableArray<int> bcis(&tmp_mem, 8, 0, -1);
  GrowableArray<const Type*> extypes(&tmp_mem, 8, 0, nullptr);
  GrowableArray<int> saw_unloaded(&tmp_mem, 8, 0, -1);

  bool default_handler = false;
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h = handlers.handler();
    int h_bci = h->handler_bci();
    ciInstanceKlass* h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded.contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded.append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note: It's OK if the BCIs repeat themselves.
    bcis.append(h_bci);
    extypes.append(h_extype);
    if (h_bci == -1) {
      default_handler = true;
    }
  }

  if (!default_handler) {
    bcis.append(-1);
    const Type* extype = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();
    extype = extype->join(TypeInstPtr::NOTNULL);
    extypes.append(extype);
  }

  int len = bcis.length();
  CatchNode *cn = new CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis.at(i);
    Node* ctrl = _gvn.transform( new CatchProjNode(catch_, i+1,handler_bci));
    // This handler cannot happen?
    if (ctrl == top()) continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes.at(i)->is_instptr();
    Node* ex_oop = _gvn.transform(new CreateExNode(extypes.at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded.contains(handler_bci)) {
      // An unloaded exception type is coming here. Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions. But, watch for it.
      if (PrintOpto && (Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->instance_klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->instance_klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) { // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {               // Else jump to corresponding handler
      push_ex_oop(ex_oop); // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note: If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses. We do a Deutsch-Schiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: we have some handlers whose klasses have subklasses or are not
// loaded at compile-time. We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(nullptr) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==nullptr) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == nullptr)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state. (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = nullptr;
  if (has_exception_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi. If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_klass_node = new PhiNode(ex_node->in(0), TypeInstKlassPtr::OBJECT);
      for (uint i = 1; i < ex_node->req(); i++) {
        Node* ex_in = ex_node->in(i);
        if (ex_in == top() || ex_in == nullptr) {
          // This path was not taken.
          ex_klass_node->init_req(i, top());
          continue;
        }
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
        ex_klass_node->init_req( i, k );
      }
      ex_klass_node = _gvn.transform(ex_klass_node);
    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method. We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node); // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print_cr(" Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
      // If this is a backwards branch in the bytecodes, add safepoint
      maybe_add_safepoint(handler_bci);
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) { // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface()) // should not happen, but...
      break;                   // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop); // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print(" Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
      // If this is a backwards branch in the bytecodes, add safepoint
      maybe_add_safepoint(handler_bci);
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note: This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    nullptr, nullptr,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note: Moved add_debug_info into GraphKit::add_safepoint_edges.)
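
//------------------------------count_compiled_calls----------------------------
// Non-product statistics only: at method entry, bump the top method's compiled
// invocation counter; at call sites, bump the SharedRuntime counters for inlined
// vs. non-inlined calls, keyed by the invoke bytecode.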
#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
        case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
        case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokedynamic:
        case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
        default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
        case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
        case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokedynamic:
        case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
        default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                         ciKlass* holder, ciMethod* callee,
                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index,
                                         bool check_access) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, klass, holder, callee,
                                                         receiver_type, check_access);

  // Has the call been sufficiently improved such that it is no longer virtual?
  if (optimized_virtual_method != nullptr) {
    callee = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
                                     ciMethod* callee, const TypeOopPtr* receiver_type,
                                     bool check_access) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes. Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  if (receiver_type == nullptr) {
    return nullptr; // no receiver type info
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  // Array methods are all inherited from Object, and are monomorphic.
  // finalize() call on array is not allowed.
  if (receiver_type->isa_aryptr() &&
      callee->holder() == env()->Object_klass() &&
      callee->name() != ciSymbols::finalize_method_name()) {
    return callee;
  }

  // All other interesting cases are instance klasses.
  if (!receiver_type->isa_instptr()) {
    return nullptr;
  }

  ciInstanceKlass* receiver_klass = receiver_type->is_instptr()->instance_klass();
  if (receiver_klass->is_loaded() && receiver_klass->is_initialized() && !receiver_klass->is_interface() &&
      (receiver_klass == actual_receiver || receiver_klass->is_subtype_of(actual_receiver))) {
    // receiver_klass is the same or a better type than the original actual_receiver,
    // e.g. static receiver from bytecodes.
    actual_receiver = receiver_klass;
    // Is the actual_receiver exact?
    actual_receiver_is_exact = receiver_type->klass_is_exact();
  }

  ciInstanceKlass* calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);

  if (cha_monomorphic_target != nullptr) {
    // Hardwiring a virtual.
    assert(!callee->can_be_statically_bound(), "should have been handled earlier");
    assert(!cha_monomorphic_target->is_abstract(), "");
    if (!cha_monomorphic_target->can_be_statically_bound(actual_receiver)) {
      // If we inlined because CHA revealed only a single target method,
      // then we are dependent on that target method not getting overridden
      // by dynamic class loading. Be sure to test the "static" receiver
      // dest_method here, as opposed to the actual receiver, which may
      // falsely lead us to believe that the receiver is final or private.
      dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target, holder, callee);
    }
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != nullptr) {
      return exact_method;
    }
  }

  return nullptr;
}