/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};
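// A minimal sketch of how a CallGenerator is typically driven (informal,
// hypothetical caller-side code for orientation only; the real policy lives
// in Compile::call_generator and Parse::do_call):
//
//   CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
//   if (cg != nullptr) {
//     JVMState* new_jvms = cg->generate(jvms);  // may return nullptr on failure
//     ...
//   }
//
// generate() consumes the caller's JVMState and returns the state after the
// inlined body or emitted call, augmented with any exceptional states.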
JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return nullptr;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  if (C->failing()) return nullptr;

  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != nullptr) ;
    return nullptr;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(nullptr),
      _separate_io_proj(separate_io_proj)
  {
    if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create an inline type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};
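// Background (informal): a DirectCallGenerator emits a CallStaticJavaNode
// whose target is a resolution stub, selected in generate() below:
//
//   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
//                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
//
// The stub resolves the callee on first execution and patches the call site,
// so later executions call the resolved entry point directly.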
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, its JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};
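// Note (informal): the CallDynamicJavaNode emitted below dispatches either
// through an inline cache (when UseInlineCaches is on) or through the vtable
// slot at _vtable_index. Conceptually the Java-level call is
//
//   receiver.method(args...);   // receiver's dynamic type picks the target
//
// so a provably-null receiver makes the call site unreachable, which is why
// generate() traps on a constant-null receiver instead of emitting a call.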
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  kit.C->print_inlining_update(this);

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == nullptr) ? nullptr : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, its JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // its JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
  return new ParseGenerator(m, expected_uses);
}
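// A minimal sketch of how these factories are typically selected (hypothetical
// call-site logic; the real, more elaborate policy lives in
// Compile::call_generator):
//
//   if (can_parse && should_inline)   cg = CallGenerator::for_inline(m, uses);
//   else if (call_does_dispatch)      cg = CallGenerator::for_virtual_call(m, vtable_index);
//   else                              cg = CallGenerator::for_direct_call(m);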
// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}
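// Late inlining in one paragraph (informal): the generators below first emit a
// regular out-of-line call during parsing, remember the CallStaticJava node,
// and register themselves with Compile. After IGVN has had a chance to fold
// the call's inputs (e.g. a MethodHandle becoming constant), do_late_inline()
// replaces the call node with the inlined body. Roughly:
//
//   parse:        cg->generate(jvms)      // emits call, C->add_late_inline(cg)
//   optimize:     IGVN passes run
//   late inline:  cg->do_late_inline()    // re-parses callee into the graph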
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(InliningResult result, const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* inline_cg() {
    return _inline_cg;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};
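// Informal sketch of the method handle case: a call like
//
//   mh.invokeBasic(args...)      // 'mh' not yet a compile-time constant
//
// is kept as a real call during parsing; if IGVN later proves 'mh' constant,
// the check below can strength-reduce the MH linker call to a direct call to
// the vmtarget, or inline it outright.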
bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity");  // shouldn't have been scheduled for inlining in the first place

  if (cg != nullptr) {
    // AlwaysIncrementalInline causes for_method_handle_inline() to
    // return a LateInlineCallGenerator. Extract the
    // InlineCallGenerator from it.
    if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
      cg = cg->inline_cg();
      assert(cg != nullptr, "inline call generator expected");
    }

    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // A method handle call which has a constant appendix argument should be either inlined or replaced with a direct call,
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}
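// Informal note on the virtual flavor below: devirtualization may pay off even
// when inlining is not desirable. A monomorphic receiver type proven by IGVN
// turns
//
//   receiver.m(args...)   // virtual dispatch
//
// into a direct (optimized virtual) call to the unique target, without
// inlining the target's body.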
// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
private:
  jlong _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod* _callee;
  bool _is_pure_call;
  float _prof_factor;

protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == nullptr, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != nullptr) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(InliningResult result, const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};
bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE,
                        "late call devirtualization failed (receiver may be null)");
    }
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE,
                        "late call devirtualization failed (interface call)");
    }
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        nullptr /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != nullptr) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // A virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required");  // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}
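// Informal roadmap for do_late_inline_helper() below: (1) give up on dead or
// malformed call sites, (2) rebuild a JVMState/SafePointNode that looks as if
// the parser had just reached the call, (3) run do_late_inline_check(), and
// (4) parse the callee via inline_cg()->generate(), splicing the result back
// into the graph with GraphKit::replace_call().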
void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == nullptr || call->outcnt() == 0 ||
      call->in(0) == nullptr || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain_cc();
  for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
    if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return;  // dead path
    }
  }

  // check for unreachable loop
  CallProjections* callprojs = call->extract_projections(true);
  if ((callprojs->fallthrough_catchproj == call->in(0)) ||
      (callprojs->catchall_catchproj == call->in(0)) ||
      (callprojs->fallthrough_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs->catchall_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs->catchall_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != nullptr) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  if (is_pure_call() && result_not_used) {
    // The call is marked as pure (no important side effects), and the result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
      map->set_req(i1, C->top());
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple* domain_sig = call->_tf->domain_sig();
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

    uint j = TypeFunc::Parms;
    int arg_num = 0;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
        // Inline type arguments are not passed by reference: we get an argument per
        // field of the inline type. Build InlineTypeNodes from the inline type arguments.
        GraphKit arg_kit(jvms, &gvn);
        Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
      }
      if (t != Type::HALF) {
        arg_num++;
      }
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }
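    // What follows is Valhalla-specific plumbing. A rough sketch of the shape
    // being handled (hypothetical Java-level view, for orientation only):
    //
    //   value class Point { int x; int y; }
    //   Point p = (Point) mh.invokeBasic(...);  // callee may return Point as fields
    //
    // If the caller expects an oop but the inlined callee returns the inline
    // type as scalarized fields, a buffer object has to be allocated up front.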
    // Check if we are late inlining a method handle call that returns an inline type as fields.
    Node* buffer_oop = nullptr;
    ciMethod* inline_method = inline_cg()->method();
    ciType* return_type = inline_method->return_type();
    if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
        return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
      // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
      // Do this before the method handle call in case the buffer allocation triggers deoptimization and
      // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
      GraphKit arg_kit(jvms, &gvn);
      {
        PreserveReexecuteState preexecs(&arg_kit);
        arg_kit.jvms()->set_should_reexecute(true);
        arg_kit.inc_sp(nargs);
        Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
        buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
      }
      jvms = arg_kit.transfer_exceptions_into_jvms();
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != nullptr) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == nullptr)  return;  // no change
    if (C->failing())  return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
      result = kit.must_be_not_null(result, false);
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_method->has_loops());
      C->env()->notice_inlined_method(inline_method);
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped());  // path is dead; needs cleanup
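    // The merge built below has, informally, this shape for a possibly-null
    // scalarized result that still needs an on-heap buffer:
    //
    //   if (is_init == 0)  oop = null;                                   // region path 1
    //   else               { store fields to buffer_oop; oop = buffer_oop; }  // path 2
    //
    // with the oop and memory state merged via Phis on a 3-input RegionNode.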
    // Handle inline type returns
    InlineTypeNode* vt = result->isa_InlineType();
    if (vt != nullptr) {
      if (call->tf()->returns_inline_type_as_fields()) {
        vt->replace_call_results(&kit, call, C);
      } else if (vt->is_InlineType()) {
        // Result might still be allocated (for example, if it has been stored to a non-flat field)
        if (!vt->is_allocated(&kit.gvn())) {
          assert(buffer_oop != nullptr, "should have allocated a buffer");
          RegionNode* region = new RegionNode(3);

          // Check if result is null
          Node* null_ctl = kit.top();
          kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
          region->init_req(1, null_ctl);
          PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
          Node* init_mem = kit.reset_memory();
          PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);

          // Not null, initialize the buffer
          kit.set_all_memory(init_mem);
          vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
          // Do not let stores that initialize this buffer be reordered with a subsequent
          // store that would make this buffer accessible by other threads.
          AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
          assert(alloc != nullptr, "must have an allocation node");
          kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
          region->init_req(2, kit.control());
          oop->init_req(2, buffer_oop);
          mem->init_req(2, kit.merged_memory());

          // Update oop input to buffer
          kit.gvn().hash_delete(vt);
          vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
          vt->set_is_buffered(kit.gvn());
          vt = kit.gvn().transform(vt)->as_InlineType();

          kit.set_control(kit.gvn().transform(region));
          kit.set_all_memory(kit.gvn().transform(mem));
          kit.record_for_igvn(region);
          kit.record_for_igvn(oop);
          kit.record_for_igvn(mem);
        }
        result = vt;
      }
      DEBUG_ONLY(buffer_oop = nullptr);
    } else {
      assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
    }
    assert(buffer_oop == nullptr, "unused buffer allocation");

    kit.replace_call(call, result, true);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}
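// Informally, PredictedCallGenerator::generate() below emits the classic
// type-speculation diamond. A sketch of the resulting control flow:
//
//   if (receiver->klass() == predicted_receiver) {   // hit, probability _hit_prob
//     ... _if_hit path (usually inlined) ...
//   } else {
//     ... _if_missed path (virtual call or uncommon trap) ...
//   }
//   // paths merge below: control via a RegionNode; i/o, memory, and
//   // debug state via PhiNodes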
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so its JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = nullptr;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = nullptr;
  JVMState* slow_jvms = nullptr;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return nullptr;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != nullptr, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == nullptr) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == nullptr) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    // TODO 8284443 still needed?
    if (m->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in fast path
      m = m->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, m);
    }
    if (n->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in slow path
      PreserveJVMState pjvms(&kit);
      kit.set_map(slow_map);
      n = n->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, n);
      slow_map = kit.stop();
    }
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}

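// For orientation: for_method_handle_call() handles MethodHandle intrinsic
// call sites. A sketch of the Java-level shapes it sees (illustrative only):
//
//   MethodHandle mh = ...;
//   mh.invokeBasic(args...);                          // vmIntrinsics::_invokeBasic
//   MethodHandle.linkToStatic(args..., memberName);   // vmIntrinsics::_linkTo*
//
// When the MethodHandle/MemberName input is constant, the call can be turned
// into a direct call to the vmtarget, and possibly inlined.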
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  bool should_delay = C->should_delay_inlining();
  if (cg != nullptr) {
    if (should_delay) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && (AlwaysIncrementalInline ||
                              (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit) {
  PhaseGVN& gvn = kit.gvn();
  Node* arg = kit.argument(arg_nb);
  const Type* arg_type = arg->bottom_type();
  const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
  if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
    const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type);  // keep speculative part
    arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
    kit.set_argument(arg_nb, arg);
  }
  if (sig_type->is_inlinetypeptr()) {
    arg = InlineTypeNode::make_from_oop(&kit, arg, sig_type->inline_klass(), !kit.gvn().type(arg)->maybe_null());
    kit.set_argument(arg_nb, arg);
  }
}
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != nullptr) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                   "signatures mismatch");
            return nullptr;
          }

          CallGenerator* cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      int nargs = callee->arg_size();
      // Get MemberName argument:
      Node* member_name = kit.argument(nargs - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return nullptr;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          cast_argument(nargs, 0, signature->accessing_klass(), kit);
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            cast_argument(nargs, receiver_skip + j, t, kit);
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = nullptr;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node* receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters. They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type,
                                              true);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToNative:
    print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                           "native call");
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return nullptr;
}

//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool is_virtual()   const { return true; }
  virtual bool is_inline()    const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}

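// Background (informal): a predicated intrinsic guards its fast-path code with
// runtime checks. A hypothetical shape, loosely modeled on the crypto
// intrinsics that use this mechanism:
//
//   if (embeddedCipher instanceof AESCrypt)   // predicate(0)
//     ... intrinsified encryption loop ...
//   else
//     ... fall back to normally compiled Java code ...
//
// Each predicate selects its own intrinsic version; all failed predicate
// paths merge into the normal compilation path.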
JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == nullptr)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //          do_intrinsic(1)
  //    ...
  //      else
  //          do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so its JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == nullptr) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == nullptr) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return nullptr;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != nullptr, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = nullptr;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles call sites that are known to always trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}

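// Minimal usage sketch (informal): a call site that profiling shows was never
// reached can be compiled as an unconditional trap, e.g.
//
//   cg = CallGenerator::for_uncommon_trap(m, Deoptimization::Reason_unreached,
//                                         Deoptimization::Action_reinterpret);
//
// generate() then replaces the call with a deoptimization point, handing
// control back to the interpreter if the site is ever reached.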
JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note: Merged hook_up_exits into ParseGenerator::generate.)