/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
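    // An OSR compilation replaces an interpreter frame directly, so it is
    // always the root of its compile and can never appear as an inlined callee.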
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return nullptr;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  if (C->failing()) return nullptr;

  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != nullptr) ;
    return nullptr;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int                  _vtable_index;
  bool                 _separate_io_proj;
  CallDynamicJavaNode* _call_node;

 protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == nullptr) ? nullptr : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool  _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(InliningResult result, const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool      _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != nullptr) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id; // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == nullptr, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != nullptr) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(InliningResult result, const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE,
                        "late call devirtualization failed (receiver may be null)");
    }
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE,
                        "late call devirtualization failed (interface call)");
    }
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        nullptr /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != nullptr) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == nullptr || call->outcnt() == 0 ||
      call->in(0) == nullptr || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj    == call->in(0)) ||
      (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj   != nullptr && call->find_edge(callprojs.exobj)   != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // The call is marked as pure (no important side effects), but result isn't used.
  // It's safe to remove the call.
  bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);

  if (is_pure_call() && result_not_used) {
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != nullptr) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == nullptr)  return;  // no change
    if (C->failing())  return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
      result = kit.must_be_not_null(result, false);
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which guards a call with a receiver type check, dispatching
// to an (often inlined) hit path or an out-of-line missed path.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()  const { return true; }
  virtual bool      is_inline()   const { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
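  // Emit the receiver type check: an exact klass compare for profiled call
  // sites, or a subtype check for guarded calls. The hit path continues with
  // the sharpened (casted) receiver; control for a mismatch comes back on
  // slow_ctl, where _if_missed is generated. Both paths merge in a diamond below.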
  Node* slow_ctl = nullptr;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = nullptr;
  JVMState* slow_jvms = nullptr;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return nullptr;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != nullptr, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == nullptr) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == nullptr) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  bool should_delay = C->should_delay_inlining();
  if (cg != nullptr) {
    if (should_delay) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && call_site_count > 0 &&
      (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != nullptr) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                   "signatures mismatch");
            return nullptr;
          }

          CallGenerator *cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return nullptr;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = nullptr;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters. They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToNative:
    print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                           "native call");
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return nullptr;
}

//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
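// _intrinsic supplies predicates_count(), generate_predicate() and generate();
// _cg generates the normal Java call used when every predicate fails.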
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const { return true; }
  virtual bool      is_inline()    const { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == nullptr)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //        if (predicate(1))
  //            do_intrinsic(1)
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == nullptr) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == nullptr) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return nullptr;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != nullptr, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = nullptr;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces a call site with an uncommon trap.
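// Instead of emitting a call, generate() re-pushes the outgoing arguments
// (restoring the interpreter's view of the stack) and issues an uncommon
// trap, so execution deoptimizes and continues in the interpreter.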
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note: Merged hook_up_exits into ParseGenerator::generate.)