1 /*
   2  * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "ci/ciCallSite.hpp"
  27 #include "ci/ciMemberName.hpp"
  28 #include "ci/ciMethodHandle.hpp"
  29 #include "ci/ciObjArray.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "runtime/os.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "utilities/debug.hpp"
  44 
  45 // Utility function: build the TypeFunc (call signature type) for this generator's method.
  46 const TypeFunc* CallGenerator::tf() const {
  47   return TypeFunc::make(method());
  48 }
  49 
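// The overloads below all answer the same question: is the symbolic reference at
// the call site a MethodHandle intrinsic (MH.linkTo*/invokeBasic) while the method
// actually being generated is a regular (non-intrinsic) method?  Such calls need
// their symbolic info overridden when emitted out-of-line (see the
// set_override_symbolic_info() uses below).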
  50 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  51   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  52 }
  53 
  54 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  55   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  56   return is_inlined_method_handle_intrinsic(symbolic_info, m);
  57 }
  58 
  59 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  60   return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
  61 }
  62 
  63 //-----------------------------ParseGenerator---------------------------------
  64 // Internal class which handles all direct bytecode traversal.
  65 class ParseGenerator : public InlineCallGenerator {
  66 private:
  67   bool  _is_osr;
  68   float _expected_uses;
  69 
  70 public:
  71   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  72     : InlineCallGenerator(method)
  73   {
  74     _is_osr        = is_osr;
  75     _expected_uses = expected_uses;
  76     assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  77   }
  78 
  79   virtual bool      is_parse() const           { return true; }
  80   virtual JVMState* generate(JVMState* jvms);
  81   bool is_osr() const { return _is_osr; }
  82 
  83 };
  84 
  85 JVMState* ParseGenerator::generate(JVMState* jvms) {
  86   Compile* C = Compile::current();
  87 
  88   if (is_osr()) {
  89     // The JVMS for an OSR has a single argument (see its TypeFunc).
  90     assert(jvms->depth() == 1, "no inline OSR");
  91   }
  92 
  93   if (C->failing()) {
  94     return nullptr;  // bailing out of the compile; do not try to parse
  95   }
  96 
  97   Parse parser(jvms, method(), _expected_uses);
  98   if (C->failing()) return nullptr;
  99 
 100   // Grab signature for matching/allocation
 101   GraphKit& exits = parser.exits();
 102 
 103   if (C->failing()) {
 104     while (exits.pop_exception_state() != nullptr) ;  // drain any pending exception states
 105     return nullptr;
 106   }
 107 
 108   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 109 
 110   // Simply return the exit state of the parser,
 111   // augmented by any exceptional states.
 112   return exits.transfer_exceptions_into_jvms();
 113 }
 114 
 115 //---------------------------DirectCallGenerator------------------------------
 116 // Internal class which handles all out-of-line calls w/o receiver type checks.
 117 class DirectCallGenerator : public CallGenerator {
 118  private:
 119   CallStaticJavaNode* _call_node;
 120   // Force separate memory and I/O projections for the exceptional
 121   // paths to facilitate late inlining.
 122   bool                _separate_io_proj;
 123 
 124 protected:
 125   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 126 
 127  public:
 128   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 129     : CallGenerator(method),
 130       _separate_io_proj(separate_io_proj)
 131   {
 132   }
 133   virtual JVMState* generate(JVMState* jvms);
 134 
 135   virtual CallNode* call_node() const { return _call_node; }
 136   virtual CallGenerator* with_call_node(CallNode* call) {
 137     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 138     dcg->set_call_node(call->as_CallStaticJava());
 139     return dcg;
 140   }
 141 };
 142 
 143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 144   GraphKit kit(jvms);
 145   bool is_static = method()->is_static();
 146   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 147                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 148 
 149   if (kit.C->log() != nullptr) {
 150     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 151   }
 152 
 153   CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
 154   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 155     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 156     // additional information about the method being invoked should be attached
 157     // to the call site to make resolution logic work
 158     // (see SharedRuntime::resolve_static_call_C).
 159     call->set_override_symbolic_info(true);
 160   }
 161   _call_node = call;  // Save the call node in case we need it later
 162   if (!is_static) {
 163     // Make an explicit receiver null_check as part of this call.
 164     // Since we share a map with the caller, his JVMS gets adjusted.
 165     kit.null_check_receiver_before_call(method());
 166     if (kit.stopped()) {
 167       // And dump it back to the caller, decorated with any exceptions:
 168       return kit.transfer_exceptions_into_jvms();
 169     }
 170     // Mark the call node as virtual, sort of:
 171     call->set_optimized_virtual(true);
 172   }
 173   kit.set_arguments_for_java_call(call);
 174   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 175   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 176   kit.push_node(method()->return_type()->basic_type(), ret);
 177   return kit.transfer_exceptions_into_jvms();
 178 }
 179 
 180 //--------------------------VirtualCallGenerator------------------------------
 181 // Internal class which handles all out-of-line calls checking receiver type.
 182 class VirtualCallGenerator : public CallGenerator {
 183 private:
 184   int _vtable_index;
 185   bool _separate_io_proj;
 186   CallDynamicJavaNode* _call_node;
 187 
 188 protected:
 189   void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
 190 
 191 public:
 192   VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
 193     : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
 194   {
 195     assert(vtable_index == Method::invalid_vtable_index ||
 196            vtable_index >= 0, "either invalid or usable");
 197   }
 198   virtual bool      is_virtual() const          { return true; }
 199   virtual JVMState* generate(JVMState* jvms);
 200 
 201   virtual CallNode* call_node() const { return _call_node; }
 202   int vtable_index() const { return _vtable_index; }
 203 
 204   virtual CallGenerator* with_call_node(CallNode* call) {
 205     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 206     cg->set_call_node(call->as_CallDynamicJava());
 207     return cg;
 208   }
 209 };
 210 
 211 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 212   GraphKit kit(jvms);
 213   Node* receiver = kit.argument(0);
 214 
 215   if (kit.C->log() != nullptr) {
 216     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 217   }
 218 
 219   // If the receiver is a constant null, do not torture the system
 220   // by attempting to call through it.  The compile will proceed
 221   // correctly, but may bail out in final_graph_reshaping, because
 222   // the call instruction will have a seemingly deficient out-count.
 223   // (The bailout says something misleading about an "infinite loop".)
 224   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 225     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 226     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 227     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 228     kit.inc_sp(arg_size);  // restore arguments
 229     kit.uncommon_trap(Deoptimization::Reason_null_check,
 230                       Deoptimization::Action_none,
 231                       nullptr, "null receiver");
 232     return kit.transfer_exceptions_into_jvms();
 233   }
 234 
 235   // Ideally we would unconditionally do a null check here and let it
 236   // be converted to an implicit check based on profile information.
 237   // However currently the conversion to implicit null checks in
 238   // Block::implicit_null_check() only looks for loads and stores, not calls.
 239   ciMethod *caller = kit.method();
 240   ciMethodData *caller_md = (caller == nullptr) ? nullptr : caller->method_data();
 241   if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
 242        ((ImplicitNullCheckThreshold > 0) && caller_md &&
 243        (caller_md->trap_count(Deoptimization::Reason_null_check)
 244        >= (uint)ImplicitNullCheckThreshold))) {
 245     // Make an explicit receiver null_check as part of this call.
 246     // Since we share a map with the caller, his JVMS gets adjusted.
 247     receiver = kit.null_check_receiver_before_call(method());
 248     if (kit.stopped()) {
 249       // And dump it back to the caller, decorated with any exceptions:
 250       return kit.transfer_exceptions_into_jvms();
 251     }
 252   }
 253 
 254   assert(!method()->is_static(), "virtual call must not be to static");
 255   assert(!method()->is_final(), "virtual call should not be to final");
 256   assert(!method()->is_private(), "virtual call should not be to private");
 257   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 258          "no vtable calls if +UseInlineCaches ");
 259   address target = SharedRuntime::get_resolve_virtual_call_stub();
 260   // Normal inline cache used for call
 261   CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
 262   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 263     // To be able to issue a direct call (optimized virtual or virtual)
 264     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
 265     // about the method being invoked should be attached to the call site to
 266     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
 267     call->set_override_symbolic_info(true);
 268   }
 269   _call_node = call;  // Save the call node in case we need it later
 270 
 271   kit.set_arguments_for_java_call(call);
 272   kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
 273   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 274   kit.push_node(method()->return_type()->basic_type(), ret);
 275 
 276   // Represent the effect of an implicit receiver null_check
 277   // as part of this call.  Since we share a map with the caller,
 278   // his JVMS gets adjusted.
 279   kit.cast_not_null(receiver);
 280   return kit.transfer_exceptions_into_jvms();
 281 }
 282 
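// Factory for ordinary inlining: parse the callee's bytecodes directly into the
// caller's graph, provided the method is parseable at all.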
 283 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 284   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 285   return new ParseGenerator(m, expected_uses);
 286 }
 287 
 288 // As a special case, the JVMS passed to this CallGenerator is
 289 // for the method execution already in progress, not just the JVMS
 290 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 291 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
 292   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 293   float past_uses = m->interpreter_invocation_count();
 294   float expected_uses = past_uses;
 295   return new ParseGenerator(m, expected_uses, true);
 296 }
 297 
 298 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
 299   assert(!m->is_abstract(), "for_direct_call mismatch");
 300   return new DirectCallGenerator(m, separate_io_proj);
 301 }
 302 
 303 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 304   assert(!m->is_static(), "for_virtual_call mismatch");
 305   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 306   return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
 307 }
 308 
 309 // Allow inlining decisions to be delayed
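// A late-inline generator first emits a regular out-of-line call (requesting
// separate memory and I/O projections for the exceptional path) and registers
// itself with Compile; after the main parse has finished, do_late_inline() may
// replace that call with the body produced by the wrapped inline CallGenerator.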
 310 class LateInlineCallGenerator : public DirectCallGenerator {
 311  private:
 312   jlong _unique_id;   // unique id for log compilation
 313   bool _is_pure_call; // a hint that the call doesn't have important side effects to care about
 314 
 315  protected:
 316   CallGenerator* _inline_cg;
 317   virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
 318   virtual CallGenerator* inline_cg() const { return _inline_cg; }
 319   virtual bool is_pure_call() const { return _is_pure_call; }
 320 
 321  public:
 322   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
 323     DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}
 324 
 325   virtual bool is_late_inline() const { return true; }
 326 
 327   // Convert the CallStaticJava into an inline
 328   virtual void do_late_inline();
 329 
 330   virtual JVMState* generate(JVMState* jvms) {
 331     Compile *C = Compile::current();
 332 
 333     C->log_inline_id(this);
 334 
 335     // Record that this call site should be revisited once the main
 336     // parse is finished.
 337     if (!is_mh_late_inline()) {
 338       C->add_late_inline(this);
 339     }
 340 
 341     // Emit the CallStaticJava and request separate projections so
 342     // that the late inlining logic can distinguish between fall
 343     // through and exceptional uses of the memory and io projections
 344     // as is done for allocations and macro expansion.
 345     return DirectCallGenerator::generate(jvms);
 346   }
 347 
 348   virtual void set_unique_id(jlong id) {
 349     _unique_id = id;
 350   }
 351 
 352   virtual jlong unique_id() const {
 353     return _unique_id;
 354   }
 355 
 356   virtual CallGenerator* with_call_node(CallNode* call) {
 357     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 358     cg->set_call_node(call->as_CallStaticJava());
 359     return cg;
 360   }
 361 };
 362 
 363 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 364   return new LateInlineCallGenerator(method, inline_cg);
 365 }
 366 
 367 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 368   ciMethod* _caller;
 369   bool _input_not_const;
 370 
 371   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 372 
 373  public:
 374   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 375     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
 376 
 377   virtual bool is_mh_late_inline() const { return true; }
 378 
 379   // Convert the CallStaticJava into an inline
 380   virtual void do_late_inline();
 381 
 382   virtual JVMState* generate(JVMState* jvms) {
 383     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
 384 
 385     Compile* C = Compile::current();
 386     if (_input_not_const) {
 387       // Inlining won't be possible, so no need to enqueue right now.
 388       call_node()->set_generator(this);
 389     } else {
 390       C->add_late_inline(this);
 391     }
 392     return new_jvms;
 393   }
 394 
 395   virtual CallGenerator* with_call_node(CallNode* call) {
 396     LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
 397     cg->set_call_node(call->as_CallStaticJava());
 398     return cg;
 399   }
 400 };
 401 
 402 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 403   // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
 404   // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
 405   // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
 406   // of late inlining with exceptions.
 407   assert(!jvms->method()->has_exception_handlers() ||
 408          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 409           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 410   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 411   bool allow_inline = C->inlining_incrementally();
 412   bool input_not_const = true;
 413   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 414   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 415 
 416   if (cg != nullptr) {
 417     if (!allow_inline) {
 418       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
 419                                   "late method handle call resolution");
 420     }
 421     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 422     _inline_cg = cg;
 423     return true;
 424   } else {
 425     // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
 426     // unless there's a signature mismatch between caller and callee. If that failure occurs, there's not much to improve later,
 427     // so don't reinstall the generator, to avoid ping-ponging it between IGVN and incremental inlining indefinitely.
 428     return false;
 429   }
 430 }
 431 
 432 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 433   assert(IncrementalInlineMH, "required");
 434   Compile::current()->mark_has_mh_late_inlines();
 435   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 436   return cg;
 437 }
 438 
 439 // Allow inlining decisions to be delayed
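// Virtual-call flavor of late inlining: the exact callee may become known only
// after parsing (it is recorded via set_callee_method() from
// CallDynamicJavaNode::Ideal()), at which point do_late_inline_check() tries to
// devirtualize and possibly inline the call.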
 440 class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 441  private:
 442   jlong          _unique_id;   // unique id for log compilation
 443   CallGenerator* _inline_cg;
 444   ciMethod*      _callee;
 445   bool           _is_pure_call;
 446   float          _prof_factor;
 447 
 448  protected:
 449   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 450   virtual CallGenerator* inline_cg() const { return _inline_cg; }
 451   virtual bool is_pure_call() const { return _is_pure_call; }
 452 
 453  public:
 454   LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
 455   : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
 456     _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
 457     assert(IncrementalInlineVirtual, "required");
 458   }
 459 
 460   virtual bool is_late_inline() const { return true; }
 461 
 462   virtual bool is_virtual_late_inline() const { return true; }
 463 
 464   // Convert the CallDynamicJava into an inline
 465   virtual void do_late_inline();
 466 
 467   virtual ciMethod* callee_method() {
 468     return _callee;
 469   }
 470 
 471   virtual void set_callee_method(ciMethod* m) {
 472     assert(_callee == nullptr || _callee == m, "repeated inline attempt with different callee");
 473     _callee = m;
 474   }
 475 
 476   virtual JVMState* generate(JVMState* jvms) {
 477     // Emit the CallDynamicJava and request separate projections so
 478     // that the late inlining logic can distinguish between fall
 479     // through and exceptional uses of the memory and io projections
 480     // as is done for allocations and macro expansion.
 481     JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
 482     if (call_node() != nullptr) {
 483       call_node()->set_generator(this);
 484     }
 485     return new_jvms;
 486   }
 487 
 488   virtual void set_unique_id(jlong id) {
 489     _unique_id = id;
 490   }
 491 
 492   virtual jlong unique_id() const {
 493     return _unique_id;
 494   }
 495 
 496   virtual CallGenerator* with_call_node(CallNode* call) {
 497     LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
 498     cg->set_call_node(call->as_CallDynamicJava());
 499     return cg;
 500   }
 501 };
 502 
 503 bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 504   // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
 505   // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().
 506 
 507   // Implicit receiver null checks introduce problems when exception states are combined.
 508   Node* receiver = jvms->map()->argument(jvms, 0);
 509   const Type* recv_type = C->initial_gvn()->type(receiver);
 510   if (recv_type->maybe_null()) {
 511     C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
 512                                 "late call devirtualization failed (receiver may be null)");
 513     return false;
 514   }
 515   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 516   bool allow_inline = C->inlining_incrementally();
 517   if (!allow_inline && _callee->holder()->is_interface()) {
 518     // Don't convert the interface call to a direct call guarded by an interface subtype check.
 519     C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
 520                                 "late call devirtualization failed (interface call)");
 521     return false;
 522   }
 523   CallGenerator* cg = C->call_generator(_callee,
 524                                         vtable_index(),
 525                                         false /*call_does_dispatch*/,
 526                                         jvms,
 527                                         allow_inline,
 528                                         _prof_factor,
 529                                         nullptr /*speculative_receiver_type*/,
 530                                         true /*allow_intrinsics*/);
 531 
 532   if (cg != nullptr) {
 533     if (!allow_inline) {
 534       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization");
 535     }
 536     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 537     _inline_cg = cg;
 538     return true;
 539   } else {
 540     // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
 541     assert(false, "no progress");
 542     return false;
 543   }
 544 }
 545 
 546 CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
 547   assert(IncrementalInlineVirtual, "required");
 548   assert(!m->is_static(), "for_virtual_call mismatch");
 549   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 550   return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
 551 }
 552 
 553 void LateInlineCallGenerator::do_late_inline() {
 554   CallGenerator::do_late_inline_helper();
 555 }
 556 
 557 void LateInlineMHCallGenerator::do_late_inline() {
 558   CallGenerator::do_late_inline_helper();
 559 }
 560 
 561 void LateInlineVirtualCallGenerator::do_late_inline() {
 562   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 563   CallGenerator::do_late_inline_helper();
 564 }
 565 
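// Shared implementation for all late-inline generators: verify that the call node
// is still live and its inputs are sane, rebuild a JVMState and map from the
// call's inputs, run the delayed inline CallGenerator over that state, and finally
// splice the result back in place of the call via replace_call().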
 566 void CallGenerator::do_late_inline_helper() {
 567   assert(is_late_inline(), "only late inline allowed");
 568 
 569   // Can't inline it
 570   CallNode* call = call_node();
 571   if (call == nullptr || call->outcnt() == 0 ||
 572       call->in(0) == nullptr || call->in(0)->is_top()) {
 573     return;
 574   }
 575 
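  // If any required (non-HALF) argument has become top, a preceding path died;
  // that only happens during incremental inlining, so just give up on this call.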
 576   const TypeTuple *r = call->tf()->domain();
 577   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 578     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 579       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 580       return;
 581     }
 582   }
 583 
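  // Likewise give up if the memory state feeding the call is dead.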
 584   if (call->in(TypeFunc::Memory)->is_top()) {
 585     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 586     return;
 587   }
 588   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 589     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 590     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 591       return; // dead path
 592     }
 593   }
 594 
 595   // check for unreachable loop
 596   CallProjections callprojs;
 597   // Similar to incremental inlining, don't assert that all call
 598   // projections are still there for post-parse call devirtualization.
 599   bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
 600   call->extract_projections(&callprojs, true, do_asserts);
 601   if ((callprojs.fallthrough_catchproj == call->in(0)) ||
 602       (callprojs.catchall_catchproj    == call->in(0)) ||
 603       (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 604       (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
 605       (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 606       (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 607       (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
 608       (callprojs.exobj   != nullptr && call->find_edge(callprojs.exobj) != -1)) {
 609     return;
 610   }
 611 
 612   Compile* C = Compile::current();
 613   // Remove inlined methods from Compiler's lists.
 614   if (call->is_macro()) {
 615     C->remove_macro_node(call);
 616   }
 617 
 618   // If the call is marked as pure (no important side effects) and its result
 619   // isn't used, it's safe to remove the call.
 620   bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
 621 
 622   if (is_pure_call() && result_not_used) {
 623     GraphKit kit(call->jvms());
 624     kit.replace_call(call, C->top(), true, do_asserts);
 625   } else {
 626     // Make a clone of the JVMState that is appropriate for driving a parse
 627     JVMState* old_jvms = call->jvms();
 628     JVMState* jvms = old_jvms->clone_shallow(C);
 629     uint size = call->req();
 630     SafePointNode* map = new SafePointNode(size, jvms);
 631     for (uint i1 = 0; i1 < size; i1++) {
 632       map->init_req(i1, call->in(i1));
 633     }
 634 
 635     // Make sure the state is a MergeMem for parsing.
 636     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 637       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 638       C->initial_gvn()->set_type_bottom(mem);
 639       map->set_req(TypeFunc::Memory, mem);
 640     }
 641 
 642     uint nargs = method()->arg_size();
 643     // blow away old call arguments
 644     Node* top = C->top();
 645     for (uint i1 = 0; i1 < nargs; i1++) {
 646       map->set_req(TypeFunc::Parms + i1, top);
 647     }
 648     jvms->set_map(map);
 649 
 650     // Make enough space in the expression stack to transfer
 651     // the incoming arguments and return value.
 652     map->ensure_stack(jvms, jvms->method()->max_stack());
 653     for (uint i1 = 0; i1 < nargs; i1++) {
 654       map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
 655     }
 656 
 657     C->log_late_inline(this);
 658 
 659     // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
 660     if (!do_late_inline_check(C, jvms)) {
 661       map->disconnect_inputs(C);
 662       return;
 663     }
 664 
 665     // Setup default node notes to be picked up by the inlining
 666     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 667     if (old_nn != nullptr) {
 668       Node_Notes* entry_nn = old_nn->clone(C);
 669       entry_nn->set_jvms(jvms);
 670       C->set_default_node_notes(entry_nn);
 671     }
 672 
 673     // Now perform the inlining using the synthesized JVMState
 674     JVMState* new_jvms = inline_cg()->generate(jvms);
 675     if (new_jvms == nullptr)  return;  // no change
 676     if (C->failing())      return;
 677 
 678     if (is_mh_late_inline()) {
 679       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
 680     } else if (is_string_late_inline()) {
 681       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
 682     } else if (is_boxing_late_inline()) {
 683       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
 684     } else if (is_vector_reboxing_late_inline()) {
 685       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
 686     } else {
 687       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
 688     }
 689 
 690     // Capture any exceptional control flow
 691     GraphKit kit(new_jvms);
 692 
 693     // Find the result object
 694     Node* result = C->top();
 695     int   result_size = method()->return_type()->size();
 696     if (result_size != 0 && !kit.stopped()) {
 697       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 698     }
 699 
 700     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 701       result = kit.must_be_not_null(result, false);
 702     }
 703 
 704     if (inline_cg()->is_inline()) {
 705       C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
 706       C->env()->notice_inlined_method(inline_cg()->method());
 707     }
 708     C->set_inlining_progress(true);
 709     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 710     kit.replace_call(call, result, true, do_asserts);
 711   }
 712 }
 713 
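// String-concatenation helper calls are kept on their own late-inline list
// (C->add_string_late_inline() below) so the compiler can process them
// separately from ordinary late inlines.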
 714 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 715 
 716  public:
 717   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 718     LateInlineCallGenerator(method, inline_cg) {}
 719 
 720   virtual JVMState* generate(JVMState* jvms) {
 721     Compile *C = Compile::current();
 722 
 723     C->log_inline_id(this);
 724 
 725     C->add_string_late_inline(this);
 726 
 727     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 728     return new_jvms;
 729   }
 730 
 731   virtual bool is_string_late_inline() const { return true; }
 732 
 733   virtual CallGenerator* with_call_node(CallNode* call) {
 734     LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
 735     cg->set_call_node(call->as_CallStaticJava());
 736     return cg;
 737   }
 738 };
 739 
 740 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 741   return new LateInlineStringCallGenerator(method, inline_cg);
 742 }
 743 
 744 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 745 
 746  public:
 747   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 748     LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}
 749 
 750   virtual JVMState* generate(JVMState* jvms) {
 751     Compile *C = Compile::current();
 752 
 753     C->log_inline_id(this);
 754 
 755     C->add_boxing_late_inline(this);
 756 
 757     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 758     return new_jvms;
 759   }
 760 
 761   virtual bool is_boxing_late_inline() const { return true; }
 762 
 763   virtual CallGenerator* with_call_node(CallNode* call) {
 764     LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
 765     cg->set_call_node(call->as_CallStaticJava());
 766     return cg;
 767   }
 768 };
 769 
 770 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 771   return new LateInlineBoxingCallGenerator(method, inline_cg);
 772 }
 773 
 774 class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {
 775 
 776  public:
 777   LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 778     LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}
 779 
 780   virtual JVMState* generate(JVMState* jvms) {
 781     Compile *C = Compile::current();
 782 
 783     C->log_inline_id(this);
 784 
 785     C->add_vector_reboxing_late_inline(this);
 786 
 787     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 788     return new_jvms;
 789   }
 790 
 791   virtual bool is_vector_reboxing_late_inline() const { return true; }
 792 
 793   virtual CallGenerator* with_call_node(CallNode* call) {
 794     LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
 795     cg->set_call_node(call->as_CallStaticJava());
 796     return cg;
 797   }
 798 };
 799 
 801 CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 802   return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
 803 }
 804 
 805 //------------------------PredictedCallGenerator------------------------------
 806 // Internal class which guards a call with a receiver type check and dispatches to a hit or missed CallGenerator.
 807 class PredictedCallGenerator : public CallGenerator {
 808   ciKlass*       _predicted_receiver;
 809   CallGenerator* _if_missed;
 810   CallGenerator* _if_hit;
 811   float          _hit_prob;
 812   bool           _exact_check;
 813 
 814 public:
 815   PredictedCallGenerator(ciKlass* predicted_receiver,
 816                          CallGenerator* if_missed,
 817                          CallGenerator* if_hit, bool exact_check,
 818                          float hit_prob)
 819     : CallGenerator(if_missed->method())
 820   {
 821     // The call profile data may predict the hit_prob as extreme as 0 or 1.
 822     // Remove the extreme values from the range.
 823     if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
 824     if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
 825 
 826     _predicted_receiver = predicted_receiver;
 827     _if_missed          = if_missed;
 828     _if_hit             = if_hit;
 829     _hit_prob           = hit_prob;
 830     _exact_check        = exact_check;
 831   }
 832 
 833   virtual bool      is_virtual()   const    { return true; }
 834   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
 835   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 836 
 837   virtual JVMState* generate(JVMState* jvms);
 838 };
 839 
 840 
 841 CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
 842                                                  CallGenerator* if_missed,
 843                                                  CallGenerator* if_hit,
 844                                                  float hit_prob) {
 845   return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
 846                                     /*exact_check=*/true, hit_prob);
 847 }
 848 
 849 CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
 850                                                CallGenerator* if_missed,
 851                                                CallGenerator* if_hit) {
 852   return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
 853                                     /*exact_check=*/false, PROB_ALWAYS);
 854 }
 855 
 856 JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
 857   GraphKit kit(jvms);
 858   PhaseGVN& gvn = kit.gvn();
 859   // We need an explicit receiver null_check before checking its type.
 860   // We share a map with the caller, so his JVMS gets adjusted.
 861   Node* receiver = kit.argument(0);
 862   CompileLog* log = kit.C->log();
 863   if (log != nullptr) {
 864     log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
 865               jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
 866   }
 867 
 868   receiver = kit.null_check_receiver_before_call(method());
 869   if (kit.stopped()) {
 870     return kit.transfer_exceptions_into_jvms();
 871   }
 872 
 873   // Make a copy of the replaced nodes in case we need to restore them
 874   ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
 875   replaced_nodes.clone();
 876 
 877   Node* casted_receiver = receiver;  // will get updated in place...
 878   Node* slow_ctl = nullptr;
 879   if (_exact_check) {
 880     slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
 881                                        &casted_receiver);
 882   } else {
 883     slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
 884                                           &casted_receiver);
 885   }
 886 
 887   SafePointNode* slow_map = nullptr;
 888   JVMState* slow_jvms = nullptr;
 889   { PreserveJVMState pjvms(&kit);
 890     kit.set_control(slow_ctl);
 891     if (!kit.stopped()) {
 892       slow_jvms = _if_missed->generate(kit.sync_jvms());
 893       if (kit.failing())
 894         return nullptr;  // might happen because of NodeCountInliningCutoff
 895       assert(slow_jvms != nullptr, "must be");
 896       kit.add_exception_states_from(slow_jvms);
 897       kit.set_map(slow_jvms->map());
 898       if (!kit.stopped())
 899         slow_map = kit.stop();
 900     }
 901   }
 902 
 903   if (kit.stopped()) {
 904     // Instance does not match the predicted type.
 905     kit.set_jvms(slow_jvms);
 906     return kit.transfer_exceptions_into_jvms();
 907   }
 908 
 909   // Fall through if the instance matches the desired type.
 910   kit.replace_in_map(receiver, casted_receiver);
 911 
 912   // Make the hot call:
 913   JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
 914   if (kit.failing()) {
 915     return nullptr;
 916   }
 917   if (new_jvms == nullptr) {
 918     // Inline failed, so make a direct call.
 919     assert(_if_hit->is_inline(), "must have been a failed inline");
 920     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
 921     new_jvms = cg->generate(kit.sync_jvms());
 922   }
 923   kit.add_exception_states_from(new_jvms);
 924   kit.set_jvms(new_jvms);
 925 
 926   // Need to merge slow and fast?
 927   if (slow_map == nullptr) {
 928     // The fast path is the only path remaining.
 929     return kit.transfer_exceptions_into_jvms();
 930   }
 931 
 932   if (kit.stopped()) {
 933     // Inlined method threw an exception, so it's just the slow path after all.
 934     kit.set_jvms(slow_jvms);
 935     return kit.transfer_exceptions_into_jvms();
 936   }
 937 
 938   // There are 2 branches and the replaced nodes are only valid on
 939   // one: restore the replaced nodes to what they were before the
 940   // branch.
 941   kit.map()->set_replaced_nodes(replaced_nodes);
 942 
 943   // Finish the diamond.
 944   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
 945   RegionNode* region = new RegionNode(3);
 946   region->init_req(1, kit.control());
 947   region->init_req(2, slow_map->control());
 948   kit.set_control(gvn.transform(region));
 949   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
 950   iophi->set_req(2, slow_map->i_o());
 951   kit.set_i_o(gvn.transform(iophi));
 952   // Merge memory
 953   kit.merge_memory(slow_map->merged_memory(), region, 2);
 954   // Transform new memory Phis.
 955   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
 956     Node* phi = mms.memory();
 957     if (phi->is_Phi() && phi->in(0) == region) {
 958       mms.set_memory(gvn.transform(phi));
 959     }
 960   }
 961   uint tos = kit.jvms()->stkoff() + kit.sp();
 962   uint limit = slow_map->req();
 963   for (uint i = TypeFunc::Parms; i < limit; i++) {
 964     // Skip unused stack slots; fast forward to monoff();
 965     if (i == tos) {
 966       i = kit.jvms()->monoff();
 967       if (i >= limit)  break;
 968     }
 969     Node* m = kit.map()->in(i);
 970     Node* n = slow_map->in(i);
 971     if (m != n) {
 972       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 973       Node* phi = PhiNode::make(region, m, t);
 974       phi->set_req(2, n);
 975       kit.map()->set_req(i, gvn.transform(phi));
 976     }
 977   }
 978   return kit.transfer_exceptions_into_jvms();
 979 }
 980 
 981 
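// Build a generator for a MethodHandle intrinsic call site: try to resolve and
// inline the target eagerly; otherwise fall back to a late-inline MH generator
// (when IncrementalInlineMH allows it) or to a plain out-of-line direct call.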
 982 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
 983   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
 984   bool input_not_const;
 985   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
 986   Compile* C = Compile::current();
 987   bool should_delay = C->should_delay_inlining();
 988   if (cg != nullptr) {
 989     if (should_delay && IncrementalInlineMH) {
 990       return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
 991     } else {
 992       return cg;
 993     }
 994   }
 995   int bci = jvms->bci();
 996   ciCallProfile profile = caller->call_profile_at_bci(bci);
 997   int call_site_count = caller->scale_count(profile.count());
 998 
 999   if (IncrementalInlineMH && call_site_count > 0 &&
1000       (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1001     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1002   } else {
1003     // Out-of-line call.
1004     return CallGenerator::for_direct_call(callee);
1005   }
1006 }
1007 
1008 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1009   GraphKit kit(jvms);
1010   PhaseGVN& gvn = kit.gvn();
1011   Compile* C = kit.C;
1012   vmIntrinsics::ID iid = callee->intrinsic_id();
1013   input_not_const = true;
1014   if (StressMethodHandleLinkerInlining) {
1015     allow_inline = false;
1016   }
1017   switch (iid) {
1018   case vmIntrinsics::_invokeBasic:
1019     {
1020       // Get MethodHandle receiver:
1021       Node* receiver = kit.argument(0);
1022       if (receiver->Opcode() == Op_ConP) {
1023         input_not_const = false;
1024         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1025         if (recv_toop != nullptr) {
1026           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1027           const int vtable_index = Method::invalid_vtable_index;
1028 
1029           if (!ciMethod::is_consistent_info(callee, target)) {
1030             print_inlining_failure(C, callee, jvms, "signatures mismatch");
1031             return nullptr;
1032           }
1033 
1034           CallGenerator *cg = C->call_generator(target, vtable_index,
1035                                                 false /* call_does_dispatch */,
1036                                                 jvms,
1037                                                 allow_inline,
1038                                                 PROB_ALWAYS);
1039           return cg;
1040         } else {
1041           assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1042                  Type::str(receiver->bottom_type()));
1043           print_inlining_failure(C, callee, jvms, "receiver is always null");
1044         }
1045       } else {
1046         print_inlining_failure(C, callee, jvms, "receiver not constant");
1047       }
1048   } break;
1049 
1050   case vmIntrinsics::_linkToVirtual:
1051   case vmIntrinsics::_linkToStatic:
1052   case vmIntrinsics::_linkToSpecial:
1053   case vmIntrinsics::_linkToInterface:
1054     {
1055       // Get MemberName argument:
1056       Node* member_name = kit.argument(callee->arg_size() - 1);
1057       if (member_name->Opcode() == Op_ConP) {
1058         input_not_const = false;
1059         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1060         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1061 
1062         if (!ciMethod::is_consistent_info(callee, target)) {
1063           print_inlining_failure(C, callee, jvms, "signatures mismatch");
1064           return nullptr;
1065         }
1066 
1067         // In lambda forms we erase signature types to avoid resolving issues
1068         // involving class loaders.  When we optimize a method handle invoke
1069         // to a direct call we must cast the receiver and arguments to its
1070         // actual types.
1071         ciSignature* signature = target->signature();
1072         const int receiver_skip = target->is_static() ? 0 : 1;
1073         // Cast receiver to its type.
1074         if (!target->is_static()) {
1075           Node* recv = kit.argument(0);
1076           Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());
1077           if (casted_recv->is_top()) {
1078             print_inlining_failure(C, callee, jvms, "argument types mismatch");
1079             return nullptr; // FIXME: effectively dead; issue a halt node instead
1080           } else if (casted_recv != recv) {
1081             kit.set_argument(0, casted_recv);
1082           }
1083         }
1084         // Cast reference arguments to their types.
1085         for (int i = 0, j = 0; i < signature->count(); i++) {
1086           ciType* t = signature->type_at(i);
1087           if (t->is_klass()) {
1088             Node* arg = kit.argument(receiver_skip + j);
1089             Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass());
1090             if (casted_arg->is_top()) {
1091               print_inlining_failure(C, callee, jvms, "argument types mismatch");
1092               return nullptr; // FIXME: effectively dead; issue a halt node instead
1093             } else if (casted_arg != arg) {
1094               kit.set_argument(receiver_skip + j, casted_arg);
1095             }
1096           }
1097           j += t->size();  // long and double take two slots
1098         }
1099 
1100         // Try to get the most accurate receiver type
1101         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1102         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1103         int  vtable_index       = Method::invalid_vtable_index;
1104         bool call_does_dispatch = false;
1105 
1106         ciKlass* speculative_receiver_type = nullptr;
1107         if (is_virtual_or_interface) {
1108           ciInstanceKlass* klass = target->holder();
1109           Node*             receiver_node = kit.argument(0);
1110           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1111           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1112           // optimize_virtual_call() takes 2 different holder
1113           // arguments for a corner case that doesn't apply here (see
1114           // Parse::do_call())
1115           target = C->optimize_virtual_call(caller, klass, klass,
1116                                             target, receiver_type, is_virtual,
1117                                             call_does_dispatch, vtable_index, // out-parameters
1118                                             false /* check_access */);
1119           // We lack profiling at this call but type speculation may
1120           // provide us with a type
1121           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1122         }
1123         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1124                                               allow_inline,
1125                                               PROB_ALWAYS,
1126                                               speculative_receiver_type);
1127         return cg;
1128       } else {
1129         print_inlining_failure(C, callee, jvms, "member_name not constant");
1130       }
1131   } break;
1132 
1133   case vmIntrinsics::_linkToNative:
1134     print_inlining_failure(C, callee, jvms, "native call");
1135     break;
1136 
1137   default:
1138     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1139     break;
1140   }
1141   return nullptr;
1142 }
1143 
1144 //------------------------PredicatedIntrinsicGenerator------------------------------
1145 // Internal class which handles all predicated Intrinsic calls.
1146 class PredicatedIntrinsicGenerator : public CallGenerator {
1147   CallGenerator* _intrinsic;
1148   CallGenerator* _cg;
1149 
1150 public:
1151   PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1152                                CallGenerator* cg)
1153     : CallGenerator(cg->method())
1154   {
1155     _intrinsic = intrinsic;
1156     _cg        = cg;
1157   }
1158 
1159   virtual bool      is_virtual()   const    { return true; }
1160   virtual bool      is_inline()    const    { return true; }
1161   virtual bool      is_intrinsic() const    { return true; }
1162 
1163   virtual JVMState* generate(JVMState* jvms);
1164 };
1165 
1166 
1167 CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1168                                                        CallGenerator* cg) {
1169   return new PredicatedIntrinsicGenerator(intrinsic, cg);
1170 }
1171 
1172 
1173 JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1174   // The code we want to generate here is:
1175   //    if (receiver == nullptr)
1176   //        uncommon_trap
1177   //    if (predicate(0))
1178   //        do_intrinsic(0)
1179   //    else
1180   //    if (predicate(1))
1181   //        do_intrinsic(1)
1182   //    ...
1183   //    else
1184   //        do_java_comp
1185 
1186   GraphKit kit(jvms);
1187   PhaseGVN& gvn = kit.gvn();
1188 
1189   CompileLog* log = kit.C->log();
1190   if (log != nullptr) {
1191     log->elem("predicated_intrinsic bci='%d' method='%d'",
1192               jvms->bci(), log->identify(method()));
1193   }
1194 
1195   if (!method()->is_static()) {
1196     // We need an explicit receiver null_check before checking its type in predicate.
1197     // We share a map with the caller, so his JVMS gets adjusted.
1198     Node* receiver = kit.null_check_receiver_before_call(method());
1199     if (kit.stopped()) {
1200       return kit.transfer_exceptions_into_jvms();
1201     }
1202   }
1203 
1204   int n_predicates = _intrinsic->predicates_count();
1205   assert(n_predicates > 0, "sanity");
1206 
1207   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1208 
1209   // Region for normal compilation code if intrinsic failed.
1210   Node* slow_region = new RegionNode(1);
1211 
1212   int results = 0;
1213   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1214 #ifdef ASSERT
1215     JVMState* old_jvms = kit.jvms();
1216     SafePointNode* old_map = kit.map();
1217     Node* old_io  = old_map->i_o();
1218     Node* old_mem = old_map->memory();
1219     Node* old_exc = old_map->next_exception();
1220 #endif
1221     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1222 #ifdef ASSERT
1223     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1224     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1225     SafePointNode* new_map = kit.map();
1226     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1227     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1228     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1229 #endif
1230     if (!kit.stopped()) {
1231       PreserveJVMState pjvms(&kit);
1232       // Generate intrinsic code:
1233       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1234       if (kit.failing()) {
1235         return nullptr;
1236       }
1237       if (new_jvms == nullptr) {
1238         // Intrinsic failed, use normal compilation path for this predicate.
1239         slow_region->add_req(kit.control());
1240       } else {
1241         kit.add_exception_states_from(new_jvms);
1242         kit.set_jvms(new_jvms);
1243         if (!kit.stopped()) {
1244           result_jvms[results++] = kit.jvms();
1245         }
1246       }
1247     }
1248     if (else_ctrl == nullptr) {
1249       else_ctrl = kit.C->top();
1250     }
1251     kit.set_control(else_ctrl);
1252   }
1253   if (!kit.stopped()) {
1254     // Final 'else' after predicates.
1255     slow_region->add_req(kit.control());
1256   }
1257   if (slow_region->req() > 1) {
1258     PreserveJVMState pjvms(&kit);
1259     // Generate normal compilation code:
1260     kit.set_control(gvn.transform(slow_region));
1261     JVMState* new_jvms = _cg->generate(kit.sync_jvms());
1262     if (kit.failing())
1263       return nullptr;  // might happen because of NodeCountInliningCutoff
1264     assert(new_jvms != nullptr, "must be");
1265     kit.add_exception_states_from(new_jvms);
1266     kit.set_jvms(new_jvms);
1267     if (!kit.stopped()) {
1268       result_jvms[results++] = kit.jvms();
1269     }
1270   }
1271 
1272   if (results == 0) {
1273     // All paths ended in uncommon traps.
1274     (void) kit.stop();
1275     return kit.transfer_exceptions_into_jvms();
1276   }
1277 
1278   if (results == 1) { // Only one path
1279     kit.set_jvms(result_jvms[0]);
1280     return kit.transfer_exceptions_into_jvms();
1281   }
1282 
1283   // Merge all paths.
1284   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1285   RegionNode* region = new RegionNode(results + 1);
1286   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1287   for (int i = 0; i < results; i++) {
1288     JVMState* jvms = result_jvms[i];
1289     int path = i + 1;
1290     SafePointNode* map = jvms->map();
1291     region->init_req(path, map->control());
1292     iophi->set_req(path, map->i_o());
1293     if (i == 0) {
1294       kit.set_jvms(jvms);
1295     } else {
1296       kit.merge_memory(map->merged_memory(), region, path);
1297     }
1298   }
1299   kit.set_control(gvn.transform(region));
1300   kit.set_i_o(gvn.transform(iophi));
1301   // Transform new memory Phis.
1302   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1303     Node* phi = mms.memory();
1304     if (phi->is_Phi() && phi->in(0) == region) {
1305       mms.set_memory(gvn.transform(phi));
1306     }
1307   }
1308 
1309   // Merge debug info.
1310   Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1311   uint tos = kit.jvms()->stkoff() + kit.sp();
1312   Node* map = kit.map();
1313   uint limit = map->req();
1314   for (uint i = TypeFunc::Parms; i < limit; i++) {
1315     // Skip unused stack slots; fast forward to monoff();
1316     if (i == tos) {
1317       i = kit.jvms()->monoff();
1318       if (i >= limit)  break;
1319     }
1320     Node* n = map->in(i);
1321     ins[0] = n;
1322     const Type* t = gvn.type(n);
1323     bool needs_phi = false;
1324     for (int j = 1; j < results; j++) {
1325       JVMState* jvms = result_jvms[j];
1326       Node* jmap = jvms->map();
1327       Node* m = nullptr;
1328       if (jmap->req() > i) {
1329         m = jmap->in(i);
1330         if (m != n) {
1331           needs_phi = true;
1332           t = t->meet_speculative(gvn.type(m));
1333         }
1334       }
1335       ins[j] = m;
1336     }
1337     if (needs_phi) {
1338       Node* phi = PhiNode::make(region, n, t);
1339       for (int j = 1; j < results; j++) {
1340         phi->set_req(j + 1, ins[j]);
1341       }
1342       map->set_req(i, gvn.transform(phi));
1343     }
1344   }
1345 
1346   return kit.transfer_exceptions_into_jvms();
1347 }
1348 
1349 //-------------------------UncommonTrapCallGenerator-----------------------------
1350 // Internal class which replaces the call site with an uncommon trap (deoptimization).
1351 class UncommonTrapCallGenerator : public CallGenerator {
1352   Deoptimization::DeoptReason _reason;
1353   Deoptimization::DeoptAction _action;
1354 
1355 public:
1356   UncommonTrapCallGenerator(ciMethod* m,
1357                             Deoptimization::DeoptReason reason,
1358                             Deoptimization::DeoptAction action)
1359     : CallGenerator(m)
1360   {
1361     _reason = reason;
1362     _action = action;
1363   }
1364 
1365   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1366   virtual bool      is_trap() const             { return true; }
1367 
1368   virtual JVMState* generate(JVMState* jvms);
1369 };
1370 
1371 
1372 CallGenerator*
1373 CallGenerator::for_uncommon_trap(ciMethod* m,
1374                                  Deoptimization::DeoptReason reason,
1375                                  Deoptimization::DeoptAction action) {
1376   return new UncommonTrapCallGenerator(m, reason, action);
1377 }
1378 
1379 
1380 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1381   GraphKit kit(jvms);
1382   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1383   // The call site signature can differ from that of the actual method being called (i.e. at _linkTo* sites).
1384   // Use callsite signature always.
1385   ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1386   int nargs = declared_method->arg_size();
1387   kit.inc_sp(nargs);
1388   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1389   if (_reason == Deoptimization::Reason_class_check &&
1390       _action == Deoptimization::Action_maybe_recompile) {
1391     // Temp fix for 6529811
1392     // Don't allow uncommon_trap to override our decision to recompile in the event
1393     // of a class cast failure for a monomorphic call as it will never let us convert
1394     // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1395     bool keep_exact_action = true;
1396     kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
1397   } else {
1398     kit.uncommon_trap(_reason, _action);
1399   }
1400   return kit.transfer_exceptions_into_jvms();
1401 }
1402 
1403 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1404 
1405 // (Note:  Merged hook_up_exits into ParseGenerator::generate.)