/*
 * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciObjArray.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

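// A call site counts as an "inlined method handle intrinsic" when the symbolic
// target at the bci is a MethodHandle linker/invoker intrinsic (for example
// MethodHandle.invokeBasic or MethodHandle.linkToVirtual) but it has been
// resolved to a concrete non-intrinsic target method, so the intrinsic
// adapter itself is bypassed.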
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() const { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return nullptr;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  if (C->failing()) return nullptr;

  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != nullptr) ;
    return nullptr;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(nullptr),
      _separate_io_proj(separate_io_proj)
  {
    if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create an inline type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
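  // Attach an equivalent generator to a different, pre-existing call node,
  // e.g. when a call has been cloned and the clone needs its own generator.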
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
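  // So fall back to an explicit null check when inline caches or implicit
  // null checks are unavailable, or when this caller has already trapped on
  // null checks often enough (ImplicitNullCheckThreshold) that another
  // implicit check is likely to be unprofitable.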
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == nullptr) ? nullptr : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

// Allow inlining decisions to be delayed
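// A late inline call generator emits a regular out-of-line call at parse time
// (generate()), registers itself with Compile so the call site is revisited
// after the main parse, and performs the actual inline in do_late_inline(),
// when more type and profile information may be available.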
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* inline_cg() {
    return _inline_cg;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != nullptr) {
    // AlwaysIncrementalInline causes for_method_handle_inline() to
    // return a LateInlineCallGenerator. Extract the
    // InlineCallGenerator from it.
    if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
      cg = cg->inline_cg();
      assert(cg != nullptr, "inline call generator expected");
    }

    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
                                  "late method handle call resolution");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || cg->is_virtual_late_inline() ||
           AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
    // unless there is a signature mismatch between caller and callee. If it fails here, there is not much to improve
    // later, so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->mark_has_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual ciMethod* callee_method() {
    return _callee;
  }

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == nullptr || _callee == m, "repeated inline attempt with different callee");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != nullptr) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (receiver may be null)");
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (interface call)");
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        nullptr /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != nullptr) {
    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

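// Shared implementation behind the do_late_inline() variants above: verify
// that the call node is still live and well-formed, rebuild a JVMState and
// map as if we were just about to parse the callee (re-materializing
// scalarized inline type arguments), run the subclass-specific
// do_late_inline_check(), then generate the inlined body via inline_cg()
// and splice the result back into the graph with replace_call().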
void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == nullptr || call->outcnt() == 0 ||
      call->in(0) == nullptr || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain_cc();
  for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
    if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  // Similar to incremental inlining, don't assert that all call
  // projections are still there for post-parse call devirtualization.
  bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
  CallProjections* callprojs = call->extract_projections(true, do_asserts);
  if ((callprojs->fallthrough_catchproj == call->in(0)) ||
      (callprojs->catchall_catchproj    == call->in(0)) ||
      (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();

  uint endoff = call->jvms()->endoff();
  if (C->inlining_incrementally()) {
    // No reachability edges should be present when incremental inlining takes place.
    // Inlining logic doesn't expect any extra edges past debug info and fails with
    // an assert in SafePointNode::grow_stack.
    assert(endoff == call->req(), "reachability edges not supported");
  } else {
    if (call->req() > endoff) { // reachability edges present
      assert(OptimizeReachabilityFences, "required");
      return; // keep the original call node as the holder of reachability info
    }
  }

  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != nullptr) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  if (is_pure_call() && result_not_used) {
    // The call is marked as pure (no important side effects) and its result
    // isn't used, so it's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true, do_asserts);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
      map->set_req(i1, C->top());
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple* domain_sig = call->_tf->domain_sig();
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

    uint j = TypeFunc::Parms;
    int arg_num = 0;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (t->is_inlinetypeptr() && !method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
        // Inline type arguments are not passed by reference: we get an argument per
        // field of the inline type. Build InlineTypeNodes from the inline type arguments.
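        // For example, a scalarized argument of inline type MyValue{int x, int y}
        // arrives as two int edges (plus a null marker when nullable) instead
        // of a single oop edge.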
        GraphKit arg_kit(jvms, &gvn);
        Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
      }
      if (t != Type::HALF) {
        arg_num++;
      }
    }

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      return;
    }

    // Check if we are late inlining a method handle call that returns an inline type as fields.
    Node* buffer_oop = nullptr;
    ciMethod* inline_method = inline_cg()->method();
    ciType* return_type = inline_method->return_type();
    if (!call->tf()->returns_inline_type_as_fields() &&
        return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
      assert(is_mh_late_inline(), "Unexpected return type");

      // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
      // Do this before the method handle call in case the buffer allocation triggers deoptimization and
      // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
      GraphKit arg_kit(jvms, &gvn);
      {
        PreserveReexecuteState preexecs(&arg_kit);
        arg_kit.jvms()->set_should_reexecute(true);
        arg_kit.inc_sp(nargs);
        Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
        buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
      }
      jvms = arg_kit.transfer_exceptions_into_jvms();
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != nullptr) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == nullptr)  return;  // no change
    if (C->failing())      return;

    if (is_mh_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
    } else if (is_string_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
    } else if (is_boxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
    } else if (is_vector_reboxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
    } else {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
    }

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
      result = kit.must_be_not_null(result, false);
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_method->has_loops());
      C->env()->notice_inlined_method(inline_method);
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup

    // Handle inline type returns
    InlineTypeNode* vt = result->isa_InlineType();
    if (vt != nullptr) {
      if (call->tf()->returns_inline_type_as_fields()) {
        vt->replace_call_results(&kit, call, C);
      } else {
        // Result might still be allocated (for example, if it has been stored to a non-flat field)
        if (!vt->is_allocated(&kit.gvn())) {
          assert(buffer_oop != nullptr, "should have allocated a buffer");
          RegionNode* region = new RegionNode(3);

          // Check if result is null
          Node* null_ctl = kit.top();
          kit.null_check_common(vt->get_null_marker(), T_INT, false, &null_ctl);
          region->init_req(1, null_ctl);
          PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
          Node* init_mem = kit.reset_memory();
          PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);

          // Not null, initialize the buffer
          kit.set_all_memory(init_mem);

          Node* payload_ptr = kit.basic_plus_adr(buffer_oop, kit.gvn().type(vt)->inline_klass()->payload_offset());
          vt->store_flat(&kit, buffer_oop, payload_ptr, false, true, true, IN_HEAP | MO_UNORDERED);
          // Do not let stores that initialize this buffer be reordered with a subsequent
          // store that would make this buffer accessible by other threads.
          AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
          assert(alloc != nullptr, "must have an allocation node");
          kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
          region->init_req(2, kit.control());
          oop->init_req(2, buffer_oop);
          mem->init_req(2, kit.merged_memory());

          // Update oop input to buffer
          kit.gvn().hash_delete(vt);
          vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
          vt->set_is_buffered(kit.gvn());
          vt = kit.gvn().transform(vt)->as_InlineType();

          kit.set_control(kit.gvn().transform(region));
          kit.set_all_memory(kit.gvn().transform(mem));
          kit.record_for_igvn(region);
          kit.record_for_igvn(oop);
          kit.record_for_igvn(mem);
        }
        result = vt;
      }
      DEBUG_ONLY(buffer_oop = nullptr);
    } else {
      assert(result->is_top() || !call->tf()->returns_inline_type_as_fields() || !call->as_CallJava()->method()->return_type()->is_loaded(), "Unexpected return value");
    }
    assert(kit.stopped() || buffer_oop == nullptr, "unused buffer allocation");

    kit.replace_call(call, result, true, do_asserts);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_vector_reboxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
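// Emits, roughly:
//   if (receiver->klass() == _predicted_receiver)  // or a subtype check
//     <hot path: _if_hit, typically inlined>
//   else
//     <slow path: _if_missed, typically a true virtual call>
// and merges the control, i/o, memory, and stack state of the two paths.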
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = nullptr;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = nullptr;
  JVMState* slow_jvms = nullptr;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return nullptr;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != nullptr, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (kit.failing()) {
    return nullptr;
  }
  if (new_jvms == nullptr) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == nullptr) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
#ifdef ASSERT
      if (m->is_InlineType() != n->is_InlineType()) {
        InlineTypeNode* unique_vt = m->is_InlineType() ? m->as_InlineType() : n->as_InlineType();
        assert(unique_vt->is_allocated(&gvn), "InlineType can be merged with an oop only if it is allocated");
      }
#endif
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  bool should_delay = C->should_delay_inlining();
  if (cg != nullptr) {
    if (should_delay && IncrementalInlineMH) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && (AlwaysIncrementalInline ||
                            (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}


CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != nullptr) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms, "signatures mismatch");
            return nullptr;
          }

          CallGenerator *cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms, "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms, "receiver not constant");
      }
  } break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      int nargs = callee->arg_size();
      // Get MemberName argument:
      Node* member_name = kit.argument(nargs - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms, "signatures mismatch");
          return nullptr;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
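        // For example, a lambda form may declare the target's reference
        // arguments as Object while the resolved method really takes String;
        // the casts below recover the precise types so the direct call
        // type-checks.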
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* recv = kit.argument(0);
          Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass(), target->receiver_maybe_larval());
          if (casted_recv->is_top()) {
            print_inlining_failure(C, callee, jvms, "argument types mismatch");
            return nullptr; // FIXME: effectively dead; issue a halt node instead
          } else if (casted_recv != recv) {
            kit.set_argument(0, casted_recv);
          }
        }
        // Cast reference arguments to their types.
1233         for (int i = 0, j = 0; i < signature->count(); i++) {
1234           ciType* t = signature->type_at(i);
1235           if (t->is_klass()) {
1236             Node* arg = kit.argument(receiver_skip + j);
1237             Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass(), false);
1238             if (casted_arg->is_top()) {
1239               print_inlining_failure(C, callee, jvms, "argument types mismatch");
1240               return nullptr; // FIXME: effectively dead; issue a halt node instead
1241             } else if (casted_arg != arg) {
1242               kit.set_argument(receiver_skip + j, casted_arg);
1243             }
1244           }
1245           j += t->size();  // long and double take two slots
1246         }
1247 
1248         // Try to get the most accurate receiver type
1249         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1250         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1251         int  vtable_index       = Method::invalid_vtable_index;
1252         bool call_does_dispatch = false;
1253 
1254         ciKlass* speculative_receiver_type = nullptr;
1255         if (is_virtual_or_interface) {
1256           ciInstanceKlass* klass = target->holder();
1257           Node*             receiver_node = kit.argument(0);
1258           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters; they may be
          // updated by the call.  optimize_virtual_call() takes two different
          // holder arguments for a corner case that doesn't apply here (see
          // Parse::do_call()).
1263           target = C->optimize_virtual_call(caller, klass, klass,
1264                                             target, receiver_type, is_virtual,
1265                                             call_does_dispatch, vtable_index, // out-parameters
1266                                             false /* check_access */);
          // We lack profiling at this call, but type speculation may
          // provide us with a type.
1269           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1270         }
1271         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1272                                               allow_inline,
1273                                               PROB_ALWAYS,
1274                                               speculative_receiver_type,
1275                                               true);
1276         return cg;
1277       } else {
1278         print_inlining_failure(C, callee, jvms, "member_name not constant");
1279       }
1280   } break;
1281 
1282   case vmIntrinsics::_linkToNative:
1283     print_inlining_failure(C, callee, jvms, "native call");
1284     break;
1285 
1286   default:
1287     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1288     break;
1289   }
1290   return nullptr;
1291 }
1292 
1293 //------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated intrinsic calls.
1295 class PredicatedIntrinsicGenerator : public CallGenerator {
1296   CallGenerator* _intrinsic;
1297   CallGenerator* _cg;
1298 
1299 public:
1300   PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1301                                CallGenerator* cg)
1302     : CallGenerator(cg->method())
1303   {
1304     _intrinsic = intrinsic;
1305     _cg        = cg;
1306   }
1307 
1308   virtual bool      is_virtual()   const    { return true; }
1309   virtual bool      is_inline()    const    { return true; }
1310   virtual bool      is_intrinsic() const    { return true; }
1311 
1312   virtual JVMState* generate(JVMState* jvms);
1313 };
1314 
1315 
1316 CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1317                                                        CallGenerator* cg) {
1318   return new PredicatedIntrinsicGenerator(intrinsic, cg);
1319 }
1320 
1321 
1322 JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == nullptr)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //        do_normal_compilation
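  // (Predicated intrinsics are used, for example, by the AES crypto intrinsics,
  // whose predicates test the concrete type of the embedded cipher before
  // committing to the intrinsified fast path.)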
1334 
1335   GraphKit kit(jvms);
1336   PhaseGVN& gvn = kit.gvn();
1337 
1338   CompileLog* log = kit.C->log();
1339   if (log != nullptr) {
1340     log->elem("predicated_intrinsic bci='%d' method='%d'",
1341               jvms->bci(), log->identify(method()));
1342   }
1343 
1344   if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in the predicate.
    // We share a map with the caller, so its JVMS gets adjusted.
1347     kit.null_check_receiver_before_call(method());
1348     if (kit.stopped()) {
1349       return kit.transfer_exceptions_into_jvms();
1350     }
1351   }
1352 
1353   int n_predicates = _intrinsic->predicates_count();
1354   assert(n_predicates > 0, "sanity");
1355 
1356   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
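  // At most one surviving path per predicate, plus one for the final slow path.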
1357 
  // Region for normal compilation code if the intrinsic fails.
1359   Node* slow_region = new RegionNode(1);
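  // The region starts with only its self edge; a control edge is added for each
  // predicate whose intrinsic version cannot be generated, plus the final 'else'.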
1360 
1361   int results = 0;
1362   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1363 #ifdef ASSERT
1364     JVMState* old_jvms = kit.jvms();
1365     SafePointNode* old_map = kit.map();
1366     Node* old_io  = old_map->i_o();
1367     Node* old_mem = old_map->memory();
1368     Node* old_exc = old_map->next_exception();
1369 #endif
1370     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1371 #ifdef ASSERT
    // Verify that generate_predicate() changed no memory, i/o, or exception state.
1373     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1374     SafePointNode* new_map = kit.map();
1375     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1376     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1377     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1378 #endif
1379     if (!kit.stopped()) {
1380       PreserveJVMState pjvms(&kit);
1381       // Generate intrinsic code:
1382       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1383       if (kit.failing()) {
1384         return nullptr;
1385       }
1386       if (new_jvms == nullptr) {
1387         // Intrinsic failed, use normal compilation path for this predicate.
1388         slow_region->add_req(kit.control());
1389       } else {
1390         kit.add_exception_states_from(new_jvms);
1391         kit.set_jvms(new_jvms);
1392         if (!kit.stopped()) {
1393           result_jvms[results++] = kit.jvms();
1394         }
1395       }
1396     }
1397     if (else_ctrl == nullptr) {
1398       else_ctrl = kit.C->top();
1399     }
1400     kit.set_control(else_ctrl);
1401   }
1402   if (!kit.stopped()) {
1403     // Final 'else' after predicates.
1404     slow_region->add_req(kit.control());
1405   }
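  // req() > 1 means at least one control edge reached the slow region
  // (slot 0 is the region's self edge).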
1406   if (slow_region->req() > 1) {
1407     PreserveJVMState pjvms(&kit);
1408     // Generate normal compilation code:
1409     kit.set_control(gvn.transform(slow_region));
1410     JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing()) {
      return nullptr;  // might happen because of NodeCountInliningCutoff
    }
1413     assert(new_jvms != nullptr, "must be");
1414     kit.add_exception_states_from(new_jvms);
1415     kit.set_jvms(new_jvms);
1416     if (!kit.stopped()) {
1417       result_jvms[results++] = kit.jvms();
1418     }
1419   }
1420 
1421   if (results == 0) {
1422     // All paths ended in uncommon traps.
1423     (void) kit.stop();
1424     return kit.transfer_exceptions_into_jvms();
1425   }
1426 
1427   if (results == 1) { // Only one path
1428     kit.set_jvms(result_jvms[0]);
1429     return kit.transfer_exceptions_into_jvms();
1430   }
1431 
1432   // Merge all paths.
1433   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1434   RegionNode* region = new RegionNode(results + 1);
1435   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
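  // Merge control and i/o from every surviving path into the region;
  // memory and debug info are merged below.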
1436   for (int i = 0; i < results; i++) {
1437     JVMState* jvms = result_jvms[i];
1438     int path = i + 1;
1439     SafePointNode* map = jvms->map();
1440     region->init_req(path, map->control());
1441     iophi->set_req(path, map->i_o());
1442     if (i == 0) {
1443       kit.set_jvms(jvms);
1444     } else {
1445       kit.merge_memory(map->merged_memory(), region, path);
1446     }
1447   }
1448   kit.set_control(gvn.transform(region));
1449   kit.set_i_o(gvn.transform(iophi));
1450   // Transform new memory Phis.
1451   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1452     Node* phi = mms.memory();
1453     if (phi->is_Phi() && phi->in(0) == region) {
1454       mms.set_memory(gvn.transform(phi));
1455     }
1456   }
1457 
1458   // Merge debug info.
1459   Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1460   uint tos = kit.jvms()->stkoff() + kit.sp();
1461   Node* map = kit.map();
1462   uint limit = map->req();
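  // Walk the debug info in the map (locals, then expression stack, then
  // monitors) and make a Phi for any slot whose value differs across paths.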
1463   for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff().
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
1468     }
1469     Node* n = map->in(i);
1470     ins[0] = n;
1471     const Type* t = gvn.type(n);
1472     bool needs_phi = false;
1473     for (int j = 1; j < results; j++) {
1474       JVMState* jvms = result_jvms[j];
1475       Node* jmap = jvms->map();
1476       Node* m = nullptr;
1477       if (jmap->req() > i) {
1478         m = jmap->in(i);
1479         if (m != n) {
1480           needs_phi = true;
1481           t = t->meet_speculative(gvn.type(m));
1482         }
1483       }
1484       ins[j] = m;
1485     }
1486     if (needs_phi) {
1487       Node* phi = PhiNode::make(region, n, t);
1488       for (int j = 1; j < results; j++) {
1489         phi->set_req(j + 1, ins[j]);
1490       }
1491       map->set_req(i, gvn.transform(phi));
1492     }
1493   }
1494 
1495   return kit.transfer_exceptions_into_jvms();
1496 }
1497 
1498 //-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call with an uncommon trap.
1500 class UncommonTrapCallGenerator : public CallGenerator {
1501   Deoptimization::DeoptReason _reason;
1502   Deoptimization::DeoptAction _action;
1503 
1504 public:
1505   UncommonTrapCallGenerator(ciMethod* m,
1506                             Deoptimization::DeoptReason reason,
1507                             Deoptimization::DeoptAction action)
1508     : CallGenerator(m)
1509   {
1510     _reason = reason;
1511     _action = action;
1512   }
1513 
1514   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1515   virtual bool      is_trap() const             { return true; }
1516 
1517   virtual JVMState* generate(JVMState* jvms);
1518 };
1519 
1520 
1521 CallGenerator*
1522 CallGenerator::for_uncommon_trap(ciMethod* m,
1523                                  Deoptimization::DeoptReason reason,
1524                                  Deoptimization::DeoptAction action) {
1525   return new UncommonTrapCallGenerator(m, reason, action);
1526 }
1527 
1528 
1529 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1530   GraphKit kit(jvms);
1531   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // The call site signature can differ from that of the actual method being
  // called (e.g., at _linkTo* sites).  Always use the call site signature.
1534   ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1535   int nargs = declared_method->arg_size();
1536   kit.inc_sp(nargs);
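  // The trap re-executes the call bytecode in the interpreter, so the arguments
  // must be live on the expression stack (hence the inc_sp above).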
1537   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1538   if (_reason == Deoptimization::Reason_class_check &&
1539       _action == Deoptimization::Action_maybe_recompile) {
    // Temporary fix for 6529811.
    // Don't allow uncommon_trap to override our decision to recompile in the
    // event of a class cast failure for a monomorphic call, as it will never
    // let us convert the call to either bi-morphic or megamorphic and can
    // lead to uncommon-trap loops.
1544     bool keep_exact_action = true;
1545     kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
1546   } else {
1547     kit.uncommon_trap(_reason, _action);
1548   }
1549   return kit.transfer_exceptions_into_jvms();
1550 }
1551 
1552 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1553 
// (Note:  Merged hook_up_exits into ParseGenerator::generate.)