/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciObjArray.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

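// A call site counts as an inlined method handle intrinsic when the symbolic
// reference at the call site is a method handle intrinsic (an MH.linkTo*/
// invokeBasic adapter) but the actual callee is not, i.e. the adapter has
// been inlined through and the call goes straight to the underlying target.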
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() const { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return nullptr;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  if (C->failing()) return nullptr;

  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != nullptr) ;
    return nullptr;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(nullptr),
      _separate_io_proj(separate_io_proj)
  {
    if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create an inline type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == nullptr) ? nullptr : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* inline_cg() {
    return _inline_cg;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue it right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != nullptr) {
    // AlwaysIncrementalInline causes for_method_handle_inline() to
    // return a LateInlineCallGenerator. Extract the
    // InlineCallGenerator from it.
    if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
      cg = cg->inline_cg();
      assert(cg != nullptr, "inline call generator expected");
    }

    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
                                  "late method handle call resolution");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
    // unless there is a signature mismatch between caller and callee. If that fails, there is not much to improve later,
    // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == nullptr || _callee == m, "repeated inline attempt with different callee");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != nullptr) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (receiver may be null)");
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (interface call)");
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        nullptr /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != nullptr) {
    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == nullptr || call->outcnt() == 0 ||
      call->in(0) == nullptr || call->in(0)->is_top()) {
    return;
  }

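  // If any argument (ignoring the unused halves of long/double values)
  // is dead (top), the call sits on a dead path: bail out.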
  const TypeTuple* r = call->tf()->domain_cc();
  for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
    if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  // Similar to incremental inlining, don't assert that all call
  // projections are still there for post-parse call devirtualization.
  bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
  CallProjections* callprojs = call->extract_projections(true, do_asserts);
  if ((callprojs->fallthrough_catchproj == call->in(0)) ||
      (callprojs->catchall_catchproj    == call->in(0)) ||
      (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

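  // Determine whether the call result is actually used. Also bail out if a
  // result projection somehow feeds back into the call (unreachable loop).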
  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != nullptr) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  if (is_pure_call() && result_not_used) {
    // The call is marked as pure (no important side effects), but its result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true, do_asserts);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
      map->set_req(i1, C->top());
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple* domain_sig = call->_tf->domain_sig();
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

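    // Transfer the call's inputs into the argument slots of the new map:
    // j walks the call's (possibly scalarized) inputs, while arg_num counts
    // declared arguments, skipping the unused halves of long/double values.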
    uint j = TypeFunc::Parms;
    int arg_num = 0;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
        // Inline type arguments are not passed by reference: we get an argument per
        // field of the inline type. Build InlineTypeNodes from the inline type arguments.
        GraphKit arg_kit(jvms, &gvn);
        Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
      }
      if (t != Type::HALF) {
        arg_num++;
      }
    }

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      return;
    }

    // Check if we are late inlining a method handle call that returns an inline type as fields.
    Node* buffer_oop = nullptr;
    ciMethod* inline_method = inline_cg()->method();
    ciType* return_type = inline_method->return_type();
    if (!call->tf()->returns_inline_type_as_fields() &&
        return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
      // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
      // Do this before the method handle call in case the buffer allocation triggers deoptimization and
      // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
      GraphKit arg_kit(jvms, &gvn);
      {
        PreserveReexecuteState preexecs(&arg_kit);
        arg_kit.jvms()->set_should_reexecute(true);
        arg_kit.inc_sp(nargs);
        Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
        buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
      }
      jvms = arg_kit.transfer_exceptions_into_jvms();
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != nullptr) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == nullptr)  return;  // no change
    if (C->failing())      return;

    if (is_mh_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
    } else if (is_string_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
    } else if (is_boxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
    } else if (is_vector_reboxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
    } else {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
    }

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
      result = kit.must_be_not_null(result, false);
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_method->has_loops());
      C->env()->notice_inlined_method(inline_method);
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup

    // Handle inline type returns
    InlineTypeNode* vt = result->isa_InlineType();
    if (vt != nullptr) {
      if (call->tf()->returns_inline_type_as_fields()) {
        vt->replace_call_results(&kit, call, C);
      } else {
        // Result might still be allocated (for example, if it has been stored to a non-flat field)
        if (!vt->is_allocated(&kit.gvn())) {
          assert(buffer_oop != nullptr, "should have allocated a buffer");
          RegionNode* region = new RegionNode(3);

          // Check if result is null
          Node* null_ctl = kit.top();
          kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
          region->init_req(1, null_ctl);
          PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
          Node* init_mem = kit.reset_memory();
          PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);

          // Not null, initialize the buffer
          kit.set_all_memory(init_mem);

          Node* payload_ptr = kit.basic_plus_adr(buffer_oop, kit.gvn().type(vt)->inline_klass()->payload_offset());
          vt->store_flat(&kit, buffer_oop, payload_ptr, false, true, true, IN_HEAP | MO_UNORDERED);
          // Do not let stores that initialize this buffer be reordered with a subsequent
          // store that would make this buffer accessible by other threads.
          AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
          assert(alloc != nullptr, "must have an allocation node");
          kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
          region->init_req(2, kit.control());
          oop->init_req(2, buffer_oop);
          mem->init_req(2, kit.merged_memory());

          // Update oop input to buffer
          kit.gvn().hash_delete(vt);
          vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
          vt->set_is_buffered(kit.gvn());
          vt = kit.gvn().transform(vt)->as_InlineType();

          kit.set_control(kit.gvn().transform(region));
          kit.set_all_memory(kit.gvn().transform(mem));
          kit.record_for_igvn(region);
          kit.record_for_igvn(oop);
          kit.record_for_igvn(mem);
        }
        result = vt;
      }
      DEBUG_ONLY(buffer_oop = nullptr);
    } else {
      assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
    }
    assert(buffer_oop == nullptr, "unused buffer allocation");

    kit.replace_call(call, result, true, do_asserts);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_vector_reboxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict a hit_prob as extreme as 0 or 1.
    // Clamp such extreme values back into the valid probability range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = nullptr;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

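  // Generate the slow path first: it handles receivers that fail the type
  // check and falls back to the unoptimized (_if_missed) call.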
  SafePointNode* slow_map = nullptr;
  JVMState* slow_jvms = nullptr;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return nullptr;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != nullptr, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (kit.failing()) {
    return nullptr;
  }
  if (new_jvms == nullptr) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == nullptr) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    // TODO 8284443 still needed?
    if (m->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in fast path
      m = m->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, m);
    }
    if (n->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in slow path
      PreserveJVMState pjvms(&kit);
      kit.set_map(slow_map);
      n = n->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, n);
      slow_map = kit.stop();
    }
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff().
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  bool should_delay = C->should_delay_inlining();
  if (cg != nullptr) {
    if (should_delay && IncrementalInlineMH) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      return cg;
    }
  }
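  // The inlining attempt failed (cg == nullptr). Decide between scheduling a
  // late (incremental) inlining attempt and emitting a plain out-of-line call.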
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && (AlwaysIncrementalInline ||
                            (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}


CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != nullptr) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms, "signatures mismatch");
            return nullptr;
          }

          CallGenerator* cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms, "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms, "receiver not constant");
      }
  } break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      int nargs = callee->arg_size();
      // Get MemberName argument:
      Node* member_name = kit.argument(nargs - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms, "signatures mismatch");
          return nullptr;
        }

        // In lambda forms we erase signature types to avoid resolution issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* recv = kit.argument(0);
          Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());
          if (casted_recv->is_top()) {
            print_inlining_failure(C, callee, jvms, "argument types mismatch");
            return nullptr; // FIXME: effectively dead; issue a halt node instead
          } else if (casted_recv != recv) {
            kit.set_argument(0, casted_recv);
          }
        }
        // Cast reference arguments to their types.
1239         for (int i = 0, j = 0; i < signature->count(); i++) {
1240           ciType* t = signature->type_at(i);
1241           if (t->is_klass()) {
1242             Node* arg = kit.argument(receiver_skip + j);
1243             Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass());
1244             if (casted_arg->is_top()) {
1245               print_inlining_failure(C, callee, jvms, "argument types mismatch");
1246               return nullptr; // FIXME: effectively dead; issue a halt node instead
1247             } else if (casted_arg != arg) {
1248               kit.set_argument(receiver_skip + j, casted_arg);
1249             }
1250           }
1251           j += t->size();  // long and double take two slots
1252         }
1253 
1254         // Try to get the most accurate receiver type
1255         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1256         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1257         int  vtable_index       = Method::invalid_vtable_index;
1258         bool call_does_dispatch = false;
1259 
1260         ciKlass* speculative_receiver_type = nullptr;
1261         if (is_virtual_or_interface) {
1262           ciInstanceKlass* klass = target->holder();
1263           Node*             receiver_node = kit.argument(0);
1264           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1265           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1266           // optimize_virtual_call() takes 2 different holder
1267           // arguments for a corner case that doesn't apply here (see
1268           // Parse::do_call())
1269           target = C->optimize_virtual_call(caller, klass, klass,
1270                                             target, receiver_type, is_virtual,
1271                                             call_does_dispatch, vtable_index, // out-parameters
1272                                             false /* check_access */);
1273         // We lack profiling at this call but type speculation may
1274         // provide us with a type.
1275           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1276         }
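        // Hand off to the general call generator machinery, which may inline the
        // (possibly devirtualized) target or emit a regular call.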
1277         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1278                                               allow_inline,
1279                                               PROB_ALWAYS,
1280                                               speculative_receiver_type,
1281                                               true);
1282         return cg;
1283       } else {
1284         print_inlining_failure(C, callee, jvms, "member_name not constant");
1285       }
1286   } break;
1287 
1288   case vmIntrinsics::_linkToNative:
1289     print_inlining_failure(C, callee, jvms, "native call");
1290     break;
1291 
1292   default:
1293     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1294     break;
1295   }
1296   return nullptr;
1297 }
1298 
1299 //------------------------PredicatedIntrinsicGenerator------------------------------
1300 // Internal class which handles all predicated intrinsic calls.
1301 class PredicatedIntrinsicGenerator : public CallGenerator {
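  // _intrinsic supplies the predicates and the fast-path code; _cg is the
  // fallback generator used on the paths where no predicate holds.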
1302   CallGenerator* _intrinsic;
1303   CallGenerator* _cg;
1304 
1305 public:
1306   PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1307                                CallGenerator* cg)
1308     : CallGenerator(cg->method())
1309   {
1310     _intrinsic = intrinsic;
1311     _cg        = cg;
1312   }
1313 
1314   virtual bool      is_virtual()   const    { return true; }
1315   virtual bool      is_inline()    const    { return true; }
1316   virtual bool      is_intrinsic() const    { return true; }
1317 
1318   virtual JVMState* generate(JVMState* jvms);
1319 };
1320 
1321 
1322 CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1323                                                        CallGenerator* cg) {
1324   return new PredicatedIntrinsicGenerator(intrinsic, cg);
1325 }
1326 
1327 
1328 JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1329   // The code we want to generate here is:
1330   //    if (receiver == nullptr)
1331   //        uncommon_trap
1332   //    if (predicate(0))
1333   //        do_intrinsic(0)
1334   //    else
1335   //    if (predicate(1))
1336   //        do_intrinsic(1)
1337   //    ...
1338   //    else
1339   //        do_java_comp (i.e., fall back to normal compilation)
1340 
1341   GraphKit kit(jvms);
1342   PhaseGVN& gvn = kit.gvn();
1343 
1344   CompileLog* log = kit.C->log();
1345   if (log != nullptr) {
1346     log->elem("predicated_intrinsic bci='%d' method='%d'",
1347               jvms->bci(), log->identify(method()));
1348   }
1349 
1350   if (!method()->is_static()) {
1351     // We need an explicit receiver null check before checking its type in the predicate.
1352     // We share a map with the caller, so its JVMS gets adjusted.
1353     kit.null_check_receiver_before_call(method());
1354     if (kit.stopped()) {
1355       return kit.transfer_exceptions_into_jvms();
1356     }
1357   }
1358 
1359   int n_predicates = _intrinsic->predicates_count();
1360   assert(n_predicates > 0, "sanity");
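  // Each predicate guards one specialized version of the intrinsic; the versions
  // are tried in order, and the fallthrough goes to normal compilation.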
1361 
1362   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
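  // Collect one exit state per surviving intrinsic version, plus (possibly) one
  // for the slow path; they are merged at the end.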
1363 
1364   // Region for normal compilation code if intrinsic failed.
1365   Node* slow_region = new RegionNode(1);
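  // Input 0 of a RegionNode is reserved for the region itself; slow-path
  // controls are appended with add_req() below.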
1366 
1367   int results = 0;
1368   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1369 #ifdef ASSERT
1370     JVMState* old_jvms = kit.jvms();
1371     SafePointNode* old_map = kit.map();
1372     Node* old_io  = old_map->i_o();
1373     Node* old_mem = old_map->memory();
1374     Node* old_exc = old_map->next_exception();
1375 #endif
1376     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
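    // generate_predicate() leaves the kit's control on the predicate-true path
    // and returns the control for the predicate-false ("else") path.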
1377 #ifdef ASSERT
1378     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1379     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1380     SafePointNode* new_map = kit.map();
1381     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1382     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1383     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1384 #endif
1385     if (!kit.stopped()) {
1386       PreserveJVMState pjvms(&kit);
1387       // Generate intrinsic code:
1388       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1389       if (kit.failing()) {
1390         return nullptr;
1391       }
1392       if (new_jvms == nullptr) {
1393         // Intrinsic failed, use normal compilation path for this predicate.
1394         slow_region->add_req(kit.control());
1395       } else {
1396         kit.add_exception_states_from(new_jvms);
1397         kit.set_jvms(new_jvms);
1398         if (!kit.stopped()) {
1399           result_jvms[results++] = kit.jvms();
1400         }
1401       }
1402     }
1403     if (else_ctrl == nullptr) {
1404       else_ctrl = kit.C->top();
1405     }
1406     kit.set_control(else_ctrl);
1407   }
1408   if (!kit.stopped()) {
1409     // Final 'else' after predicates.
1410     slow_region->add_req(kit.control());
1411   }
1412   if (slow_region->req() > 1) {
1413     PreserveJVMState pjvms(&kit);
1414     // Generate normal compilation code:
1415     kit.set_control(gvn.transform(slow_region));
1416     JVMState* new_jvms = _cg->generate(kit.sync_jvms());
1417     if (kit.failing())
1418       return nullptr;  // might happen because of NodeCountInliningCutoff
1419     assert(new_jvms != nullptr, "must be");
1420     kit.add_exception_states_from(new_jvms);
1421     kit.set_jvms(new_jvms);
1422     if (!kit.stopped()) {
1423       result_jvms[results++] = kit.jvms();
1424     }
1425   }
1426 
1427   if (results == 0) {
1428     // All paths ended in uncommon traps.
1429     (void) kit.stop();
1430     return kit.transfer_exceptions_into_jvms();
1431   }
1432 
1433   if (results == 1) { // Only one path
1434     kit.set_jvms(result_jvms[0]);
1435     return kit.transfer_exceptions_into_jvms();
1436   }
1437 
1438   // Merge all paths.
1439   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1440   RegionNode* region = new RegionNode(results + 1);
1441   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
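  // region merges control from each surviving path; iophi merges their i/o states.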
1442   for (int i = 0; i < results; i++) {
1443     JVMState* jvms = result_jvms[i];
1444     int path = i + 1;
1445     SafePointNode* map = jvms->map();
1446     region->init_req(path, map->control());
1447     iophi->set_req(path, map->i_o());
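    // The first path's map becomes the kit's base state; the memory state of
    // every later path is merged into it.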
1448     if (i == 0) {
1449       kit.set_jvms(jvms);
1450     } else {
1451       kit.merge_memory(map->merged_memory(), region, path);
1452     }
1453   }
1454   kit.set_control(gvn.transform(region));
1455   kit.set_i_o(gvn.transform(iophi));
1456   // Transform new memory Phis.
1457   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1458     Node* phi = mms.memory();
1459     if (phi->is_Phi() && phi->in(0) == region) {
1460       mms.set_memory(gvn.transform(phi));
1461     }
1462   }
1463 
1464   // Merge debug info.
1465   Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1466   uint tos = kit.jvms()->stkoff() + kit.sp();
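  // Slots in [tos, monoff()) are unused stack slots and carry no debug info.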
1467   Node* map = kit.map();
1468   uint limit = map->req();
1469   for (uint i = TypeFunc::Parms; i < limit; i++) {
1470     // Skip unused stack slots; fast-forward to monoff().
1471     if (i == tos) {
1472       i = kit.jvms()->monoff();
1473       if (i >= limit) break;
1474     }
1475     Node* n = map->in(i);
1476     ins[0] = n;
1477     const Type* t = gvn.type(n);
1478     bool needs_phi = false;
1479     for (int j = 1; j < results; j++) {
1480       JVMState* jvms = result_jvms[j];
1481       Node* jmap = jvms->map();
1482       Node* m = nullptr;
1483       if (jmap->req() > i) {
1484         m = jmap->in(i);
1485         if (m != n) {
1486           needs_phi = true;
1487           t = t->meet_speculative(gvn.type(m));
1488         }
1489       }
1490       ins[j] = m;
1491     }
1492     if (needs_phi) {
1493       Node* phi = PhiNode::make(region, n, t);
1494       for (int j = 1; j < results; j++) {
1495         phi->set_req(j + 1, ins[j]);
1496       }
1497       map->set_req(i, gvn.transform(phi));
1498     }
1499   }
1500 
1501   return kit.transfer_exceptions_into_jvms();
1502 }
1503 
1504 //-------------------------UncommonTrapCallGenerator-----------------------------
1505 // Internal class which emits an uncommon trap (deoptimization) in place of the call.
1506 class UncommonTrapCallGenerator : public CallGenerator {
1507   Deoptimization::DeoptReason _reason;
1508   Deoptimization::DeoptAction _action;
1509 
1510 public:
1511   UncommonTrapCallGenerator(ciMethod* m,
1512                             Deoptimization::DeoptReason reason,
1513                             Deoptimization::DeoptAction action)
1514     : CallGenerator(m)
1515   {
1516     _reason = reason;
1517     _action = action;
1518   }
1519 
1520   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1521   virtual bool      is_trap() const             { return true; }
1522 
1523   virtual JVMState* generate(JVMState* jvms);
1524 };
1525 
1526 
1527 CallGenerator*
1528 CallGenerator::for_uncommon_trap(ciMethod* m,
1529                                  Deoptimization::DeoptReason reason,
1530                                  Deoptimization::DeoptAction action) {
1531   return new UncommonTrapCallGenerator(m, reason, action);
1532 }
1533 
1534 
1535 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1536   GraphKit kit(jvms);
1537   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1538   // The callsite signature can differ from that of the actual method being called (e.g., at _linkTo* sites).
1539   // Always use the callsite signature.
1540   ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1541   int nargs = declared_method->arg_size();
1542   kit.inc_sp(nargs);
1543   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
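  // The arguments are re-pushed so that the interpreter, after deoptimization,
  // sees the state just before the call and can re-execute it.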
1544   if (_reason == Deoptimization::Reason_class_check &&
1545       _action == Deoptimization::Action_maybe_recompile) {
1546     // Temporary fix for 6529811.
1547     // Don't allow uncommon_trap to override our decision to recompile in the event
1548     // of a class cast failure for a monomorphic call, as it will never let us convert
1549     // the call to either bi-morphic or megamorphic and can lead to uncommon-trap loops.
1550     bool keep_exact_action = true;
1551     kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
1552   } else {
1553     kit.uncommon_trap(_reason, _action);
1554   }
1555   return kit.transfer_exceptions_into_jvms();
1556 }
1557 
1558 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1559 
1560 // (Note:  Merged hook_up_exits into ParseGenerator::generate.)