/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "ci/ciObjArray.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

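// The three overloads below successively narrow a call site down to the
// symbolic method reference recorded in the bytecode: a call counts as an
// inlined method handle intrinsic when the bytecode's symbolic target is a
// MethodHandle linker/invoker intrinsic but the method actually being
// invoked is not itself an intrinsic.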
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() const { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
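  // An explicit null check is therefore emitted when inline caches or
  // implicit null checks are unavailable, when the zero page is not
  // read-protected, or when this caller has already trapped on null
  // checks often enough (see the condition below).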
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue it right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have
  // different expression stacks, which causes late inlining to break. The MH invoker is not expected to be called
  // from a method with exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the
  // stack, which solves the issue of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // A method handle call with a constant appendix argument should be either inlined or replaced with a direct
    // call, unless there is a signature mismatch between caller and callee. If that failure occurs, there is not
    // much to improve later, so don't reinstall the generator; doing so could bounce it between IGVN and
    // incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
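  // The count of outstanding MH late inlines is decremented again when
  // do_late_inline_check() succeeds in resolving the delayed call (see above).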
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == NULL, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != NULL) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
                        "late call devirtualization failed (receiver may be null)");
    }
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
                        "late call devirtualization failed (interface call)");
    }
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        NULL /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

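// Returns true if n has any use beyond debug info on safepoints: either a
// user that is not a SafePoint at all, or a call that consumes n as an
// actual argument rather than merely recording it in debug state.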
static bool has_non_debug_usages(Node* n) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* m = n->fast_out(i);
    if (!m->is_SafePoint()
        || (m->is_Call() && m->as_Call()->has_non_debug_use(n))) {
      return true;
    }
  }
  return false;
}

static bool is_box_cache_valid(CallNode* call) {
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  return klass->is_box_cache_valid();
}

// Delay box creation to runtime: treat the box as a scalarized object
static void scalarize_debug_usages(CallNode* call, Node* resproj) {
  GraphKit kit(call->jvms());
  PhaseGVN& gvn = kit.gvn();

  ProjNode* res = resproj->as_Proj();
  ciInstanceKlass* klass = call->as_CallStaticJava()->method()->holder();
  int n_fields = klass->nof_nonstatic_fields();
  assert(n_fields == 1, "the klass must be an auto-boxing klass");

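  // For each safepoint that still references the box, synthesize a
  // SafePointScalarObjectNode describing its single value field, append the
  // primitive value (the boxing call's argument) to the safepoint's debug
  // edges, and rewrite the debug references to point at the scalar object.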
  for (DUIterator_Last imin, i = res->last_outs(imin); i >= imin;) {
    SafePointNode* sfpt = res->last_out(i)->as_SafePoint();
    uint first_ind = sfpt->req() - sfpt->jvms()->scloff();
    Node* sobj = new SafePointScalarObjectNode(gvn.type(res)->isa_oopptr(),
#ifdef ASSERT
                                                call,
#endif // ASSERT
                                                first_ind, n_fields, true);
    sobj->init_req(0, kit.root());
    sfpt->add_req(call->in(TypeFunc::Parms));
    sobj = gvn.transform(sobj);
    JVMState* jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    int num_edges = sfpt->replace_edges_in_range(res, sobj, start, end, &gvn);
    i -= num_edges;
  }

  assert(res->outcnt() == 0, "the box must have no use after replace");

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    tty->print("++++ Eliminated: %d ", call->_idx);
    call->as_CallStaticJava()->method()->print_short_name(tty);
    tty->cr();
  }
#endif
}

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");
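  // Overall shape: validate that the saved call node is still live and sane,
  // rebuild a fresh JVMState and map from the call's inputs, run the inline
  // CallGenerator over that state, and finally splice the resulting subgraph
  // in place of the call via replace_call().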

  // Can't inline it
  CallNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // Check for an unreachable loop: a dead call can end up consuming its own projections.
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj    == call->in(0)) ||
      (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj   != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = false;

  if (is_pure_call()) {
    // Disabled due to JDK-8276112
    if (false && is_boxing_late_inline() && callprojs.resproj != nullptr) {
      // Replace the box node with a scalar node only if it is directly referenced by debug info.
      assert(call->as_CallStaticJava()->is_boxing_method(), "sanity");
      if (!has_non_debug_usages(callprojs.resproj) && is_box_cache_valid(call)) {
        scalarize_debug_usages(call, callprojs.resproj);
      }
    }

    // The call is marked as pure (no important side effects); if its result
    // is also unused, it is safe to remove the call.
    result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  }

  if (result_not_used) {
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate for driving a parse.
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls guarded by a predicted receiver type check.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may report a hit_prob as extreme as 0 or 1.
    // Clamp such extreme values back into the valid probability range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();
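  // (The receiver type check below splits control flow, and replaced-nodes
  // bookkeeping stays valid along only one branch; the saved copy is restored
  // when the two branches are merged back together further down.)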

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

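  // When IncrementalInlineMH is enabled and the call site is warm, defer the
  // call as a late inline if the linker argument is not yet constant, if we
  // are not currently in the incremental-inlining phase, or if we are over
  // the inlining cutoff; otherwise emit an out-of-line direct call.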
  if (IncrementalInlineMH && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

class NativeCallGenerator : public CallGenerator {
private:
  address _call_addr;
  ciNativeEntryPoint* _nep;
public:
  NativeCallGenerator(ciMethod* m, address call_addr, ciNativeEntryPoint* nep)
   : CallGenerator(m), _call_addr(call_addr), _nep(nep) {}

  virtual JVMState* generate(JVMState* jvms);
};

JVMState* NativeCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  Node* call = kit.make_native_call(_call_addr, tf(), method()->arg_size(), _nep); // -fallback, - nep
  if (call == NULL) return NULL;

  kit.C->print_inlining_update(this);
  if (kit.C->log() != NULL) {
    kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(_call_addr));
  }

  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              allow_inline,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToNative:
    {
      Node* addr_n = kit.argument(1); // target address
      Node* nep_n = kit.argument(callee->arg_size() - 1); // NativeEntryPoint
      // This check needs to be kept in sync with the one in CallStaticJavaNode::Ideal
      if (addr_n->Opcode() == Op_ConL && nep_n->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeLong* addr_t = addr_n->bottom_type()->is_long();
        const TypeOopPtr* nep_t = nep_n->bottom_type()->is_oopptr();
        address addr = (address) addr_t->get_con();
        ciNativeEntryPoint* nep = nep_t->const_oop()->as_native_entry_point();
        return new NativeCallGenerator(callee, addr, nep);
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "NativeEntryPoint not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_trap
1287   //    if (predicate(0))
1288   //        do_intrinsic(0)
1289   //    else
1290   //    if (predicate(1))
1291   //        do_intrinsic(1)
1292   //    ...
1293   //    else
1294   //        do_java_comp
1295 
1296   GraphKit kit(jvms);
1297   PhaseGVN& gvn = kit.gvn();
1298 
1299   CompileLog* log = kit.C->log();
1300   if (log != NULL) {
1301     log->elem("predicated_intrinsic bci='%d' method='%d'",
1302               jvms->bci(), log->identify(method()));
1303   }
1304 
1305   if (!method()->is_static()) {
1306     // We need an explicit receiver null_check before checking its type in predicate.
1307     // We share a map with the caller, so his JVMS gets adjusted.
1308     Node* receiver = kit.null_check_receiver_before_call(method());
1309     if (kit.stopped()) {
1310       return kit.transfer_exceptions_into_jvms();
1311     }
1312   }
1313 
1314   int n_predicates = _intrinsic->predicates_count();
1315   assert(n_predicates > 0, "sanity");
1316 
1317   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1318 
1319   // Region for normal compilation code if intrinsic failed.
1320   Node* slow_region = new RegionNode(1);
1321 
1322   int results = 0;
1323   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1324 #ifdef ASSERT
1325     JVMState* old_jvms = kit.jvms();
1326     SafePointNode* old_map = kit.map();
1327     Node* old_io  = old_map->i_o();
1328     Node* old_mem = old_map->memory();
1329     Node* old_exc = old_map->next_exception();
1330 #endif
1331     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1332 #ifdef ASSERT
1333     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1334     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1335     SafePointNode* new_map = kit.map();
1336     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1337     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1338     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1339 #endif
1340     if (!kit.stopped()) {
1341       PreserveJVMState pjvms(&kit);
1342       // Generate intrinsic code:
1343       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1344       if (new_jvms == NULL) {
1345         // Intrinsic failed, use normal compilation path for this predicate.
1346         slow_region->add_req(kit.control());
1347       } else {
1348         kit.add_exception_states_from(new_jvms);
1349         kit.set_jvms(new_jvms);
1350         if (!kit.stopped()) {
1351           result_jvms[results++] = kit.jvms();
1352         }
1353       }
1354     }
1355     if (else_ctrl == NULL) {
1356       else_ctrl = kit.C->top();
1357     }
1358     kit.set_control(else_ctrl);
1359   }
1360   if (!kit.stopped()) {
1361     // Final 'else' after predicates.
1362     slow_region->add_req(kit.control());
1363   }
1364   if (slow_region->req() > 1) {
1365     PreserveJVMState pjvms(&kit);
1366     // Generate normal compilation code:
1367     kit.set_control(gvn.transform(slow_region));
1368     JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing()) {
      return NULL;  // might happen because of NodeCountInliningCutoff
    }
1371     assert(new_jvms != NULL, "must be");
1372     kit.add_exception_states_from(new_jvms);
1373     kit.set_jvms(new_jvms);
1374     if (!kit.stopped()) {
1375       result_jvms[results++] = kit.jvms();
1376     }
1377   }
1378 
1379   if (results == 0) {
1380     // All paths ended in uncommon traps.
1381     (void) kit.stop();
1382     return kit.transfer_exceptions_into_jvms();
1383   }
1384 
1385   if (results == 1) { // Only one path
1386     kit.set_jvms(result_jvms[0]);
1387     return kit.transfer_exceptions_into_jvms();
1388   }
1389 
1390   // Merge all paths.
1391   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1392   RegionNode* region = new RegionNode(results + 1);
1393   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1394   for (int i = 0; i < results; i++) {
1395     JVMState* jvms = result_jvms[i];
1396     int path = i + 1;
1397     SafePointNode* map = jvms->map();
1398     region->init_req(path, map->control());
1399     iophi->set_req(path, map->i_o());
1400     if (i == 0) {
1401       kit.set_jvms(jvms);
1402     } else {
1403       kit.merge_memory(map->merged_memory(), region, path);
1404     }
1405   }
1406   kit.set_control(gvn.transform(region));
1407   kit.set_i_o(gvn.transform(iophi));
1408   // Transform new memory Phis.
1409   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1410     Node* phi = mms.memory();
1411     if (phi->is_Phi() && phi->in(0) == region) {
1412       mms.set_memory(gvn.transform(phi));
1413     }
1414   }
1415 
1416   // Merge debug info.
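  // Walk every JVMS slot of the map (parameters, locals, expression stack,
  // monitors); wherever the surviving paths disagree on a value, replace it
  // with a Phi over the merge region built above.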
1417   Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1418   uint tos = kit.jvms()->stkoff() + kit.sp();
1419   Node* map = kit.map();
1420   uint limit = map->req();
1421   for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast-forward to monoff().
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
1426     }
1427     Node* n = map->in(i);
1428     ins[0] = n;
1429     const Type* t = gvn.type(n);
1430     bool needs_phi = false;
1431     for (int j = 1; j < results; j++) {
1432       JVMState* jvms = result_jvms[j];
1433       Node* jmap = jvms->map();
1434       Node* m = NULL;
1435       if (jmap->req() > i) {
1436         m = jmap->in(i);
1437         if (m != n) {
1438           needs_phi = true;
1439           t = t->meet_speculative(gvn.type(m));
1440         }
1441       }
1442       ins[j] = m;
1443     }
1444     if (needs_phi) {
1445       Node* phi = PhiNode::make(region, n, t);
1446       for (int j = 1; j < results; j++) {
1447         phi->set_req(j + 1, ins[j]);
1448       }
1449       map->set_req(i, gvn.transform(phi));
1450     }
1451   }
1452 
1453   return kit.transfer_exceptions_into_jvms();
1454 }
1455 
1456 //-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles call sites that are compiled as an
// unconditional uncommon trap (deoptimization) instead of an actual call.
1458 class UncommonTrapCallGenerator : public CallGenerator {
1459   Deoptimization::DeoptReason _reason;
1460   Deoptimization::DeoptAction _action;
1461 
1462 public:
1463   UncommonTrapCallGenerator(ciMethod* m,
1464                             Deoptimization::DeoptReason reason,
1465                             Deoptimization::DeoptAction action)
1466     : CallGenerator(m)
1467   {
1468     _reason = reason;
1469     _action = action;
1470   }
1471 
1472   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1473   virtual bool      is_trap() const             { return true; }
1474 
1475   virtual JVMState* generate(JVMState* jvms);
1476 };
1477 
1478 
1479 CallGenerator*
1480 CallGenerator::for_uncommon_trap(ciMethod* m,
1481                                  Deoptimization::DeoptReason reason,
1482                                  Deoptimization::DeoptAction action) {
1483   return new UncommonTrapCallGenerator(m, reason, action);
1484 }
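
// Illustrative usage (a sketch): a caller that decides a call site should
// simply deoptimize, e.g. to avoid repeated class-check failures at a
// monomorphic call, can replace its generator with
//
//   CallGenerator* cg =
//     CallGenerator::for_uncommon_trap(callee,
//                                      Deoptimization::Reason_class_check,
//                                      Deoptimization::Action_maybe_recompile);
//
// generate() below then emits an unconditional uncommon_trap in place of
// the call.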
1485 
1486 
1487 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1488   GraphKit kit(jvms);
1489   kit.C->print_inlining_update(this);
1490   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // The call site signature can differ from that of the actual method being
  // called (e.g. at _linkTo* sites). Always use the call site signature.
1493   ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1494   int nargs = declared_method->arg_size();
1495   kit.inc_sp(nargs);
1496   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1497   if (_reason == Deoptimization::Reason_class_check &&
1498       _action == Deoptimization::Action_maybe_recompile) {
    // Temporary fix for 6529811.
    // Don't allow uncommon_trap to override our decision to recompile in the
    // event of a class cast failure for a monomorphic call, as it would never
    // let us convert the call to either bi-morphic or megamorphic and can
    // lead to uncommon-trap loops.
1503     bool keep_exact_action = true;
1504     kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1505   } else {
1506     kit.uncommon_trap(_reason, _action);
1507   }
1508   return kit.transfer_exceptions_into_jvms();
1509 }
1510 
1511 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1512 
// (Note:  Merged hook_up_exits into ParseGenerator::generate.)