/*
 * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

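// A call site counts as an inlined method handle intrinsic when the symbolic
// info at the caller bci is a MethodHandle linker intrinsic (linkTo*/invokeBasic)
// but the actual callee is not, i.e. the linker call has already been resolved
// to a concrete target method.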
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() const { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR compilation has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
    if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create an inline type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
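  // Hook called when the late inline is attempted (see do_late_inline_helper());
  // subclasses veto the transformation by returning false.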
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* inline_cg() {
    return _inline_cg;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != NULL) {
    // AlwaysIncrementalInline causes for_method_handle_inline() to
    // return a LateInlineCallGenerator. Extract the
    // InlineCallGenerator from it.
    if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
      cg = cg->inline_cg();
      assert(cg != NULL, "inline call generator expected");
    }

    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
    // unless there's a signature mismatch between caller and callee. If that failure occurs, there's not much to improve later,
    // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == NULL, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != NULL) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
                        "late call devirtualization failed (receiver may be null)");
    }
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    if (C->print_inlining() || C->print_intrinsics()) {
      C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(),
                        "late call devirtualization failed (interface call)");
    }
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        NULL /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // A virtual call that provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

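// Shared driver for late inlining: validates the recorded call node, rebuilds
// a JVMState and map from the call's inputs, and replaces the call with the
// graph produced by the inline CallGenerator.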
void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain_cc();
  for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
    if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  CallProjections* callprojs = call->extract_projections(true);
  if ((callprojs->fallthrough_catchproj == call->in(0)) ||
      (callprojs->catchall_catchproj    == call->in(0)) ||
      (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

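  // Scan the result projections: if none of them has uses, the result is dead
  // and a pure call can be removed entirely. Bail out if the call is reachable
  // from one of its own result projections (unreachable loop).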
  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != NULL) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  if (is_pure_call() && result_not_used) {
    // The call is marked as pure (no important side effects), but its result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
      map->set_req(i1, C->top());
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple* domain_sig = call->_tf->domain_sig();
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

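    // 'j' indexes the call's incoming argument edges (one edge per field for
    // scalarized inline type arguments), while 'arg_num' counts declared
    // arguments, skipping the second halves of longs and doubles.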
    uint j = TypeFunc::Parms;
    int arg_num = 0;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
        // Inline type arguments are not passed by reference: we get an argument per
        // field of the inline type. Build InlineTypeNodes from the inline type arguments.
        GraphKit arg_kit(jvms, &gvn);
        Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
      }
      if (t != Type::HALF) {
        arg_num++;
      }
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }

    // Check if we are late inlining a method handle call that returns an inline type as fields.
    Node* buffer_oop = NULL;
    ciMethod* inline_method = inline_cg()->method();
    ciType* return_type = inline_method->return_type();
    if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
        return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
      // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
      // Do this before the method handle call in case the buffer allocation triggers deoptimization and
      // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
      GraphKit arg_kit(jvms, &gvn);
      {
        PreserveReexecuteState preexecs(&arg_kit);
        arg_kit.jvms()->set_should_reexecute(true);
        arg_kit.inc_sp(nargs);
        Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
        buffer_oop = arg_kit.new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true);
      }
      jvms = arg_kit.transfer_exceptions_into_jvms();
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_method->has_loops());
      C->env()->notice_inlined_method(inline_method);
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup

    // Handle inline type returns
    InlineTypeNode* vt = result->isa_InlineType();
    if (vt != NULL) {
      if (call->tf()->returns_inline_type_as_fields()) {
        vt->replace_call_results(&kit, call, C, inline_method->signature()->returns_null_free_inline_type());
      } else if (vt->is_InlineType()) {
        // Result might still be allocated (for example, if it has been stored to a non-flattened field)
        if (!vt->is_allocated(&kit.gvn())) {
          assert(buffer_oop != NULL, "should have allocated a buffer");
          RegionNode* region = new RegionNode(3);

          // Check if result is null
          Node* null_ctl = kit.top();
          if (!inline_method->signature()->returns_null_free_inline_type()) {
            kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
          }
          region->init_req(1, null_ctl);
          PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
          Node* init_mem = kit.reset_memory();
          PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);

          // Not null, initialize the buffer
          kit.set_all_memory(init_mem);
          vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
          // Do not let stores that initialize this buffer be reordered with a subsequent
          // store that would make this buffer accessible by other threads.
          AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop, &kit.gvn());
          assert(alloc != NULL, "must have an allocation node");
          kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
          region->init_req(2, kit.control());
          oop->init_req(2, buffer_oop);
          mem->init_req(2, kit.merged_memory());

          // Update oop input to buffer
          kit.gvn().hash_delete(vt);
          vt->set_is_buffered();
          vt->set_oop(kit.gvn().transform(oop));
          vt = kit.gvn().transform(vt)->as_InlineType();

          kit.set_control(kit.gvn().transform(region));
          kit.set_all_memory(kit.gvn().transform(mem));
          kit.record_for_igvn(region);
          kit.record_for_igvn(oop);
          kit.record_for_igvn(mem);
        }
        result = vt;
      }
      DEBUG_ONLY(buffer_oop = NULL);
    } else {
      assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
    }
    assert(buffer_oop == NULL, "unused buffer allocation");

    kit.replace_call(call, result, true);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

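  // Emit the slow path first: control on which the receiver failed the type
  // check falls back to the unpredicted call generator.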
  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    // TODO 8284443 still needed?
    if (m->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in fast path
      m = m->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, m);
    }
    if (n->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in slow path
      PreserveJVMState pjvms(&kit);
      kit.set_map(slow_map);
      n = n->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, n);
      slow_map = kit.stop();
    }
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && (AlwaysIncrementalInline ||
                            (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

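// Cast the argument at index 'arg_nb' from its erased type to the type 't'
// declared in the target's signature; inline type arguments are additionally
// wrapped in an InlineTypeNode.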
static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit, bool null_free) {
  PhaseGVN& gvn = kit.gvn();
  Node* arg = kit.argument(arg_nb);
  const Type* arg_type = arg->bottom_type();
  const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
  if (t->as_klass()->is_inlinetype() && null_free) {
    sig_type = sig_type->filter_speculative(TypePtr::NOTNULL);
  }
  if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
    const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
    arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
    kit.set_argument(arg_nb, arg);
  }
  if (sig_type->is_inlinetypeptr()) {
    arg = InlineTypeNode::make_from_oop(&kit, arg, t->as_inline_klass(), !kit.gvn().type(arg)->maybe_null());
    kit.set_argument(arg_nb, arg);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != NULL) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                   "signatures mismatch");
            return NULL;
          }

          CallGenerator *cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      int nargs = callee->arg_size();
      // Get MemberName argument:
      Node* member_name = kit.argument(nargs - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolution issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          cast_argument(nargs, 0, signature->accessing_klass(), kit, false);
        }
        // Cast reference arguments to their types.
1251         for (int i = 0, j = 0; i < signature->count(); i++) {
1252           ciType* t = signature->type_at(i);
1253           if (t->is_klass()) {
1254             bool null_free = signature->is_null_free_at(i);
1255             cast_argument(nargs, receiver_skip + j, t, kit, null_free);
1256           }
1257           j += t->size();  // long and double take two slots
1258         }
1259 
1260         // Try to get the most accurate receiver type
1261         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1262         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1263         int  vtable_index       = Method::invalid_vtable_index;
1264         bool call_does_dispatch = false;
1265 
1266         ciKlass* speculative_receiver_type = NULL;
1267         if (is_virtual_or_interface) {
1268           ciInstanceKlass* klass = target->holder();
1269           Node*             receiver_node = kit.argument(0);
1270           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1271           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1272           // optimize_virtual_call() takes 2 different holder
1273           // arguments for a corner case that doesn't apply here (see
1274           // Parse::do_call())
1275           target = C->optimize_virtual_call(caller, klass, klass,
1276                                             target, receiver_type, is_virtual,
1277                                             call_does_dispatch, vtable_index, // out-parameters
1278                                             false /* check_access */);
1279           // We lack profiling at this call, but type speculation may
1280           // provide us with a type.
1281           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
1282         }
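        // At this point target either denotes a unique method (devirtualized,
        // call_does_dispatch == false) or is still dispatched through
        // vtable_index; call_generator() below picks the concrete strategy
        // (inline, direct, or virtual call) from these out-parameters.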
1283         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1284                                               allow_inline,
1285                                               PROB_ALWAYS,
1286                                               speculative_receiver_type,
1287                                               true);
1288         return cg;
1289       } else {
1290         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1291                                "member_name not constant");
1292       }
1293     }
1294     break;
1295 
1296   case vmIntrinsics::_linkToNative:
1297     print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1298                            "native call");
1299     break;
1300 
1301   default:
1302     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1303     break;
1304   }
1305   return NULL;
1306 }
1307 
1308 //------------------------PredicatedIntrinsicGenerator------------------------------
1309 // Internal class which handles all predicated intrinsic calls.
1310 class PredicatedIntrinsicGenerator : public CallGenerator {
1311   CallGenerator* _intrinsic;
1312   CallGenerator* _cg;
1313 
1314 public:
1315   PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1316                                CallGenerator* cg)
1317     : CallGenerator(cg->method())
1318   {
1319     _intrinsic = intrinsic;
1320     _cg        = cg;
1321   }
1322 
1323   virtual bool      is_virtual()   const    { return true; }
1324   virtual bool      is_inline()    const    { return true; }
1325   virtual bool      is_intrinsic() const    { return true; }
1326 
1327   virtual JVMState* generate(JVMState* jvms);
1328 };
1329 
1330 
1331 CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1332                                                        CallGenerator* cg) {
1333   return new PredicatedIntrinsicGenerator(intrinsic, cg);
1334 }
1335 
1336 
1337 JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1338   // The code we want to generate here is:
1339   //    if (receiver == NULL)
1340   //        uncommon_trap
1341   //    if (predicate(0))
1342   //        do_intrinsic(0)
1343   //    else
1344   //    if (predicate(1))
1345   //        do_intrinsic(1)
1346   //    ...
1347   //    else
1348   //        do_java_comp  (fall back to normal compilation)
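  //
  // The predicates are supplied by the intrinsic itself (via
  // generate_predicate() below); e.g., the AES crypto intrinsics test at
  // runtime whether the cipher object is the intrinsified implementation.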
1349 
1350   GraphKit kit(jvms);
1351   PhaseGVN& gvn = kit.gvn();
1352 
1353   CompileLog* log = kit.C->log();
1354   if (log != NULL) {
1355     log->elem("predicated_intrinsic bci='%d' method='%d'",
1356               jvms->bci(), log->identify(method()));
1357   }
1358 
1359   if (!method()->is_static()) {
1360     // We need an explicit receiver null check before checking its type in the predicate.
1361     // We share a map with the caller, so the caller's JVMS gets adjusted.
1362     kit.null_check_receiver_before_call(method());
1363     if (kit.stopped()) {
1364       return kit.transfer_exceptions_into_jvms();
1365     }
1366   }
1367 
1368   int n_predicates = _intrinsic->predicates_count();
1369   assert(n_predicates > 0, "sanity");
1370 
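  // One slot per intrinsic path, plus one for the normal (slow) path.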
1371   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1372 
1373   // Region for the normal compilation path if the intrinsic fails.
1374   Node* slow_region = new RegionNode(1);
1375 
1376   int results = 0;
1377   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1378 #ifdef ASSERT
1379     JVMState* old_jvms = kit.jvms();
1380     SafePointNode* old_map = kit.map();
1381     Node* old_io  = old_map->i_o();
1382     Node* old_mem = old_map->memory();
1383     Node* old_exc = old_map->next_exception();
1384 #endif
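    // generate_predicate() leaves the 'predicate holds' path as the kit's
    // control and returns the control of the 'predicate fails' (else) path.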
1385     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1386 #ifdef ASSERT
1387     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1388     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1389     SafePointNode* new_map = kit.map();
1390     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1391     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1392     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1393 #endif
1394     if (!kit.stopped()) {
1395       PreserveJVMState pjvms(&kit);
1396       // Generate intrinsic code:
1397       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1398       if (new_jvms == NULL) {
1399         // Intrinsic failed, use normal compilation path for this predicate.
1400         slow_region->add_req(kit.control());
1401       } else {
1402         kit.add_exception_states_from(new_jvms);
1403         kit.set_jvms(new_jvms);
1404         if (!kit.stopped()) {
1405           result_jvms[results++] = kit.jvms();
1406         }
1407       }
1408     }
1409     if (else_ctrl == NULL) {
1410       else_ctrl = kit.C->top();
1411     }
1412     kit.set_control(else_ctrl);
1413   }
1414   if (!kit.stopped()) {
1415     // Final 'else' after predicates.
1416     slow_region->add_req(kit.control());
1417   }
1418   if (slow_region->req() > 1) {
1419     PreserveJVMState pjvms(&kit);
1420     // Generate normal compilation code:
1421     kit.set_control(gvn.transform(slow_region));
1422     JVMState* new_jvms = _cg->generate(kit.sync_jvms());
1423     if (kit.failing())
1424       return NULL;  // might happen because of NodeCountInliningCutoff
1425     assert(new_jvms != NULL, "must be");
1426     kit.add_exception_states_from(new_jvms);
1427     kit.set_jvms(new_jvms);
1428     if (!kit.stopped()) {
1429       result_jvms[results++] = kit.jvms();
1430     }
1431   }
1432 
1433   if (results == 0) {
1434     // All paths ended in uncommon traps.
1435     (void) kit.stop();
1436     return kit.transfer_exceptions_into_jvms();
1437   }
1438 
1439   if (results == 1) { // Only one path
1440     kit.set_jvms(result_jvms[0]);
1441     return kit.transfer_exceptions_into_jvms();
1442   }
1443 
1444   // Merge all paths.
1445   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1446   RegionNode* region = new RegionNode(results + 1);
1447   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1448   for (int i = 0; i < results; i++) {
1449     JVMState* jvms = result_jvms[i];
1450     int path = i + 1;
1451     SafePointNode* map = jvms->map();
1452     region->init_req(path, map->control());
1453     iophi->set_req(path, map->i_o());
1454     if (i == 0) {
1455       kit.set_jvms(jvms);
1456     } else {
1457       kit.merge_memory(map->merged_memory(), region, path);
1458     }
1459   }
1460   kit.set_control(gvn.transform(region));
1461   kit.set_i_o(gvn.transform(iophi));
1462   // Transform new memory Phis.
1463   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1464     Node* phi = mms.memory();
1465     if (phi->is_Phi() && phi->in(0) == region) {
1466       mms.set_memory(gvn.transform(phi));
1467     }
1468   }
1469 
1470   // Merge debug info.
1471   Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1472   uint tos = kit.jvms()->stkoff() + kit.sp();
1473   Node* map = kit.map();
1474   uint limit = map->req();
1475   for (uint i = TypeFunc::Parms; i < limit; i++) {
1476     // Skip unused stack slots; fast forward to monoff().
1477     if (i == tos) {
1478       i = kit.jvms()->monoff();
1479       if (i >= limit) break;
1480     }
1481     Node* n = map->in(i);
1482     ins[0] = n;
1483     const Type* t = gvn.type(n);
1484     bool needs_phi = false;
1485     for (int j = 1; j < results; j++) {
1486       JVMState* jvms = result_jvms[j];
1487       Node* jmap = jvms->map();
1488       Node* m = NULL;
1489       if (jmap->req() > i) {
1490         m = jmap->in(i);
1491         if (m != n) {
1492           needs_phi = true;
1493           t = t->meet_speculative(gvn.type(m));
1494         }
1495       }
1496       ins[j] = m;
1497     }
1498     if (needs_phi) {
1499       Node* phi = PhiNode::make(region, n, t);
1500       for (int j = 1; j < results; j++) {
1501         phi->set_req(j + 1, ins[j]);
1502       }
1503       map->set_req(i, gvn.transform(phi));
1504     }
1505   }
1506 
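  // Resulting shape (illustrative, for results == 2): one Region merges the
  // two controls; i_o and each memory slice get a Phi on that Region; and a
  // live debug-info slot gets a Phi only where its inputs actually differ.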
1507   return kit.transfer_exceptions_into_jvms();
1508 }
1509 
1510 //-------------------------UncommonTrapCallGenerator-----------------------------
1511 // Internal class which replaces a call site with an uncommon trap (a deoptimization point).
1512 class UncommonTrapCallGenerator : public CallGenerator {
1513   Deoptimization::DeoptReason _reason;
1514   Deoptimization::DeoptAction _action;
1515 
1516 public:
1517   UncommonTrapCallGenerator(ciMethod* m,
1518                             Deoptimization::DeoptReason reason,
1519                             Deoptimization::DeoptAction action)
1520     : CallGenerator(m)
1521   {
1522     _reason = reason;
1523     _action = action;
1524   }
1525 
1526   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1527   virtual bool      is_trap() const             { return true; }
1528 
1529   virtual JVMState* generate(JVMState* jvms);
1530 };
1531 
1532 
1533 CallGenerator*
1534 CallGenerator::for_uncommon_trap(ciMethod* m,
1535                                  Deoptimization::DeoptReason reason,
1536                                  Deoptimization::DeoptAction action) {
1537   return new UncommonTrapCallGenerator(m, reason, action);
1538 }
1539 
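// A typical use (a sketch; this reason/action pair is illustrative only):
// replace a call site that profiling claims is never reached, deoptimizing
// back to the interpreter if it ever executes:
//
//   CallGenerator* cg = CallGenerator::for_uncommon_trap(
//       callee, Deoptimization::Reason_unreached,
//       Deoptimization::Action_reinterpret);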
1540 
1541 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1542   GraphKit kit(jvms);
1543   kit.C->print_inlining_update(this);
1544   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1545   // The call site signature can differ from that of the actual method being called (i.e., _linkTo* sites).
1546   // Always use the call site signature.
1547   ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1548   int nargs = declared_method->arg_size();
1549   kit.inc_sp(nargs);
1550   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1551   if (_reason == Deoptimization::Reason_class_check &&
1552       _action == Deoptimization::Action_maybe_recompile) {
1553     // Temporary fix for 6529811:
1554     // don't allow uncommon_trap to override our decision to recompile in the event
1555     // of a class cast failure for a monomorphic call, as it will never let us convert
1556     // the call to either bi-morphic or megamorphic and can lead to uncommon-trap loops.
1557     bool keep_exact_action = true;
1558     kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1559   } else {
1560     kit.uncommon_trap(_reason, _action);
1561   }
1562   return kit.transfer_exceptions_into_jvms();
1563 }
1564 
1565 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1566 
1567 // (Note:  Merged hook_up_exits into ParseGenerator::generate.)