src/hotspot/share/opto/callGenerator.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "ci/ciCallSite.hpp"
  27 #include "ci/ciObjArray.hpp"
  28 #include "ci/ciMemberName.hpp"
  29 #include "ci/ciMethodHandle.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"

  37 #include "opto/parse.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "runtime/os.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "utilities/debug.hpp"
  44 
  45 // Utility function.
  46 const TypeFunc* CallGenerator::tf() const {
  47   return TypeFunc::make(method());
  48 }
  49 
  50 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  51   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  52 }
  53 
  54 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  55   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  56   return is_inlined_method_handle_intrinsic(symbolic_info, m);

 101   GraphKit& exits = parser.exits();
 102 
 103   if (C->failing()) {
 104     while (exits.pop_exception_state() != nullptr) ;
 105     return nullptr;
 106   }
 107 
 108   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 109 
 110   // Simply return the exit state of the parser,
 111   // augmented by any exceptional states.
 112   return exits.transfer_exceptions_into_jvms();
 113 }
 114 
 115 //---------------------------DirectCallGenerator------------------------------
 116 // Internal class which handles all out-of-line calls w/o receiver type checks.
 117 class DirectCallGenerator : public CallGenerator {
 118  private:
 119   CallStaticJavaNode* _call_node;
 120   // Force separate memory and I/O projections for the exceptional
 121   // paths to facilitate late inlining.
 122   bool                _separate_io_proj;
 123 
 124 protected:
 125   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 126 
 127  public:
 128   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 129     : CallGenerator(method),

 130       _separate_io_proj(separate_io_proj)
 131   {








 132   }
 133   virtual JVMState* generate(JVMState* jvms);
 134 
 135   virtual CallNode* call_node() const { return _call_node; }
 136   virtual CallGenerator* with_call_node(CallNode* call) {
 137     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 138     dcg->set_call_node(call->as_CallStaticJava());
 139     return dcg;
 140   }
 141 };
 142 
 143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 144   GraphKit kit(jvms);

 145   bool is_static = method()->is_static();
 146   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 147                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 148 
 149   if (kit.C->log() != nullptr) {
 150     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 151   }
 152 
 153   CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
 154   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 155     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 156     // additional information about the method being invoked should be attached
 157     // to the call site to make resolution logic work
 158     // (see SharedRuntime::resolve_static_call_C).
 159     call->set_override_symbolic_info(true);
 160   }
 161   _call_node = call;  // Save the call node in case we need it later
 162   if (!is_static) {
 163     // Make an explicit receiver null_check as part of this call.
 164     // Since we share a map with the caller, his JVMS gets adjusted.
 165     kit.null_check_receiver_before_call(method());
 166     if (kit.stopped()) {
 167       // And dump it back to the caller, decorated with any exceptions:
 168       return kit.transfer_exceptions_into_jvms();
 169     }
 170     // Mark the call node as virtual, sort of:
 171     call->set_optimized_virtual(true);
 172     if (method()->is_method_handle_intrinsic() ||
 173         method()->is_compiled_lambda_form()) {
 174       call->set_method_handle_invoke(true);
 175     }
 176   }
 177   kit.set_arguments_for_java_call(call);



 178   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 179   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 180   kit.push_node(method()->return_type()->basic_type(), ret);
 181   return kit.transfer_exceptions_into_jvms();
 182 }
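
A minimal Java sketch (class and method names hypothetical) of the call shapes this generator covers: an invokestatic resolves through the static-call stub, while a direct call to a non-static target (e.g., a private method, which needs no dispatch) resolves through the optimized-virtual stub and is marked with set_optimized_virtual(true) above.

    class Direct {
        static int s(int x) { return x + 1; }    // resolves via the static-call stub
        private int p(int x) { return x + 2; }   // no dispatch needed: opt-virtual stub
        int use(int x) { return s(x) + p(x); }   // both are direct-call candidates
    }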
 183 
 184 //--------------------------VirtualCallGenerator------------------------------
 185 // Internal class which handles all out-of-line calls checking receiver type.
 186 class VirtualCallGenerator : public CallGenerator {
 187 private:
 188   int _vtable_index;
 189   bool _separate_io_proj;
 190   CallDynamicJavaNode* _call_node;
 191 
 192 protected:
 193   void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
 194 
 195 public:
 196   VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
 197     : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
 198   {
 199     assert(vtable_index == Method::invalid_vtable_index ||
 200            vtable_index >= 0, "either invalid or usable");
 201   }
 202   virtual bool      is_virtual() const          { return true; }
 203   virtual JVMState* generate(JVMState* jvms);
 204 
 205   virtual CallNode* call_node() const { return _call_node; }
 206   int vtable_index() const { return _vtable_index; }
 207 
 208   virtual CallGenerator* with_call_node(CallNode* call) {
 209     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 210     cg->set_call_node(call->as_CallDynamicJava());
 211     return cg;
 212   }
 213 };
 214 
 215 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 216   GraphKit kit(jvms);
 217   Node* receiver = kit.argument(0);
 218 
 219   if (kit.C->log() != nullptr) {
 220     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 221   }
 222 
 223   // If the receiver is a constant null, do not torture the system
 224   // by attempting to call through it.  The compile will proceed
 225   // correctly, but may bail out in final_graph_reshaping, because
 226   // the call instruction will have a seemingly deficient out-count.
 227   // (The bailout says something misleading about an "infinite loop".)
 228   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 229     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 230     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 231     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 232     kit.inc_sp(arg_size);  // restore arguments
 233     kit.uncommon_trap(Deoptimization::Reason_null_check,
 234                       Deoptimization::Action_none,
 235                       nullptr, "null receiver");
 236     return kit.transfer_exceptions_into_jvms();
 237   }
 238 

 256   }
 257 
 258   assert(!method()->is_static(), "virtual call must not be to static");
 259   assert(!method()->is_final(), "virtual call should not be to final");
 260   assert(!method()->is_private(), "virtual call should not be to private");
 261   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 262          "no vtable calls if +UseInlineCaches");
 263   address target = SharedRuntime::get_resolve_virtual_call_stub();
 264   // Normal inline cache used for call
 265   CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
 266   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 267     // To be able to issue a direct call (optimized virtual or virtual)
 268     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
 269     // about the method being invoked should be attached to the call site to
 270     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
 271     call->set_override_symbolic_info(true);
 272   }
 273   _call_node = call;  // Save the call node in case we need it later
 274 
 275   kit.set_arguments_for_java_call(call);



 276   kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
 277   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 278   kit.push_node(method()->return_type()->basic_type(), ret);
 279 
 280   // Represent the effect of an implicit receiver null_check
 281   // as part of this call.  Since we share a map with the caller,
 282   // his JVMS gets adjusted.
 283   kit.cast_not_null(receiver);
 284   return kit.transfer_exceptions_into_jvms();
 285 }
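
A tiny Java illustration (hypothetical method) of the constant-null case handled above: the receiver folds to a constant null, so C2 replaces the call with an uncommon trap (Reason_null_check, Action_none) rather than calling through it.

    static int alwaysThrows() {
        String s = null;
        return s.length();  // receiver is a constant null: trapped, never called
    }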
 286 
 287 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 288   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 289   return new ParseGenerator(m, expected_uses);
 290 }
 291 
 292 // As a special case, the JVMS passed to this CallGenerator is
 293 // for the method execution already in progress, not just the JVMS
 294 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 295 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {

 340     // parse is finished.
 341     if (!is_mh_late_inline()) {
 342       C->add_late_inline(this);
 343     }
 344 
 345     // Emit the CallStaticJava and request separate projections so
 346     // that the late inlining logic can distinguish between fall
 347     // through and exceptional uses of the memory and io projections
 348     // as is done for allocations and macro expansion.
 349     return DirectCallGenerator::generate(jvms);
 350   }
 351 
 352   virtual void set_unique_id(jlong id) {
 353     _unique_id = id;
 354   }
 355 
 356   virtual jlong unique_id() const {
 357     return _unique_id;
 358   }
 359 




 360   virtual CallGenerator* with_call_node(CallNode* call) {
 361     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 362     cg->set_call_node(call->as_CallStaticJava());
 363     return cg;
 364   }
 365 };
 366 
 367 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 368   return new LateInlineCallGenerator(method, inline_cg);
 369 }
 370 
 371 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 372   ciMethod* _caller;
 373   bool _input_not_const;
 374 
 375   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 376 
 377  public:
 378   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 379     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

 401     cg->set_call_node(call->as_CallStaticJava());
 402     return cg;
 403   }
 404 };
 405 
 406 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 407   // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have
 408   // different expression stacks, which breaks late inlining. The MH invoker is not expected to be called from a method
 409   // with exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves
 410   // the issue of late inlining with exceptions.
 411   assert(!jvms->method()->has_exception_handlers() ||
 412          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 413           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 414   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 415   bool allow_inline = C->inlining_incrementally();
 416   bool input_not_const = true;
 417   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 418   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 419 
 420   if (cg != nullptr) {








 421     if (!allow_inline) {
 422       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
 423                                   "late method handle call resolution");
 424     }
 425     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 426     _inline_cg = cg;
 427     C->dec_number_of_mh_late_inlines();
 428     return true;
 429   } else {
 430     // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
 431     // unless there's a signature mismatch between caller and callee. If that failure occurs, there's not much to improve
 432     // later, so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
 433     return false;
 434   }
 435 }
 436 
 437 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 438   assert(IncrementalInlineMH, "required");
 439   Compile::current()->inc_number_of_mh_late_inlines();
 440   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);

 557 
 558 void LateInlineMHCallGenerator::do_late_inline() {
 559   CallGenerator::do_late_inline_helper();
 560 }
 561 
 562 void LateInlineVirtualCallGenerator::do_late_inline() {
 563   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 564   CallGenerator::do_late_inline_helper();
 565 }
 566 
 567 void CallGenerator::do_late_inline_helper() {
 568   assert(is_late_inline(), "only late inline allowed");
 569 
 570   // Can't inline it
 571   CallNode* call = call_node();
 572   if (call == nullptr || call->outcnt() == 0 ||
 573       call->in(0) == nullptr || call->in(0)->is_top()) {
 574     return;
 575   }
 576 
 577   const TypeTuple *r = call->tf()->domain();
 578   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 579     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 580       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 581       return;
 582     }
 583   }
 584 
 585   if (call->in(TypeFunc::Memory)->is_top()) {
 586     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 587     return;
 588   }
 589   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 590     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 591     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 592       return; // dead path
 593     }
 594   }
 595 
 596   // check for unreachable loop
 597   CallProjections callprojs;
 598   // Similar to incremental inlining, don't assert that all call
 599   // projections are still there for post-parse call devirtualization.
 600   bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
 601   call->extract_projections(&callprojs, true, do_asserts);
 602   if ((callprojs.fallthrough_catchproj == call->in(0)) ||
 603       (callprojs.catchall_catchproj    == call->in(0)) ||
 604       (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 605       (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
 606       (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 607       (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 608       (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
 609       (callprojs.exobj   != nullptr && call->find_edge(callprojs.exobj) != -1)) {
 610     return;
 611   }
 612 
 613   Compile* C = Compile::current();
 614   // Remove inlined methods from Compiler's lists.
 615   if (call->is_macro()) {
 616     C->remove_macro_node(call);
 617   }
 618 
 619   // The call is marked as pure (no important side effects), but the result isn't used.
 620   // It's safe to remove the call.
 621   bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);









 622 
 623   if (is_pure_call() && result_not_used) {


 624     GraphKit kit(call->jvms());
 625     kit.replace_call(call, C->top(), true, do_asserts);
 626   } else {
 627     // Make a clone of the JVMState that is appropriate to use for driving a parse
 628     JVMState* old_jvms = call->jvms();
 629     JVMState* jvms = old_jvms->clone_shallow(C);
 630     uint size = call->req();
 631     SafePointNode* map = new SafePointNode(size, jvms);
 632     for (uint i1 = 0; i1 < size; i1++) {
 633       map->init_req(i1, call->in(i1));
 634     }
 635 

 636     // Make sure the state is a MergeMem for parsing.
 637     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 638       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 639       C->initial_gvn()->set_type_bottom(mem);
 640       map->set_req(TypeFunc::Memory, mem);
 641     }
 642 
 643     uint nargs = method()->arg_size();
 644     // blow away old call arguments
 645     Node* top = C->top();
 646     for (uint i1 = 0; i1 < nargs; i1++) {
 647       map->set_req(TypeFunc::Parms + i1, top);
 648     }
 649     jvms->set_map(map);
 650 
 651     // Make enough space in the expression stack to transfer
 652     // the incoming arguments and return value.
 653     map->ensure_stack(jvms, jvms->method()->max_stack());






 654     for (uint i1 = 0; i1 < nargs; i1++) {
 655       map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));













 656     }
 657 
 658     C->log_late_inline(this);
 659 
 660     // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
 661     if (!do_late_inline_check(C, jvms)) {
 662       map->disconnect_inputs(C);
 663       return;
 664     }
 665 




















 666     // Setup default node notes to be picked up by the inlining
 667     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 668     if (old_nn != nullptr) {
 669       Node_Notes* entry_nn = old_nn->clone(C);
 670       entry_nn->set_jvms(jvms);
 671       C->set_default_node_notes(entry_nn);
 672     }
 673 
 674     // Now perform the inlining using the synthesized JVMState
 675     JVMState* new_jvms = inline_cg()->generate(jvms);
 676     if (new_jvms == nullptr)  return;  // no change
 677     if (C->failing())      return;
 678 
 679     if (is_mh_late_inline()) {
 680       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
 681     } else if (is_string_late_inline()) {
 682       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
 683     } else if (is_boxing_late_inline()) {
 684       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
 685     } else if (is_vector_reboxing_late_inline()) {
 686       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
 687     } else {
 688       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
 689     }
 690 
 691     // Capture any exceptional control flow
 692     GraphKit kit(new_jvms);
 693 
 694     // Find the result object
 695     Node* result = C->top();
 696     int   result_size = method()->return_type()->size();
 697     if (result_size != 0 && !kit.stopped()) {
 698       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 699     }
 700 
 701     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 702       result = kit.must_be_not_null(result, false);
 703     }
 704 
 705     if (inline_cg()->is_inline()) {
 706       C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
 707       C->env()->notice_inlined_method(inline_cg()->method());
 708     }
 709     C->set_inlining_progress(true);
 710     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup




















































 711     kit.replace_call(call, result, true, do_asserts);
 712   }
 713 }
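
One concrete trigger for the is_pure_call() && result_not_used branch above is boxing elimination (assuming -XX:+EliminateAutoBox, under which boxing late-inline generators are created as pure calls): an autoboxing call whose value is dead can be replaced with top and removed.

    static void deadBox(int i) {
        Integer.valueOf(i);  // pure boxing call with an unused result; safe to drop
    }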
 714 
 715 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 716 
 717  public:
 718   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 719     LateInlineCallGenerator(method, inline_cg) {}
 720 
 721   virtual JVMState* generate(JVMState* jvms) {
 722     Compile* C = Compile::current();
 723 
 724     C->log_inline_id(this);
 725 
 726     C->add_string_late_inline(this);
 727 
 728     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 729     return new_jvms;
 730   }
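
String late inlining batches up the StringBuilder call chains that javac emits for concatenation (with -XDstringConcat=inline; newer javac defaults to invokedynamic instead) so they can be optimized as a unit once parsing is finished. A hedged example of such a source pattern:

    static String greet(String name) {
        return "Hello, " + name;  // may lower to a StringBuilder append chain
    }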

 919     // Inline failed, so make a direct call.
 920     assert(_if_hit->is_inline(), "must have been a failed inline");
 921     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
 922     new_jvms = cg->generate(kit.sync_jvms());
 923   }
 924   kit.add_exception_states_from(new_jvms);
 925   kit.set_jvms(new_jvms);
 926 
 927   // Need to merge slow and fast?
 928   if (slow_map == nullptr) {
 929     // The fast path is the only path remaining.
 930     return kit.transfer_exceptions_into_jvms();
 931   }
 932 
 933   if (kit.stopped()) {
 934     // Inlined method threw an exception, so it's just the slow path after all.
 935     kit.set_jvms(slow_jvms);
 936     return kit.transfer_exceptions_into_jvms();
 937   }
 938 























 939   // There are 2 branches and the replaced nodes are only valid on
 940   // one: restore the replaced nodes to what they were before the
 941   // branch.
 942   kit.map()->set_replaced_nodes(replaced_nodes);
 943 
 944   // Finish the diamond.
 945   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
 946   RegionNode* region = new RegionNode(3);
 947   region->init_req(1, kit.control());
 948   region->init_req(2, slow_map->control());
 949   kit.set_control(gvn.transform(region));
 950   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
 951   iophi->set_req(2, slow_map->i_o());
 952   kit.set_i_o(gvn.transform(iophi));
 953   // Merge memory
 954   kit.merge_memory(slow_map->merged_memory(), region, 2);
 955   // Transform new memory Phis.
 956   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
 957     Node* phi = mms.memory();
 958     if (phi->is_Phi() && phi->in(0) == region) {
 959       mms.set_memory(gvn.transform(phi));
 960     }
 961   }
 962   uint tos = kit.jvms()->stkoff() + kit.sp();
 963   uint limit = slow_map->req();
 964   for (uint i = TypeFunc::Parms; i < limit; i++) {
 965     // Skip unused stack slots; fast-forward to monoff().
 966     if (i == tos) {
 967       i = kit.jvms()->monoff();
 968       if (i >= limit) break;
 969     }
 970     Node* m = kit.map()->in(i);
 971     Node* n = slow_map->in(i);
 972     if (m != n) {
 973       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 974       Node* phi = PhiNode::make(region, m, t);
 975       phi->set_req(2, n);
 976       kit.map()->set_req(i, gvn.transform(phi));
 977     }
 978   }
 979   return kit.transfer_exceptions_into_jvms();
 980 }
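
The Region/Phi diamond built above merges an inlined fast path with an out-of-line slow call. A typical trigger, sketched in Java with hypothetical types, is a type-profiled call site dominated by one receiver class:

    interface Shape { double area(); }
    final class Circle implements Shape {
        final double r;
        Circle(double r) { this.r = r; }
        public double area() { return Math.PI * r * r; }
    }
    // If profiling shows mostly Circle receivers, C2 can inline Circle.area()
    // behind a class check (fast path) and keep a virtual call for everything
    // else (slow path), then merge control, i/o, memory, and stack as above.
    static double total(Shape[] shapes) {
        double sum = 0.0;
        for (Shape s : shapes) sum += s.area();
        return sum;
    }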
 981 
 982 
 983 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
 984   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
 985   bool input_not_const;
 986   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
 987   Compile* C = Compile::current();
 988   bool should_delay = C->should_delay_inlining();
 989   if (cg != nullptr) {
 990     if (should_delay) {
 991       return CallGenerator::for_late_inline(callee, cg);
 992     } else {
 993       return cg;
 994     }
 995   }
 996   int bci = jvms->bci();
 997   ciCallProfile profile = caller->call_profile_at_bci(bci);
 998   int call_site_count = caller->scale_count(profile.count());
 999 
1000   if (IncrementalInlineMH && call_site_count > 0 &&
1001       (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1002     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1003   } else {
1004     // Out-of-line call.
1005     return CallGenerator::for_direct_call(callee);
1006   }
1007 }
1008 

1009 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1010   GraphKit kit(jvms);
1011   PhaseGVN& gvn = kit.gvn();
1012   Compile* C = kit.C;
1013   vmIntrinsics::ID iid = callee->intrinsic_id();
1014   input_not_const = true;
1015   if (StressMethodHandleLinkerInlining) {
1016     allow_inline = false;
1017   }
1018   switch (iid) {
1019   case vmIntrinsics::_invokeBasic:
1020     {
1021       // Get MethodHandle receiver:
1022       Node* receiver = kit.argument(0);
1023       if (receiver->Opcode() == Op_ConP) {
1024         input_not_const = false;
1025         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1026         if (recv_toop != nullptr) {
1027           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1028           const int vtable_index = Method::invalid_vtable_index;

1036                                                 false /* call_does_dispatch */,
1037                                                 jvms,
1038                                                 allow_inline,
1039                                                 PROB_ALWAYS);
1040           return cg;
1041         } else {
1042           assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1043                  Type::str(receiver->bottom_type()));
1044           print_inlining_failure(C, callee, jvms, "receiver is always null");
1045         }
1046       } else {
1047         print_inlining_failure(C, callee, jvms, "receiver not constant");
1048       }
1049   } break;
1050 
1051   case vmIntrinsics::_linkToVirtual:
1052   case vmIntrinsics::_linkToStatic:
1053   case vmIntrinsics::_linkToSpecial:
1054   case vmIntrinsics::_linkToInterface:
1055     {

1056       // Get MemberName argument:
1057       Node* member_name = kit.argument(callee->arg_size() - 1);
1058       if (member_name->Opcode() == Op_ConP) {
1059         input_not_const = false;
1060         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1061         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1062 
1063         if (!ciMethod::is_consistent_info(callee, target)) {
1064           print_inlining_failure(C, callee, jvms, "signatures mismatch");
1065           return nullptr;
1066         }
1067 
1068         // In lambda forms we erase signature types to avoid resolving issues
1069         // involving class loaders.  When we optimize a method handle invoke
1070         // to a direct call we must cast the receiver and arguments to its
1071         // actual types.
1072         ciSignature* signature = target->signature();
1073         const int receiver_skip = target->is_static() ? 0 : 1;
1074         // Cast receiver to its type.
1075         if (!target->is_static()) {
1076           Node* recv = kit.argument(0);
1077           Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());

1107         ciKlass* speculative_receiver_type = nullptr;
1108         if (is_virtual_or_interface) {
1109           ciInstanceKlass* klass = target->holder();
1110           Node*             receiver_node = kit.argument(0);
1111           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1112           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1113           // optimize_virtual_call() takes 2 different holder
1114           // arguments for a corner case that doesn't apply here (see
1115           // Parse::do_call())
1116           target = C->optimize_virtual_call(caller, klass, klass,
1117                                             target, receiver_type, is_virtual,
1118                                             call_does_dispatch, vtable_index, // out-parameters
1119                                             false /* check_access */);
1120           // We lack profiling at this call but type speculation may
1121           // provide us with a type
1122           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1123         }
1124         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1125                                               allow_inline,
1126                                               PROB_ALWAYS,
1127                                               speculative_receiver_type);

1128         return cg;
1129       } else {
1130         print_inlining_failure(C, callee, jvms, "member_name not constant");
1131       }
1132   } break;
1133 
1134   case vmIntrinsics::_linkToNative:
1135     print_inlining_failure(C, callee, jvms, "native call");
1136     break;
1137 
1138   default:
1139     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1140     break;
1141   }
1142   return nullptr;
1143 }
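
Whether _invokeBasic sees a constant receiver (the Op_ConP test above) usually depends on where the MethodHandle lives; a hedged sketch with a hypothetical holder class:

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    class Linkers {
        static final MethodHandle CONST = lookup();  // constant-foldable receiver
        static MethodHandle mutable = CONST;         // "receiver not constant" path

        static MethodHandle lookup() {
            try {
                return MethodHandles.lookup().findStatic(Integer.class, "sum",
                        MethodType.methodType(int.class, int.class, int.class));
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }
        static int viaConst() throws Throwable { return (int) CONST.invokeExact(1, 2); }
        static int viaMutable() throws Throwable { return (int) mutable.invokeExact(1, 2); }
    }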
1144 
1145 //------------------------PredicatedIntrinsicGenerator------------------------------
1146 // Internal class which handles all predicated Intrinsic calls.
1147 class PredicatedIntrinsicGenerator : public CallGenerator {

1179   //        do_intrinsic(0)
1180   //    else
1181   //    if (predicate(1))
1182   //        do_intrinsic(1)
1183   //    ...
1184   //    else
1185   //        do_java_comp
1186 
1187   GraphKit kit(jvms);
1188   PhaseGVN& gvn = kit.gvn();
1189 
1190   CompileLog* log = kit.C->log();
1191   if (log != nullptr) {
1192     log->elem("predicated_intrinsic bci='%d' method='%d'",
1193               jvms->bci(), log->identify(method()));
1194   }
1195 
1196   if (!method()->is_static()) {
1197     // We need an explicit receiver null_check before checking its type in predicate.
1198     // We share a map with the caller, so his JVMS gets adjusted.
1199     Node* receiver = kit.null_check_receiver_before_call(method());
1200     if (kit.stopped()) {
1201       return kit.transfer_exceptions_into_jvms();
1202     }
1203   }
1204 
1205   int n_predicates = _intrinsic->predicates_count();
1206   assert(n_predicates > 0, "sanity");
1207 
1208   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1209 
1210   // Region for normal compilation code if intrinsic failed.
1211   Node* slow_region = new RegionNode(1);
1212 
1213   int results = 0;
1214   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1215 #ifdef ASSERT
1216     JVMState* old_jvms = kit.jvms();
1217     SafePointNode* old_map = kit.map();
1218     Node* old_io  = old_map->i_o();
1219     Node* old_mem = old_map->memory();

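One established user of the predicated-intrinsic chain sketched in the comment above is AES/CBC: the intrinsic for CipherBlockChaining encrypt/decrypt is guarded by a predicate on the embedded cipher's concrete type, with the plain Java code as the do_java_comp fallback. A usage example:

    import javax.crypto.Cipher;
    import javax.crypto.spec.IvParameterSpec;
    import javax.crypto.spec.SecretKeySpec;

    static byte[] encrypt(byte[] key, byte[] iv, byte[] data) throws Exception {
        // data.length must be a multiple of 16 for NoPadding
        Cipher c = Cipher.getInstance("AES/CBC/NoPadding");
        c.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
        return c.doFinal(data);
    }
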
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "ci/ciCallSite.hpp"
  27 #include "ci/ciObjArray.hpp"
  28 #include "ci/ciMemberName.hpp"
  29 #include "ci/ciMethodHandle.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/inlinetypenode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "runtime/os.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/debug.hpp"
  45 
  46 // Utility function.
  47 const TypeFunc* CallGenerator::tf() const {
  48   return TypeFunc::make(method());
  49 }
  50 
  51 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  52   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  53 }
  54 
  55 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  56   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  57   return is_inlined_method_handle_intrinsic(symbolic_info, m);

 102   GraphKit& exits = parser.exits();
 103 
 104   if (C->failing()) {
 105     while (exits.pop_exception_state() != nullptr) ;
 106     return nullptr;
 107   }
 108 
 109   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 110 
 111   // Simply return the exit state of the parser,
 112   // augmented by any exceptional states.
 113   return exits.transfer_exceptions_into_jvms();
 114 }
 115 
 116 //---------------------------DirectCallGenerator------------------------------
 117 // Internal class which handles all out-of-line calls w/o receiver type checks.
 118 class DirectCallGenerator : public CallGenerator {
 119  private:
 120   CallStaticJavaNode* _call_node;
 121   // Force separate memory and I/O projections for the exceptional
 122   // paths to facilitate late inlining.
 123   bool                _separate_io_proj;
 124 
 125 protected:
 126   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 127 
 128  public:
 129   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 130     : CallGenerator(method),
 131       _call_node(nullptr),
 132       _separate_io_proj(separate_io_proj)
 133   {
 134     if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
 135       // If that call has not been optimized by the time optimizations are over,
 136       // we'll need to add a call to create an inline type instance from the klass
 137       // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
 138       // Separating memory and I/O projections for exceptions is required to
 139       // perform that graph transformation.
 140       _separate_io_proj = true;
 141     }
 142   }
 143   virtual JVMState* generate(JVMState* jvms);
 144 
 145   virtual CallNode* call_node() const { return _call_node; }
 146   virtual CallGenerator* with_call_node(CallNode* call) {
 147     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 148     dcg->set_call_node(call->as_CallStaticJava());
 149     return dcg;
 150   }
 151 };
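
This second copy of the file is the updated (Valhalla) version; note the new opto/inlinetypenode.hpp include. A rough sketch of what InlineTypeReturnedAsFields means for the constructor above, using the prototype's value-class syntax (an assumption):

    value class Point {              // Valhalla prototype syntax (assumption)
        int x;
        int y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }
    // With InlineTypeReturnedAsFields, a callee returning Point may return x
    // and y in registers instead of a heap oop. A method handle linker call
    // that survives until macro expansion must then be rewritten
    // (PhaseMacroExpand::expand_mh_intrinsic_return), which is why the
    // constructor forces separate I/O projections for the exception paths.
    static Point make(int x, int y) { return new Point(x, y); }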
 152 
 153 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 154   GraphKit kit(jvms);
 155   PhaseGVN& gvn = kit.gvn();
 156   bool is_static = method()->is_static();
 157   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 158                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 159 
 160   if (kit.C->log() != nullptr) {
 161     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 162   }
 163 
 164   CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
 165   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 166     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 167     // additional information about the method being invoked should be attached
 168     // to the call site to make resolution logic work
 169     // (see SharedRuntime::resolve_static_call_C).
 170     call->set_override_symbolic_info(true);
 171   }
 172   _call_node = call;  // Save the call node in case we need it later
 173   if (!is_static) {
 174     // Make an explicit receiver null_check as part of this call.
 175     // Since we share a map with the caller, his JVMS gets adjusted.
 176     kit.null_check_receiver_before_call(method());
 177     if (kit.stopped()) {
 178       // And dump it back to the caller, decorated with any exceptions:
 179       return kit.transfer_exceptions_into_jvms();
 180     }
 181     // Mark the call node as virtual, sort of:
 182     call->set_optimized_virtual(true);
 183     if (method()->is_method_handle_intrinsic() ||
 184         method()->is_compiled_lambda_form()) {
 185       call->set_method_handle_invoke(true);
 186     }
 187   }
 188   kit.set_arguments_for_java_call(call, is_late_inline());
 189   if (kit.stopped()) {
 190     return kit.transfer_exceptions_into_jvms();
 191   }
 192   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 193   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 194   kit.push_node(method()->return_type()->basic_type(), ret);
 195   return kit.transfer_exceptions_into_jvms();
 196 }
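
The kit.stopped() check newly added after set_arguments_for_java_call() matters because, in this version, argument setup can do real work and may land on a dead path. A hedged sketch of a caller that could exercise it (Point as in the sketch above):

    // Setting up arguments for a call taking a value-class parameter may
    // scalarize Point into its fields and emit checks, so the kit can stop
    // before the call's edges and results are wired up.
    static int xOf(Point p) { return p.x; }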
 197 
 198 //--------------------------VirtualCallGenerator------------------------------
 199 // Internal class which handles all out-of-line calls checking receiver type.
 200 class VirtualCallGenerator : public CallGenerator {
 201 private:
 202   int _vtable_index;
 203   bool _separate_io_proj;
 204   CallDynamicJavaNode* _call_node;
 205 
 206 protected:
 207   void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
 208 
 209 public:
 210   VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
 211     : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
 212   {
 213     assert(vtable_index == Method::invalid_vtable_index ||
 214            vtable_index >= 0, "either invalid or usable");
 215   }
 216   virtual bool      is_virtual() const          { return true; }
 217   virtual JVMState* generate(JVMState* jvms);
 218 
 219   virtual CallNode* call_node() const { return _call_node; }
 220   int vtable_index() const { return _vtable_index; }
 221 
 222   virtual CallGenerator* with_call_node(CallNode* call) {
 223     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 224     cg->set_call_node(call->as_CallDynamicJava());
 225     return cg;
 226   }
 227 };
 228 
 229 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 230   GraphKit kit(jvms);
 231   Node* receiver = kit.argument(0);

 232   if (kit.C->log() != nullptr) {
 233     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 234   }
 235 
 236   // If the receiver is a constant null, do not torture the system
 237   // by attempting to call through it.  The compile will proceed
 238   // correctly, but may bail out in final_graph_reshaping, because
 239   // the call instruction will have a seemingly deficient out-count.
 240   // (The bailout says something misleading about an "infinite loop".)
 241   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 242     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 243     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 244     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 245     kit.inc_sp(arg_size);  // restore arguments
 246     kit.uncommon_trap(Deoptimization::Reason_null_check,
 247                       Deoptimization::Action_none,
 248                       nullptr, "null receiver");
 249     return kit.transfer_exceptions_into_jvms();
 250   }
 251 

 269   }
 270 
 271   assert(!method()->is_static(), "virtual call must not be to static");
 272   assert(!method()->is_final(), "virtual call should not be to final");
 273   assert(!method()->is_private(), "virtual call should not be to private");
 274   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 275          "no vtable calls if +UseInlineCaches");
 276   address target = SharedRuntime::get_resolve_virtual_call_stub();
 277   // Normal inline cache used for call
 278   CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
 279   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 280     // To be able to issue a direct call (optimized virtual or virtual)
 281     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
 282     // about the method being invoked should be attached to the call site to
 283     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
 284     call->set_override_symbolic_info(true);
 285   }
 286   _call_node = call;  // Save the call node in case we need it later
 287 
 288   kit.set_arguments_for_java_call(call);
 289   if (kit.stopped()) {
 290     return kit.transfer_exceptions_into_jvms();
 291   }
 292   kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
 293   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 294   kit.push_node(method()->return_type()->basic_type(), ret);
 295 
 296   // Represent the effect of an implicit receiver null_check
 297   // as part of this call.  Since we share a map with the caller,
 298   // his JVMS gets adjusted.
 299   kit.cast_not_null(receiver);
 300   return kit.transfer_exceptions_into_jvms();
 301 }
 302 
 303 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 304   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 305   return new ParseGenerator(m, expected_uses);
 306 }
 307 
 308 // As a special case, the JVMS passed to this CallGenerator is
 309 // for the method execution already in progress, not just the JVMS
 310 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 311 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {

 356     // parse is finished.
 357     if (!is_mh_late_inline()) {
 358       C->add_late_inline(this);
 359     }
 360 
 361     // Emit the CallStaticJava and request separate projections so
 362     // that the late inlining logic can distinguish between fall
 363     // through and exceptional uses of the memory and io projections
 364     // as is done for allocations and macro expansion.
 365     return DirectCallGenerator::generate(jvms);
 366   }
 367 
 368   virtual void set_unique_id(jlong id) {
 369     _unique_id = id;
 370   }
 371 
 372   virtual jlong unique_id() const {
 373     return _unique_id;
 374   }
 375 
 376   virtual CallGenerator* inline_cg() {
 377     return _inline_cg;
 378   }
 379 
 380   virtual CallGenerator* with_call_node(CallNode* call) {
 381     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 382     cg->set_call_node(call->as_CallStaticJava());
 383     return cg;
 384   }
 385 };
 386 
 387 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 388   return new LateInlineCallGenerator(method, inline_cg);
 389 }
 390 
 391 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 392   ciMethod* _caller;
 393   bool _input_not_const;
 394 
 395   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 396 
 397  public:
 398   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 399     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

 421     cg->set_call_node(call->as_CallStaticJava());
 422     return cg;
 423   }
 424 };
 425 
 426 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 427   // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have
 428   // different expression stacks, which breaks late inlining. The MH invoker is not expected to be called from a method
 429   // with exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves
 430   // the issue of late inlining with exceptions.
 431   assert(!jvms->method()->has_exception_handlers() ||
 432          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 433           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 434   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 435   bool allow_inline = C->inlining_incrementally();
 436   bool input_not_const = true;
 437   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 438   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 439 
 440   if (cg != nullptr) {
 441     // AlwaysIncrementalInline causes for_method_handle_inline() to
 442     // return a LateInlineCallGenerator. Extract the underlying
 443     // inline CallGenerator from it.
 444     if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
 445       cg = cg->inline_cg();
 446       assert(cg != nullptr, "inline call generator expected");
 447     }
 448 
 449     if (!allow_inline) {
 450       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
 451                                   "late method handle call resolution");
 452     }
 453     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 454     _inline_cg = cg;
 455     C->dec_number_of_mh_late_inlines();
 456     return true;
 457   } else {
 458     // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
 459     // unless there's a signature mismatch between caller and callee. If that failure occurs, there's not much to improve
 460     // later, so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
 461     return false;
 462   }
 463 }
 464 
 465 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 466   assert(IncrementalInlineMH, "required");
 467   Compile::current()->inc_number_of_mh_late_inlines();
 468   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);

 585 
 586 void LateInlineMHCallGenerator::do_late_inline() {
 587   CallGenerator::do_late_inline_helper();
 588 }
 589 
 590 void LateInlineVirtualCallGenerator::do_late_inline() {
 591   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 592   CallGenerator::do_late_inline_helper();
 593 }
 594 
 595 void CallGenerator::do_late_inline_helper() {
 596   assert(is_late_inline(), "only late inline allowed");
 597 
 598   // Can't inline it
 599   CallNode* call = call_node();
 600   if (call == nullptr || call->outcnt() == 0 ||
 601       call->in(0) == nullptr || call->in(0)->is_top()) {
 602     return;
 603   }
 604 
 605   const TypeTuple* r = call->tf()->domain_cc();
 606   for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 607     if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
 608       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 609       return;
 610     }
 611   }
 612 
 613   if (call->in(TypeFunc::Memory)->is_top()) {
 614     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 615     return;
 616   }
 617   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 618     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 619     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 620       return; // dead path
 621     }
 622   }
 623 
 624   // check for unreachable loop

 625   // Similar to incremental inlining, don't assert that all call
 626   // projections are still there for post-parse call devirtualization.
 627   bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
 628   CallProjections* callprojs = call->extract_projections(true, do_asserts);
 629   if ((callprojs->fallthrough_catchproj == call->in(0)) ||
 630       (callprojs->catchall_catchproj    == call->in(0)) ||
 631       (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 632       (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
 633       (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 634       (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 635       (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {

 636     return;
 637   }
 638 
 639   Compile* C = Compile::current();
 640   // Remove inlined methods from Compiler's lists.
 641   if (call->is_macro()) {
 642     C->remove_macro_node(call);
 643   }
 644 
 645 
 646   bool result_not_used = true;
 647   for (uint i = 0; i < callprojs->nb_resproj; i++) {
 648     if (callprojs->resproj[i] != nullptr) {
 649       if (callprojs->resproj[i]->outcnt() != 0) {
 650         result_not_used = false;
 651       }
 652       if (call->find_edge(callprojs->resproj[i]) != -1) {
 653         return;
 654       }
 655     }
 656   }
 657 
 658   if (is_pure_call() && result_not_used) {
 659     // The call is marked as pure (no important side effects), but the result isn't used.
 660     // It's safe to remove the call.
 661     GraphKit kit(call->jvms());
 662     kit.replace_call(call, C->top(), true, do_asserts);
 663   } else {
 664     // Make a clone of the JVMState that is appropriate to use for driving a parse
 665     JVMState* old_jvms = call->jvms();
 666     JVMState* jvms = old_jvms->clone_shallow(C);
 667     uint size = call->req();
 668     SafePointNode* map = new SafePointNode(size, jvms);
 669     for (uint i1 = 0; i1 < size; i1++) {
 670       map->init_req(i1, call->in(i1));
 671     }
 672 
 673     PhaseGVN& gvn = *C->initial_gvn();
 674     // Make sure the state is a MergeMem for parsing.
 675     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 676       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 677       gvn.set_type_bottom(mem);
 678       map->set_req(TypeFunc::Memory, mem);
 679     }
 680 

 681     // blow away old call arguments
 682     for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 683       map->set_req(i1, C->top());

 684     }
 685     jvms->set_map(map);
 686 
 687     // Make enough space in the expression stack to transfer
 688     // the incoming arguments and return value.
 689     map->ensure_stack(jvms, jvms->method()->max_stack());
 690     const TypeTuple* domain_sig = call->_tf->domain_sig();
 691     uint nargs = method()->arg_size();
 692     assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 693 
 694     uint j = TypeFunc::Parms;
 695     int arg_num = 0;
 696     for (uint i1 = 0; i1 < nargs; i1++) {
 697       const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 698       if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
 699         // Inline type arguments are not passed by reference: we get an argument per
 700         // field of the inline type. Build InlineTypeNodes from the inline type arguments.
 701         GraphKit arg_kit(jvms, &gvn);
 702         Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
 703         map->set_control(arg_kit.control());
 704         map->set_argument(jvms, i1, vt);
 705       } else {
 706         map->set_argument(jvms, i1, call->in(j++));
 707       }
 708       if (t != Type::HALF) {
 709         arg_num++;
 710       }
 711     }
 712 
 713     C->log_late_inline(this);
 714 
 715     // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
 716     if (!do_late_inline_check(C, jvms)) {
 717       map->disconnect_inputs(C);
 718       return;
 719     }
 720 
 721     // Check if we are late inlining a method handle call that returns an inline type as fields.
 722     Node* buffer_oop = nullptr;
 723     ciMethod* inline_method = inline_cg()->method();
 724     ciType* return_type = inline_method->return_type();
 725     if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
 726         return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
 727       // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
 728       // Do this before the method handle call in case the buffer allocation triggers deoptimization and
 729       // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
 730       GraphKit arg_kit(jvms, &gvn);
 731       {
 732         PreserveReexecuteState preexecs(&arg_kit);
 733         arg_kit.jvms()->set_should_reexecute(true);
 734         arg_kit.inc_sp(nargs);
 735         Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
 736         buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
 737       }
 738       jvms = arg_kit.transfer_exceptions_into_jvms();
 739     }
 740 
 741     // Setup default node notes to be picked up by the inlining
 742     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 743     if (old_nn != nullptr) {
 744       Node_Notes* entry_nn = old_nn->clone(C);
 745       entry_nn->set_jvms(jvms);
 746       C->set_default_node_notes(entry_nn);
 747     }
 748 
 749     // Now perform the inlining using the synthesized JVMState
 750     JVMState* new_jvms = inline_cg()->generate(jvms);
 751     if (new_jvms == nullptr)  return;  // no change
 752     if (C->failing())      return;
 753 
 754     if (is_mh_late_inline()) {
 755       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
 756     } else if (is_string_late_inline()) {
 757       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
 758     } else if (is_boxing_late_inline()) {
 759       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
 760     } else if (is_vector_reboxing_late_inline()) {
 761       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
 762     } else {
 763       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
 764     }
 765 
 766     // Capture any exceptional control flow
 767     GraphKit kit(new_jvms);
 768 
 769     // Find the result object
 770     Node* result = C->top();
 771     int   result_size = method()->return_type()->size();
 772     if (result_size != 0 && !kit.stopped()) {
 773       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 774     }
 775 
 776     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 777       result = kit.must_be_not_null(result, false);
 778     }
 779 
 780     if (inline_cg()->is_inline()) {
 781       C->set_has_loops(C->has_loops() || inline_method->has_loops());
 782       C->env()->notice_inlined_method(inline_method);
 783     }
 784     C->set_inlining_progress(true);
 785     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 786 
 787     // Handle inline type returns
 788     InlineTypeNode* vt = result->isa_InlineType();
 789     if (vt != nullptr) {
 790       if (call->tf()->returns_inline_type_as_fields()) {
 791         vt->replace_call_results(&kit, call, C);
 792       } else if (vt->is_InlineType()) {
 793         // Result might still be allocated (for example, if it has been stored to a non-flat field)
 794         if (!vt->is_allocated(&kit.gvn())) {
 795           assert(buffer_oop != nullptr, "should have allocated a buffer");
 796           RegionNode* region = new RegionNode(3);
 797 
 798           // Check if result is null
 799           Node* null_ctl = kit.top();
 800           kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
 801           region->init_req(1, null_ctl);
 802           PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
 803           Node* init_mem = kit.reset_memory();
 804           PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
 805 
 806           // Not null, initialize the buffer
 807           kit.set_all_memory(init_mem);
 808           vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
 809           // Do not let stores that initialize this buffer be reordered with a subsequent
 810           // store that would make this buffer accessible by other threads.
 811           AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
 812           assert(alloc != nullptr, "must have an allocation node");
 813           kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 814           region->init_req(2, kit.control());
 815           oop->init_req(2, buffer_oop);
 816           mem->init_req(2, kit.merged_memory());
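               // The diamond now merges two outcomes: on path 1 the result was
               // null and the oop phi keeps the null constant; on path 2 the
               // buffer holds the stored field values and becomes the oop.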
 817 
 818           // Update oop input to buffer
 819           kit.gvn().hash_delete(vt);
 820           vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
 821           vt->set_is_buffered(kit.gvn());
 822           vt = kit.gvn().transform(vt)->as_InlineType();
 823 
 824           kit.set_control(kit.gvn().transform(region));
 825           kit.set_all_memory(kit.gvn().transform(mem));
 826           kit.record_for_igvn(region);
 827           kit.record_for_igvn(oop);
 828           kit.record_for_igvn(mem);
 829         }
 830         result = vt;
 831       }
 832       DEBUG_ONLY(buffer_oop = nullptr);
 833     } else {
 834       assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
 835     }
 836     assert(buffer_oop == nullptr, "unused buffer allocation");
 837 
 838     kit.replace_call(call, result, true, do_asserts);
 839   }
 840 }
 841 
 842 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 843 
 844  public:
 845   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 846     LateInlineCallGenerator(method, inline_cg) {}
 847 
 848   virtual JVMState* generate(JVMState* jvms) {
 849     Compile* C = Compile::current();
 850 
 851     C->log_inline_id(this);
 852 
 853     C->add_string_late_inline(this);
 854 
 855     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 856     return new_jvms;
 857   }

1046     // Inline failed, so make a direct call.
1047     assert(_if_hit->is_inline(), "must have been a failed inline");
1048     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
1049     new_jvms = cg->generate(kit.sync_jvms());
1050   }
1051   kit.add_exception_states_from(new_jvms);
1052   kit.set_jvms(new_jvms);
1053 
1054   // Need to merge slow and fast?
1055   if (slow_map == nullptr) {
1056     // The fast path is the only path remaining.
1057     return kit.transfer_exceptions_into_jvms();
1058   }
1059 
1060   if (kit.stopped()) {
1061     // Inlined method threw an exception, so it's just the slow path after all.
1062     kit.set_jvms(slow_jvms);
1063     return kit.transfer_exceptions_into_jvms();
1064   }
1065 
1066   // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
1067   uint tos = kit.jvms()->stkoff() + kit.sp();
1068   uint limit = slow_map->req();
1069   for (uint i = TypeFunc::Parms; i < limit; i++) {
1070     Node* m = kit.map()->in(i);
1071     Node* n = slow_map->in(i);
1072     const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1073     // TODO 8284443 still needed?
1074     if (m->is_InlineType() && !t->is_inlinetypeptr()) {
1075       // Allocate inline type in fast path
1076       m = m->as_InlineType()->buffer(&kit);
1077       kit.map()->set_req(i, m);
1078     }
1079     if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1080       // Allocate inline type in slow path
1081       PreserveJVMState pjvms(&kit);
1082       kit.set_map(slow_map);
1083       n = n->as_InlineType()->buffer(&kit);
1084       kit.map()->set_req(i, n);
1085       slow_map = kit.stop();
1086     }
1087   }
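       // After this loop, any slot where an inline type would merge with a
       // plain oop holds a buffered (heap) reference on both paths, so the
       // phis created below get compatible inputs.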
1088 
1089   // There are 2 branches and the replaced nodes are only valid on
1090   // one: restore the replaced nodes to what they were before the
1091   // branch.
1092   kit.map()->set_replaced_nodes(replaced_nodes);
1093 
1094   // Finish the diamond.
1095   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1096   RegionNode* region = new RegionNode(3);
1097   region->init_req(1, kit.control());
1098   region->init_req(2, slow_map->control());
1099   kit.set_control(gvn.transform(region));
1100   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1101   iophi->set_req(2, slow_map->i_o());
1102   kit.set_i_o(gvn.transform(iophi));
1103   // Merge memory
1104   kit.merge_memory(slow_map->merged_memory(), region, 2);
1105   // Transform new memory Phis.
1106   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1107     Node* phi = mms.memory();
1108     if (phi->is_Phi() && phi->in(0) == region) {
1109       mms.set_memory(gvn.transform(phi));
1110     }
1111   }
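       // Memory is merged per alias slice: each slice that differs between the
       // fast and slow maps gets its own phi on the region.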


1112   for (uint i = TypeFunc::Parms; i < limit; i++) {
1113     // Skip unused stack slots; fast forward to monoff();
1114     if (i == tos) {
1115       i = kit.jvms()->monoff();
1116       if (i >= limit) break;
1117     }
1118     Node* m = kit.map()->in(i);
1119     Node* n = slow_map->in(i);
1120     if (m != n) {
1121       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1122       Node* phi = PhiNode::make(region, m, t);
1123       phi->set_req(2, n);
1124       kit.map()->set_req(i, gvn.transform(phi));
1125     }
1126   }
1127   return kit.transfer_exceptions_into_jvms();
1128 }
1129 
1130 
1131 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1132   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1133   bool input_not_const;
1134   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1135   Compile* C = Compile::current();
1136   bool should_delay = C->should_delay_inlining();
1137   if (cg != nullptr) {
1138     if (should_delay) {
1139       return CallGenerator::for_late_inline(callee, cg);
1140     } else {
1141       return cg;
1142     }
1143   }
1144   int bci = jvms->bci();
1145   ciCallProfile profile = caller->call_profile_at_bci(bci);
1146   int call_site_count = caller->scale_count(profile.count());
1147 
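       // Roughly: prefer a method-handle-specific late inline when incremental
       // inlining of MH call sites is enabled and either every inline is forced
       // to be incremental, or the site has been reached and eager inlining was
       // blocked (delayed inlining requested, a non-constant MH input, not yet
       // in the incremental-inlining phase, or over the inlining cutoff).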
1148   if (IncrementalInlineMH && (AlwaysIncrementalInline ||
1149                             (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
1150     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1151   } else {
1152     // Out-of-line call.
1153     return CallGenerator::for_direct_call(callee);
1154   }
1155 }
1156 
1157 
1158 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1159   GraphKit kit(jvms);
1160   PhaseGVN& gvn = kit.gvn();
1161   Compile* C = kit.C;
1162   vmIntrinsics::ID iid = callee->intrinsic_id();
1163   input_not_const = true;
1164   if (StressMethodHandleLinkerInlining) {
1165     allow_inline = false;
1166   }
1167   switch (iid) {
1168   case vmIntrinsics::_invokeBasic:
1169     {
1170       // Get MethodHandle receiver:
1171       Node* receiver = kit.argument(0);
1172       if (receiver->Opcode() == Op_ConP) {
1173         input_not_const = false;
1174         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1175         if (recv_toop != nullptr) {
1176           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1177           const int vtable_index = Method::invalid_vtable_index;

1185                                                 false /* call_does_dispatch */,
1186                                                 jvms,
1187                                                 allow_inline,
1188                                                 PROB_ALWAYS);
1189           return cg;
1190         } else {
1191         assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null constant: %s",
1192                  Type::str(receiver->bottom_type()));
1193           print_inlining_failure(C, callee, jvms, "receiver is always null");
1194         }
1195       } else {
1196         print_inlining_failure(C, callee, jvms, "receiver not constant");
1197       }
1198   } break;
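       // Note on the case above: invokeBasic on a constant MethodHandle (for
       // example, one loaded from a static final field) becomes a direct call
       // to the handle's vmtarget, which can then be inlined like any other
       // call.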
1199 
1200   case vmIntrinsics::_linkToVirtual:
1201   case vmIntrinsics::_linkToStatic:
1202   case vmIntrinsics::_linkToSpecial:
1203   case vmIntrinsics::_linkToInterface:
1204     {
1205       int nargs = callee->arg_size();
1206       // Get MemberName argument:
1207       Node* member_name = kit.argument(nargs - 1);
1208       if (member_name->Opcode() == Op_ConP) {
1209         input_not_const = false;
1210         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1211         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1212 
1213         if (!ciMethod::is_consistent_info(callee, target)) {
1214           print_inlining_failure(C, callee, jvms, "signatures mismatch");
1215           return nullptr;
1216         }
1217 
1218         // In lambda forms we erase signature types to avoid resolving issues
1219         // involving class loaders.  When we optimize a method handle invoke
1220         // to a direct call we must cast the receiver and arguments to its
1221         // actual types.
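             // For instance, a lambda form may pass a receiver typed as Object
             // while the target expects String; the casts below recover the
             // precise types for the optimizer.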
1222         ciSignature* signature = target->signature();
1223         const int receiver_skip = target->is_static() ? 0 : 1;
1224         // Cast receiver to its type.
1225         if (!target->is_static()) {
1226           Node* recv = kit.argument(0);
1227           Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());

1257         ciKlass* speculative_receiver_type = nullptr;
1258         if (is_virtual_or_interface) {
1259           ciInstanceKlass* klass = target->holder();
1260           Node*             receiver_node = kit.argument(0);
1261           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1262           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1263           // optimize_virtual_call() takes 2 different holder
1264           // arguments for a corner case that doesn't apply here (see
1265           // Parse::do_call())
1266           target = C->optimize_virtual_call(caller, klass, klass,
1267                                             target, receiver_type, is_virtual,
1268                                             call_does_dispatch, vtable_index, // out-parameters
1269                                             false /* check_access */);
1270           // We lack profiling at this call but type speculation may
1271           // provide us with a type
1272           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1273         }
1274         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1275                                               allow_inline,
1276                                               PROB_ALWAYS,
1277                                               speculative_receiver_type,
1278                                               true);
1279         return cg;
1280       } else {
1281         print_inlining_failure(C, callee, jvms, "member_name not constant");
1282       }
1283   } break;
1284 
1285   case vmIntrinsics::_linkToNative:
1286     print_inlining_failure(C, callee, jvms, "native call");
1287     break;
1288 
1289   default:
1290     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1291     break;
1292   }
1293   return nullptr;
1294 }
1295 
1296 //------------------------PredicatedIntrinsicGenerator------------------------------
1297 // Internal class which handles all predicated Intrinsic calls.
1298 class PredicatedIntrinsicGenerator : public CallGenerator {

1330   //        do_intrinsic(0)
1331   //    else
1332   //    if (predicate(1))
1333   //        do_intrinsic(1)
1334   //    ...
1335   //    else
1336   //        do_java_comp
1337 
1338   GraphKit kit(jvms);
1339   PhaseGVN& gvn = kit.gvn();
1340 
1341   CompileLog* log = kit.C->log();
1342   if (log != nullptr) {
1343     log->elem("predicated_intrinsic bci='%d' method='%d'",
1344               jvms->bci(), log->identify(method()));
1345   }
1346 
1347   if (!method()->is_static()) {
1348     // We need an explicit receiver null_check before checking its type in the predicate.
1349     // We share a map with the caller, so its JVMS gets adjusted.
1350     kit.null_check_receiver_before_call(method());
1351     if (kit.stopped()) {
1352       return kit.transfer_exceptions_into_jvms();
1353     }
1354   }
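       // If the kit stopped above, the receiver is always null here and the
       // exception path is the only one left, so no intrinsic code is emitted.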
1355 
1356   int n_predicates = _intrinsic->predicates_count();
1357   assert(n_predicates > 0, "sanity");
1358 
1359   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
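       // Presumably one slot per predicated intrinsic version, plus one for
       // the fall-back (non-intrinsic) compilation path.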
1360 
1361   // Region for normal compilation code if intrinsic failed.
1362   Node* slow_region = new RegionNode(1);
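       // The region starts with only its self-input; a control input is
       // expected to be added for each predicate that fails at runtime.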
1363 
1364   int results = 0;
1365   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1366 #ifdef ASSERT
1367     JVMState* old_jvms = kit.jvms();
1368     SafePointNode* old_map = kit.map();
1369     Node* old_io  = old_map->i_o();
1370     Node* old_mem = old_map->memory();