src/hotspot/share/opto/callGenerator.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "ci/ciCallSite.hpp"
  27 #include "ci/ciMemberName.hpp"
  28 #include "ci/ciMethodHandle.hpp"
  29 #include "ci/ciObjArray.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "runtime/os.inline.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "utilities/debug.hpp"
  44 
  45 // Utility function.
  46 const TypeFunc* CallGenerator::tf() const {
  47   return TypeFunc::make(method());
  48 }
  49 
  50 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  51   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  52 }
  53 
  54 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  55   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  56   return is_inlined_method_handle_intrinsic(symbolic_info, m);

 101   GraphKit& exits = parser.exits();
 102 
 103   if (C->failing()) {
 104     while (exits.pop_exception_state() != nullptr) ;
 105     return nullptr;
 106   }
 107 
 108   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 109 
 110   // Simply return the exit state of the parser,
 111   // augmented by any exceptional states.
 112   return exits.transfer_exceptions_into_jvms();
 113 }
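A minimal standalone sketch (toy types, not HotSpot code) of the failure cleanup above: when the compile is failing, every pending exception state is popped and discarded so nothing dangles, mirroring the "while (exits.pop_exception_state() != nullptr) ;" idiom.

#include <stack>

struct ExState {};  // stands in for an exceptional JVMState

// On a failing compile, pop and discard every pending exception state.
void drop_pending_exceptions(std::stack<ExState*>& pending) {
  while (!pending.empty()) {
    pending.pop();
  }
}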
 114 
 115 //---------------------------DirectCallGenerator------------------------------
 116 // Internal class which handles all out-of-line calls w/o receiver type checks.
 117 class DirectCallGenerator : public CallGenerator {
 118  private:
 119   CallStaticJavaNode* _call_node;
 120   // Force separate memory and I/O projections for the exceptional
  121   // paths to facilitate late inlining.
 122   bool                _separate_io_proj;
 123 
 124 protected:
 125   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 126 
 127  public:
 128   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 129     : CallGenerator(method),
 130       _separate_io_proj(separate_io_proj)
 131   {
 132   }
 133   virtual JVMState* generate(JVMState* jvms);
 134 
 135   virtual CallNode* call_node() const { return _call_node; }
 136   virtual CallGenerator* with_call_node(CallNode* call) {
 137     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 138     dcg->set_call_node(call->as_CallStaticJava());
 139     return dcg;
 140   }
 141 };
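The with_call_node() method above follows a clone-and-rebind pattern: when a call node is cloned during optimization, the generator attached to it is cloned as well rather than mutated. A minimal standalone sketch of the idiom, with hypothetical types in place of the HotSpot classes:

#include <memory>

struct CallSiteIR {};  // stands in for CallStaticJavaNode

class Generator {
  CallSiteIR* _call = nullptr;
 public:
  void bind(CallSiteIR* c) { _call = c; }
  // Rebinding returns a fresh generator so the original stays attached
  // to the old call node.
  std::unique_ptr<Generator> with_call(CallSiteIR* c) const {
    auto g = std::make_unique<Generator>(*this);
    g->bind(c);
    return g;
  }
};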
 142 
 143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 144   GraphKit kit(jvms);
 145   bool is_static = method()->is_static();
 146   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 147                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 148 
 149   if (kit.C->log() != nullptr) {
 150     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 151   }

 194   {
 195     assert(vtable_index == Method::invalid_vtable_index ||
 196            vtable_index >= 0, "either invalid or usable");
 197   }
 198   virtual bool      is_virtual() const          { return true; }
 199   virtual JVMState* generate(JVMState* jvms);
 200 
 201   virtual CallNode* call_node() const { return _call_node; }
 202   int vtable_index() const { return _vtable_index; }
 203 
 204   virtual CallGenerator* with_call_node(CallNode* call) {
 205     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 206     cg->set_call_node(call->as_CallDynamicJava());
 207     return cg;
 208   }
 209 };
 210 
 211 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 212   GraphKit kit(jvms);
 213   Node* receiver = kit.argument(0);
 214 
 215   if (kit.C->log() != nullptr) {
 216     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 217   }
 218 
 219   // If the receiver is a constant null, do not torture the system
 220   // by attempting to call through it.  The compile will proceed
 221   // correctly, but may bail out in final_graph_reshaping, because
 222   // the call instruction will have a seemingly deficient out-count.
 223   // (The bailout says something misleading about an "infinite loop".)
 224   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 225     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 226     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 227     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 228     kit.inc_sp(arg_size);  // restore arguments
 229     kit.uncommon_trap(Deoptimization::Reason_null_check,
 230                       Deoptimization::Action_none,
 231                       nullptr, "null receiver");
 232     return kit.transfer_exceptions_into_jvms();
 233   }
 234 
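One subtlety in the block above: inc_sp(arg_size) runs before the trap because deoptimization re-executes the invoke bytecode in the interpreter, so the arguments already consumed by call setup must be made live on the expression stack again. A toy sketch (hypothetical names) of that restore-then-trap ordering:

struct ToyKit {
  int sp = 0;
  void inc_sp(int n) { sp += n; }  // arguments become live again
  void uncommon_trap() { /* record a deopt point covering 'sp' stack slots */ }
};

void trap_with_restored_args(ToyKit& kit, int arg_size) {
  kit.inc_sp(arg_size);  // restore arguments before deoptimizing
  kit.uncommon_trap();   // the interpreter will re-execute the invoke
}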

 336     // parse is finished.
 337     if (!is_mh_late_inline()) {
 338       C->add_late_inline(this);
 339     }
 340 
 341     // Emit the CallStaticJava and request separate projections so
 342     // that the late inlining logic can distinguish between fall
 343     // through and exceptional uses of the memory and io projections
 344     // as is done for allocations and macro expansion.
 345     return DirectCallGenerator::generate(jvms);
 346   }
 347 
 348   virtual void set_unique_id(jlong id) {
 349     _unique_id = id;
 350   }
 351 
 352   virtual jlong unique_id() const {
 353     return _unique_id;
 354   }
 355 
 356   virtual CallGenerator* with_call_node(CallNode* call) {
 357     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 358     cg->set_call_node(call->as_CallStaticJava());
 359     return cg;
 360   }
 361 };
 362 
 363 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 364   return new LateInlineCallGenerator(method, inline_cg);
 365 }
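A minimal sketch (hypothetical names) of the late-inline protocol used above: generate() emits an ordinary out-of-line call now and queues the generator via add_late_inline(); once parsing is finished, the compiler walks the queue and lets each entry attempt the real inline.

#include <vector>

struct LateTask {
  virtual void do_late_inline() = 0;
  virtual ~LateTask() = default;
};

struct ToyCompile {
  std::vector<LateTask*> _late_inlines;
  void add_late_inline(LateTask* t) { _late_inlines.push_back(t); }
  // Called after parsing: revisit every queued call site.
  void process_late_inlines() {
    for (LateTask* t : _late_inlines) {
      t->do_late_inline();
    }
  }
};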
 366 
 367 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 368   ciMethod* _caller;
 369   bool _input_not_const;
 370 
 371   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 372 
 373  public:
 374   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 375     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

 397     cg->set_call_node(call->as_CallStaticJava());
 398     return cg;
 399   }
 400 };
 401 
 402 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 403   // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  404   // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
 405   // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
 406   // of late inlining with exceptions.
 407   assert(!jvms->method()->has_exception_handlers() ||
 408          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 409           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 410   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 411   bool allow_inline = C->inlining_incrementally();
 412   bool input_not_const = true;
 413   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 414   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 415 
 416   if (cg != nullptr) {
 417     if (!allow_inline) {
 418       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
 419                                   "late method handle call resolution");
 420     }
 421     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || cg->is_virtual_late_inline() ||
 422            AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 423     _inline_cg = cg;
 424     return true;
 425   } else {
  426     // A method handle call with a constant appendix argument should either be inlined or replaced with a direct call,
  427     // unless there's a signature mismatch between caller and callee. If it fails, there's not much to improve later,
  428     // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
 429     return false;
 430   }
 431 }
 432 
 433 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 434   assert(IncrementalInlineMH, "required");
 435   Compile::current()->mark_has_mh_late_inlines();
 436   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);

 557 
 558 void LateInlineMHCallGenerator::do_late_inline() {
 559   CallGenerator::do_late_inline_helper();
 560 }
 561 
 562 void LateInlineVirtualCallGenerator::do_late_inline() {
 563   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 564   CallGenerator::do_late_inline_helper();
 565 }
 566 
 567 void CallGenerator::do_late_inline_helper() {
 568   assert(is_late_inline(), "only late inline allowed");
 569 
 570   // Can't inline it
 571   CallNode* call = call_node();
 572   if (call == nullptr || call->outcnt() == 0 ||
 573       call->in(0) == nullptr || call->in(0)->is_top()) {
 574     return;
 575   }
 576 
 577   const TypeTuple *r = call->tf()->domain();
 578   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 579     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 580       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 581       return;
 582     }
 583   }
 584 
 585   if (call->in(TypeFunc::Memory)->is_top()) {
 586     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 587     return;
 588   }
 589   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 590     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 591     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 592       return; // dead path
 593     }
 594   }
 595 
 596   // check for unreachable loop
 597   CallProjections callprojs;
 598   // Similar to incremental inlining, don't assert that all call
 599   // projections are still there for post-parse call devirtualization.
 600   bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
 601   call->extract_projections(&callprojs, true, do_asserts);
 602   if ((callprojs.fallthrough_catchproj == call->in(0)) ||
 603       (callprojs.catchall_catchproj    == call->in(0)) ||
 604       (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 605       (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
 606       (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 607       (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 608       (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
 609       (callprojs.exobj   != nullptr && call->find_edge(callprojs.exobj) != -1)) {
 610     return;
 611   }
 612 
 613   Compile* C = Compile::current();
 614 
 615   uint endoff = call->jvms()->endoff();
 616   if (C->inlining_incrementally()) {
 617     // No reachability edges should be present when incremental inlining takes place.
 618     // Inlining logic doesn't expect any extra edges past debug info and fails with
 619     // an assert in SafePointNode::grow_stack.
 620     assert(endoff == call->req(), "reachability edges not supported");
 621   } else {
 622     if (call->req() > endoff) { // reachability edges present
 623       assert(OptimizeReachabilityFences, "required");
 624       return; // keep the original call node as the holder of reachability info
 625     }
 626   }
 627 
 628   // Remove inlined methods from Compiler's lists.
 629   if (call->is_macro()) {
 630     C->remove_macro_node(call);
 631   }
 632 
  633   // The call is marked as pure (no important side effects), but the result isn't used.
 634   // It's safe to remove the call.
 635   bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
 636 
 637   if (is_pure_call() && result_not_used) {
 638     GraphKit kit(call->jvms());
 639     kit.replace_call(call, C->top(), true, do_asserts);
 640   } else {
  641     // Make a clone of the JVMState that is appropriate for driving a parse
 642     JVMState* old_jvms = call->jvms();
 643     JVMState* jvms = old_jvms->clone_shallow(C);
 644     uint size = call->req();
 645     SafePointNode* map = new SafePointNode(size, jvms);
 646     for (uint i1 = 0; i1 < size; i1++) {
 647       map->init_req(i1, call->in(i1));
 648     }
 649 
 650     // Make sure the state is a MergeMem for parsing.
 651     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 652       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 653       C->initial_gvn()->set_type_bottom(mem);
 654       map->set_req(TypeFunc::Memory, mem);
 655     }
 656 
 657     uint nargs = method()->arg_size();
 658     // blow away old call arguments
 659     Node* top = C->top();
 660     for (uint i1 = 0; i1 < nargs; i1++) {
 661       map->set_req(TypeFunc::Parms + i1, top);
 662     }
 663     jvms->set_map(map);
 664 
 665     // Make enough space in the expression stack to transfer
 666     // the incoming arguments and return value.
 667     map->ensure_stack(jvms, jvms->method()->max_stack());
 668     for (uint i1 = 0; i1 < nargs; i1++) {
 669       map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
 670     }
 671 
 672     C->log_late_inline(this);
 673 
 674     // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
 675     if (!do_late_inline_check(C, jvms)) {
 676       map->disconnect_inputs(C);
 677       return;
 678     }
 679 
 680     // Setup default node notes to be picked up by the inlining
 681     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 682     if (old_nn != nullptr) {
 683       Node_Notes* entry_nn = old_nn->clone(C);
 684       entry_nn->set_jvms(jvms);
 685       C->set_default_node_notes(entry_nn);
 686     }
 687 
 688     // Now perform the inlining using the synthesized JVMState
 689     JVMState* new_jvms = inline_cg()->generate(jvms);
 690     if (new_jvms == nullptr)  return;  // no change
 691     if (C->failing())      return;
 692 
 693     if (is_mh_late_inline()) {
 694       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
 695     } else if (is_string_late_inline()) {
 696       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
 697     } else if (is_boxing_late_inline()) {
 698       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
 699     } else if (is_vector_reboxing_late_inline()) {
 700       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
 701     } else {
 702       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
 703     }
 704 
 705     // Capture any exceptional control flow
 706     GraphKit kit(new_jvms);
 707 
 708     // Find the result object
 709     Node* result = C->top();
 710     int   result_size = method()->return_type()->size();
 711     if (result_size != 0 && !kit.stopped()) {
 712       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 713     }
 714 
 715     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 716       result = kit.must_be_not_null(result, false);
 717     }
 718 
 719     if (inline_cg()->is_inline()) {
 720       C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
 721       C->env()->notice_inlined_method(inline_cg()->method());
 722     }
 723     C->set_inlining_progress(true);
 724     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 725     kit.replace_call(call, result, true, do_asserts);
 726   }
 727 }
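The "check for unreachable loop" step in the helper above guards against a degenerate graph in which the call's own projections feed back into its inputs; such a region is a dead cycle that late inlining must leave alone. A minimal sketch (toy node type) of that test:

#include <algorithm>
#include <vector>

struct ToyNode {
  std::vector<ToyNode*> in;   // def edges into this node
  std::vector<ToyNode*> out;  // projections and other users
};

// True if any output projection of 'call' is also one of its inputs,
// i.e. the call sits on an unreachable cycle.
bool feeds_itself(const ToyNode& call) {
  for (ToyNode* proj : call.out) {
    if (std::find(call.in.begin(), call.in.end(), proj) != call.in.end()) {
      return true;  // a projection cycles back into the call: dead loop
    }
  }
  return false;
}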
 728 
 729 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 730 
 731  public:
 732   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 733     LateInlineCallGenerator(method, inline_cg) {}
 734 
 735   virtual JVMState* generate(JVMState* jvms) {
 736     Compile *C = Compile::current();
 737 
 738     C->log_inline_id(this);
 739 
 740     C->add_string_late_inline(this);
 741 
 742     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 743     return new_jvms;
 744   }

 967   // Merge memory
 968   kit.merge_memory(slow_map->merged_memory(), region, 2);
 969   // Transform new memory Phis.
 970   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
 971     Node* phi = mms.memory();
 972     if (phi->is_Phi() && phi->in(0) == region) {
 973       mms.set_memory(gvn.transform(phi));
 974     }
 975   }
 976   uint tos = kit.jvms()->stkoff() + kit.sp();
 977   uint limit = slow_map->req();
 978   for (uint i = TypeFunc::Parms; i < limit; i++) {
 979     // Skip unused stack slots; fast forward to monoff();
 980     if (i == tos) {
 981       i = kit.jvms()->monoff();
  982       if (i >= limit) break;
 983     }
 984     Node* m = kit.map()->in(i);
 985     Node* n = slow_map->in(i);
 986     if (m != n) {
 987       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 988       Node* phi = PhiNode::make(region, m, t);
 989       phi->set_req(2, n);
 990       kit.map()->set_req(i, gvn.transform(phi));
 991     }
 992   }
 993   return kit.transfer_exceptions_into_jvms();
 994 }
 995 
 996 
 997 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
 998   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
 999   bool input_not_const;
1000   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1001   Compile* C = Compile::current();
1002   bool should_delay = C->should_delay_inlining();
1003   if (cg != nullptr) {
1004     if (should_delay && IncrementalInlineMH) {
1005       return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1006     } else {
1007       return cg;
1008     }
1009   }
1010   int bci = jvms->bci();
1011   ciCallProfile profile = caller->call_profile_at_bci(bci);
1012   int call_site_count = caller->scale_count(profile.count());
1013 
1014   if (IncrementalInlineMH && call_site_count > 0 &&
1015       (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1016     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1017   } else {
1018     // Out-of-line call.
1019     return CallGenerator::for_direct_call(callee);
1020   }
1021 }
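A condensed sketch (hypothetical names, simplified conditions) of the policy the factory above implements: a call resolved by for_method_handle_inline() may still be deferred, an unresolved but warm call site is queued for late inlining in the hope its inputs constant-fold, and everything else becomes a plain out-of-line call.

enum class Plan { InlineNow, LateInline, DirectCall };

// resolved: for_method_handle_inline() produced a generator.
Plan choose_plan(bool resolved, bool should_delay,
                 bool input_not_const, int call_site_count) {
  if (resolved) {
    return should_delay ? Plan::LateInline : Plan::InlineNow;
  }
  if (call_site_count > 0 && (should_delay || input_not_const)) {
    return Plan::LateInline;  // inputs may constant-fold later
  }
  return Plan::DirectCall;    // out-of-line call
}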
1022 
1023 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1024   GraphKit kit(jvms);
1025   PhaseGVN& gvn = kit.gvn();
1026   Compile* C = kit.C;
1027   vmIntrinsics::ID iid = callee->intrinsic_id();
1028   input_not_const = true;
1029   if (StressMethodHandleLinkerInlining) {
1030     allow_inline = false;
1031   }
1032   switch (iid) {
1033   case vmIntrinsics::_invokeBasic:
1034     {
1035       // Get MethodHandle receiver:
1036       Node* receiver = kit.argument(0);
1037       if (receiver->Opcode() == Op_ConP) {
1038         input_not_const = false;
1039         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1040         if (recv_toop != nullptr) {
1041           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1042           const int vtable_index = Method::invalid_vtable_index;

1050                                                 false /* call_does_dispatch */,
1051                                                 jvms,
1052                                                 allow_inline,
1053                                                 PROB_ALWAYS);
1054           return cg;
1055         } else {
1056           assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1057                  Type::str(receiver->bottom_type()));
1058           print_inlining_failure(C, callee, jvms, "receiver is always null");
1059         }
1060       } else {
1061         print_inlining_failure(C, callee, jvms, "receiver not constant");
1062       }
1063   } break;
1064 
1065   case vmIntrinsics::_linkToVirtual:
1066   case vmIntrinsics::_linkToStatic:
1067   case vmIntrinsics::_linkToSpecial:
1068   case vmIntrinsics::_linkToInterface:
1069     {
1070       // Get MemberName argument:
1071       Node* member_name = kit.argument(callee->arg_size() - 1);
1072       if (member_name->Opcode() == Op_ConP) {
1073         input_not_const = false;
1074         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1075         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1076 
1077         if (!ciMethod::is_consistent_info(callee, target)) {
1078           print_inlining_failure(C, callee, jvms, "signatures mismatch");
1079           return nullptr;
1080         }
1081 
1082         // In lambda forms we erase signature types to avoid resolving issues
1083         // involving class loaders.  When we optimize a method handle invoke
1084         // to a direct call we must cast the receiver and arguments to its
1085         // actual types.
1086         ciSignature* signature = target->signature();
1087         const int receiver_skip = target->is_static() ? 0 : 1;
1088         // Cast receiver to its type.
1089         if (!target->is_static()) {
1090           Node* recv = kit.argument(0);
1091           Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());
1092           if (casted_recv->is_top()) {
1093             print_inlining_failure(C, callee, jvms, "argument types mismatch");
1094             return nullptr; // FIXME: effectively dead; issue a halt node instead
1095           } else if (casted_recv != recv) {
1096             kit.set_argument(0, casted_recv);
1097           }
1098         }
 1099         // Cast reference arguments to their types.
1100         for (int i = 0, j = 0; i < signature->count(); i++) {
1101           ciType* t = signature->type_at(i);
1102           if (t->is_klass()) {
1103             Node* arg = kit.argument(receiver_skip + j);
1104             Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass());
1105             if (casted_arg->is_top()) {
1106               print_inlining_failure(C, callee, jvms, "argument types mismatch");
1107               return nullptr; // FIXME: effectively dead; issue a halt node instead
1108             } else if (casted_arg != arg) {
1109               kit.set_argument(receiver_skip + j, casted_arg);
1110             }
1111           }
1112           j += t->size();  // long and double take two slots
1113         }
1114 
1115         // Try to get the most accurate receiver type
1116         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1117         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1118         int  vtable_index       = Method::invalid_vtable_index;
1119         bool call_does_dispatch = false;
1120 
1121         ciKlass* speculative_receiver_type = nullptr;
1122         if (is_virtual_or_interface) {
1123           ciInstanceKlass* klass = target->holder();
1124           Node*             receiver_node = kit.argument(0);
1125           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1126           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1127           // optimize_virtual_call() takes 2 different holder
1128           // arguments for a corner case that doesn't apply here (see
1129           // Parse::do_call())
1130           target = C->optimize_virtual_call(caller, klass, klass,
1131                                             target, receiver_type, is_virtual,
1132                                             call_does_dispatch, vtable_index, // out-parameters
1133                                             false /* check_access */);
1134           // We lack profiling at this call but type speculation may
1135           // provide us with a type
1136           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1137         }
1138         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1139                                               allow_inline,
1140                                               PROB_ALWAYS,
1141                                               speculative_receiver_type);
1142         return cg;
1143       } else {
1144         print_inlining_failure(C, callee, jvms, "member_name not constant");
1145       }
1146   } break;
1147 
1148   case vmIntrinsics::_linkToNative:
1149     print_inlining_failure(C, callee, jvms, "native call");
1150     break;
1151 
1152   default:
1153     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1154     break;
1155   }
1156   return nullptr;
1157 }
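The argument-casting loop above advances j by t->size() because JVM arguments are indexed in stack slots, and long and double occupy two slots each. A standalone sketch of that slot arithmetic:

#include <cstdio>
#include <vector>

int main() {
  // 'J' (long) and 'D' (double) take two slots; everything else takes one.
  std::vector<char> signature = {'I', 'J', 'L', 'D'};
  int j = 0;
  for (char t : signature) {
    std::printf("argument %c starts at slot %d\n", t, j);
    j += (t == 'J' || t == 'D') ? 2 : 1;
  }
  return 0;
}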
1158 
1159 //------------------------PredicatedIntrinsicGenerator------------------------------
1160 // Internal class which handles all predicated Intrinsic calls.
1161 class PredicatedIntrinsicGenerator : public CallGenerator {

1193   //        do_intrinsic(0)
1194   //    else
1195   //    if (predicate(1))
1196   //        do_intrinsic(1)
1197   //    ...
1198   //    else
1199   //        do_java_comp
1200 
1201   GraphKit kit(jvms);
1202   PhaseGVN& gvn = kit.gvn();
1203 
1204   CompileLog* log = kit.C->log();
1205   if (log != nullptr) {
1206     log->elem("predicated_intrinsic bci='%d' method='%d'",
1207               jvms->bci(), log->identify(method()));
1208   }
1209 
1210   if (!method()->is_static()) {
 1211     // We need an explicit receiver null_check before checking its type in the predicate.
 1212     // We share a map with the caller, so its JVMS gets adjusted.
1213     Node* receiver = kit.null_check_receiver_before_call(method());
1214     if (kit.stopped()) {
1215       return kit.transfer_exceptions_into_jvms();
1216     }
1217   }
1218 
1219   int n_predicates = _intrinsic->predicates_count();
1220   assert(n_predicates > 0, "sanity");
1221 
1222   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1223 
1224   // Region for normal compilation code if intrinsic failed.
1225   Node* slow_region = new RegionNode(1);
1226 
1227   int results = 0;
1228   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1229 #ifdef ASSERT
1230     JVMState* old_jvms = kit.jvms();
1231     SafePointNode* old_map = kit.map();
1232     Node* old_io  = old_map->i_o();
1233     Node* old_mem = old_map->memory();

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "ci/ciCallSite.hpp"
  27 #include "ci/ciMemberName.hpp"
  28 #include "ci/ciMethodHandle.hpp"
  29 #include "ci/ciObjArray.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "oops/accessDecorators.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/inlinetypenode.hpp"
  39 #include "opto/parse.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "runtime/os.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "utilities/debug.hpp"
  46 
  47 // Utility function.
  48 const TypeFunc* CallGenerator::tf() const {
  49   return TypeFunc::make(method());
  50 }
  51 
  52 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  53   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  54 }
  55 
  56 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  57   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  58   return is_inlined_method_handle_intrinsic(symbolic_info, m);

 103   GraphKit& exits = parser.exits();
 104 
 105   if (C->failing()) {
 106     while (exits.pop_exception_state() != nullptr) ;
 107     return nullptr;
 108   }
 109 
 110   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 111 
 112   // Simply return the exit state of the parser,
 113   // augmented by any exceptional states.
 114   return exits.transfer_exceptions_into_jvms();
 115 }
 116 
 117 //---------------------------DirectCallGenerator------------------------------
 118 // Internal class which handles all out-of-line calls w/o receiver type checks.
 119 class DirectCallGenerator : public CallGenerator {
 120  private:
 121   CallStaticJavaNode* _call_node;
 122   // Force separate memory and I/O projections for the exceptional
 123   // paths to facilitate late inlining.
 124   bool                _separate_io_proj;
 125 
 126 protected:
 127   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 128 
 129  public:
 130   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 131     : CallGenerator(method),
 132       _call_node(nullptr),
 133       _separate_io_proj(separate_io_proj)
 134   {
 135     if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
 136       // If that call has not been optimized by the time optimizations are over,
 137       // we'll need to add a call to create an inline type instance from the klass
 138       // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
 139       // Separating memory and I/O projections for exceptions is required to
 140       // perform that graph transformation.
 141       _separate_io_proj = true;
 142     }
 143   }
 144   virtual JVMState* generate(JVMState* jvms);
 145 
 146   virtual CallNode* call_node() const { return _call_node; }
 147   virtual CallGenerator* with_call_node(CallNode* call) {
 148     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 149     dcg->set_call_node(call->as_CallStaticJava());
 150     return dcg;
 151   }
 152 };
 153 
 154 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 155   GraphKit kit(jvms);
 156   bool is_static = method()->is_static();
 157   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 158                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 159 
 160   if (kit.C->log() != nullptr) {
 161     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 162   }

 205   {
 206     assert(vtable_index == Method::invalid_vtable_index ||
 207            vtable_index >= 0, "either invalid or usable");
 208   }
 209   virtual bool      is_virtual() const          { return true; }
 210   virtual JVMState* generate(JVMState* jvms);
 211 
 212   virtual CallNode* call_node() const { return _call_node; }
 213   int vtable_index() const { return _vtable_index; }
 214 
 215   virtual CallGenerator* with_call_node(CallNode* call) {
 216     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 217     cg->set_call_node(call->as_CallDynamicJava());
 218     return cg;
 219   }
 220 };
 221 
 222 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 223   GraphKit kit(jvms);
 224   Node* receiver = kit.argument(0);
 225   if (kit.C->log() != nullptr) {
 226     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 227   }
 228 
 229   // If the receiver is a constant null, do not torture the system
 230   // by attempting to call through it.  The compile will proceed
 231   // correctly, but may bail out in final_graph_reshaping, because
 232   // the call instruction will have a seemingly deficient out-count.
 233   // (The bailout says something misleading about an "infinite loop".)
 234   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 235     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 236     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 237     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 238     kit.inc_sp(arg_size);  // restore arguments
 239     kit.uncommon_trap(Deoptimization::Reason_null_check,
 240                       Deoptimization::Action_none,
 241                       nullptr, "null receiver");
 242     return kit.transfer_exceptions_into_jvms();
 243   }
 244 

 346     // parse is finished.
 347     if (!is_mh_late_inline()) {
 348       C->add_late_inline(this);
 349     }
 350 
 351     // Emit the CallStaticJava and request separate projections so
 352     // that the late inlining logic can distinguish between fall
 353     // through and exceptional uses of the memory and io projections
 354     // as is done for allocations and macro expansion.
 355     return DirectCallGenerator::generate(jvms);
 356   }
 357 
 358   virtual void set_unique_id(jlong id) {
 359     _unique_id = id;
 360   }
 361 
 362   virtual jlong unique_id() const {
 363     return _unique_id;
 364   }
 365 
 366   virtual CallGenerator* inline_cg() {
 367     return _inline_cg;
 368   }
 369 
 370   virtual CallGenerator* with_call_node(CallNode* call) {
 371     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 372     cg->set_call_node(call->as_CallStaticJava());
 373     return cg;
 374   }
 375 };
 376 
 377 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 378   return new LateInlineCallGenerator(method, inline_cg);
 379 }
 380 
 381 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 382   ciMethod* _caller;
 383   bool _input_not_const;
 384 
 385   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 386 
 387  public:
 388   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 389     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

 411     cg->set_call_node(call->as_CallStaticJava());
 412     return cg;
 413   }
 414 };
 415 
 416 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 417   // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  418   // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
 419   // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
 420   // of late inlining with exceptions.
 421   assert(!jvms->method()->has_exception_handlers() ||
 422          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 423           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 424   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 425   bool allow_inline = C->inlining_incrementally();
 426   bool input_not_const = true;
 427   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 428   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 429 
 430   if (cg != nullptr) {
 431     // AlwaysIncrementalInline causes for_method_handle_inline() to
 432     // return a LateInlineCallGenerator. Extract the
 433     // InlineCallGenerator from it.
 434     if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
 435       cg = cg->inline_cg();
 436       assert(cg != nullptr, "inline call generator expected");
 437     }
 438 
 439     if (!allow_inline) {
 440       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
 441                                   "late method handle call resolution");
 442     }
 443     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || cg->is_virtual_late_inline() ||
 444            AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 445     _inline_cg = cg;
 446     return true;
 447   } else {
  448     // A method handle call with a constant appendix argument should either be inlined or replaced with a direct call,
  449     // unless there's a signature mismatch between caller and callee. If it fails, there's not much to improve later,
  450     // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
 451     return false;
 452   }
 453 }
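The AlwaysIncrementalInline branch above unwraps one level of nesting: for_method_handle_inline() may hand back another late-inline wrapper, and queueing that from inside a late inline would only defer the work again. A minimal sketch (hypothetical interface) of the unwrap:

struct CG {
  virtual bool is_late_inline() const { return false; }
  virtual CG*  inline_cg()            { return nullptr; }
  virtual ~CG() = default;
};

// If the resolver returned another late-inline wrapper, use its inner
// generator instead of queueing a late inline from within a late inline.
CG* unwrap_late_inline(CG* cg) {
  if (cg != nullptr && cg->is_late_inline() && cg->inline_cg() != nullptr) {
    return cg->inline_cg();
  }
  return cg;
}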
 454 
 455 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 456   assert(IncrementalInlineMH, "required");
 457   Compile::current()->mark_has_mh_late_inlines();
 458   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);

 579 
 580 void LateInlineMHCallGenerator::do_late_inline() {
 581   CallGenerator::do_late_inline_helper();
 582 }
 583 
 584 void LateInlineVirtualCallGenerator::do_late_inline() {
 585   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 586   CallGenerator::do_late_inline_helper();
 587 }
 588 
 589 void CallGenerator::do_late_inline_helper() {
 590   assert(is_late_inline(), "only late inline allowed");
 591 
 592   // Can't inline it
 593   CallNode* call = call_node();
 594   if (call == nullptr || call->outcnt() == 0 ||
 595       call->in(0) == nullptr || call->in(0)->is_top()) {
 596     return;
 597   }
 598 
 599   const TypeTuple* r = call->tf()->domain_cc();
 600   for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 601     if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
 602       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 603       return;
 604     }
 605   }
 606 
 607   if (call->in(TypeFunc::Memory)->is_top()) {
 608     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 609     return;
 610   }
 611   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 612     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 613     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 614       return; // dead path
 615     }
 616   }
 617 
 618   // check for unreachable loop
 619   // Similar to incremental inlining, don't assert that all call
 620   // projections are still there for post-parse call devirtualization.
 621   bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
 622   CallProjections* callprojs = call->extract_projections(true, do_asserts);
 623   if ((callprojs->fallthrough_catchproj == call->in(0)) ||
 624       (callprojs->catchall_catchproj    == call->in(0)) ||
 625       (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 626       (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
 627       (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 628       (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 629       (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
 630     return;
 631   }
 632 
 633   Compile* C = Compile::current();
 634 
 635   uint endoff = call->jvms()->endoff();
 636   if (C->inlining_incrementally()) {
 637     // No reachability edges should be present when incremental inlining takes place.
 638     // Inlining logic doesn't expect any extra edges past debug info and fails with
 639     // an assert in SafePointNode::grow_stack.
 640     assert(endoff == call->req(), "reachability edges not supported");
 641   } else {
 642     if (call->req() > endoff) { // reachability edges present
 643       assert(OptimizeReachabilityFences, "required");
 644       return; // keep the original call node as the holder of reachability info
 645     }
 646   }
 647 
 648   // Remove inlined methods from Compiler's lists.
 649   if (call->is_macro()) {
 650     C->remove_macro_node(call);
 651   }
 652 
 653 
 654   bool result_not_used = true;
 655   for (uint i = 0; i < callprojs->nb_resproj; i++) {
 656     if (callprojs->resproj[i] != nullptr) {
 657       if (callprojs->resproj[i]->outcnt() != 0) {
 658         result_not_used = false;
 659       }
 660       if (call->find_edge(callprojs->resproj[i]) != -1) {
 661         return;
 662       }
 663     }
 664   }
 665 
 666   if (is_pure_call() && result_not_used) {
  667     // The call is marked as pure (no important side effects), but the result isn't used.
 668     // It's safe to remove the call.
 669     GraphKit kit(call->jvms());
 670     kit.replace_call(call, C->top(), true, do_asserts);
 671   } else {
  672     // Make a clone of the JVMState that is appropriate for driving a parse
 673     JVMState* old_jvms = call->jvms();
 674     JVMState* jvms = old_jvms->clone_shallow(C);
 675     uint size = call->req();
 676     SafePointNode* map = new SafePointNode(size, jvms);
 677     for (uint i1 = 0; i1 < size; i1++) {
 678       map->init_req(i1, call->in(i1));
 679     }
 680 
 681     PhaseGVN& gvn = *C->initial_gvn();
 682     // Make sure the state is a MergeMem for parsing.
 683     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 684       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 685       gvn.set_type_bottom(mem);
 686       map->set_req(TypeFunc::Memory, mem);
 687     }
 688 
 689     // blow away old call arguments
 690     for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 691       map->set_req(i1, C->top());
 692     }
 693     jvms->set_map(map);
 694 
 695     // Make enough space in the expression stack to transfer
 696     // the incoming arguments and return value.
 697     map->ensure_stack(jvms, jvms->method()->max_stack());
 698     const TypeTuple* domain_sig = call->_tf->domain_sig();
 699     uint nargs = method()->arg_size();
 700     assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 701 
 702     uint j = TypeFunc::Parms;
 703     int arg_num = 0;
 704     for (uint i1 = 0; i1 < nargs; i1++) {
 705       const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 706       if (t->is_inlinetypeptr() && !method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
 707         // Inline type arguments are not passed by reference: we get an argument per
 708         // field of the inline type. Build InlineTypeNodes from the inline type arguments.
 709         GraphKit arg_kit(jvms, &gvn);
 710         Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
 711         // GraphKit::access_load_at() may be called from InlineTypeNode::make_from_multi() and it may change the map
 712         // that arg_kit uses.
 713         map = arg_kit.map();
 714         map->set_control(arg_kit.control());
 715         map->set_argument(jvms, i1, vt);
 716       } else {
 717         map->set_argument(jvms, i1, call->in(j++));
 718       }
 719       if (t != Type::HALF) {
 720         arg_num++;
 721       }
 722     }
 723 
 724     C->log_late_inline(this);
 725 
 726     // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
 727     if (!do_late_inline_check(C, jvms)) {
 728       map->disconnect_inputs(C);
 729       return;
 730     }
 731 
 732     // Check if we are late inlining a method handle call that returns an inline type as fields.
 733     Node* buffer_oop = nullptr;
 734     ciMethod* inline_method = inline_cg()->method();
 735     ciType* return_type = inline_method->return_type();
 736     if (!call->tf()->returns_inline_type_as_fields() &&
 737         return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
 738       assert(is_mh_late_inline(), "Unexpected return type");
 739 
 740       // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
 741       // Do this before the method handle call in case the buffer allocation triggers deoptimization and
 742       // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
 743       GraphKit arg_kit(jvms, &gvn);
 744       {
 745         PreserveReexecuteState preexecs(&arg_kit);
 746         arg_kit.jvms()->set_should_reexecute(true);
 747         arg_kit.inc_sp(nargs);
 748         Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
 749         buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
 750       }
 751       jvms = arg_kit.transfer_exceptions_into_jvms();
 752     }
 753 
 754     // Setup default node notes to be picked up by the inlining
 755     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 756     if (old_nn != nullptr) {
 757       Node_Notes* entry_nn = old_nn->clone(C);
 758       entry_nn->set_jvms(jvms);
 759       C->set_default_node_notes(entry_nn);
 760     }
 761 
 762     // Now perform the inlining using the synthesized JVMState
 763     JVMState* new_jvms = inline_cg()->generate(jvms);
 764     if (new_jvms == nullptr)  return;  // no change
 765     if (C->failing())      return;
 766 
 767     if (is_mh_late_inline()) {
 768       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
 769     } else if (is_string_late_inline()) {
 770       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
 771     } else if (is_boxing_late_inline()) {
 772       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
 773     } else if (is_vector_reboxing_late_inline()) {
 774       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
 775     } else {
 776       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
 777     }
 778 
 779     // Capture any exceptional control flow
 780     GraphKit kit(new_jvms);
 781 
 782     // Find the result object
 783     Node* result = C->top();
 784     int   result_size = method()->return_type()->size();
 785     if (result_size != 0 && !kit.stopped()) {
 786       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 787     }
 788 
 789     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 790       result = kit.must_be_not_null(result, false);
 791     }
 792 
 793     if (inline_cg()->is_inline()) {
 794       C->set_has_loops(C->has_loops() || inline_method->has_loops());
 795       C->env()->notice_inlined_method(inline_method);
 796     }
 797     C->set_inlining_progress(true);
 798     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 799 
 800     // Handle inline type returns
 801     InlineTypeNode* vt = result->isa_InlineType();
 802     if (vt != nullptr) {
 803       if (call->tf()->returns_inline_type_as_fields()) {
 804         vt->replace_call_results(&kit, call, C);
 805       } else {
 806         // Result might still be allocated (for example, if it has been stored to a non-flat field)
 807         if (!vt->is_allocated(&kit.gvn())) {
 808           assert(buffer_oop != nullptr, "should have allocated a buffer");
 809           RegionNode* region = new RegionNode(3);
 810 
 811           // Check if result is null
 812           Node* null_ctl = kit.top();
 813           kit.null_check_common(vt->get_null_marker(), T_INT, false, &null_ctl);
 814           region->init_req(1, null_ctl);
 815           PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
 816           Node* init_mem = kit.reset_memory();
 817           PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
 818 
 819           // Not null, initialize the buffer
 820           kit.set_all_memory(init_mem);
 821 
 822           Node* payload_ptr = kit.basic_plus_adr(buffer_oop, kit.gvn().type(vt)->inline_klass()->payload_offset());
 823           vt->store_flat(&kit, buffer_oop, payload_ptr, false, true, true, IN_HEAP | MO_UNORDERED);
 824           // Do not let stores that initialize this buffer be reordered with a subsequent
 825           // store that would make this buffer accessible by other threads.
 826           AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
 827           assert(alloc != nullptr, "must have an allocation node");
 828           kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 829           region->init_req(2, kit.control());
 830           oop->init_req(2, buffer_oop);
 831           mem->init_req(2, kit.merged_memory());
 832 
 833           // Update oop input to buffer
 834           kit.gvn().hash_delete(vt);
 835           vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
 836           vt->set_is_buffered(kit.gvn());
 837           vt = kit.gvn().transform(vt)->as_InlineType();
 838 
 839           kit.set_control(kit.gvn().transform(region));
 840           kit.set_all_memory(kit.gvn().transform(mem));
 841           kit.record_for_igvn(region);
 842           kit.record_for_igvn(oop);
 843           kit.record_for_igvn(mem);
 844         }
 845         result = vt;
 846       }
 847       DEBUG_ONLY(buffer_oop = nullptr);
 848     } else {
 849       assert(result->is_top() || !call->tf()->returns_inline_type_as_fields() || !call->as_CallJava()->method()->return_type()->is_loaded(), "Unexpected return value");
 850     }
 851     assert(kit.stopped() || buffer_oop == nullptr, "unused buffer allocation");
 852 
 853     kit.replace_call(call, result, true, do_asserts);
 854   }
 855 }
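Unlike the older single-resproj check, the helper above scans an array of result projections, since a call that returns an inline type as fields can produce one projection per field. A minimal sketch (toy types) of that liveness scan: the result counts as unused only if every projection is dead.

#include <vector>

struct ToyProj { int outcnt = 0; };

// The result is unused only if every result projection is dead.
bool result_not_used(const std::vector<ToyProj*>& resprojs) {
  for (const ToyProj* p : resprojs) {
    if (p != nullptr && p->outcnt != 0) {
      return false;
    }
  }
  return true;
}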
 856 
 857 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 858 
 859  public:
 860   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 861     LateInlineCallGenerator(method, inline_cg) {}
 862 
 863   virtual JVMState* generate(JVMState* jvms) {
 864     Compile *C = Compile::current();
 865 
 866     C->log_inline_id(this);
 867 
 868     C->add_string_late_inline(this);
 869 
 870     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 871     return new_jvms;
 872   }

1095   // Merge memory
1096   kit.merge_memory(slow_map->merged_memory(), region, 2);
1097   // Transform new memory Phis.
1098   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1099     Node* phi = mms.memory();
1100     if (phi->is_Phi() && phi->in(0) == region) {
1101       mms.set_memory(gvn.transform(phi));
1102     }
1103   }
1104   uint tos = kit.jvms()->stkoff() + kit.sp();
1105   uint limit = slow_map->req();
1106   for (uint i = TypeFunc::Parms; i < limit; i++) {
1107     // Skip unused stack slots; fast forward to monoff();
1108     if (i == tos) {
1109       i = kit.jvms()->monoff();
 1110       if (i >= limit) break;
1111     }
1112     Node* m = kit.map()->in(i);
1113     Node* n = slow_map->in(i);
1114     if (m != n) {
1115 #ifdef ASSERT
1116       if (m->is_InlineType() != n->is_InlineType()) {
1117         InlineTypeNode* unique_vt = m->is_InlineType() ? m->as_InlineType() : n->as_InlineType();
1118         assert(unique_vt->is_allocated(&gvn), "InlineType can be merged with an oop only if it is allocated");
1119       }
1120 #endif
1121       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1122       Node* phi = PhiNode::make(region, m, t);
1123       phi->set_req(2, n);
1124       kit.map()->set_req(i, gvn.transform(phi));
1125     }
1126   }
1127   return kit.transfer_exceptions_into_jvms();
1128 }
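A minimal sketch (toy IR, not the C2 node classes) of the merge loop above: slots where the fast-path and slow-path maps agree pass through unchanged, while disagreeing slots get a two-input phi rooted at the merge region.

#include <cstddef>
#include <vector>

struct ToyNode { virtual ~ToyNode() = default; };

struct ToyPhi : ToyNode {
  ToyNode* region;
  ToyNode* fast_in;
  ToyNode* slow_in;
  ToyPhi(ToyNode* r, ToyNode* f, ToyNode* s)
      : region(r), fast_in(f), slow_in(s) {}
};

// Slots that agree pass through; slots that differ get a 2-input phi.
// (Nodes are deliberately leaked; this is a sketch, not production code.)
std::vector<ToyNode*> merge_maps(ToyNode* region,
                                 const std::vector<ToyNode*>& fast,
                                 const std::vector<ToyNode*>& slow) {
  std::vector<ToyNode*> merged(fast.size());
  for (std::size_t i = 0; i < fast.size(); i++) {
    merged[i] = (fast[i] == slow[i]) ? fast[i]
                                     : new ToyPhi(region, fast[i], slow[i]);
  }
  return merged;
}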
1129 
1130 
1131 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1132   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1133   bool input_not_const;
1134   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1135   Compile* C = Compile::current();
1136   bool should_delay = C->should_delay_inlining();
1137   if (cg != nullptr) {
1138     if (should_delay && IncrementalInlineMH) {
1139       return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1140     } else {
1141       return cg;
1142     }
1143   }
1144   int bci = jvms->bci();
1145   ciCallProfile profile = caller->call_profile_at_bci(bci);
1146   int call_site_count = caller->scale_count(profile.count());
1147 
1148   if (IncrementalInlineMH && (AlwaysIncrementalInline ||
1149                             (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
1150     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1151   } else {
1152     // Out-of-line call.
1153     return CallGenerator::for_direct_call(callee);
1154   }
1155 }
1156 
1157 
1158 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1159   GraphKit kit(jvms);
1160   PhaseGVN& gvn = kit.gvn();
1161   Compile* C = kit.C;
1162   vmIntrinsics::ID iid = callee->intrinsic_id();
1163   input_not_const = true;
1164   if (StressMethodHandleLinkerInlining) {
1165     allow_inline = false;
1166   }
1167   switch (iid) {
1168   case vmIntrinsics::_invokeBasic:
1169     {
1170       // Get MethodHandle receiver:
1171       Node* receiver = kit.argument(0);
1172       if (receiver->Opcode() == Op_ConP) {
1173         input_not_const = false;
1174         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1175         if (recv_toop != nullptr) {
1176           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1177           const int vtable_index = Method::invalid_vtable_index;

1185                                                 false /* call_does_dispatch */,
1186                                                 jvms,
1187                                                 allow_inline,
1188                                                 PROB_ALWAYS);
1189           return cg;
1190         } else {
1191           assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1192                  Type::str(receiver->bottom_type()));
1193           print_inlining_failure(C, callee, jvms, "receiver is always null");
1194         }
1195       } else {
1196         print_inlining_failure(C, callee, jvms, "receiver not constant");
1197       }
1198   } break;
1199 
1200   case vmIntrinsics::_linkToVirtual:
1201   case vmIntrinsics::_linkToStatic:
1202   case vmIntrinsics::_linkToSpecial:
1203   case vmIntrinsics::_linkToInterface:
1204     {
1205       int nargs = callee->arg_size();
1206       // Get MemberName argument:
1207       Node* member_name = kit.argument(nargs - 1);
1208       if (member_name->Opcode() == Op_ConP) {
1209         input_not_const = false;
1210         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1211         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1212 
1213         if (!ciMethod::is_consistent_info(callee, target)) {
1214           print_inlining_failure(C, callee, jvms, "signatures mismatch");
1215           return nullptr;
1216         }
1217 
1218         // In lambda forms we erase signature types to avoid resolving issues
1219         // involving class loaders.  When we optimize a method handle invoke
1220         // to a direct call we must cast the receiver and arguments to its
1221         // actual types.
1222         ciSignature* signature = target->signature();
1223         const int receiver_skip = target->is_static() ? 0 : 1;
1224         // Cast receiver to its type.
1225         if (!target->is_static()) {
1226           Node* recv = kit.argument(0);
1227           Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass(), target->receiver_maybe_larval());
1228           if (casted_recv->is_top()) {
1229             print_inlining_failure(C, callee, jvms, "argument types mismatch");
1230             return nullptr; // FIXME: effectively dead; issue a halt node instead
1231           } else if (casted_recv != recv) {
1232             kit.set_argument(0, casted_recv);
1233           }
1234         }
 1235         // Cast reference arguments to their types.
1236         for (int i = 0, j = 0; i < signature->count(); i++) {
1237           ciType* t = signature->type_at(i);
1238           if (t->is_klass()) {
1239             Node* arg = kit.argument(receiver_skip + j);
1240             Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass(), false);
1241             if (casted_arg->is_top()) {
1242               print_inlining_failure(C, callee, jvms, "argument types mismatch");
1243               return nullptr; // FIXME: effectively dead; issue a halt node instead
1244             } else if (casted_arg != arg) {
1245               kit.set_argument(receiver_skip + j, casted_arg);
1246             }
1247           }
1248           j += t->size();  // long and double take two slots
1249         }
1250 
1251         // Try to get the most accurate receiver type
1252         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1253         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1254         int  vtable_index       = Method::invalid_vtable_index;
1255         bool call_does_dispatch = false;
1256 
1257         ciKlass* speculative_receiver_type = nullptr;
1258         if (is_virtual_or_interface) {
1259           ciInstanceKlass* klass = target->holder();
1260           Node*             receiver_node = kit.argument(0);
1261           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1262           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1263           // optimize_virtual_call() takes 2 different holder
1264           // arguments for a corner case that doesn't apply here (see
1265           // Parse::do_call())
1266           target = C->optimize_virtual_call(caller, klass, klass,
1267                                             target, receiver_type, is_virtual,
1268                                             call_does_dispatch, vtable_index, // out-parameters
1269                                             false /* check_access */);
1270           // We lack profiling at this call but type speculation may
1271           // provide us with a type
1272           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1273         }
1274         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1275                                               allow_inline,
1276                                               PROB_ALWAYS,
1277                                               speculative_receiver_type,
1278                                               true);
1279         return cg;
1280       } else {
1281         print_inlining_failure(C, callee, jvms, "member_name not constant");
1282       }
1283   } break;
1284 
1285   case vmIntrinsics::_linkToNative:
1286     print_inlining_failure(C, callee, jvms, "native call");
1287     break;
1288 
1289   default:
1290     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1291     break;
1292   }
1293   return nullptr;
1294 }
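The casting step above exists because lambda forms erase reference types; before the linker intrinsic becomes a direct call, each reference argument is narrowed back to the type the target declares, and a narrowing that yields an impossible type (top) proves the path dead. A toy sketch (hypothetical type lattice) of that check:

#include <optional>

enum class ToyType { Object, String, Top };  // Top = impossible/empty type

// Hypothetical narrowing join for a two-level reference hierarchy.
ToyType narrow(ToyType actual, ToyType declared) {
  if (actual == declared) return actual;
  if (actual == ToyType::Object) return declared;  // Object narrows to declared
  if (declared == ToyType::Object) return actual;  // already narrower
  return ToyType::Top;                             // incompatible: no possible value
}

// Returns nullopt when the cast proves the call path dead
// (the "argument types mismatch" case above).
std::optional<ToyType> cast_argument(ToyType arg, ToyType declared) {
  ToyType t = narrow(arg, declared);
  if (t == ToyType::Top) return std::nullopt;
  return t;
}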
1295 
1296 //------------------------PredicatedIntrinsicGenerator------------------------------
1297 // Internal class which handles all predicated Intrinsic calls.
1298 class PredicatedIntrinsicGenerator : public CallGenerator {

1330   //        do_intrinsic(0)
1331   //    else
1332   //    if (predicate(1))
1333   //        do_intrinsic(1)
1334   //    ...
1335   //    else
1336   //        do_java_comp
1337 
1338   GraphKit kit(jvms);
1339   PhaseGVN& gvn = kit.gvn();
1340 
1341   CompileLog* log = kit.C->log();
1342   if (log != nullptr) {
1343     log->elem("predicated_intrinsic bci='%d' method='%d'",
1344               jvms->bci(), log->identify(method()));
1345   }
1346 
1347   if (!method()->is_static()) {
 1348     // We need an explicit receiver null_check before checking its type in the predicate.
 1349     // We share a map with the caller, so its JVMS gets adjusted.
1350     kit.null_check_receiver_before_call(method());
1351     if (kit.stopped()) {
1352       return kit.transfer_exceptions_into_jvms();
1353     }
1354   }
1355 
1356   int n_predicates = _intrinsic->predicates_count();
1357   assert(n_predicates > 0, "sanity");
1358 
1359   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1360 
1361   // Region for normal compilation code if intrinsic failed.
1362   Node* slow_region = new RegionNode(1);
1363 
1364   int results = 0;
1365   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1366 #ifdef ASSERT
1367     JVMState* old_jvms = kit.jvms();
1368     SafePointNode* old_map = kit.map();
1369     Node* old_io  = old_map->i_o();
1370     Node* old_mem = old_map->memory();