src/hotspot/share/opto/callGenerator.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "ci/ciCallSite.hpp"
  28 #include "ci/ciObjArray.hpp"
  29 #include "ci/ciMemberName.hpp"
  30 #include "ci/ciMethodHandle.hpp"
  31 #include "classfile/javaClasses.hpp"
  32 #include "compiler/compileLog.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"

  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "runtime/os.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/debug.hpp"
  45 
  46 // Utility function.
  47 const TypeFunc* CallGenerator::tf() const {
  48   return TypeFunc::make(method());
  49 }
  50 
  51 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  52   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  53 }
  54 
  55 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  56   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  57   return is_inlined_method_handle_intrinsic(symbolic_info, m);

 101   GraphKit& exits = parser.exits();
 102 
 103   if (C->failing()) {
 104     while (exits.pop_exception_state() != nullptr) ;
 105     return nullptr;
 106   }
 107 
 108   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 109 
 110   // Simply return the exit state of the parser,
 111   // augmented by any exceptional states.
 112   return exits.transfer_exceptions_into_jvms();
 113 }
 114 
 115 //---------------------------DirectCallGenerator------------------------------
 116 // Internal class which handles all out-of-line calls w/o receiver type checks.
 117 class DirectCallGenerator : public CallGenerator {
 118  private:
 119   CallStaticJavaNode* _call_node;
 120   // Force separate memory and I/O projections for the exceptional
 121   // paths to facilitate late inlining.
 122   bool                _separate_io_proj;
 123 
 124 protected:
 125   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 126 
 127  public:
 128   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 129     : CallGenerator(method),
 130       _separate_io_proj(separate_io_proj)
 131   {
 132   }
 133   virtual JVMState* generate(JVMState* jvms);
 134 
 135   virtual CallNode* call_node() const { return _call_node; }
 136   virtual CallGenerator* with_call_node(CallNode* call) {
 137     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 138     dcg->set_call_node(call->as_CallStaticJava());
 139     return dcg;
 140   }
 141 };
 142 
 143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 144   GraphKit kit(jvms);
 145   kit.C->print_inlining_update(this);

 146   bool is_static = method()->is_static();
 147   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 148                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 149 
 150   if (kit.C->log() != nullptr) {
 151     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 152   }
 153 
 154   CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
 155   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 156     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 157     // additional information about the method being invoked should be attached
 158     // to the call site to make resolution logic work
 159     // (see SharedRuntime::resolve_static_call_C).
 160     call->set_override_symbolic_info(true);
 161   }
 162   _call_node = call;  // Save the call node in case we need it later
 163   if (!is_static) {
 164     // Make an explicit receiver null_check as part of this call.
 165     // Since we share a map with the caller, his JVMS gets adjusted.
 166     kit.null_check_receiver_before_call(method());
 167     if (kit.stopped()) {
 168       // And dump it back to the caller, decorated with any exceptions:
 169       return kit.transfer_exceptions_into_jvms();
 170     }
 171     // Mark the call node as virtual, sort of:
 172     call->set_optimized_virtual(true);
 173     if (method()->is_method_handle_intrinsic() ||
 174         method()->is_compiled_lambda_form()) {
 175       call->set_method_handle_invoke(true);
 176     }
 177   }
 178   kit.set_arguments_for_java_call(call);
 179   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 180   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 181   kit.push_node(method()->return_type()->basic_type(), ret);
 182   return kit.transfer_exceptions_into_jvms();
 183 }
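// A minimal usage sketch (hypothetical driver code, using only calls that
// appear elsewhere in this file): a direct-call generator is obtained from
// the factory and driven with the caller's JVMS, the same way the
// failed-inline fallback further down in this file does it.
//
//   CallGenerator* cg = CallGenerator::for_direct_call(callee);
//   JVMState* new_jvms = cg->generate(kit.sync_jvms());
//   kit.add_exception_states_from(new_jvms);
//   kit.set_jvms(new_jvms);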
 184 
 185 //--------------------------VirtualCallGenerator------------------------------
 186 // Internal class which handles all out-of-line calls checking receiver type.
 187 class VirtualCallGenerator : public CallGenerator {
 188 private:
 189   int _vtable_index;
 190   bool _separate_io_proj;
 191   CallDynamicJavaNode* _call_node;
 192 
 193 protected:
 194   void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
 195 
 196 public:
 197   VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
 198     : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
 199   {
 200     assert(vtable_index == Method::invalid_vtable_index ||
 201            vtable_index >= 0, "either invalid or usable");
 202   }
 203   virtual bool      is_virtual() const          { return true; }
 204   virtual JVMState* generate(JVMState* jvms);
 205 
 206   virtual CallNode* call_node() const { return _call_node; }
 207   int vtable_index() const { return _vtable_index; }
 208 
 209   virtual CallGenerator* with_call_node(CallNode* call) {
 210     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 211     cg->set_call_node(call->as_CallDynamicJava());
 212     return cg;
 213   }
 214 };
 215 
 216 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 217   GraphKit kit(jvms);
 218   Node* receiver = kit.argument(0);
 219 
 220   kit.C->print_inlining_update(this);
 221 
 222   if (kit.C->log() != nullptr) {
 223     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 224   }
 225 
 226   // If the receiver is a constant null, do not torture the system
 227   // by attempting to call through it.  The compile will proceed
 228   // correctly, but may bail out in final_graph_reshaping, because
 229   // the call instruction will have a seemingly deficient out-count.
 230   // (The bailout says something misleading about an "infinite loop".)
 231   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 232     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 233     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 234     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 235     kit.inc_sp(arg_size);  // restore arguments
 236     kit.uncommon_trap(Deoptimization::Reason_null_check,
 237                       Deoptimization::Action_none,
 238                       nullptr, "null receiver");
 239     return kit.transfer_exceptions_into_jvms();

 259   }
 260 
 261   assert(!method()->is_static(), "virtual call must not be to static");
 262   assert(!method()->is_final(), "virtual call should not be to final");
 263   assert(!method()->is_private(), "virtual call should not be to private");
 264   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 265          "no vtable calls if +UseInlineCaches");
 266   address target = SharedRuntime::get_resolve_virtual_call_stub();
 267   // Normal inline cache used for call
 268   CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
 269   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 270     // To be able to issue a direct call (optimized virtual or virtual)
 271     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
 272     // about the method being invoked should be attached to the call site to
 273     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
 274     call->set_override_symbolic_info(true);
 275   }
 276   _call_node = call;  // Save the call node in case we need it later
 277 
 278   kit.set_arguments_for_java_call(call);
 279   kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
 280   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 281   kit.push_node(method()->return_type()->basic_type(), ret);
 282 
 283   // Represent the effect of an implicit receiver null_check
 284   // as part of this call.  Since we share a map with the caller,
 285   // his JVMS gets adjusted.
 286   kit.cast_not_null(receiver);
 287   return kit.transfer_exceptions_into_jvms();
 288 }
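// The constant-null bailout above follows the standard uncommon-trap idiom:
// deoptimization re-executes the invoke bytecode in the interpreter, which
// expects the argument words back on the expression stack, hence the
// inc_sp() before the trap. Condensed:
//
//   kit.inc_sp(arg_size);  // re-push the consumed argument words
//   kit.uncommon_trap(Deoptimization::Reason_null_check,
//                     Deoptimization::Action_none,
//                     nullptr, "null receiver");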
 289 
 290 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 291   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 292   return new ParseGenerator(m, expected_uses);
 293 }
 294 
 295 // As a special case, the JVMS passed to this CallGenerator is
 296 // for the method execution already in progress, not just the JVMS
 297 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 298 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {

 352     return DirectCallGenerator::generate(jvms);
 353   }
 354 
 355   virtual void print_inlining_late(InliningResult result, const char* msg) {
 356     CallNode* call = call_node();
 357     Compile* C = Compile::current();
 358     C->print_inlining_assert_ready();
 359     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
 360     C->print_inlining_move_to(this);
 361     C->print_inlining_update_delayed(this);
 362   }
 363 
 364   virtual void set_unique_id(jlong id) {
 365     _unique_id = id;
 366   }
 367 
 368   virtual jlong unique_id() const {
 369     return _unique_id;
 370   }
 371 
 372   virtual CallGenerator* with_call_node(CallNode* call) {
 373     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 374     cg->set_call_node(call->as_CallStaticJava());
 375     return cg;
 376   }
 377 };
 378 
 379 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 380   return new LateInlineCallGenerator(method, inline_cg);
 381 }
 382 
 383 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 384   ciMethod* _caller;
 385   bool _input_not_const;
 386 
 387   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 388 
 389  public:
 390   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 391     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

 413     cg->set_call_node(call->as_CallStaticJava());
 414     return cg;
 415   }
 416 };
 417 
 418 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 419   // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have different
 420   // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
 421   // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
 422   // of late inlining with exceptions.
 423   assert(!jvms->method()->has_exception_handlers() ||
 424          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 425           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 426   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 427   bool allow_inline = C->inlining_incrementally();
 428   bool input_not_const = true;
 429   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 430   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 431 
 432   if (cg != nullptr) {
 433     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
 434     _inline_cg = cg;
 435     C->dec_number_of_mh_late_inlines();
 436     return true;
 437   } else {
 438     // A method handle call which has a constant appendix argument should either be inlined or replaced with a direct call,
 439     // unless there's a signature mismatch between caller and callee. If such a failure occurs, there's not much to improve later,
 440     // so don't reinstall the generator, to avoid pushing it back and forth between IGVN and incremental inlining indefinitely.
 441     return false;
 442   }
 443 }
 444 
 445 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 446   assert(IncrementalInlineMH, "required");
 447   Compile::current()->inc_number_of_mh_late_inlines();
 448   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 449   return cg;
 450 }
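// Bookkeeping note (a reading aid, not new behavior): the counter bumped
// here is balanced in do_late_inline_check() above once a generator is
// found and the delayed call is either inlined or strength-reduced to a
// direct call.
//
//   Compile::current()->inc_number_of_mh_late_inlines();  // registration (here)
//   ...
//   C->dec_number_of_mh_late_inlines();                   // resolution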
 451 
 452 // Allow inlining decisions to be delayed

 575 
 576 void LateInlineMHCallGenerator::do_late_inline() {
 577   CallGenerator::do_late_inline_helper();
 578 }
 579 
 580 void LateInlineVirtualCallGenerator::do_late_inline() {
 581   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 582   CallGenerator::do_late_inline_helper();
 583 }
 584 
 585 void CallGenerator::do_late_inline_helper() {
 586   assert(is_late_inline(), "only late inline allowed");
 587 
 588   // Can't inline it
 589   CallNode* call = call_node();
 590   if (call == nullptr || call->outcnt() == 0 ||
 591       call->in(0) == nullptr || call->in(0)->is_top()) {
 592     return;
 593   }
 594 
 595   const TypeTuple *r = call->tf()->domain();
 596   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 597     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 598       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 599       return;
 600     }
 601   }
 602 
 603   if (call->in(TypeFunc::Memory)->is_top()) {
 604     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 605     return;
 606   }
 607   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 608     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 609     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 610       return; // dead path
 611     }
 612   }
 613 
 614   // check for unreachable loop
 615   CallProjections callprojs;
 616   call->extract_projections(&callprojs, true);
 617   if ((callprojs.fallthrough_catchproj == call->in(0)) ||
 618       (callprojs.catchall_catchproj    == call->in(0)) ||
 619       (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 620       (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
 621       (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 622       (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 623       (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
 624       (callprojs.exobj   != nullptr && call->find_edge(callprojs.exobj) != -1)) {
 625     return;
 626   }
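// A sketch of the degenerate shape the check above guards against: on an
// unreachable infinite loop a call can become its own predecessor through
// one of its projections, e.g.
//
//   call->in(0) == callprojs.fallthrough_catchproj  // control self-loop
//
// Replaying a parse over such a self-referential call makes no sense, so
// the late inline is skipped.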
 627 
 628   Compile* C = Compile::current();
 629   // Remove inlined methods from Compiler's lists.
 630   if (call->is_macro()) {
 631     C->remove_macro_node(call);
 632   }
 633 
 634   // The call is marked as pure (no important side effects), but the result isn't used.
 635   // It's safe to remove the call.
 636   bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
 637 
 638   if (is_pure_call() && result_not_used) {
 639     GraphKit kit(call->jvms());
 640     kit.replace_call(call, C->top(), true);
 641   } else {
 642     // Make a clone of the JVMState that is appropriate for driving a parse
 643     JVMState* old_jvms = call->jvms();
 644     JVMState* jvms = old_jvms->clone_shallow(C);
 645     uint size = call->req();
 646     SafePointNode* map = new SafePointNode(size, jvms);
 647     for (uint i1 = 0; i1 < size; i1++) {
 648       map->init_req(i1, call->in(i1));
 649     }
 650 

 651     // Make sure the state is a MergeMem for parsing.
 652     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 653       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 654       C->initial_gvn()->set_type_bottom(mem);
 655       map->set_req(TypeFunc::Memory, mem);
 656     }
 657 
 658     uint nargs = method()->arg_size();
 659     // blow away old call arguments
 660     Node* top = C->top();
 661     for (uint i1 = 0; i1 < nargs; i1++) {
 662       map->set_req(TypeFunc::Parms + i1, top);
 663     }
 664     jvms->set_map(map);
 665 
 666     // Make enough space in the expression stack to transfer
 667     // the incoming arguments and return value.
 668     map->ensure_stack(jvms, jvms->method()->max_stack());
 669     for (uint i1 = 0; i1 < nargs; i1++) {
 670       map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
 671     }
 672 
 673     C->print_inlining_assert_ready();
 674 
 675     C->print_inlining_move_to(this);
 676 
 677     C->log_late_inline(this);
 678 
 679     // JVMState is ready, so it is time to perform some checks and prepare for the inlining attempt.
 680     if (!do_late_inline_check(C, jvms)) {
 681       map->disconnect_inputs(C);
 682       C->print_inlining_update_delayed(this);
 683       return;
 684     }
 685 
 686     // Set up default node notes to be picked up by the inlining
 687     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 688     if (old_nn != nullptr) {
 689       Node_Notes* entry_nn = old_nn->clone(C);
 690       entry_nn->set_jvms(jvms);
 691       C->set_default_node_notes(entry_nn);
 692     }
 693 
 694     // Now perform the inlining using the synthesized JVMState
 695     JVMState* new_jvms = inline_cg()->generate(jvms);
 696     if (new_jvms == nullptr)  return;  // no change
 697     if (C->failing())      return;
 698 
 699     // Capture any exceptional control flow
 700     GraphKit kit(new_jvms);
 701 
 702     // Find the result object
 703     Node* result = C->top();
 704     int   result_size = method()->return_type()->size();
 705     if (result_size != 0 && !kit.stopped()) {
 706       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 707     }
 708 
 709     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 710       result = kit.must_be_not_null(result, false);
 711     }
 712 
 713     if (inline_cg()->is_inline()) {
 714       C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
 715       C->env()->notice_inlined_method(inline_cg()->method());
 716     }
 717     C->set_inlining_progress(true);
 718     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 719     kit.replace_call(call, result, true);
 720   }
 721 }
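// In outline, the replay performed by this helper (a condensed restatement
// of the code above, not a separate API):
//
//   JVMState* jvms = call->jvms()->clone_shallow(C);   // fresh frame state
//   SafePointNode* map = new SafePointNode(call->req(), jvms);
//   ... copy the call's inputs, re-stage arguments on the expression stack ...
//   JVMState* new_jvms = inline_cg()->generate(jvms);  // parse the callee
//   kit.replace_call(call, result, true);              // splice in the result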
 722 
 723 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 724 
 725  public:
 726   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 727     LateInlineCallGenerator(method, inline_cg) {}
 728 
 729   virtual JVMState* generate(JVMState* jvms) {
 730     Compile *C = Compile::current();
 731 
 732     C->log_inline_id(this);
 733 
 734     C->add_string_late_inline(this);
 735 
 736     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 737     return new_jvms;
 738   }

 921     // Inline failed, so make a direct call.
 922     assert(_if_hit->is_inline(), "must have been a failed inline");
 923     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
 924     new_jvms = cg->generate(kit.sync_jvms());
 925   }
 926   kit.add_exception_states_from(new_jvms);
 927   kit.set_jvms(new_jvms);
 928 
 929   // Need to merge slow and fast?
 930   if (slow_map == nullptr) {
 931     // The fast path is the only path remaining.
 932     return kit.transfer_exceptions_into_jvms();
 933   }
 934 
 935   if (kit.stopped()) {
 936     // Inlined method threw an exception, so it's just the slow path after all.
 937     kit.set_jvms(slow_jvms);
 938     return kit.transfer_exceptions_into_jvms();
 939   }
 940 
 941   // There are two branches, and the replaced nodes are only valid on
 942   // one of them: restore the replaced nodes to what they were before
 943   // the branch.
 944   kit.map()->set_replaced_nodes(replaced_nodes);
 945 
 946   // Finish the diamond.
 947   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
 948   RegionNode* region = new RegionNode(3);
 949   region->init_req(1, kit.control());
 950   region->init_req(2, slow_map->control());
 951   kit.set_control(gvn.transform(region));
 952   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
 953   iophi->set_req(2, slow_map->i_o());
 954   kit.set_i_o(gvn.transform(iophi));
 955   // Merge memory
 956   kit.merge_memory(slow_map->merged_memory(), region, 2);
 957   // Transform new memory Phis.
 958   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
 959     Node* phi = mms.memory();
 960     if (phi->is_Phi() && phi->in(0) == region) {
 961       mms.set_memory(gvn.transform(phi));
 962     }
 963   }
 964   uint tos = kit.jvms()->stkoff() + kit.sp();
 965   uint limit = slow_map->req();
 966   for (uint i = TypeFunc::Parms; i < limit; i++) {
 967     // Skip unused stack slots; fast forward to monoff();
 968     if (i == tos) {
 969       i = kit.jvms()->monoff();
 970       if (i >= limit)  break;
 971     }
 972     Node* m = kit.map()->in(i);
 973     Node* n = slow_map->in(i);
 974     if (m != n) {
 975       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 976       Node* phi = PhiNode::make(region, m, t);
 977       phi->set_req(2, n);
 978       kit.map()->set_req(i, gvn.transform(phi));
 979     }
 980   }
 981   return kit.transfer_exceptions_into_jvms();
 982 }
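// The merge above is the standard C2 two-way diamond idiom: a three-input
// RegionNode (slot 0 is the self-reference) joins the fast and slow
// controls, and every value that differs between the two maps gets a
// matching PhiNode. A minimal sketch for one value (fast_ctl, slow_ctl,
// fast_val, slow_val, val are placeholder names; t is the meet of the two
// value types, as computed above):
//
//   RegionNode* region = new RegionNode(3);
//   region->init_req(1, fast_ctl);
//   region->init_req(2, slow_ctl);
//   Node* phi = PhiNode::make(region, fast_val, t);
//   phi->set_req(2, slow_val);
//   val = gvn.transform(phi);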
 983 
 984 
 985 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
 986   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
 987   bool input_not_const;
 988   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
 989   Compile* C = Compile::current();
 990   if (cg != nullptr) {
 991     if (AlwaysIncrementalInline) {
 992       return CallGenerator::for_late_inline(callee, cg);
 993     } else {
 994       return cg;
 995     }
 996   }
 997   int bci = jvms->bci();
 998   ciCallProfile profile = caller->call_profile_at_bci(bci);
 999   int call_site_count = caller->scale_count(profile.count());
1000 
1001   if (IncrementalInlineMH && call_site_count > 0 &&
1002       (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1003     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1004   } else {
1005     // Out-of-line call.
1006     return CallGenerator::for_direct_call(callee);
1007   }
1008 }
1009 
1010 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1011   GraphKit kit(jvms);
1012   PhaseGVN& gvn = kit.gvn();
1013   Compile* C = kit.C;
1014   vmIntrinsics::ID iid = callee->intrinsic_id();
1015   input_not_const = true;
1016   if (StressMethodHandleLinkerInlining) {
1017     allow_inline = false;
1018   }
1019   switch (iid) {
1020   case vmIntrinsics::_invokeBasic:
1021     {
1022       // Get MethodHandle receiver:
1023       Node* receiver = kit.argument(0);
1024       if (receiver->Opcode() == Op_ConP) {
1025         input_not_const = false;
1026         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1027         if (recv_toop != nullptr) {
1028           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1029           const int vtable_index = Method::invalid_vtable_index;

1041                                                 PROB_ALWAYS);
1042           return cg;
1043         } else {
1044           assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1045                  Type::str(receiver->bottom_type()));
1046           print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1047                                  "receiver is always null");
1048         }
1049       } else {
1050         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1051                                "receiver not constant");
1052       }
1053     }
1054     break;
1055 
1056   case vmIntrinsics::_linkToVirtual:
1057   case vmIntrinsics::_linkToStatic:
1058   case vmIntrinsics::_linkToSpecial:
1059   case vmIntrinsics::_linkToInterface:
1060     {

1061       // Get MemberName argument:
1062       Node* member_name = kit.argument(callee->arg_size() - 1);
1063       if (member_name->Opcode() == Op_ConP) {
1064         input_not_const = false;
1065         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1066         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1067 
1068         if (!ciMethod::is_consistent_info(callee, target)) {
1069           print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1070                                  "signatures mismatch");
1071           return nullptr;
1072         }
1073 
1074         // In lambda forms we erase signature types to avoid resolving issues
1075         // involving class loaders.  When we optimize a method handle invoke
1076         // to a direct call we must cast the receiver and arguments to their
1077         // actual types.
1078         ciSignature* signature = target->signature();
1079         const int receiver_skip = target->is_static() ? 0 : 1;
1080         // Cast receiver to its type.
1081         if (!target->is_static()) {
1082           Node* arg = kit.argument(0);
1083           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1084           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
1085           if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
1086             const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
1087             Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
1088             kit.set_argument(0, cast_obj);
1089           }
1090         }
1091         // Cast reference arguments to their types.
1092         for (int i = 0, j = 0; i < signature->count(); i++) {
1093           ciType* t = signature->type_at(i);
1094           if (t->is_klass()) {
1095             Node* arg = kit.argument(receiver_skip + j);
1096             const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1097             const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
1098             if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
1099               const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
1100               Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
1101               kit.set_argument(receiver_skip + j, cast_obj);
1102             }
1103           }
1104           j += t->size();  // long and double take two slots
1105         }
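        // A worked example of the erasure being undone here (hypothetical
        // types): a lambda form may pass the receiver as plain Object while
        // the vmtarget is, say, String::length. arg_type (Object) does not
        // subsume sig_type (String), so a CheckCastPPNode narrows the
        // argument to String before the direct call is emitted, preserving
        // any speculative part of the type.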
1106 
1107         // Try to get the most accurate receiver type
1108         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1109         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1110         int  vtable_index       = Method::invalid_vtable_index;
1111         bool call_does_dispatch = false;
1112 
1113         ciKlass* speculative_receiver_type = nullptr;
1114         if (is_virtual_or_interface) {
1115           ciInstanceKlass* klass = target->holder();
1116           Node*             receiver_node = kit.argument(0);
1117           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1118           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1119           // optimize_virtual_call() takes 2 different holder
1120           // arguments for a corner case that doesn't apply here (see
1121           // Parse::do_call())
1122           target = C->optimize_virtual_call(caller, klass, klass,
1123                                             target, receiver_type, is_virtual,
1124                                             call_does_dispatch, vtable_index, // out-parameters
1125                                             false /* check_access */);
1126           // We lack profiling at this call but type speculation may
1127           // provide us with a type
1128           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1129         }
1130         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1131                                               allow_inline,
1132                                               PROB_ALWAYS,
1133                                               speculative_receiver_type);

1134         return cg;
1135       } else {
1136         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1137                                "member_name not constant");
1138       }
1139     }
1140     break;
1141 
1142     case vmIntrinsics::_linkToNative:
1143     print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1144                            "native call");
1145     break;
1146 
1147   default:
1148     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1149     break;
1150   }
1151   return nullptr;
1152 }
1153 

1188   //        do_intrinsic(0)
1189   //    else
1190   //    if (predicate(1))
1191   //        do_intrinsic(1)
1192   //    ...
1193   //    else
1194   //        do_java_comp
1195 
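  // Rendered as a control-flow sketch (illustrative only):
  //
  //   predicate(0) --true--> do_intrinsic(0) -----------+
  //        | false                                      |
  //   predicate(1) --true--> do_intrinsic(1) -----------+--> merged exit
  //        | false                                      |
  //   slow_region ---------> do_java_comp (normal path)-+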
1196   GraphKit kit(jvms);
1197   PhaseGVN& gvn = kit.gvn();
1198 
1199   CompileLog* log = kit.C->log();
1200   if (log != nullptr) {
1201     log->elem("predicated_intrinsic bci='%d' method='%d'",
1202               jvms->bci(), log->identify(method()));
1203   }
1204 
1205   if (!method()->is_static()) {
1206     // We need an explicit receiver null_check before checking its type in the predicate.
1207     // We share a map with the caller, so his JVMS gets adjusted.
1208     Node* receiver = kit.null_check_receiver_before_call(method());
1209     if (kit.stopped()) {
1210       return kit.transfer_exceptions_into_jvms();
1211     }
1212   }
1213 
1214   int n_predicates = _intrinsic->predicates_count();
1215   assert(n_predicates > 0, "sanity");
1216 
1217   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1218 
1219   // Region for normal compilation code if intrinsic failed.
1220   Node* slow_region = new RegionNode(1);
1221 
1222   int results = 0;
1223   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1224 #ifdef ASSERT
1225     JVMState* old_jvms = kit.jvms();
1226     SafePointNode* old_map = kit.map();
1227     Node* old_io  = old_map->i_o();
1228     Node* old_mem = old_map->memory();

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "ci/ciCallSite.hpp"
  28 #include "ci/ciObjArray.hpp"
  29 #include "ci/ciMemberName.hpp"
  30 #include "ci/ciMethodHandle.hpp"
  31 #include "classfile/javaClasses.hpp"
  32 #include "compiler/compileLog.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/inlinetypenode.hpp"
  39 #include "opto/parse.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "runtime/os.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "utilities/debug.hpp"
  46 
  47 // Utility function.
  48 const TypeFunc* CallGenerator::tf() const {
  49   return TypeFunc::make(method());
  50 }
  51 
  52 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  53   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  54 }
  55 
  56 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  57   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  58   return is_inlined_method_handle_intrinsic(symbolic_info, m);

 102   GraphKit& exits = parser.exits();
 103 
 104   if (C->failing()) {
 105     while (exits.pop_exception_state() != nullptr) ;
 106     return nullptr;
 107   }
 108 
 109   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 110 
 111   // Simply return the exit state of the parser,
 112   // augmented by any exceptional states.
 113   return exits.transfer_exceptions_into_jvms();
 114 }
 115 
 116 //---------------------------DirectCallGenerator------------------------------
 117 // Internal class which handles all out-of-line calls w/o receiver type checks.
 118 class DirectCallGenerator : public CallGenerator {
 119  private:
 120   CallStaticJavaNode* _call_node;
 121   // Force separate memory and I/O projections for the exceptional
 122   // paths to facilitate late inlining.
 123   bool                _separate_io_proj;
 124 
 125 protected:
 126   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 127 
 128  public:
 129   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 130     : CallGenerator(method),
 131       _call_node(nullptr),
 132       _separate_io_proj(separate_io_proj)
 133   {
 134     if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
 135       // If that call has not been optimized by the time optimizations are over,
 136       // we'll need to add a call to create an inline type instance from the klass
 137       // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
 138       // Separating memory and I/O projections for exceptions is required to
 139       // perform that graph transformation.
 140       _separate_io_proj = true;
 141     }
 142   }
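  // Background for the override above (Valhalla-specific): a method handle
  // intrinsic may return an inline type as a set of field values rather
  // than a single oop. If such a call survives until macro expansion, the
  // graph around it is reshaped (see PhaseMacroExpand::expand_mh_intrinsic_return,
  // referenced in the comment), and that reshaping needs the exceptional
  // memory and I/O paths on their own projections -- hence _separate_io_proj
  // is forced to true.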
 143   virtual JVMState* generate(JVMState* jvms);
 144 
 145   virtual CallNode* call_node() const { return _call_node; }
 146   virtual CallGenerator* with_call_node(CallNode* call) {
 147     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 148     dcg->set_call_node(call->as_CallStaticJava());
 149     return dcg;
 150   }
 151 };
 152 
 153 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 154   GraphKit kit(jvms);
 155   kit.C->print_inlining_update(this);
 156   PhaseGVN& gvn = kit.gvn();
 157   bool is_static = method()->is_static();
 158   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 159                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 160 
 161   if (kit.C->log() != nullptr) {
 162     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 163   }
 164 
 165   CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
 166   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 167     // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
 168     // additional information about the method being invoked should be attached
 169     // to the call site to make resolution logic work
 170     // (see SharedRuntime::resolve_static_call_C).
 171     call->set_override_symbolic_info(true);
 172   }
 173   _call_node = call;  // Save the call node in case we need it later
 174   if (!is_static) {
 175     // Make an explicit receiver null_check as part of this call.
 176     // Since we share a map with the caller, his JVMS gets adjusted.
 177     kit.null_check_receiver_before_call(method());
 178     if (kit.stopped()) {
 179       // And dump it back to the caller, decorated with any exceptions:
 180       return kit.transfer_exceptions_into_jvms();
 181     }
 182     // Mark the call node as virtual, sort of:
 183     call->set_optimized_virtual(true);
 184     if (method()->is_method_handle_intrinsic() ||
 185         method()->is_compiled_lambda_form()) {
 186       call->set_method_handle_invoke(true);
 187     }
 188   }
 189   kit.set_arguments_for_java_call(call, is_late_inline());
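  // (Assumption, Valhalla-specific: argument setup may now emit control
  // flow of its own -- e.g. null checks while scalarizing inline-type
  // arguments -- so it can stop the kit; the check below bails out through
  // the usual exception-transfer path.)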
 190   if (kit.stopped()) {
 191     return kit.transfer_exceptions_into_jvms();
 192   }
 193   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 194   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 195   kit.push_node(method()->return_type()->basic_type(), ret);
 196   return kit.transfer_exceptions_into_jvms();
 197 }
 198 
 199 //--------------------------VirtualCallGenerator------------------------------
 200 // Internal class which handles all out-of-line calls checking receiver type.
 201 class VirtualCallGenerator : public CallGenerator {
 202 private:
 203   int _vtable_index;
 204   bool _separate_io_proj;
 205   CallDynamicJavaNode* _call_node;
 206 
 207 protected:
 208   void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
 209 
 210 public:
 211   VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
 212     : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
 213   {
 214     assert(vtable_index == Method::invalid_vtable_index ||
 215            vtable_index >= 0, "either invalid or usable");
 216   }
 217   virtual bool      is_virtual() const          { return true; }
 218   virtual JVMState* generate(JVMState* jvms);
 219 
 220   virtual CallNode* call_node() const { return _call_node; }
 221   int vtable_index() const { return _vtable_index; }
 222 
 223   virtual CallGenerator* with_call_node(CallNode* call) {
 224     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 225     cg->set_call_node(call->as_CallDynamicJava());
 226     return cg;
 227   }
 228 };
 229 
 230 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 231   GraphKit kit(jvms);
 232   Node* receiver = kit.argument(0);

 233   kit.C->print_inlining_update(this);
 234 
 235   if (kit.C->log() != nullptr) {
 236     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 237   }
 238 
 239   // If the receiver is a constant null, do not torture the system
 240   // by attempting to call through it.  The compile will proceed
 241   // correctly, but may bail out in final_graph_reshaping, because
 242   // the call instruction will have a seemingly deficient out-count.
 243   // (The bailout says something misleading about an "infinite loop".)
 244   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 245     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 246     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 247     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 248     kit.inc_sp(arg_size);  // restore arguments
 249     kit.uncommon_trap(Deoptimization::Reason_null_check,
 250                       Deoptimization::Action_none,
 251                       nullptr, "null receiver");
 252     return kit.transfer_exceptions_into_jvms();

 272   }
 273 
 274   assert(!method()->is_static(), "virtual call must not be to static");
 275   assert(!method()->is_final(), "virtual call should not be to final");
 276   assert(!method()->is_private(), "virtual call should not be to private");
 277   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 278          "no vtable calls if +UseInlineCaches");
 279   address target = SharedRuntime::get_resolve_virtual_call_stub();
 280   // Normal inline cache used for call
 281   CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
 282   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 283     // To be able to issue a direct call (optimized virtual or virtual)
 284     // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
 285     // about the method being invoked should be attached to the call site to
 286     // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
 287     call->set_override_symbolic_info(true);
 288   }
 289   _call_node = call;  // Save the call node in case we need it later
 290 
 291   kit.set_arguments_for_java_call(call);
 292   if (kit.stopped()) {
 293     return kit.transfer_exceptions_into_jvms();
 294   }
 295   kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
 296   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 297   kit.push_node(method()->return_type()->basic_type(), ret);
 298 
 299   // Represent the effect of an implicit receiver null_check
 300   // as part of this call.  Since we share a map with the caller,
 301   // his JVMS gets adjusted.
 302   kit.cast_not_null(receiver);
 303   return kit.transfer_exceptions_into_jvms();
 304 }
 305 
 306 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 307   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 308   return new ParseGenerator(m, expected_uses);
 309 }
 310 
 311 // As a special case, the JVMS passed to this CallGenerator is
 312 // for the method execution already in progress, not just the JVMS
 313 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 314 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {

 368     return DirectCallGenerator::generate(jvms);
 369   }
 370 
 371   virtual void print_inlining_late(InliningResult result, const char* msg) {
 372     CallNode* call = call_node();
 373     Compile* C = Compile::current();
 374     C->print_inlining_assert_ready();
 375     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
 376     C->print_inlining_move_to(this);
 377     C->print_inlining_update_delayed(this);
 378   }
 379 
 380   virtual void set_unique_id(jlong id) {
 381     _unique_id = id;
 382   }
 383 
 384   virtual jlong unique_id() const {
 385     return _unique_id;
 386   }
 387 
 388   virtual CallGenerator* inline_cg() {
 389     return _inline_cg;
 390   }
 391 
 392   virtual CallGenerator* with_call_node(CallNode* call) {
 393     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 394     cg->set_call_node(call->as_CallStaticJava());
 395     return cg;
 396   }
 397 };
 398 
 399 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 400   return new LateInlineCallGenerator(method, inline_cg);
 401 }
 402 
 403 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 404   ciMethod* _caller;
 405   bool _input_not_const;
 406 
 407   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 408 
 409  public:
 410   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 411     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

 433     cg->set_call_node(call->as_CallStaticJava());
 434     return cg;
 435   }
 436 };
 437 
 438 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 439   // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have different
 440   // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
 441   // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
 442   // of late inlining with exceptions.
 443   assert(!jvms->method()->has_exception_handlers() ||
 444          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 445           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 446   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 447   bool allow_inline = C->inlining_incrementally();
 448   bool input_not_const = true;
 449   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 450   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 451 
 452   if (cg != nullptr) {
 453     // AlwaysIncrementalInline causes for_method_handle_inline() to
 454     // return a LateInlineCallGenerator. Extract the underlying
 455     // inline CallGenerator from it.
 456     if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
 457       cg = cg->inline_cg();
 458       assert(cg != nullptr, "inline call generator expected");
 459     }
 460 
 461     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
 462     _inline_cg = cg;
 463     C->dec_number_of_mh_late_inlines();
 464     return true;
 465   } else {
 466     // A method handle call which has a constant appendix argument should either be inlined or replaced with a direct call,
 467     // unless there's a signature mismatch between caller and callee. If such a failure occurs, there's not much to improve later,
 468     // so don't reinstall the generator, to avoid pushing it back and forth between IGVN and incremental inlining indefinitely.
 469     return false;
 470   }
 471 }
 472 
 473 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 474   assert(IncrementalInlineMH, "required");
 475   Compile::current()->inc_number_of_mh_late_inlines();
 476   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 477   return cg;
 478 }
 479 
 480 // Allow inlining decisions to be delayed

 603 
 604 void LateInlineMHCallGenerator::do_late_inline() {
 605   CallGenerator::do_late_inline_helper();
 606 }
 607 
 608 void LateInlineVirtualCallGenerator::do_late_inline() {
 609   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 610   CallGenerator::do_late_inline_helper();
 611 }
 612 
 613 void CallGenerator::do_late_inline_helper() {
 614   assert(is_late_inline(), "only late inline allowed");
 615 
 616   // Can't inline it
 617   CallNode* call = call_node();
 618   if (call == nullptr || call->outcnt() == 0 ||
 619       call->in(0) == nullptr || call->in(0)->is_top()) {
 620     return;
 621   }
 622 
 623   const TypeTuple* r = call->tf()->domain_cc();
 624   for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 625     if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
 626       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 627       return;
 628     }
 629   }
 630 
 631   if (call->in(TypeFunc::Memory)->is_top()) {
 632     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 633     return;
 634   }
 635   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 636     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 637     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 638       return; // dead path
 639     }
 640   }
 641 
 642   // check for unreachable loop
 643   CallProjections* callprojs = call->extract_projections(true);
 644   if ((callprojs->fallthrough_catchproj == call->in(0)) ||
 645       (callprojs->catchall_catchproj    == call->in(0)) ||
 646       (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 647       (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
 648       (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 649       (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 650       (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
 651     return;
 652   }
 653 
 654   Compile* C = Compile::current();
 655   // Remove inlined methods from Compiler's lists.
 656   if (call->is_macro()) {
 657     C->remove_macro_node(call);
 658   }
 659 
 660 
 661   bool result_not_used = true;
 662   for (uint i = 0; i < callprojs->nb_resproj; i++) {
 663     if (callprojs->resproj[i] != nullptr) {
 664       if (callprojs->resproj[i]->outcnt() != 0) {
 665         result_not_used = false;
 666       }
 667       if (call->find_edge(callprojs->resproj[i]) != -1) {
 668         return;
 669       }
 670     }
 671   }
 672 
 673   if (is_pure_call() && result_not_used) {
 674     // The call is marked as pure (no important side effects), but the result isn't used.
 675     // It's safe to remove the call.
 676     GraphKit kit(call->jvms());
 677     kit.replace_call(call, C->top(), true);
 678   } else {
 679     // Make a clone of the JVMState that is appropriate for driving a parse
 680     JVMState* old_jvms = call->jvms();
 681     JVMState* jvms = old_jvms->clone_shallow(C);
 682     uint size = call->req();
 683     SafePointNode* map = new SafePointNode(size, jvms);
 684     for (uint i1 = 0; i1 < size; i1++) {
 685       map->init_req(i1, call->in(i1));
 686     }
 687 
 688     PhaseGVN& gvn = *C->initial_gvn();
 689     // Make sure the state is a MergeMem for parsing.
 690     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 691       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 692       gvn.set_type_bottom(mem);
 693       map->set_req(TypeFunc::Memory, mem);
 694     }
 695 

 696     // blow away old call arguments
 697     for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 698       map->set_req(i1, C->top());

 699     }
 700     jvms->set_map(map);
 701 
 702     // Make enough space in the expression stack to transfer
 703     // the incoming arguments and return value.
 704     map->ensure_stack(jvms, jvms->method()->max_stack());
 705     const TypeTuple* domain_sig = call->_tf->domain_sig();
 706     uint nargs = method()->arg_size();
 707     assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 708 
 709     uint j = TypeFunc::Parms;
 710     int arg_num = 0;
 711     for (uint i1 = 0; i1 < nargs; i1++) {
 712       const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 713       if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
 714         // Inline type arguments are not passed by reference: we get an argument per
 715         // field of the inline type. Build InlineTypeNodes from the inline type arguments.
 716         GraphKit arg_kit(jvms, &gvn);
 717         Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
 718         map->set_control(arg_kit.control());
 719         map->set_argument(jvms, i1, vt);
 720       } else {
 721         map->set_argument(jvms, i1, call->in(j++));
 722       }
 723       if (t != Type::HALF) {
 724         arg_num++;
 725       }
 726     }
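    // A worked example of the scalarization handled above (hypothetical
    // inline class): for `value class Point { int x; int y; }` and a callee
    // `m(Point p)`, the call carries one input per field of `p` rather than
    // a single oop. make_from_multi() reassembles those inputs into one
    // InlineTypeNode (advancing `j` past the inputs it consumes, assuming it
    // takes the index by reference), so the parser sees an ordinary argument,
    // while `arg_num` counts declared (non-HALF) arguments.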
 727 
 728     C->print_inlining_assert_ready();
 729 
 730     C->print_inlining_move_to(this);
 731 
 732     C->log_late_inline(this);
 733 
 734     // JVMState is ready, so it is time to perform some checks and prepare for the inlining attempt.
 735     if (!do_late_inline_check(C, jvms)) {
 736       map->disconnect_inputs(C);
 737       C->print_inlining_update_delayed(this);
 738       return;
 739     }
 740 
 741     // Check if we are late inlining a method handle call that returns an inline type as fields.
 742     Node* buffer_oop = nullptr;
 743     ciMethod* inline_method = inline_cg()->method();
 744     ciType* return_type = inline_method->return_type();
 745     if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
 746         return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
 747       // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
 748       // Do this before the method handle call in case the buffer allocation triggers deoptimization and
 749       // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
 750       GraphKit arg_kit(jvms, &gvn);
 751       {
 752         PreserveReexecuteState preexecs(&arg_kit);
 753         arg_kit.jvms()->set_should_reexecute(true);
 754         arg_kit.inc_sp(nargs);
 755         Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
 756         buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
 757       }
 758       jvms = arg_kit.transfer_exceptions_into_jvms();
 759     }
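    // Ordering note: the buffer is allocated *before* the method handle call,
    // under PreserveReexecuteState with should_reexecute set and the argument
    // words restored by inc_sp(nargs). If the allocation deoptimizes, the
    // interpreter re-executes the invoke bytecode from scratch and the call
    // still runs exactly once; allocating after the call could not be made
    // re-executable without running the call twice.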
 760 
 761     // Set up default node notes to be picked up by the inlining
 762     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 763     if (old_nn != nullptr) {
 764       Node_Notes* entry_nn = old_nn->clone(C);
 765       entry_nn->set_jvms(jvms);
 766       C->set_default_node_notes(entry_nn);
 767     }
 768 
 769     // Now perform the inlining using the synthesized JVMState
 770     JVMState* new_jvms = inline_cg()->generate(jvms);
 771     if (new_jvms == nullptr)  return;  // no change
 772     if (C->failing())      return;
 773 
 774     // Capture any exceptional control flow
 775     GraphKit kit(new_jvms);
 776 
 777     // Find the result object
 778     Node* result = C->top();
 779     int   result_size = method()->return_type()->size();
 780     if (result_size != 0 && !kit.stopped()) {
 781       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 782     }
 783 
 784     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 785       result = kit.must_be_not_null(result, false);
 786     }
 787 
 788     if (inline_cg()->is_inline()) {
 789       C->set_has_loops(C->has_loops() || inline_method->has_loops());
 790       C->env()->notice_inlined_method(inline_method);
 791     }
 792     C->set_inlining_progress(true);
 793     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 794 
 795     // Handle inline type returns
 796     InlineTypeNode* vt = result->isa_InlineType();
 797     if (vt != nullptr) {
 798       if (call->tf()->returns_inline_type_as_fields()) {
 799         vt->replace_call_results(&kit, call, C, inline_method->signature()->returns_null_free_inline_type());
 800       } else if (vt->is_InlineType()) {
 801         // Result might still be allocated (for example, if it has been stored to a non-flat field)
 802         if (!vt->is_allocated(&kit.gvn())) {
 803           assert(buffer_oop != nullptr, "should have allocated a buffer");
 804           RegionNode* region = new RegionNode(3);
 805 
 806           // Check if result is null
 807           Node* null_ctl = kit.top();
 808           if (!inline_method->signature()->returns_null_free_inline_type()) {
 809             kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
 810           }
 811           region->init_req(1, null_ctl);
 812           PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
 813           Node* init_mem = kit.reset_memory();
 814           PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
 815 
 816           // Not null, initialize the buffer
 817           kit.set_all_memory(init_mem);
 818           vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
 819           // Do not let stores that initialize this buffer be reordered with a subsequent
 820           // store that would make this buffer accessible by other threads.
 821           AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
 822           assert(alloc != nullptr, "must have an allocation node");
 823           kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
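               // This mirrors the barrier emitted for ordinary object publication:
               // without it, another thread could observe the buffer oop before
               // the field stores above become visible.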
 824           region->init_req(2, kit.control());
 825           oop->init_req(2, buffer_oop);
 826           mem->init_req(2, kit.merged_memory());
 827 
 828           // Update oop input to buffer
 829           kit.gvn().hash_delete(vt);
 830           vt->set_oop(kit.gvn().transform(oop));
 831           vt->set_is_buffered(kit.gvn());
 832           vt = kit.gvn().transform(vt)->as_InlineType();
 833 
 834           kit.set_control(kit.gvn().transform(region));
 835           kit.set_all_memory(kit.gvn().transform(mem));
 836           kit.record_for_igvn(region);
 837           kit.record_for_igvn(oop);
 838           kit.record_for_igvn(mem);
 839         }
 840         result = vt;
 841       }
 842       DEBUG_ONLY(buffer_oop = nullptr);
 843     } else {
 844       assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
 845     }
 846     assert(buffer_oop == nullptr, "unused buffer allocation");
 847 
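         // replace_call() disconnects the original call node and rewires its
         // control, memory, i/o, and result projections to the inlined graph.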
 848     kit.replace_call(call, result, true);
 849   }
 850 }
 851 
 852 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 853 
 854  public:
 855   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 856     LateInlineCallGenerator(method, inline_cg) {}
 857 
 858   virtual JVMState* generate(JVMState* jvms) {
 859     Compile* C = Compile::current();
 860 
 861     C->log_inline_id(this);
 862 
 863     C->add_string_late_inline(this);
 864 
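         // Emit an ordinary direct call for now; it acts as a placeholder that
         // the string late-inlining pass replaces when it runs.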
 865     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 866     return new_jvms;
 867   }

1050     // Inline failed, so make a direct call.
1051     assert(_if_hit->is_inline(), "must have been a failed inline");
1052     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
1053     new_jvms = cg->generate(kit.sync_jvms());
1054   }
1055   kit.add_exception_states_from(new_jvms);
1056   kit.set_jvms(new_jvms);
1057 
1058   // Need to merge slow and fast?
1059   if (slow_map == nullptr) {
1060     // The fast path is the only path remaining.
1061     return kit.transfer_exceptions_into_jvms();
1062   }
1063 
1064   if (kit.stopped()) {
1065     // Inlined method threw an exception, so it's just the slow path after all.
1066     kit.set_jvms(slow_jvms);
1067     return kit.transfer_exceptions_into_jvms();
1068   }
1069 
1070   // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
1071   uint tos = kit.jvms()->stkoff() + kit.sp();
1072   uint limit = slow_map->req();
1073   for (uint i = TypeFunc::Parms; i < limit; i++) {
1074     Node* m = kit.map()->in(i);
1075     Node* n = slow_map->in(i);
1076     const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1077     // TODO 8284443 still needed?
1078     if (m->is_InlineType() && !t->is_inlinetypeptr()) {
1079       // Allocate inline type in fast path
1080       m = m->as_InlineType()->buffer(&kit);
1081       kit.map()->set_req(i, m);
1082     }
1083     if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1084       // Allocate inline type in slow path
1085       PreserveJVMState pjvms(&kit);
1086       kit.set_map(slow_map);
1087       n = n->as_InlineType()->buffer(&kit);
1088       kit.map()->set_req(i, n);
1089       slow_map = kit.stop();
1090     }
1091   }
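       // From here on, any slot whose meet type is not an inline type pointer
       // holds a buffered oop in both maps, so the phis created below merge
       // compatible values.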
1092 
1093   // There are two branches, and the replaced nodes are only valid on
1094   // one of them: restore the replaced nodes to what they were before
1095   // the branch.
1096   kit.map()->set_replaced_nodes(replaced_nodes);
1097 
1098   // Finish the diamond.
1099   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
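       // The merge is a diamond: region input 1 is the fast (inlined) path and
       // input 2 the slow path; i/o, memory, and stack/local values are merged
       // with phis on this region.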
1100   RegionNode* region = new RegionNode(3);
1101   region->init_req(1, kit.control());
1102   region->init_req(2, slow_map->control());
1103   kit.set_control(gvn.transform(region));
1104   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1105   iophi->set_req(2, slow_map->i_o());
1106   kit.set_i_o(gvn.transform(iophi));
1107   // Merge memory
1108   kit.merge_memory(slow_map->merged_memory(), region, 2);
1109   // Transform new memory Phis.
1110   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1111     Node* phi = mms.memory();
1112     if (phi->is_Phi() && phi->in(0) == region) {
1113       mms.set_memory(gvn.transform(phi));
1114     }
1115   }


1116   for (uint i = TypeFunc::Parms; i < limit; i++) {
1117     // Skip unused stack slots; fast forward to monoff();
1118     if (i == tos) {
1119       i = kit.jvms()->monoff();
1120       if (i >= limit) break;
1121     }
1122     Node* m = kit.map()->in(i);
1123     Node* n = slow_map->in(i);
1124     if (m != n) {
1125       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1126       Node* phi = PhiNode::make(region, m, t);
1127       phi->set_req(2, n);
1128       kit.map()->set_req(i, gvn.transform(phi));
1129     }
1130   }
1131   return kit.transfer_exceptions_into_jvms();
1132 }
1133 
1134 
1135 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1136   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1137   bool input_not_const;
1138   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1139   Compile* C = Compile::current();
1140   if (cg != nullptr) {
1141     if (AlwaysIncrementalInline) {
1142       return CallGenerator::for_late_inline(callee, cg);
1143     } else {
1144       return cg;
1145     }
1146   }
1147   int bci = jvms->bci();
1148   ciCallProfile profile = caller->call_profile_at_bci(bci);
1149   int call_site_count = caller->scale_count(profile.count());
1150 
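       // Inlining did not succeed (cg == nullptr): retry as a method handle
       // late inline if IncrementalInlineMH permits and the conditions below
       // hold; otherwise emit a plain out-of-line call.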
1151   if (IncrementalInlineMH && (AlwaysIncrementalInline ||
1152                             (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
1153     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1154   } else {
1155     // Out-of-line call.
1156     return CallGenerator::for_direct_call(callee);
1157   }
1158 }
1159 
1160 static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit, bool null_free) {
1161   PhaseGVN& gvn = kit.gvn();
1162   Node* arg = kit.argument(arg_nb);
1163   const Type* arg_type = arg->bottom_type();
1164   const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
1165   if (t->as_klass()->is_inlinetype() && null_free) {
1166     sig_type = sig_type->filter_speculative(TypePtr::NOTNULL);
1167   }
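       // Illustrative example (not actual IR): a lambda form may pass a String
       // argument under an erased Object signature; the CheckCastPP below then
       // narrows the argument back to String before the direct call.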
1168   if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
1169     const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
1170     arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
1171     kit.set_argument(arg_nb, arg);
1172   }
1173   if (sig_type->is_inlinetypeptr()) {
1174     arg = InlineTypeNode::make_from_oop(&kit, arg, sig_type->inline_klass(), !kit.gvn().type(arg)->maybe_null());
1175     kit.set_argument(arg_nb, arg);
1176   }
1177 }
1178 
1179 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1180   GraphKit kit(jvms);
1181   PhaseGVN& gvn = kit.gvn();
1182   Compile* C = kit.C;
1183   vmIntrinsics::ID iid = callee->intrinsic_id();
1184   input_not_const = true;
1185   if (StressMethodHandleLinkerInlining) {
1186     allow_inline = false;
1187   }
1188   switch (iid) {
1189   case vmIntrinsics::_invokeBasic:
1190     {
1191       // Get MethodHandle receiver:
1192       Node* receiver = kit.argument(0);
1193       if (receiver->Opcode() == Op_ConP) {
1194         input_not_const = false;
1195         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1196         if (recv_toop != nullptr) {
1197           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1198           const int vtable_index = Method::invalid_vtable_index;

1210                                                 PROB_ALWAYS);
1211           return cg;
1212         } else {
1213           assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1214                  Type::str(receiver->bottom_type()));
1215           print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1216                                  "receiver is always null");
1217         }
1218       } else {
1219         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1220                                "receiver not constant");
1221       }
1222     }
1223     break;
1224 
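       // The linkTo* linkers receive the resolved MemberName as their trailing
       // argument; devirtualization below is only possible when that argument
       // is a compile-time constant.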
1225   case vmIntrinsics::_linkToVirtual:
1226   case vmIntrinsics::_linkToStatic:
1227   case vmIntrinsics::_linkToSpecial:
1228   case vmIntrinsics::_linkToInterface:
1229     {
1230       int nargs = callee->arg_size();
1231       // Get MemberName argument:
1232       Node* member_name = kit.argument(nargs - 1);
1233       if (member_name->Opcode() == Op_ConP) {
1234         input_not_const = false;
1235         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1236         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1237 
1238         if (!ciMethod::is_consistent_info(callee, target)) {
1239           print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1240                                  "signatures mismatch");
1241           return nullptr;
1242         }
1243 
1244         // In lambda forms we erase signature types to avoid resolving issues
1245         // involving class loaders.  When we optimize a method handle invoke
1246         // to a direct call we must cast the receiver and arguments to
1247         // their actual types.
1248         ciSignature* signature = target->signature();
1249         const int receiver_skip = target->is_static() ? 0 : 1;
1250         // Cast receiver to its type.
1251         if (!target->is_static()) {
1252           cast_argument(nargs, 0, signature->accessing_klass(), kit, false);







1253         }
1254         // Cast reference arguments to their types.
1255         for (int i = 0, j = 0; i < signature->count(); i++) {
1256           ciType* t = signature->type_at(i);
1257           if (t->is_klass()) {
1258             bool null_free = signature->is_null_free_at(i);
1259             cast_argument(nargs, receiver_skip + j, t, kit, null_free);






1260           }
1261           j += t->size();  // long and double take two slots
1262         }
1263 
1264         // Try to get the most accurate receiver type
1265         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1266         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1267         int  vtable_index       = Method::invalid_vtable_index;
1268         bool call_does_dispatch = false;
1269 
1270         ciKlass* speculative_receiver_type = nullptr;
1271         if (is_virtual_or_interface) {
1272           ciInstanceKlass* klass = target->holder();
1273           Node*             receiver_node = kit.argument(0);
1274           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1275           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
1276           // optimize_virtual_call() takes 2 different holder
1277           // arguments for a corner case that doesn't apply here (see
1278           // Parse::do_call())
1279           target = C->optimize_virtual_call(caller, klass, klass,
1280                                             target, receiver_type, is_virtual,
1281                                             call_does_dispatch, vtable_index, // out-parameters
1282                                             false /* check_access */);
1283         // We lack profiling at this call, but type speculation may
1284         // provide us with a type.
1285           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1286         }
1287         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1288                                               allow_inline,
1289                                               PROB_ALWAYS,
1290                                               speculative_receiver_type,
1291                                               true);
1292         return cg;
1293       } else {
1294         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1295                                "member_name not constant");
1296       }
1297     }
1298     break;
1299 
1300   case vmIntrinsics::_linkToNative:
1301     print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1302                            "native call");
1303     break;
1304 
1305   default:
1306     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1307     break;
1308   }
1309   return nullptr;
1310 }
1311 

1346   //        do_intrinsic(0)
1347   //    else
1348   //    if (predicate(1))
1349   //        do_intrinsic(1)
1350   //    ...
1351   //    else
1352   //        do_java_comp
1353 
1354   GraphKit kit(jvms);
1355   PhaseGVN& gvn = kit.gvn();
1356 
1357   CompileLog* log = kit.C->log();
1358   if (log != nullptr) {
1359     log->elem("predicated_intrinsic bci='%d' method='%d'",
1360               jvms->bci(), log->identify(method()));
1361   }
1362 
1363   if (!method()->is_static()) {
1364     // We need an explicit null check of the receiver before checking its type in the predicate.
1365     // We share a map with the caller, so its JVMState gets adjusted.
1366     kit.null_check_receiver_before_call(method());
1367     if (kit.stopped()) {
1368       return kit.transfer_exceptions_into_jvms();
1369     }
1370   }
1371 
1372   int n_predicates = _intrinsic->predicates_count();
1373   assert(n_predicates > 0, "sanity");
1374 
1375   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
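       // Room for one JVMState per predicated intrinsic path, plus (presumably)
       // one for the fallback Java path.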
1376 
1377   // Region for normal compilation code, entered when an intrinsic predicate fails.
1378   Node* slow_region = new RegionNode(1);
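       // Starts with only the self input; an input is added for each predicate
       // that fails at runtime.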
1379 
1380   int results = 0;
1381   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1382 #ifdef ASSERT
1383     JVMState* old_jvms = kit.jvms();
1384     SafePointNode* old_map = kit.map();
1385     Node* old_io  = old_map->i_o();
1386     Node* old_mem = old_map->memory();
< prev index next >