
src/hotspot/share/opto/callGenerator.cpp


 627     return;
 628   }
 629 
 630   Compile* C = Compile::current();
 631   // Remove inlined methods from Compiler's lists.
 632   if (call->is_macro()) {
 633     C->remove_macro_node(call);
 634   }
 635 
 636   // The call is marked as pure (no important side effects), but the result isn't used.
 637   // It's safe to remove the call.
 638   bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
 639 
 640   if (is_pure_call() && result_not_used) {
 641     GraphKit kit(call->jvms());
 642     kit.replace_call(call, C->top(), true);
 643   } else {
 644     // Make a clone of the JVMState that is appropriate for driving a parse
 645     JVMState* old_jvms = call->jvms();
 646     JVMState* jvms = old_jvms->clone_shallow(C);
 647     uint size = call->req();
 648     SafePointNode* map = new SafePointNode(size, jvms);
 649     for (uint i1 = 0; i1 < size; i1++) {
 650       map->init_req(i1, call->in(i1));
 651     }
 652 
 653     // Make sure the state is a MergeMem for parsing.
 654     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 655       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 656       C->initial_gvn()->set_type_bottom(mem);
 657       map->set_req(TypeFunc::Memory, mem);
 658     }
 659 
 660     uint nargs = method()->arg_size();
 661     // blow away old call arguments
 662     Node* top = C->top();
 663     for (uint i1 = 0; i1 < nargs; i1++) {
 664       map->set_req(TypeFunc::Parms + i1, top);
 665     }
 666     jvms->set_map(map);
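
Aside on the MergeMem step in this hunk: C2's parser expects the map's memory input to be split per alias class, so a flat memory edge is wrapped before parsing resumes. A minimal sketch of that normalization, restricted to the calls already visible above (illustrative, not a drop-in patch):

  // Normalize a flat memory edge to a MergeMem before handing the map to the parser.
  Node* raw_mem = map->in(TypeFunc::Memory);
  if (!raw_mem->is_MergeMem()) {
    Node* mm = MergeMemNode::make(raw_mem);  // fans the edge out into alias slices
    C->initial_gvn()->set_type_bottom(mm);   // register the new node with GVN
    map->set_req(TypeFunc::Memory, mm);
  }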

 794     C->add_vector_reboxing_late_inline(this);
 795 
 796     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 797     return new_jvms;
 798   }
 799 
 800   virtual CallGenerator* with_call_node(CallNode* call) {
 801     LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
 802     cg->set_call_node(call->as_CallStaticJava());
 803     return cg;
 804   }
 805 };
 806 
 807 //   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
 808 CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 809   return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
 810 }
 811 
 812 //------------------------PredictedCallGenerator------------------------------
 813 // Internal class which handles all out-of-line calls checking receiver type.
 814 class PredictedCallGenerator : public CallGenerator {
 815   ciKlass*       _predicted_receiver;
 816   CallGenerator* _if_missed;
 817   CallGenerator* _if_hit;
 818   float          _hit_prob;
 819   bool           _exact_check;
 820 
 821 public:
 822   PredictedCallGenerator(ciKlass* predicted_receiver,
 823                          CallGenerator* if_missed,
 824                          CallGenerator* if_hit, bool exact_check,
 825                          float hit_prob)
 826     : CallGenerator(if_missed->method())
 827   {
 828     // The call profile data may predict hit_prob values as extreme as 0 or 1.
 829     // Clamp such extremes back into the valid range.
 830     if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
 831     if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
 832 
 833     _predicted_receiver = predicted_receiver;
 834     _if_missed          = if_missed;
 835     _if_hit             = if_hit;
 836     _hit_prob           = hit_prob;
 837     _exact_check        = exact_check;
 838   }
 839 
 840   virtual bool      is_virtual()   const    { return true; }
 841   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
 842   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 843 
 844   virtual JVMState* generate(JVMState* jvms);
 845 };
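
For orientation, the shape PredictedCallGenerator::generate() emits can be sketched from the fields above (comment-level sketch only; the real body follows later in this file):

  // if (klass(receiver) == _predicted_receiver)   // taken with _hit_prob
  //   ... _if_hit->generate(...)                  // typically the inlined body
  // else
  //   ... _if_missed->generate(...)               // out-of-line virtual dispatch
  //
  // _hit_prob is clamped into [PROB_MIN, PROB_MAX] by the constructor, since
  // profile counts can otherwise yield the degenerate frequencies 0 and 1.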
 846 

 899     if (!kit.stopped()) {
 900       slow_jvms = _if_missed->generate(kit.sync_jvms());
 901       if (kit.failing())
 902         return nullptr;  // might happen because of NodeCountInliningCutoff
 903       assert(slow_jvms != nullptr, "must be");
 904       kit.add_exception_states_from(slow_jvms);
 905       kit.set_map(slow_jvms->map());
 906       if (!kit.stopped())
 907         slow_map = kit.stop();
 908     }
 909   }
 910 
 911   if (kit.stopped()) {
 912     // Instance does not match the predicted type.
 913     kit.set_jvms(slow_jvms);
 914     return kit.transfer_exceptions_into_jvms();
 915   }
 916 
 917   // Fall through if the instance matches the desired type.
 918   kit.replace_in_map(receiver, casted_receiver);
 919 
 920   // Make the hot call:
 921   JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
 922   if (new_jvms == nullptr) {
 923     // Inline failed, so make a direct call.
 924     assert(_if_hit->is_inline(), "must have been a failed inline");
 925     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
 926     new_jvms = cg->generate(kit.sync_jvms());
 927   }
 928   kit.add_exception_states_from(new_jvms);
 929   kit.set_jvms(new_jvms);
 930 
 931   // Need to merge slow and fast?
 932   if (slow_map == nullptr) {
 933     // The fast path is the only path remaining.
 934     return kit.transfer_exceptions_into_jvms();
 935   }
 936 
 937   if (kit.stopped()) {
 938     // Inlined method threw an exception, so it's just the slow path after all.
 939     kit.set_jvms(slow_jvms);
 940     return kit.transfer_exceptions_into_jvms();
 941   }
 942 
 943   // There are 2 branches and the replaced nodes are only valid on
 944   // one: restore the replaced nodes to what they were before the
 945   // branch.
 946   kit.map()->set_replaced_nodes(replaced_nodes);
 947 
 948   // Finish the diamond.
 949   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
 950   RegionNode* region = new RegionNode(3);
 951   region->init_req(1, kit.control());
 952   region->init_req(2, slow_map->control());
 953   kit.set_control(gvn.transform(region));
 954   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
 955   iophi->set_req(2, slow_map->i_o());
 956   kit.set_i_o(gvn.transform(iophi));
 957   // Merge memory
 958   kit.merge_memory(slow_map->merged_memory(), region, 2);
 959   // Transform new memory Phis.
 960   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
 961     Node* phi = mms.memory();
 962     if (phi->is_Phi() && phi->in(0) == region) {
 963       mms.set_memory(gvn.transform(phi));
 964     }
 965   }
 966   uint tos = kit.jvms()->stkoff() + kit.sp();
 967   uint limit = slow_map->req();
 968   for (uint i = TypeFunc::Parms; i < limit; i++) {
 969     // Skip unused stack slots; fast forward to monoff();
 970     if (i == tos) {
 971       i = kit.jvms()->monoff();
 972       if (i >= limit) break;
 973     }
 974     Node* m = kit.map()->in(i);
 975     Node* n = slow_map->in(i);
 976     if (m != n) {
 977       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 978       Node* phi = PhiNode::make(region, m, t);
 979       phi->set_req(2, n);
 980       kit.map()->set_req(i, gvn.transform(phi));
 981     }
 982   }
 983   return kit.transfer_exceptions_into_jvms();
 984 }
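
The join above is the standard C2 two-way merge idiom: a RegionNode collects the two controls, and every value that differs between the paths gets a PhiNode typed as the meet of the incoming types. A condensed sketch of the idiom (fast_ctrl/slow_ctrl and fast_val/slow_val are placeholders, not names from this file):

  RegionNode* region = new RegionNode(3);          // input 0 is the region itself
  region->init_req(1, fast_ctrl);
  region->init_req(2, slow_ctrl);
  kit.set_control(gvn.transform(region));
  const Type* t = gvn.type(fast_val)->meet_speculative(gvn.type(slow_val));
  Node* phi = PhiNode::make(region, fast_val, t);  // input 1 = fast-path value
  phi->set_req(2, slow_val);                       // input 2 = slow-path value
  Node* merged = gvn.transform(phi);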
 985 
 986 
 987 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
 988   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
 989   bool input_not_const;
 990   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
 991   Compile* C = Compile::current();
 992   bool should_delay = C->should_delay_inlining();
 993   if (cg != nullptr) {
 994     if (should_delay) {
 995       return CallGenerator::for_late_inline(callee, cg);
 996     } else {
 997       return cg;
 998     }
 999   }
1000   int bci = jvms->bci();
1001   ciCallProfile profile = caller->call_profile_at_bci(bci);
1002   int call_site_count = caller->scale_count(profile.count());
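
The early return above is the incremental-inlining hook for method handles: if a generator was found but the compile prefers to defer inlining, the generator is wrapped instead of being expanded now. Condensed form of that decision, using only the calls from this hunk:

  if (cg != nullptr) {
    // Defer expansion to the late-inlining pass when requested.
    return C->should_delay_inlining() ? CallGenerator::for_late_inline(callee, cg)
                                      : cg;
  }
  // Otherwise fall through to the profile-driven handling below.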

1139         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1140                                "member_name not constant");
1141       }
1142     }
1143     break;
1144 
1145   case vmIntrinsics::_linkToNative:
1146     print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1147                            "native call");
1148     break;
1149 
1150   default:
1151     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1152     break;
1153   }
1154   return nullptr;
1155 }
1156 
1157 //------------------------PredicatedIntrinsicGenerator------------------------------
1158 // Internal class which handles all predicated Intrinsic calls.
1159 class PredicatedIntrinsicGenerator : public CallGenerator {
1160   CallGenerator* _intrinsic;
1161   CallGenerator* _cg;
1162 
1163 public:
1164   PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1165                                CallGenerator* cg)
1166     : CallGenerator(cg->method())
1167   {
1168     _intrinsic = intrinsic;
1169     _cg        = cg;
1170   }
1171 
1172   virtual bool      is_virtual()   const    { return true; }
1173   virtual bool      is_inline()    const    { return true; }
1174   virtual bool      is_intrinsic() const    { return true; }
1175 
1176   virtual JVMState* generate(JVMState* jvms);
1177 };
1178 
1179 
1180 CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1181                                                        CallGenerator* cg) {
1182   return new PredicatedIntrinsicGenerator(intrinsic, cg);
1183 }
1184 
1185 
1186 JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1187   // The code we want to generate here is:
1188   //    if (receiver == nullptr)
1189   //        uncommon_Trap
1190   //    if (predicate(0))
1191   //        do_intrinsic(0)
1192   //    else
1193   //    if (predicate(1))

1226   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1227 #ifdef ASSERT
1228     JVMState* old_jvms = kit.jvms();
1229     SafePointNode* old_map = kit.map();
1230     Node* old_io  = old_map->i_o();
1231     Node* old_mem = old_map->memory();
1232     Node* old_exc = old_map->next_exception();
1233 #endif
1234     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1235 #ifdef ASSERT
1236     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1237     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1238     SafePointNode* new_map = kit.map();
1239     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1240     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1241     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1242 #endif
1243     if (!kit.stopped()) {
1244       PreserveJVMState pjvms(&kit);
1245       // Generate intrinsic code:

1246       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1247       if (new_jvms == nullptr) {
1248         // Intrinsic failed, use normal compilation path for this predicate.
1249         slow_region->add_req(kit.control());
1250       } else {
1251         kit.add_exception_states_from(new_jvms);
1252         kit.set_jvms(new_jvms);
1253         if (!kit.stopped()) {
1254           result_jvms[results++] = kit.jvms();
1255         }
1256       }
1257     }
1258     if (else_ctrl == nullptr) {
1259       else_ctrl = kit.C->top();
1260     }
1261     kit.set_control(else_ctrl);
1262   }
1263   if (!kit.stopped()) {
1264     // Final 'else' after predicates.
1265     slow_region->add_req(kit.control());
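
Each iteration of the loop above handles one predicate: a failed predicate or a failed intrinsic expansion routes control into slow_region, while a successful expansion records its JVMState in result_jvms. The overall shape, restated as a comment sketch (matching the pseudocode at the top of generate()):

  // for each predicate i (until control stops):
  //   else_ctrl = _intrinsic->generate_predicate(..., i)  // control if i fails
  //   on the taken branch, try _intrinsic->generate(...):
  //     success -> result_jvms[results++] = kit.jvms()
  //     failure -> slow_region->add_req(kit.control())
  //   continue the chain on else_ctrl
  // any control left after the loop also joins slow_region, which feeds the
  // non-intrinsic path (_cg, the normal compilation of the method).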

 627     return;
 628   }
 629 
 630   Compile* C = Compile::current();
 631   // Remove inlined methods from Compiler's lists.
 632   if (call->is_macro()) {
 633     C->remove_macro_node(call);
 634   }
 635 
 636   // The call is marked as pure (no important side effects), but the result isn't used.
 637   // It's safe to remove the call.
 638   bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
 639 
 640   if (is_pure_call() && result_not_used) {
 641     GraphKit kit(call->jvms());
 642     kit.replace_call(call, C->top(), true);
 643   } else {
 644     // Make a clone of the JVMState that is appropriate for driving a parse
 645     JVMState* old_jvms = call->jvms();
 646     JVMState* jvms = old_jvms->clone_shallow(C);
 647 
 648     // Clear the allocation state. We assume all inputs are materialized.
 649     jvms->alloc_state().clear();
 650 
 651     uint size = call->req();
 652     SafePointNode* map = new SafePointNode(size, jvms);
 653     for (uint i1 = 0; i1 < size; i1++) {
 654       map->init_req(i1, call->in(i1));
 655     }
 656 
 657     // Make sure the state is a MergeMem for parsing.
 658     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 659       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 660       C->initial_gvn()->set_type_bottom(mem);
 661       map->set_req(TypeFunc::Memory, mem);
 662     }
 663 
 664     uint nargs = method()->arg_size();
 665     // blow away old call arguments
 666     Node* top = C->top();
 667     for (uint i1 = 0; i1 < nargs; i1++) {
 668       map->set_req(TypeFunc::Parms + i1, top);
 669     }
 670     jvms->set_map(map);
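
New in this version of the hunk: the cloned JVMState's allocation state is reset before the late-inline parse. A hedged reading of that line (PEAState is the bookkeeping type this patch introduces; the interpretation below follows the in-code comment):

  // alloc_state() tracks virtual (not-yet-materialized) allocations for
  // partial escape analysis. Clearing it encodes the conservative assumption
  // stated above: every value flowing into this parse is already materialized,
  // so the parse starts with no virtual objects in flight.
  jvms->alloc_state().clear();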

 798     C->add_vector_reboxing_late_inline(this);
 799 
 800     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 801     return new_jvms;
 802   }
 803 
 804   virtual CallGenerator* with_call_node(CallNode* call) {
 805     LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
 806     cg->set_call_node(call->as_CallStaticJava());
 807     return cg;
 808   }
 809 };
 810 
 811 //   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
 812 CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 813   return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
 814 }
 815 
 816 //------------------------PredictedCallGenerator------------------------------
 817 // Internal class which handles all out-of-line calls checking receiver type.
 818 class PredictedCallGenerator : public InlineCallGenerator {
 819   ciKlass*       _predicted_receiver;
 820   CallGenerator* _if_missed;
 821   CallGenerator* _if_hit;
 822   float          _hit_prob;
 823   bool           _exact_check;
 824 
 825 public:
 826   PredictedCallGenerator(ciKlass* predicted_receiver,
 827                          CallGenerator* if_missed,
 828                          CallGenerator* if_hit, bool exact_check,
 829                          float hit_prob)
 830     : InlineCallGenerator(if_missed->method())
 831   {
 832     // The call profile data may predict hit_prob values as extreme as 0 or 1.
 833     // Clamp such extremes back into the valid range.
 834     if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
 835     if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
 836 
 837     _predicted_receiver = predicted_receiver;
 838     _if_missed          = if_missed;
 839     _if_hit             = if_hit;
 840     _hit_prob           = hit_prob;
 841     _exact_check        = exact_check;
 842   }
 843 
 844   virtual bool      is_virtual()   const    { return true; }
 845   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
 846   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 847 
 848   virtual JVMState* generate(JVMState* jvms);
 849 };
 850 

 903     if (!kit.stopped()) {
 904       slow_jvms = _if_missed->generate(kit.sync_jvms());
 905       if (kit.failing())
 906         return nullptr;  // might happen because of NodeCountInliningCutoff
 907       assert(slow_jvms != nullptr, "must be");
 908       kit.add_exception_states_from(slow_jvms);
 909       kit.set_map(slow_jvms->map());
 910       if (!kit.stopped())
 911         slow_map = kit.stop();
 912     }
 913   }
 914 
 915   if (kit.stopped()) {
 916     // Instance does not match the predicted type.
 917     kit.set_jvms(slow_jvms);
 918     return kit.transfer_exceptions_into_jvms();
 919   }
 920 
 921   // Fall through if the instance matches the desired type.
 922   kit.replace_in_map(receiver, casted_receiver);

 923   // Make the hot call:
 924   JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
 925   if (new_jvms == nullptr) {
 926     // Inline failed, so make a direct call.
 927     assert(_if_hit->is_inline(), "must have been a failed inline");
 928     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
 929     new_jvms = cg->generate(kit.sync_jvms());
 930   }
 931   kit.add_exception_states_from(new_jvms);
 932   kit.set_jvms(new_jvms);
 933 
 934   // Need to merge slow and fast?
 935   if (slow_map == nullptr) {
 936     // The fast path is the only path remaining.
 937     return kit.transfer_exceptions_into_jvms();
 938   }
 939 
 940   if (kit.stopped()) {
 941     // Inlined method threw an exception, so it's just the slow path after all.
 942     kit.set_jvms(slow_jvms);
 943     return kit.transfer_exceptions_into_jvms();
 944   }
 945 
 946   // There are 2 branches and the replaced nodes are only valid on
 947   // one: restore the replaced nodes to what they were before the
 948   // branch.
 949   kit.map()->set_replaced_nodes(replaced_nodes);
 950 
 951   PEAState& slow_as = slow_jvms->alloc_state();
 952   PEAState& as = new_jvms->alloc_state();
 953   AllocationStateMerger as_merger(as);
 954   // Finish the diamond.
 955   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
 956   RegionNode* region = new RegionNode(3);
 957   region->init_req(1, kit.control());
 958   region->init_req(2, slow_map->control());
 959   kit.set_control(gvn.transform(region));
 960   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
 961   iophi->set_req(2, slow_map->i_o());
 962   kit.set_i_o(gvn.transform(iophi));
 963   // Merge memory
 964   kit.merge_memory(slow_map->merged_memory(), region, 2);
 965   // Transform new memory Phis.
 966   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
 967     Node* phi = mms.memory();
 968     if (phi->is_Phi() && phi->in(0) == region) {
 969       mms.set_memory(gvn.transform(phi));
 970     }
 971   }
 972   uint tos = kit.jvms()->stkoff() + kit.sp();
 973   uint limit = slow_map->req();
 974   for (uint i = TypeFunc::Parms; i < limit; i++) {
 975     // Skip unused stack slots; fast forward to monoff();
 976     if (i == tos) {
 977       i = kit.jvms()->monoff();
 978       if (i >= limit) break;
 979     }
 980     Node* m = kit.map()->in(i);
 981     Node* n = slow_map->in(i);
 982     if (m != n) {
 983       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
 984       Node* phi = PhiNode::make(region, m, t);
 985       phi->set_req(2, n);
 986       kit.map()->set_req(i, gvn.transform(phi));
 987       if (DoPartialEscapeAnalysis) {
 988         as_merger.merge_at_phi_creation(kit.PEA(), slow_as, phi->as_Phi(), m, n);
 989       }
 990     }
 991   }
 992 
 993   if (DoPartialEscapeAnalysis) {
 994     as_merger.merge(slow_as, &kit, region, 2);
 995   }
 996   return kit.transfer_exceptions_into_jvms();
 997 }
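
The PEA additions to this function pair every value Phi with bookkeeping in AllocationStateMerger and then reconcile the two allocation states at the join. The apparent contract, inferred from this hunk alone (not verified against the rest of the patch):

  // AllocationStateMerger as_merger(fast_state);
  //  - merge_at_phi_creation(PEA, slow_state, phi, fast_in, slow_in):
  //      invoked once per Phi that merges an object value, letting the merger
  //      record which (possibly virtual) object each Phi input refers to;
  //  - merge(slow_state, &kit, region, 2):
  //      final reconciliation at the region, presumably materializing objects
  //      that are virtual on one path but not on the other.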
 998 
 999 
1000 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1001   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1002   bool input_not_const;
1003   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1004   Compile* C = Compile::current();
1005   bool should_delay = C->should_delay_inlining();
1006   if (cg != nullptr) {
1007     if (should_delay) {
1008       return CallGenerator::for_late_inline(callee, cg);
1009     } else {
1010       return cg;
1011     }
1012   }
1013   int bci = jvms->bci();
1014   ciCallProfile profile = caller->call_profile_at_bci(bci);
1015   int call_site_count = caller->scale_count(profile.count());

1152         print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1153                                "member_name not constant");
1154       }
1155     }
1156     break;
1157 
1158   case vmIntrinsics::_linkToNative:
1159     print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1160                            "native call");
1161     break;
1162 
1163   default:
1164     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1165     break;
1166   }
1167   return nullptr;
1168 }
1169 
1170 //------------------------PredicatedIntrinsicGenerator------------------------------
1171 // Internal class which handles all predicated Intrinsic calls.
1172 class PredicatedIntrinsicGenerator : public InlineCallGenerator {
1173   CallGenerator* _intrinsic;
1174   CallGenerator* _cg;
1175 
1176 public:
1177   PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1178                                CallGenerator* cg)
1179     : InlineCallGenerator(cg->method())
1180   {
1181     _intrinsic = intrinsic;
1182     _cg        = cg;
1183   }
1184 
1185   virtual bool      is_virtual()   const    { return true; }

1186   virtual bool      is_intrinsic() const    { return true; }
1187 
1188   virtual JVMState* generate(JVMState* jvms);
1189 };
1190 
1191 
1192 CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1193                                                        CallGenerator* cg) {
1194   return new PredicatedIntrinsicGenerator(intrinsic, cg);
1195 }
1196 
1197 
1198 JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1199   // The code we want to generate here is:
1200   //    if (receiver == nullptr)
1201   //        uncommon_Trap
1202   //    if (predicate(0))
1203   //        do_intrinsic(0)
1204   //    else
1205   //    if (predicate(1))

1238   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1239 #ifdef ASSERT
1240     JVMState* old_jvms = kit.jvms();
1241     SafePointNode* old_map = kit.map();
1242     Node* old_io  = old_map->i_o();
1243     Node* old_mem = old_map->memory();
1244     Node* old_exc = old_map->next_exception();
1245 #endif
1246     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1247 #ifdef ASSERT
1248     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1249     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1250     SafePointNode* new_map = kit.map();
1251     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1252     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1253     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1254 #endif
1255     if (!kit.stopped()) {
1256       PreserveJVMState pjvms(&kit);
1257       // Generate intrinsic code:
1258       assert(_intrinsic->is_inline(), "LibraryIntrinsic");
1259       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1260       if (new_jvms == nullptr) {
1261         // Intrinsic failed, use normal compilation path for this predicate.
1262         slow_region->add_req(kit.control());
1263       } else {
1264         kit.add_exception_states_from(new_jvms);
1265         kit.set_jvms(new_jvms);
1266         if (!kit.stopped()) {
1267           result_jvms[results++] = kit.jvms();
1268         }
1269       }
1270     }
1271     if (else_ctrl == nullptr) {
1272       else_ctrl = kit.C->top();
1273     }
1274     kit.set_control(else_ctrl);
1275   }
1276   if (!kit.stopped()) {
1277     // Final 'else' after predicates.
1278     slow_region->add_req(kit.control());