< prev index next >

src/hotspot/share/opto/loopopts.cpp

Print this page

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/shared/barrierSet.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/connode.hpp"
  33 #include "opto/divnode.hpp"

  34 #include "opto/loopnode.hpp"
  35 #include "opto/matcher.hpp"
  36 #include "opto/movenode.hpp"
  37 #include "opto/mulnode.hpp"
  38 #include "opto/opaquenode.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/subnode.hpp"
  41 #include "opto/subtypenode.hpp"
  42 #include "opto/superword.hpp"
  43 #include "opto/vectornode.hpp"
  44 #include "utilities/checkedCast.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 //=============================================================================
  48 //------------------------------split_thru_phi---------------------------------
  49 // Split Node 'n' through merge point if there is enough win.
  50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  51   if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
  52       (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
  53     // ConvI2L/ConvL2I may have type information on it which is unsafe to push up
  54     // so disable this for now
  55     return nullptr;
  56   }
  57 
  58   // Splitting range check CastIIs through a loop induction Phi can
  59   // cause new Phis to be created that are left unrelated to the loop
  60   // induction Phi and prevent optimizations (vectorization)
  61   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  62       n->in(1) == region->as_CountedLoop()->phi()) {
  63     return nullptr;
  64   }
  65 






  66   if (cannot_split_division(n, region)) {
  67     return nullptr;
  68   }
  69 
  70   SplitThruPhiWins wins(region);
  71   assert(!n->is_CFG(), "");
  72   assert(region->is_Region(), "");
  73 
  74   const Type* type = n->bottom_type();
  75   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  76   Node* phi;
  77   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  78     int iid    = t_oop->instance_id();
  79     int index  = C->get_alias_index(t_oop);
  80     int offset = t_oop->offset();
  81     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  82   } else {
  83     phi = PhiNode::make_blank(region, n);
  84   }
  85   uint old_unique = C->unique();

 750       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 751       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 752       // have a Phi for the base here that we convert to a CMOVE all is well
 753       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 754       // the allocator will have to produce a base by creating a CMOVE of the
 755       // relevant bases.  This puts the allocator in the business of
 756       // manufacturing expensive instructions, generally a bad plan.
 757       // Just Say No to Conditionally-Moved Derived Pointers.
 758       if (tp && tp->offset() != 0)
 759         return nullptr;
 760       cost++;
 761       break;
 762     }
 763     default:
 764       return nullptr;              // In particular, can't do memory or I/O
 765     }
 766     // Add in cost any speculative ops
 767     for (uint j = 1; j < region->req(); j++) {
 768       Node *proj = region->in(j);
 769       Node *inp = phi->in(j);




 770       if (get_ctrl(inp) == proj) { // Found local op
 771         cost++;
 772         // Check for a chain of dependent ops; these will all become
 773         // speculative in a CMOV.
 774         for (uint k = 1; k < inp->req(); k++)
 775           if (get_ctrl(inp->in(k)) == proj)
 776             cost += ConditionalMoveLimit; // Too much speculative goo
 777       }
 778     }
 779     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 780     // This will likely Split-If, a higher-payoff operation.
 781     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 782       Node* use = phi->fast_out(k);
 783       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 784         cost += ConditionalMoveLimit;
 785       // Is there a use inside the loop?
 786       // Note: check only basic types since CMoveP is pinned.
 787       if (!used_inside_loop && is_java_primitive(bt)) {
 788         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 789         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

1079             assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1080 
1081             // Move store out of the loop
1082             _igvn.replace_node(hook, n->in(MemNode::Memory));
1083             _igvn.replace_input_of(n, 0, lca);
1084             set_ctrl_and_loop(n, lca);
1085 
1086             // Disconnect the phi now. An empty phi can confuse other
1087             // optimizations in this pass of loop opts..
1088             if (phi->in(LoopNode::LoopBackControl) == phi) {
1089               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1090               n_loop->_body.yank(phi);
1091             }
1092           }
1093         }
1094       }
1095     }
1096   }
1097 }
1098 
















































1099 //------------------------------split_if_with_blocks_pre-----------------------
1100 // Do the real work in a non-recursive function.  Data nodes want to be
1101 // cloned in the pre-order so they can feed each other nicely.
1102 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1103   // Cloning these guys is unlikely to win
1104   int n_op = n->Opcode();
1105   if (n_op == Op_MergeMem) {
1106     return n;
1107   }
1108   if (n->is_Proj()) {
1109     return n;
1110   }






1111   // Do not clone-up CmpFXXX variations, as these are always
1112   // followed by a CmpI
1113   if (n->is_Cmp()) {
1114     return n;
1115   }
1116   // Attempt to use a conditional move instead of a phi/branch
1117   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1118     Node *cmov = conditional_move( n );
1119     if (cmov) {
1120       return cmov;
1121     }
1122   }
1123   if (n->is_CFG() || n->is_LoadStore()) {
1124     return n;
1125   }
1126   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1127     if (!C->major_progress()) {   // If chance of no more loop opts...
1128       _igvn._worklist.push(n);  // maybe we'll remove them
1129     }
1130     return n;

1366 
1367   return true;
1368 }
1369 
1370 // Detect if the node is the inner strip-mined loop
1371 // Return: null if it's not the case, or the exit of outer strip-mined loop
1372 static Node* is_inner_of_stripmined_loop(const Node* out) {
1373   Node* out_le = nullptr;
1374 
1375   if (out->is_CountedLoopEnd()) {
1376       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1377 
1378       if (loop != nullptr && loop->is_strip_mined()) {
1379         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1380       }
1381   }
1382 
1383   return out_le;
1384 }
1385 


































































































1386 //------------------------------split_if_with_blocks_post----------------------
1387 // Do the real work in a non-recursive function.  CFG hackery wants to be
1388 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1389 // info.
1390 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1391 




1392   // Cloning Cmp through Phi's involves the split-if transform.
1393   // FastLock is not used by an If
1394   if (n->is_Cmp() && !n->is_FastLock()) {
1395     Node *n_ctrl = get_ctrl(n);
1396     // Determine if the Node has inputs from some local Phi.
1397     // Returns the block to clone thru.
1398     Node *n_blk = has_local_phi_input(n);
1399     if (n_blk != n_ctrl) {
1400       return;
1401     }
1402 
1403     if (!can_split_if(n_ctrl)) {
1404       return;
1405     }
1406 
1407     if (n->outcnt() != 1) {
1408       return; // Multiple bool's from 1 compare?
1409     }
1410     Node *bol = n->unique_out();
1411     assert(bol->is_Bool(), "expect a bool here");

1522           //    accesses would start to float, since we don't pin at that point.
1523           // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1524           bool pin_array_access_nodes =  n->Opcode() == Op_RangeCheck &&
1525                                          prevdom->in(0)->Opcode() != Op_RangeCheck;
1526           dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1527           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1528           return;
1529         }
1530         prevdom = dom;
1531         dom = idom(prevdom);
1532       }
1533     }
1534   }
1535 
1536   try_sink_out_of_loop(n);
1537   if (C->failing()) {
1538     return;
1539   }
1540 
1541   try_move_store_after_loop(n);





1542 }
1543 
1544 // Transform:
1545 //
1546 // if (some_condition) {
1547 //   // body 1
1548 // } else {
1549 //   // body 2
1550 // }
1551 // if (some_condition) {
1552 //   // body 3
1553 // } else {
1554 //   // body 4
1555 // }
1556 //
1557 // into:
1558 //
1559 //
1560 // if (some_condition) {
1561 //   // body 1

2019   uint i;
2020   for (i = 1; i < phi->req(); i++) {
2021     Node* b = phi->in(i);
2022     if (b->is_Phi()) {
2023       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2024     } else {
2025       assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(),
2026              "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node");
2027     }
2028   }
2029   Node* n = phi->in(1);
2030   Node* sample_opaque = nullptr;
2031   Node *sample_bool = nullptr;
2032   if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) {
2033     sample_opaque = n;
2034     sample_bool = n->in(1);
2035     assert(sample_bool->is_Bool(), "wrong type");
2036   } else {
2037     sample_bool = n;
2038   }
2039   Node *sample_cmp = sample_bool->in(1);








2040 
2041   // Make Phis to merge the Cmp's inputs.
2042   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
2043   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2044   for (i = 1; i < phi->req(); i++) {
2045     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2046     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2047     phi1->set_req(i, n1);
2048     phi2->set_req(i, n2);
2049     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2050     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2051   }
2052   // See if these Phis have been made before.
2053   // Register with optimizer
2054   Node *hit1 = _igvn.hash_find_insert(phi1);
2055   if (hit1) {                   // Hit, toss just made Phi
2056     _igvn.remove_dead_node(phi1); // Remove new phi
2057     assert(hit1->is_Phi(), "" );
2058     phi1 = (PhiNode*)hit1;      // Use existing phi
2059   } else {                      // Miss
2060     _igvn.register_new_node_with_optimizer(phi1);
2061   }
2062   Node *hit2 = _igvn.hash_find_insert(phi2);

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "gc/shared/barrierSet.hpp"
  26 #include "gc/shared/c2/barrierSetC2.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/connode.hpp"
  33 #include "opto/divnode.hpp"
  34 #include "opto/inlinetypenode.hpp"
  35 #include "opto/loopnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/movenode.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/opaquenode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "opto/subtypenode.hpp"
  43 #include "opto/superword.hpp"
  44 #include "opto/vectornode.hpp"
  45 #include "utilities/checkedCast.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
  51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  52   if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
  53       (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
  54     // ConvI2L/ConvL2I may have type information on it which is unsafe to push up
  55     // so disable this for now
  56     return nullptr;
  57   }
  58 
  59   // Splitting range check CastIIs through a loop induction Phi can
  60   // cause new Phis to be created that are left unrelated to the loop
  61   // induction Phi and prevent optimizations (vectorization)
  62   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  63       n->in(1) == region->as_CountedLoop()->phi()) {
  64     return nullptr;
  65   }
  66 
  67   // Inline types should not be split through Phis because they cannot be merged
  68   // through Phi nodes but each value input needs to be merged individually.
  69   if (n->is_InlineType()) {
  70     return nullptr;
  71   }
  72 
  73   if (cannot_split_division(n, region)) {
  74     return nullptr;
  75   }
  76 
  77   SplitThruPhiWins wins(region);
  78   assert(!n->is_CFG(), "");
  79   assert(region->is_Region(), "");
  80 
  81   const Type* type = n->bottom_type();
  82   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  83   Node* phi;
  84   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  85     int iid    = t_oop->instance_id();
  86     int index  = C->get_alias_index(t_oop);
  87     int offset = t_oop->offset();
  88     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  89   } else {
  90     phi = PhiNode::make_blank(region, n);
  91   }
  92   uint old_unique = C->unique();

 757       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 758       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 759       // have a Phi for the base here that we convert to a CMOVE all is well
 760       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 761       // the allocator will have to produce a base by creating a CMOVE of the
 762       // relevant bases.  This puts the allocator in the business of
 763       // manufacturing expensive instructions, generally a bad plan.
 764       // Just Say No to Conditionally-Moved Derived Pointers.
 765       if (tp && tp->offset() != 0)
 766         return nullptr;
 767       cost++;
 768       break;
 769     }
 770     default:
 771       return nullptr;              // In particular, can't do memory or I/O
 772     }
 773     // Add in cost any speculative ops
 774     for (uint j = 1; j < region->req(); j++) {
 775       Node *proj = region->in(j);
 776       Node *inp = phi->in(j);
 777       if (inp->isa_InlineType()) {
 778         // TODO 8302217 This prevents PhiNode::push_inline_types_through
 779         return nullptr;
 780       }
 781       if (get_ctrl(inp) == proj) { // Found local op
 782         cost++;
 783         // Check for a chain of dependent ops; these will all become
 784         // speculative in a CMOV.
 785         for (uint k = 1; k < inp->req(); k++)
 786           if (get_ctrl(inp->in(k)) == proj)
 787             cost += ConditionalMoveLimit; // Too much speculative goo
 788       }
 789     }
 790     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 791     // This will likely Split-If, a higher-payoff operation.
 792     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 793       Node* use = phi->fast_out(k);
 794       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 795         cost += ConditionalMoveLimit;
 796       // Is there a use inside the loop?
 797       // Note: check only basic types since CMoveP is pinned.
 798       if (!used_inside_loop && is_java_primitive(bt)) {
 799         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 800         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

1090             assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1091 
1092             // Move store out of the loop
1093             _igvn.replace_node(hook, n->in(MemNode::Memory));
1094             _igvn.replace_input_of(n, 0, lca);
1095             set_ctrl_and_loop(n, lca);
1096 
1097             // Disconnect the phi now. An empty phi can confuse other
1098             // optimizations in this pass of loop opts..
1099             if (phi->in(LoopNode::LoopBackControl) == phi) {
1100               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1101               n_loop->_body.yank(phi);
1102             }
1103           }
1104         }
1105       }
1106     }
1107   }
1108 }
1109 
// We can't use immutable memory for the flat array check because we are loading the mark word which is
// mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
// we need to use raw memory to not break anti dependency analysis. Below code will attempt to still move
// flat array checks out of loops, mainly to enable loop unswitching.
void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
  // Skip checks for more than one array
  if (n->req() > 3) {
    return;
  }
  Node* mem = n->in(FlatArrayCheckNode::Memory);
  Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
  IdealLoopTree* check_loop = get_loop(get_ctrl(n));
  IdealLoopTree* ary_loop = get_loop(get_ctrl(array));

  // Check if array is loop invariant
  if (!check_loop->is_member(ary_loop)) {
    // Walk up memory graph from the check until we leave the loop
    VectorSet wq;
    wq.set(mem->_idx);
    while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
      if (mem->is_Phi()) {
        // For a raw-memory walk any phi input leads out; take the first
        mem = mem->in(1);
      } else if (mem->is_MergeMem()) {
        // Follow only the raw slice; the check loads the (raw) mark word
        mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
      } else if (mem->is_Proj()) {
        mem = mem->in(0);
      } else if (mem->is_MemBar() || mem->is_SafePoint()) {
        mem = mem->in(TypeFunc::Memory);
      } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
        mem = mem->in(MemNode::Memory);
      } else {
        // Unexpected memory node shape: dump it in debug builds and fail
#ifdef ASSERT
        mem->dump();
#endif
        ShouldNotReachHere();
      }
      // Cycle guard: bail out if the walk revisits a node
      if (wq.test_set(mem->_idx)) {
        return;
      }
    }
    // Replace memory input (input 1, i.e. FlatArrayCheckNode::Memory) and
    // re-compute ctrl to move the check out of the loop
    _igvn.replace_input_of(n, 1, mem);
    set_ctrl_and_loop(n, get_early_ctrl(n));
    // The check's unique Bool user moves with it
    Node* bol = n->unique_out();
    set_ctrl_and_loop(bol, get_early_ctrl(bol));
  }
}
1157 
1158 //------------------------------split_if_with_blocks_pre-----------------------
1159 // Do the real work in a non-recursive function.  Data nodes want to be
1160 // cloned in the pre-order so they can feed each other nicely.
1161 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1162   // Cloning these guys is unlikely to win
1163   int n_op = n->Opcode();
1164   if (n_op == Op_MergeMem) {
1165     return n;
1166   }
1167   if (n->is_Proj()) {
1168     return n;
1169   }
1170 
1171   if (n->isa_FlatArrayCheck()) {
1172     move_flat_array_check_out_of_loop(n);
1173     return n;
1174   }
1175 
1176   // Do not clone-up CmpFXXX variations, as these are always
1177   // followed by a CmpI
1178   if (n->is_Cmp()) {
1179     return n;
1180   }
1181   // Attempt to use a conditional move instead of a phi/branch
1182   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1183     Node *cmov = conditional_move( n );
1184     if (cmov) {
1185       return cmov;
1186     }
1187   }
1188   if (n->is_CFG() || n->is_LoadStore()) {
1189     return n;
1190   }
1191   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1192     if (!C->major_progress()) {   // If chance of no more loop opts...
1193       _igvn._worklist.push(n);  // maybe we'll remove them
1194     }
1195     return n;

1431 
1432   return true;
1433 }
1434 
1435 // Detect if the node is the inner strip-mined loop
1436 // Return: null if it's not the case, or the exit of outer strip-mined loop
1437 static Node* is_inner_of_stripmined_loop(const Node* out) {
1438   Node* out_le = nullptr;
1439 
1440   if (out->is_CountedLoopEnd()) {
1441       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1442 
1443       if (loop != nullptr && loop->is_strip_mined()) {
1444         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1445       }
1446   }
1447 
1448   return out_le;
1449 }
1450 
bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
  // If the CmpP is a subtype check for a value that has just been
  // loaded from an array, the subtype check guarantees the value
  // can't be stored in a flat array and the load of the value
  // happens with a flat array check then: push the type check
  // through the phi of the flat array check. This needs special
  // logic because the subtype check's input is not a phi but a
  // LoadKlass that must first be cloned through the phi.
  if (n->Opcode() != Op_CmpP) {
    return false;
  }

  Node* klassptr = n->in(1);
  Node* klasscon = n->in(2);

  // Look through a narrow-klass decode to reach the underlying load
  if (klassptr->is_DecodeNarrowPtr()) {
    klassptr = klassptr->in(1);
  }

  // Only handle a klass load compared against a constant
  if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
    return false;
  }

  if (!klasscon->is_Con()) {
    return false;
  }

  Node* addr = klassptr->in(MemNode::Address);

  if (!addr->is_AddP()) {
    return false;
  }

  intptr_t offset;
  Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);

  if (obj == nullptr) {
    return false;
  }

  // The klass load must address into the object itself (base == address)
  assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
  if (obj->Opcode() == Op_CastPP) {
    obj = obj->in(1);
  }

  // The object must merge at a region (e.g. the flat array check diamond)
  if (!obj->is_Phi()) {
    return false;
  }

  Node* region = obj->in(0);

  // Clone the address computation and klass load down each path into the
  // region and merge the per-path klass values with a new Phi.
  Node* phi = PhiNode::make_blank(region, n->in(1));
  for (uint i = 1; i < region->req(); i++) {
    Node* in = obj->in(i);
    Node* ctrl = region->in(i);
    if (addr->in(AddPNode::Base) != obj) {
      // A CastPP sits between the phi and the AddP: clone it per path too,
      // pinned to this path's control and fed by this path's object value
      Node* cast = addr->in(AddPNode::Base);
      assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
      Node* cast_clone = cast->clone();
      cast_clone->set_req(0, ctrl);
      cast_clone->set_req(1, in);
      register_new_node(cast_clone, ctrl);
      const Type* tcast = cast_clone->Value(&_igvn);
      _igvn.set_type(cast_clone, tcast);
      cast_clone->as_Type()->set_type(tcast);
      in = cast_clone;
    }
    Node* addr_clone = addr->clone();
    addr_clone->set_req(AddPNode::Base, in);
    addr_clone->set_req(AddPNode::Address, in);
    register_new_node(addr_clone, ctrl);
    _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
    Node* klassptr_clone = klassptr->clone();
    klassptr_clone->set_req(2, addr_clone); // 2 == MemNode::Address
    register_new_node(klassptr_clone, ctrl);
    _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
    if (klassptr != n->in(1)) {
      // Original compare went through a narrow-klass decode: re-apply it
      // on top of each cloned load
      Node* decode = n->in(1);
      assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
      Node* decode_clone = decode->clone();
      decode_clone->set_req(1, klassptr_clone);
      register_new_node(decode_clone, ctrl);
      _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
      klassptr_clone = decode_clone;
    }
    phi->set_req(i, klassptr_clone);
  }
  register_new_node(phi, region);
  Node* orig = n->in(1);
  // Feed the merged klass phi into the CmpP and retry the split-if transform
  _igvn.replace_input_of(n, 1, phi);
  split_if_with_blocks_post(n);
  if (n->outcnt() != 0) {
    // The split did not consume the compare: restore the original input
    // and discard the speculative phi
    _igvn.replace_input_of(n, 1, orig);
    _igvn.remove_dead_node(phi);
  }
  return true;
}
1548 
1549 //------------------------------split_if_with_blocks_post----------------------
1550 // Do the real work in a non-recursive function.  CFG hackery wants to be
1551 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1552 // info.
1553 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1554 
1555   if (flat_array_element_type_check(n)) {
1556     return;
1557   }
1558 
1559   // Cloning Cmp through Phi's involves the split-if transform.
1560   // FastLock is not used by an If
1561   if (n->is_Cmp() && !n->is_FastLock()) {
1562     Node *n_ctrl = get_ctrl(n);
1563     // Determine if the Node has inputs from some local Phi.
1564     // Returns the block to clone thru.
1565     Node *n_blk = has_local_phi_input(n);
1566     if (n_blk != n_ctrl) {
1567       return;
1568     }
1569 
1570     if (!can_split_if(n_ctrl)) {
1571       return;
1572     }
1573 
1574     if (n->outcnt() != 1) {
1575       return; // Multiple bool's from 1 compare?
1576     }
1577     Node *bol = n->unique_out();
1578     assert(bol->is_Bool(), "expect a bool here");

1689           //    accesses would start to float, since we don't pin at that point.
1690           // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1691           bool pin_array_access_nodes =  n->Opcode() == Op_RangeCheck &&
1692                                          prevdom->in(0)->Opcode() != Op_RangeCheck;
1693           dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1694           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1695           return;
1696         }
1697         prevdom = dom;
1698         dom = idom(prevdom);
1699       }
1700     }
1701   }
1702 
1703   try_sink_out_of_loop(n);
1704   if (C->failing()) {
1705     return;
1706   }
1707 
1708   try_move_store_after_loop(n);
1709 
1710   // Remove multiple allocations of the same inline type
1711   if (n->is_InlineType()) {
1712     n->as_InlineType()->remove_redundant_allocations(this);
1713   }
1714 }
1715 
1716 // Transform:
1717 //
1718 // if (some_condition) {
1719 //   // body 1
1720 // } else {
1721 //   // body 2
1722 // }
1723 // if (some_condition) {
1724 //   // body 3
1725 // } else {
1726 //   // body 4
1727 // }
1728 //
1729 // into:
1730 //
1731 //
1732 // if (some_condition) {
1733 //   // body 1

2191   uint i;
2192   for (i = 1; i < phi->req(); i++) {
2193     Node* b = phi->in(i);
2194     if (b->is_Phi()) {
2195       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2196     } else {
2197       assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(),
2198              "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node");
2199     }
2200   }
2201   Node* n = phi->in(1);
2202   Node* sample_opaque = nullptr;
2203   Node *sample_bool = nullptr;
2204   if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) {
2205     sample_opaque = n;
2206     sample_bool = n->in(1);
2207     assert(sample_bool->is_Bool(), "wrong type");
2208   } else {
2209     sample_bool = n;
2210   }
2211   Node* sample_cmp = sample_bool->in(1);
2212   const Type* t = Type::TOP;
2213   const TypePtr* at = nullptr;
2214   if (sample_cmp->is_FlatArrayCheck()) {
2215     // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
2216     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2217     t = Type::MEMORY;
2218     at = TypeRawPtr::BOTTOM;
2219   }
2220 
2221   // Make Phis to merge the Cmp's inputs.
2222   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2223   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2224   for (i = 1; i < phi->req(); i++) {
2225     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2226     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2227     phi1->set_req(i, n1);
2228     phi2->set_req(i, n2);
2229     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2230     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2231   }
2232   // See if these Phis have been made before.
2233   // Register with optimizer
2234   Node *hit1 = _igvn.hash_find_insert(phi1);
2235   if (hit1) {                   // Hit, toss just made Phi
2236     _igvn.remove_dead_node(phi1); // Remove new phi
2237     assert(hit1->is_Phi(), "" );
2238     phi1 = (PhiNode*)hit1;      // Use existing phi
2239   } else {                      // Miss
2240     _igvn.register_new_node_with_optimizer(phi1);
2241   }
2242   Node *hit2 = _igvn.hash_find_insert(phi2);
< prev index next >