< prev index next >

src/hotspot/share/opto/loopopts.cpp

Print this page

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"

  36 #include "opto/loopnode.hpp"
  37 #include "opto/matcher.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "opto/subtypenode.hpp"
  44 #include "opto/superword.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
  51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  52   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  53     // ConvI2L may have type information on it which is unsafe to push up
  54     // so disable this for now
  55     return nullptr;
  56   }
  57 
  58   // Splitting range check CastIIs through a loop induction Phi can
  59   // cause new Phis to be created that are left unrelated to the loop
  60   // induction Phi and prevent optimizations (vectorization)
  61   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  62       n->in(1) == region->as_CountedLoop()->phi()) {
  63     return nullptr;
  64   }
  65 






  66   if (cannot_split_division(n, region)) {
  67     return nullptr;
  68   }
  69 
  70   int wins = 0;
  71   assert(!n->is_CFG(), "");
  72   assert(region->is_Region(), "");
  73 
  74   const Type* type = n->bottom_type();
  75   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  76   Node* phi;
  77   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  78     int iid    = t_oop->instance_id();
  79     int index  = C->get_alias_index(t_oop);
  80     int offset = t_oop->offset();
  81     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  82   } else {
  83     phi = PhiNode::make_blank(region, n);
  84   }
  85   uint old_unique = C->unique();

 735       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 736       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 737       // have a Phi for the base here that we convert to a CMOVE all is well
 738       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 739       // the allocator will have to produce a base by creating a CMOVE of the
 740       // relevant bases.  This puts the allocator in the business of
 741       // manufacturing expensive instructions, generally a bad plan.
 742       // Just Say No to Conditionally-Moved Derived Pointers.
 743       if (tp && tp->offset() != 0)
 744         return nullptr;
 745       cost++;
 746       break;
 747     }
 748     default:
 749       return nullptr;              // In particular, can't do memory or I/O
 750     }
 751     // Add in cost any speculative ops
 752     for (uint j = 1; j < region->req(); j++) {
 753       Node *proj = region->in(j);
 754       Node *inp = phi->in(j);




 755       if (get_ctrl(inp) == proj) { // Found local op
 756         cost++;
 757         // Check for a chain of dependent ops; these will all become
 758         // speculative in a CMOV.
 759         for (uint k = 1; k < inp->req(); k++)
 760           if (get_ctrl(inp->in(k)) == proj)
 761             cost += ConditionalMoveLimit; // Too much speculative goo
 762       }
 763     }
 764     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 765     // This will likely Split-If, a higher-payoff operation.
 766     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 767       Node* use = phi->fast_out(k);
 768       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 769         cost += ConditionalMoveLimit;
 770       // Is there a use inside the loop?
 771       // Note: check only basic types since CMoveP is pinned.
 772       if (!used_inside_loop && is_java_primitive(bt)) {
 773         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 774         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

1053             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");
1054 
1055             // Move store out of the loop
1056             _igvn.replace_node(hook, n->in(MemNode::Memory));
1057             _igvn.replace_input_of(n, 0, lca);
1058             set_ctrl_and_loop(n, lca);
1059 
1060             // Disconnect the phi now. An empty phi can confuse other
1061             // optimizations in this pass of loop opts..
1062             if (phi->in(LoopNode::LoopBackControl) == phi) {
1063               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1064               n_loop->_body.yank(phi);
1065             }
1066           }
1067         }
1068       }
1069     }
1070   }
1071 }
1072 
















































1073 //------------------------------split_if_with_blocks_pre-----------------------
1074 // Do the real work in a non-recursive function.  Data nodes want to be
1075 // cloned in the pre-order so they can feed each other nicely.
1076 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1077   // Cloning these guys is unlikely to win
1078   int n_op = n->Opcode();
1079   if (n_op == Op_MergeMem) {
1080     return n;
1081   }
1082   if (n->is_Proj()) {
1083     return n;
1084   }






1085   // Do not clone-up CmpFXXX variations, as these are always
1086   // followed by a CmpI
1087   if (n->is_Cmp()) {
1088     return n;
1089   }
1090   // Attempt to use a conditional move instead of a phi/branch
1091   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1092     Node *cmov = conditional_move( n );
1093     if (cmov) {
1094       return cmov;
1095     }
1096   }
1097   if (n->is_CFG() || n->is_LoadStore()) {
1098     return n;
1099   }
1100   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1101     if (!C->major_progress()) {   // If chance of no more loop opts...
1102       _igvn._worklist.push(n);  // maybe we'll remove them
1103     }
1104     return n;

1342 
1343   return true;
1344 }
1345 
1346 // Detect if the node is the inner strip-mined loop
1347 // Return: null if it's not the case, or the exit of outer strip-mined loop
1348 static Node* is_inner_of_stripmined_loop(const Node* out) {
1349   Node* out_le = nullptr;
1350 
1351   if (out->is_CountedLoopEnd()) {
       // Only a CountedLoopEnd can terminate the inner loop of a strip-mined
       // nest; fetch its CountedLoop to inspect the strip-mining flag.
1352       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1353 
1354       if (loop != nullptr && loop->is_strip_mined()) {
            // The inner loop's entry control is the OuterStripMinedLoop head;
            // report that outer loop's exit projection to the caller.
1355         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1356       }
1357   }
1358 
       // nullptr when 'out' is not the end of a strip-mined inner loop.
1359   return out_le;
1360 }
1361 


































































































1362 //------------------------------split_if_with_blocks_post----------------------
1363 // Do the real work in a non-recursive function.  CFG hackery wants to be
1364 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1365 // info.
1366 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1367 




1368   // Cloning Cmp through Phi's involves the split-if transform.
1369   // FastLock is not used by an If
1370   if (n->is_Cmp() && !n->is_FastLock()) {
1371     Node *n_ctrl = get_ctrl(n);
1372     // Determine if the Node has inputs from some local Phi.
1373     // Returns the block to clone thru.
1374     Node *n_blk = has_local_phi_input(n);
1375     if (n_blk != n_ctrl) {
1376       return;
1377     }
1378 
1379     if (!can_split_if(n_ctrl)) {
1380       return;
1381     }
1382 
1383     if (n->outcnt() != 1) {
1384       return; // Multiple bool's from 1 compare?
1385     }
1386     Node *bol = n->unique_out();
1387     assert(bol->is_Bool(), "expect a bool here");

1486           Node* out_le = is_inner_of_stripmined_loop(dom);
1487           if (out_le != nullptr) {
1488             prevdom = out_le;
1489           }
1490           // Replace the dominated test with an obvious true or false.
1491           // Place it on the IGVN worklist for later cleanup.
1492           C->set_major_progress();
1493           dominated_by(prevdom->as_IfProj(), n->as_If());
1494           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1495           return;
1496         }
1497         prevdom = dom;
1498         dom = idom(prevdom);
1499       }
1500     }
1501   }
1502 
1503   try_sink_out_of_loop(n);
1504 
1505   try_move_store_after_loop(n);





1506 }
1507 
1508 // Transform:
1509 //
1510 // if (some_condition) {
1511 //   // body 1
1512 // } else {
1513 //   // body 2
1514 // }
1515 // if (some_condition) {
1516 //   // body 3
1517 // } else {
1518 //   // body 4
1519 // }
1520 //
1521 // into:
1522 //
1523 //
1524 // if (some_condition) {
1525 //   // body 1

1945   uint i;
1946   for (i = 1; i < phi->req(); i++) {
1947     Node *b = phi->in(i);
1948     if (b->is_Phi()) {
1949       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
1950     } else {
1951       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
1952     }
1953   }
1954 
1955   Node* n = phi->in(1);
1956   Node* sample_opaque = nullptr;
1957   Node *sample_bool = nullptr;
1958   if (n->Opcode() == Op_Opaque4) {
1959     sample_opaque = n;
1960     sample_bool = n->in(1);
1961     assert(sample_bool->is_Bool(), "wrong type");
1962   } else {
1963     sample_bool = n;
1964   }
1965   Node *sample_cmp = sample_bool->in(1);








1966 
1967   // Make Phis to merge the Cmp's inputs.
1968   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
1969   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
1970   for (i = 1; i < phi->req(); i++) {
1971     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
1972     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
1973     phi1->set_req(i, n1);
1974     phi2->set_req(i, n2);
1975     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1976     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1977   }
1978   // See if these Phis have been made before.
1979   // Register with optimizer
1980   Node *hit1 = _igvn.hash_find_insert(phi1);
1981   if (hit1) {                   // Hit, toss just made Phi
1982     _igvn.remove_dead_node(phi1); // Remove new phi
1983     assert(hit1->is_Phi(), "" );
1984     phi1 = (PhiNode*)hit1;      // Use existing phi
1985   } else {                      // Miss
1986     _igvn.register_new_node_with_optimizer(phi1);
1987   }
1988   Node *hit2 = _igvn.hash_find_insert(phi2);

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/mulnode.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/subnode.hpp"
  44 #include "opto/subtypenode.hpp"
  45 #include "opto/superword.hpp"
  46 #include "opto/vectornode.hpp"
  47 #include "utilities/macros.hpp"
  48 
  49 //=============================================================================
  50 //------------------------------split_thru_phi---------------------------------
  51 // Split Node 'n' through merge point if there is enough win.
  52 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  53   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  54     // ConvI2L may have type information on it which is unsafe to push up
  55     // so disable this for now
  56     return nullptr;
  57   }
  58 
  59   // Splitting range check CastIIs through a loop induction Phi can
  60   // cause new Phis to be created that are left unrelated to the loop
  61   // induction Phi and prevent optimizations (vectorization)
  62   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  63       n->in(1) == region->as_CountedLoop()->phi()) {
  64     return nullptr;
  65   }
  66 
  67   // Inline types should not be split through Phis because they cannot be merged
  68   // through Phi nodes but each value input needs to be merged individually.
  69   if (n->is_InlineType()) {
  70     return nullptr;
  71   }
  72 
  73   if (cannot_split_division(n, region)) {
  74     return nullptr;
  75   }
  76 
  77   int wins = 0;
  78   assert(!n->is_CFG(), "");
  79   assert(region->is_Region(), "");
  80 
  81   const Type* type = n->bottom_type();
  82   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  83   Node* phi;
  84   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  85     int iid    = t_oop->instance_id();
  86     int index  = C->get_alias_index(t_oop);
  87     int offset = t_oop->offset();
  88     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  89   } else {
  90     phi = PhiNode::make_blank(region, n);
  91   }
  92   uint old_unique = C->unique();

 742       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 743       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 744       // have a Phi for the base here that we convert to a CMOVE all is well
 745       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 746       // the allocator will have to produce a base by creating a CMOVE of the
 747       // relevant bases.  This puts the allocator in the business of
 748       // manufacturing expensive instructions, generally a bad plan.
 749       // Just Say No to Conditionally-Moved Derived Pointers.
 750       if (tp && tp->offset() != 0)
 751         return nullptr;
 752       cost++;
 753       break;
 754     }
 755     default:
 756       return nullptr;              // In particular, can't do memory or I/O
 757     }
 758     // Add in cost any speculative ops
 759     for (uint j = 1; j < region->req(); j++) {
 760       Node *proj = region->in(j);
 761       Node *inp = phi->in(j);
 762       if (inp->isa_InlineType()) {
 763         // TODO 8302217 This prevents PhiNode::push_inline_types_through
 764         return nullptr;
 765       }
 766       if (get_ctrl(inp) == proj) { // Found local op
 767         cost++;
 768         // Check for a chain of dependent ops; these will all become
 769         // speculative in a CMOV.
 770         for (uint k = 1; k < inp->req(); k++)
 771           if (get_ctrl(inp->in(k)) == proj)
 772             cost += ConditionalMoveLimit; // Too much speculative goo
 773       }
 774     }
 775     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 776     // This will likely Split-If, a higher-payoff operation.
 777     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 778       Node* use = phi->fast_out(k);
 779       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 780         cost += ConditionalMoveLimit;
 781       // Is there a use inside the loop?
 782       // Note: check only basic types since CMoveP is pinned.
 783       if (!used_inside_loop && is_java_primitive(bt)) {
 784         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 785         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

1064             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");
1065 
1066             // Move store out of the loop
1067             _igvn.replace_node(hook, n->in(MemNode::Memory));
1068             _igvn.replace_input_of(n, 0, lca);
1069             set_ctrl_and_loop(n, lca);
1070 
1071             // Disconnect the phi now. An empty phi can confuse other
1072             // optimizations in this pass of loop opts..
1073             if (phi->in(LoopNode::LoopBackControl) == phi) {
1074               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1075               n_loop->_body.yank(phi);
1076             }
1077           }
1078         }
1079       }
1080     }
1081   }
1082 }
1083 
1084 // We can't use immutable memory for the flat array check because we are loading the mark word which is
1085 // mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
1086 // we need to use raw memory to not break anti dependency analysis. Below code will attempt to still move
1087 // flat array checks out of loops, mainly to enable loop unswitching.
1088 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1089   // Skip checks for more than one array
1090   if (n->req() > 3) {
1091     return;
1092   }
1093   Node* mem = n->in(FlatArrayCheckNode::Memory);
1094   Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1095   IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1096   IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1097 
1098   // Check if array is loop invariant
1099   if (!check_loop->is_member(ary_loop)) {
1100     // Walk up memory graph from the check until we leave the loop
       // wq records visited node indices so a cycle in the memory graph
       // (e.g. a loop Phi reached again) bails out instead of spinning.
1101     VectorSet wq;
1102     wq.set(mem->_idx);
1103     while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1104       if (mem->is_Phi()) {
            // Follow the first memory input of the Phi (presumably the
            // loop-entry path — TODO confirm for multi-region Phis).
1105         mem = mem->in(1);
1106       } else if (mem->is_MergeMem()) {
            // The flat array check loads the mark word through raw memory,
            // so follow the raw slice of the MergeMem.
1107         mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1108       } else if (mem->is_Proj()) {
1109         mem = mem->in(0);
1110       } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1111         mem = mem->in(TypeFunc::Memory);
1112       } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1113         mem = mem->in(MemNode::Memory);
1114       } else {
1115 #ifdef ASSERT
1116         mem->dump();
1117 #endif
            // Unexpected memory node kind: the walk above is expected to be
            // exhaustive for this graph shape.
1118         ShouldNotReachHere();
1119       }
          // Already seen this node => cycle; give up on hoisting the check.
1120       if (wq.test_set(mem->_idx)) {
1121         return;
1122       }
1123     }
1124     // Replace memory input and re-compute ctrl to move the check out of the loop
1125     _igvn.replace_input_of(n, 1, mem);
1126     set_ctrl_and_loop(n, get_early_ctrl(n));
       // The check's single user is its Bool; move it along with the check.
1127     Node* bol = n->unique_out();
1128     set_ctrl_and_loop(bol, get_early_ctrl(bol));
1129   }
1130 }
1131 
1132 //------------------------------split_if_with_blocks_pre-----------------------
1133 // Do the real work in a non-recursive function.  Data nodes want to be
1134 // cloned in the pre-order so they can feed each other nicely.
1135 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1136   // Cloning these guys is unlikely to win
1137   int n_op = n->Opcode();
1138   if (n_op == Op_MergeMem) {
1139     return n;
1140   }
1141   if (n->is_Proj()) {
1142     return n;
1143   }
1144 
1145   if (n->isa_FlatArrayCheck()) {
1146     move_flat_array_check_out_of_loop(n);
1147     return n;
1148   }
1149 
1150   // Do not clone-up CmpFXXX variations, as these are always
1151   // followed by a CmpI
1152   if (n->is_Cmp()) {
1153     return n;
1154   }
1155   // Attempt to use a conditional move instead of a phi/branch
1156   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1157     Node *cmov = conditional_move( n );
1158     if (cmov) {
1159       return cmov;
1160     }
1161   }
1162   if (n->is_CFG() || n->is_LoadStore()) {
1163     return n;
1164   }
1165   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1166     if (!C->major_progress()) {   // If chance of no more loop opts...
1167       _igvn._worklist.push(n);  // maybe we'll remove them
1168     }
1169     return n;

1407 
1408   return true;
1409 }
1410 
1411 // Detect if the node is the inner strip-mined loop
1412 // Return: null if it's not the case, or the exit of outer strip-mined loop
1413 static Node* is_inner_of_stripmined_loop(const Node* out) {
1414   Node* out_le = nullptr;
1415 
1416   if (out->is_CountedLoopEnd()) {
       // Only a CountedLoopEnd can terminate the inner loop of a strip-mined
       // nest; fetch its CountedLoop to inspect the strip-mining flag.
1417       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1418 
1419       if (loop != nullptr && loop->is_strip_mined()) {
            // The inner loop's entry control is the OuterStripMinedLoop head;
            // report that outer loop's exit projection to the caller.
1420         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1421       }
1422   }
1423 
       // nullptr when 'out' is not the end of a strip-mined inner loop.
1424   return out_le;
1425 }
1426 
1427 bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
1428   // If the CmpP is a subtype check for a value that has just been
1429   // loaded from an array, the subtype check guarantees the value
1430   // can't be stored in a flat array and the load of the value
1431   // happens with a flat array check then: push the type check
1432   // through the phi of the flat array check. This needs special
1433   // logic because the subtype check's input is not a phi but a
1434   // LoadKlass that must first be cloned through the phi.
1435   if (n->Opcode() != Op_CmpP) {
1436     return false;
1437   }
1438 
1439   Node* klassptr = n->in(1);
1440   Node* klasscon = n->in(2);
1441 
     // Look through a DecodeN/DecodeNKlass to find the underlying klass load.
1442   if (klassptr->is_DecodeNarrowPtr()) {
1443     klassptr = klassptr->in(1);
1444   }
1445 
     // Pattern match: CmpP(LoadKlass/LoadNKlass(AddP(obj, ...)), constant).
     // Anything else is not the subtype-check shape this transform handles.
1446   if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1447     return false;
1448   }
1449 
1450   if (!klasscon->is_Con()) {
1451     return false;
1452   }
1453 
1454   Node* addr = klassptr->in(MemNode::Address);
1455 
1456   if (!addr->is_AddP()) {
1457     return false;
1458   }
1459 
1460   intptr_t offset;
1461   Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1462 
1463   if (obj == nullptr) {
1464     return false;
1465   }
1466 
     // NOTE(review): obj != nullptr is already guaranteed by the early return
     // above; the assert mainly documents that base == address on this AddP.
1467   assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
     // Strip a CastPP so we can see the Phi produced by the flat array check.
1468   if (obj->Opcode() == Op_CastPP) {
1469     obj = obj->in(1);
1470   }
1471 
1472   if (!obj->is_Phi()) {
1473     return false;
1474   }
1475 
1476   Node* region = obj->in(0);
1477 
     // Clone the whole LoadKlass chain (CastPP -> AddP -> Load(N)Klass
     // [-> DecodeN]) once per Phi input, merging the results in a new Phi.
1478   Node* phi = PhiNode::make_blank(region, n->in(1));
1479   for (uint i = 1; i < region->req(); i++) {
1480     Node* in = obj->in(i);
1481     Node* ctrl = region->in(i);
1482     if (addr->in(AddPNode::Base) != obj) {
         // The address was fed by a CastPP of the Phi: clone the cast onto
         // this path so the cloned AddP keeps an equivalent narrowed type.
1483       Node* cast = addr->in(AddPNode::Base);
1484       assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
1485       Node* cast_clone = cast->clone();
1486       cast_clone->set_req(0, ctrl);
1487       cast_clone->set_req(1, in);
1488       register_new_node(cast_clone, ctrl);
1489       const Type* tcast = cast_clone->Value(&_igvn);
1490       _igvn.set_type(cast_clone, tcast);
1491       cast_clone->as_Type()->set_type(tcast);
1492       in = cast_clone;
1493     }
1494     Node* addr_clone = addr->clone();
1495     addr_clone->set_req(AddPNode::Base, in);
1496     addr_clone->set_req(AddPNode::Address, in);
1497     register_new_node(addr_clone, ctrl);
1498     _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1499     Node* klassptr_clone = klassptr->clone();
         // Input 2 of a Load is its address (MemNode::Address).
1500     klassptr_clone->set_req(2, addr_clone);
1501     register_new_node(klassptr_clone, ctrl);
1502     _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1503     if (klassptr != n->in(1)) {
         // The original compare went through a DecodeN; re-apply it per path.
1504       Node* decode = n->in(1);
1505       assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1506       Node* decode_clone = decode->clone();
1507       decode_clone->set_req(1, klassptr_clone);
1508       register_new_node(decode_clone, ctrl);
1509       _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1510       klassptr_clone = decode_clone;
1511     }
1512     phi->set_req(i, klassptr_clone);
1513   }
1514   register_new_node(phi, region);
     // Swap the new Phi in and let the split-if machinery try to consume the
     // CmpP. Keep the old input around so the change can be undone.
1515   Node* orig = n->in(1);
1516   _igvn.replace_input_of(n, 1, phi);
1517   split_if_with_blocks_post(n);
     // If the CmpP still has uses, the split did not happen: restore the
     // original input and discard the speculatively built Phi.
1518   if (n->outcnt() != 0) {
1519     _igvn.replace_input_of(n, 1, orig);
1520     _igvn.remove_dead_node(phi);
1521   }
1522   return true;
1523 }
1524 
1525 //------------------------------split_if_with_blocks_post----------------------
1526 // Do the real work in a non-recursive function.  CFG hackery wants to be
1527 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1528 // info.
1529 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1530 
1531   if (flat_array_element_type_check(n)) {
1532     return;
1533   }
1534 
1535   // Cloning Cmp through Phi's involves the split-if transform.
1536   // FastLock is not used by an If
1537   if (n->is_Cmp() && !n->is_FastLock()) {
1538     Node *n_ctrl = get_ctrl(n);
1539     // Determine if the Node has inputs from some local Phi.
1540     // Returns the block to clone thru.
1541     Node *n_blk = has_local_phi_input(n);
1542     if (n_blk != n_ctrl) {
1543       return;
1544     }
1545 
1546     if (!can_split_if(n_ctrl)) {
1547       return;
1548     }
1549 
1550     if (n->outcnt() != 1) {
1551       return; // Multiple bool's from 1 compare?
1552     }
1553     Node *bol = n->unique_out();
1554     assert(bol->is_Bool(), "expect a bool here");

1653           Node* out_le = is_inner_of_stripmined_loop(dom);
1654           if (out_le != nullptr) {
1655             prevdom = out_le;
1656           }
1657           // Replace the dominated test with an obvious true or false.
1658           // Place it on the IGVN worklist for later cleanup.
1659           C->set_major_progress();
1660           dominated_by(prevdom->as_IfProj(), n->as_If());
1661           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1662           return;
1663         }
1664         prevdom = dom;
1665         dom = idom(prevdom);
1666       }
1667     }
1668   }
1669 
1670   try_sink_out_of_loop(n);
1671 
1672   try_move_store_after_loop(n);
1673 
1674   // Remove multiple allocations of the same inline type
1675   if (n->is_InlineType()) {
1676     n->as_InlineType()->remove_redundant_allocations(this);
1677   }
1678 }
1679 
1680 // Transform:
1681 //
1682 // if (some_condition) {
1683 //   // body 1
1684 // } else {
1685 //   // body 2
1686 // }
1687 // if (some_condition) {
1688 //   // body 3
1689 // } else {
1690 //   // body 4
1691 // }
1692 //
1693 // into:
1694 //
1695 //
1696 // if (some_condition) {
1697 //   // body 1

2117   uint i;
2118   for (i = 1; i < phi->req(); i++) {
2119     Node *b = phi->in(i);
2120     if (b->is_Phi()) {
2121       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2122     } else {
2123       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
2124     }
2125   }
2126 
2127   Node* n = phi->in(1);
2128   Node* sample_opaque = nullptr;
2129   Node *sample_bool = nullptr;
2130   if (n->Opcode() == Op_Opaque4) {
2131     sample_opaque = n;
2132     sample_bool = n->in(1);
2133     assert(sample_bool->is_Bool(), "wrong type");
2134   } else {
2135     sample_bool = n;
2136   }
2137   Node* sample_cmp = sample_bool->in(1);
2138   const Type* t = Type::TOP;
2139   const TypePtr* at = nullptr;
2140   if (sample_cmp->is_FlatArrayCheck()) {
2141     // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
2142     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2143     t = Type::MEMORY;
2144     at = TypeRawPtr::BOTTOM;
2145   }
2146 
2147   // Make Phis to merge the Cmp's inputs.
2148   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2149   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2150   for (i = 1; i < phi->req(); i++) {
2151     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2152     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2153     phi1->set_req(i, n1);
2154     phi2->set_req(i, n2);
2155     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2156     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2157   }
2158   // See if these Phis have been made before.
2159   // Register with optimizer
2160   Node *hit1 = _igvn.hash_find_insert(phi1);
2161   if (hit1) {                   // Hit, toss just made Phi
2162     _igvn.remove_dead_node(phi1); // Remove new phi
2163     assert(hit1->is_Phi(), "" );
2164     phi1 = (PhiNode*)hit1;      // Use existing phi
2165   } else {                      // Miss
2166     _igvn.register_new_node_with_optimizer(phi1);
2167   }
2168   Node *hit2 = _igvn.hash_find_insert(phi2);
< prev index next >