src/hotspot/share/opto/loopopts.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/mulnode.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/subnode.hpp"
  44 #include "opto/subtypenode.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
  51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  52   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  53     // ConvI2L may have type information on it which is unsafe to push up
  54     // so disable this for now
  55     return nullptr;
  56   }
  57 
  58   // Splitting a range check CastII through a loop induction Phi can
  59   // cause new Phis to be created that are unrelated to the loop
  60   // induction Phi, preventing optimizations such as vectorization
  61   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  62       n->in(1) == region->as_CountedLoop()->phi()) {
  63     return nullptr;
  64   }
  65 
  66   // Inline types should not be split through Phis: they cannot be merged
  67   // through Phi nodes; each value input needs to be merged individually.
  68   if (n->is_InlineType()) {
  69     return nullptr;
  70   }
  71 
  72   if (cannot_split_division(n, region)) {
  73     return nullptr;
  74   }
  75 
  76   int wins = 0;
  77   assert(!n->is_CFG(), "");
  78   assert(region->is_Region(), "");
  79 
  80   const Type* type = n->bottom_type();
  81   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  82   Node* phi;
  83   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  84     int iid    = t_oop->instance_id();
  85     int index  = C->get_alias_index(t_oop);
  86     int offset = t_oop->offset();
  87     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  88   } else {
  89     phi = PhiNode::make_blank(region, n);
  90   }
  91   uint old_unique = C->unique();
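
For readers new to this transform, here is a source-level sketch of what splitting a node through a merge point means. This is illustrative C++ only, not C2 IR; the function names are invented for the example.

#include <cstdio>

// Before: an op hangs below a Phi at a merge point.
static int op_below_merge(bool flag, int a, int b, int c) {
  int x = flag ? a : b;  // Phi(a, b) at the Region
  return x + c;          // AddI(Phi, c) below the merge
}

// After split_thru_phi: the op is cloned onto each predecessor path and
// a Phi merges the results; each clone may now fold with its input.
static int op_split_thru_phi(bool flag, int a, int b, int c) {
  return flag ? (a + c) : (b + c);  // Phi(AddI(a, c), AddI(b, c))
}

int main() {
  printf("%d == %d\n", op_below_merge(true, 1, 2, 3),
                       op_split_thru_phi(true, 1, 2, 3));
  return 0;
}

Roughly speaking, the policy argument is the profitability threshold that the computed "wins" counter above must clear for the split to be considered worthwhile.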

 738       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 739       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 740       // have a Phi for the base here that we convert to a CMOVE all is well
 741       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 742       // the allocator will have to produce a base by creating a CMOVE of the
 743       // relevant bases.  This puts the allocator in the business of
 744       // manufacturing expensive instructions, generally a bad plan.
 745       // Just Say No to Conditionally-Moved Derived Pointers.
 746       if (tp && tp->offset() != 0)
 747         return nullptr;
 748       cost++;
 749       break;
 750     }
 751     default:
 752       return nullptr;              // In particular, can't do memory or I/O
 753     }
 754     // Add in the cost of any speculative ops
 755     for (uint j = 1; j < region->req(); j++) {
 756       Node *proj = region->in(j);
 757       Node *inp = phi->in(j);
 758       if (inp->isa_InlineType()) {
 759         // TODO 8302217 This prevents PhiNode::push_inline_types_through
 760         return nullptr;
 761       }
 762       if (get_ctrl(inp) == proj) { // Found local op
 763         cost++;
 764         // Check for a chain of dependent ops; these will all become
 765         // speculative in a CMOV.
 766         for (uint k = 1; k < inp->req(); k++)
 767           if (get_ctrl(inp->in(k)) == proj)
 768             cost += ConditionalMoveLimit; // Too much speculative goo
 769       }
 770     }
 771     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 772     // This will likely Split-If, a higher-payoff operation.
 773     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 774       Node* use = phi->fast_out(k);
 775       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 776         cost += ConditionalMoveLimit;
 777       // Is there a use inside the loop?
 778       // Note: check only basic types since CMoveP is pinned.
 779       if (!used_inside_loop && is_java_primitive(bt)) {
 780         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 781         if (r_loop == u_loop || r_loop->is_member(u_loop)) {
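
The cost accounting above feeds the conditional-move transform (see conditional_move in split_if_with_blocks_pre below). A minimal sketch of the shape it targets, in source terms rather than C2 IR:

#include <cstdio>

// Before: a short diamond merges two values through a Region/Phi.
static int diamond(bool c, int a, int b) {
  int r;
  if (c) { r = a; } else { r = b; }  // If/Region/Phi in the IR
  return r;
}

// After the transform: a branch-free select the matcher can emit as a
// CMOV. Both arms become speculative, which is why chains of dependent
// ops are charged ConditionalMoveLimit in the cost loop above.
static int select(bool c, int a, int b) {
  return c ? a : b;  // CMoveI
}

int main() {
  printf("%d %d\n", diamond(true, 1, 2), select(false, 1, 2));
  return 0;
}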

1067             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");
1068 
1069             // Move store out of the loop
1070             _igvn.replace_node(hook, n->in(MemNode::Memory));
1071             _igvn.replace_input_of(n, 0, lca);
1072             set_ctrl_and_loop(n, lca);
1073 
1074             // Disconnect the phi now. An empty phi can confuse other
1075             // optimizations in this pass of loop opts.
1076             if (phi->in(LoopNode::LoopBackControl) == phi) {
1077               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1078               n_loop->_body.yank(phi);
1079             }
1080           }
1081         }
1082       }
1083     }
1084   }
1085 }
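
What the hunk above accomplishes, in source-level terms. This is a hedged sketch: the real transform works on the memory graph and control, not on source code, and Holder is invented for the example.

#include <cstdio>

struct Holder { int f; };

// Before: the store executes on every iteration, keeping a memory Phi
// alive inside the loop.
static void store_in_loop(Holder* o, int n) {
  for (int i = 0; i < n; i++) {
    o->f = i;  // store pinned in the loop body
  }
}

// After: only the final value is stored, past the loop exit. The
// now-empty memory Phi is disconnected, as in the code above.
static void store_after_loop(Holder* o, int n) {
  int last = o->f;
  for (int i = 0; i < n; i++) {
    last = i;
  }
  o->f = last;
}

int main() {
  Holder h{0};
  store_in_loop(&h, 4);
  store_after_loop(&h, 4);
  printf("%d\n", h.f);  // 3
  return 0;
}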
1086 
1087 // If UseArrayMarkWordCheck is enabled, we can't use immutable memory for the flat array check
1088 // because we are loading the mark word, which is mutable. Although the bits we are interested in
1089 // are immutable (we check for markWord::unlocked_value), we need to use raw memory to not break
1090 // anti-dependency analysis. The code below still attempts to move flat array checks out of loops,
1091 // mainly to enable loop unswitching.
1092 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1093   // Skip checks for more than one array
1094   if (n->req() > 3) {
1095     return;
1096   }
1097   Node* mem = n->in(FlatArrayCheckNode::Memory);
1098   Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1099   IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1100   IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1101 
1102   // Check if array is loop invariant
1103   if (!check_loop->is_member(ary_loop)) {
1104     // Walk up memory graph from the check until we leave the loop
1105     VectorSet wq;
1106     wq.set(mem->_idx);
1107     while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1108       if (mem->is_Phi()) {
1109         mem = mem->in(1);
1110       } else if (mem->is_MergeMem()) {
1111         mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1112       } else if (mem->is_Proj()) {
1113         mem = mem->in(0);
1114       } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1115         mem = mem->in(TypeFunc::Memory);
1116       } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1117         mem = mem->in(MemNode::Memory);
1118       } else {
1119 #ifdef ASSERT
1120         mem->dump();
1121 #endif
1122         ShouldNotReachHere();
1123       }
1124       if (wq.test_set(mem->_idx)) {
1125         return;
1126       }
1127     }
1128     // Replace memory input and re-compute ctrl to move the check out of the loop
1129     _igvn.replace_input_of(n, 1, mem);
1130     set_ctrl_and_loop(n, get_early_ctrl(n));
1131     Node* bol = n->unique_out();
1132     set_ctrl_and_loop(bol, get_early_ctrl(bol));
1133   }
1134 }
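
In source terms, the effect is to hoist a loop-invariant flat-array check past the loop's memory Phi. A hedged sketch: isFlatArray below is a hypothetical stand-in for the FlatArrayCheck/Bool pair, not a real HotSpot API.

#include <cstdio>

static bool isFlatArray(const void* a) {  // hypothetical stand-in
  return a != nullptr;                    // real check reads the mark word
}

// Before: the check's memory input is the loop's memory Phi, so it is
// re-evaluated (and blocks unswitching) even though 'a' is invariant.
static int check_in_loop(const int* a, int n) {
  int sum = 0;
  for (int i = 0; i < n; i++) {
    if (isFlatArray(a)) sum += a[i];
  }
  return sum;
}

// After: walking the memory graph up and out of the loop lets the check
// be pinned before the loop, enabling loop unswitching.
static int check_before_loop(const int* a, int n) {
  const bool flat = isFlatArray(a);
  int sum = 0;
  for (int i = 0; i < n; i++) {
    if (flat) sum += a[i];
  }
  return sum;
}

int main() {
  int arr[3] = {1, 2, 3};
  printf("%d %d\n", check_in_loop(arr, 3), check_before_loop(arr, 3));
  return 0;
}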
1135 
1136 //------------------------------split_if_with_blocks_pre-----------------------
1137 // Do the real work in a non-recursive function.  Data nodes want to be
1138 // cloned in the pre-order so they can feed each other nicely.
1139 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1140   // Cloning these guys is unlikely to win
1141   int n_op = n->Opcode();
1142   if (n_op == Op_MergeMem) {
1143     return n;
1144   }
1145   if (n->is_Proj()) {
1146     return n;
1147   }
1148 
1149   if (UseArrayMarkWordCheck && n->isa_FlatArrayCheck()) {
1150     move_flat_array_check_out_of_loop(n);
1151     return n;
1152   }
1153 
1154   // Do not clone-up CmpFXXX variations, as these are always
1155   // followed by a CmpI
1156   if (n->is_Cmp()) {
1157     return n;
1158   }
1159   // Attempt to use a conditional move instead of a phi/branch
1160   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1161     Node *cmov = conditional_move( n );
1162     if (cmov) {
1163       return cmov;
1164     }
1165   }
1166   if (n->is_CFG() || n->is_LoadStore()) {
1167     return n;
1168   }
1169   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1170     if (!C->major_progress()) {   // If chance of no more loop opts...
1171       _igvn._worklist.push(n);  // maybe we'll remove them
1172     }
1173     return n;

1411 
1412   return true;
1413 }
1414 
1415 // Detect if the node is the loop end of an inner strip-mined loop
1416 // Returns null if it is not, otherwise the exit of the outer strip-mined loop
1417 static Node* is_inner_of_stripmined_loop(const Node* out) {
1418   Node* out_le = nullptr;
1419 
1420   if (out->is_CountedLoopEnd()) {
1421       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1422 
1423       if (loop != nullptr && loop->is_strip_mined()) {
1424         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1425       }
1426   }
1427 
1428   return out_le;
1429 }
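
For context, the strip-mined shape this helper recognizes, sketched at the source level. Illustrative only: C2 builds this as an OuterStripMinedLoop wrapping an inner CountedLoop, and the strip width is chosen by the compiler, not by the programmer.

#include <algorithm>
#include <cstdio>

// A counted loop strip-mined by hand: the outer loop runs one "strip" at
// a time, so a safepoint poll between strips keeps the inner counted
// loop body free of safepoint checks.
static long strip_mined_sum(const int* a, int n, int strip) {
  long sum = 0;
  for (int i = 0; i < n; ) {                   // outer strip-mined loop
    const int limit = std::min(i + strip, n);
    for (; i < limit; i++) {                   // inner counted loop
      sum += a[i];
    }
    // safepoint poll would go here, once per strip
  }
  return sum;
}

int main() {
  int a[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  printf("%ld\n", strip_mined_sum(a, 10, 4));  // 45
  return 0;
}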
1430 
1431 bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
1432   // If the CmpP is a subtype check for a value that was just loaded
1433   // from an array, the subtype check guarantees that the value can't
1434   // be stored in a flat array, and the load of the value happens with
1435   // a flat array check, then: push the type check through the phi of
1436   // the flat array check. This needs special logic because the
1437   // subtype check's input is not a phi but a LoadKlass that must first
1438   // be cloned through the phi.
1439   if (n->Opcode() != Op_CmpP) {
1440     return false;
1441   }
1442 
1443   Node* klassptr = n->in(1);
1444   Node* klasscon = n->in(2);
1445 
1446   if (klassptr->is_DecodeNarrowPtr()) {
1447     klassptr = klassptr->in(1);
1448   }
1449 
1450   if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1451     return false;
1452   }
1453 
1454   if (!klasscon->is_Con()) {
1455     return false;
1456   }
1457 
1458   Node* addr = klassptr->in(MemNode::Address);
1459 
1460   if (!addr->is_AddP()) {
1461     return false;
1462   }
1463 
1464   intptr_t offset;
1465   Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1466 
1467   if (obj == nullptr) {
1468     return false;
1469   }
1470 
1471   assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
1472   if (obj->Opcode() == Op_CastPP) {
1473     obj = obj->in(1);
1474   }
1475 
1476   if (!obj->is_Phi()) {
1477     return false;
1478   }
1479 
1480   Node* region = obj->in(0);
1481 
1482   Node* phi = PhiNode::make_blank(region, n->in(1));
1483   for (uint i = 1; i < region->req(); i++) {
1484     Node* in = obj->in(i);
1485     Node* ctrl = region->in(i);
1486     if (addr->in(AddPNode::Base) != obj) {
1487       Node* cast = addr->in(AddPNode::Base);
1488       assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
1489       Node* cast_clone = cast->clone();
1490       cast_clone->set_req(0, ctrl);
1491       cast_clone->set_req(1, in);
1492       register_new_node(cast_clone, ctrl);
1493       const Type* tcast = cast_clone->Value(&_igvn);
1494       _igvn.set_type(cast_clone, tcast);
1495       cast_clone->as_Type()->set_type(tcast);
1496       in = cast_clone;
1497     }
1498     Node* addr_clone = addr->clone();
1499     addr_clone->set_req(AddPNode::Base, in);
1500     addr_clone->set_req(AddPNode::Address, in);
1501     register_new_node(addr_clone, ctrl);
1502     _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1503     Node* klassptr_clone = klassptr->clone();
1504     klassptr_clone->set_req(2, addr_clone);
1505     register_new_node(klassptr_clone, ctrl);
1506     _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1507     if (klassptr != n->in(1)) {
1508       Node* decode = n->in(1);
1509       assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1510       Node* decode_clone = decode->clone();
1511       decode_clone->set_req(1, klassptr_clone);
1512       register_new_node(decode_clone, ctrl);
1513       _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1514       klassptr_clone = decode_clone;
1515     }
1516     phi->set_req(i, klassptr_clone);
1517   }
1518   register_new_node(phi, region);
1519   Node* orig = n->in(1);
1520   _igvn.replace_input_of(n, 1, phi);
1521   split_if_with_blocks_post(n);
1522   if (n->outcnt() != 0) {
1523     _igvn.replace_input_of(n, 1, orig);
1524     _igvn.remove_dead_node(phi);
1525   }
1526   return true;
1527 }
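
A source-level analogue of what the function builds. This is hedged: klass pointers are modeled as plain ints, FLAT_FREE_KLASS is invented for the example, and the fold direction on the known path is illustrative.

#include <cstdio>

static const int FLAT_FREE_KLASS = 42;  // stands in for the constant klass

// Before: one klass compare sits below the Phi of the flat array check,
// so it cannot fold on either path.
static bool check_below_phi(bool flat_path, int klass1, int klass2) {
  int k = flat_path ? klass1 : klass2;  // Phi of the two LoadKlass values
  return k == FLAT_FREE_KLASS;          // CmpP(LoadKlass(...), con)
}

// After: the LoadKlass/AddP chain is cloned per path and a Phi merges the
// cloned loads, so split-if can fold the compare where the klass is known.
static bool check_per_path(bool flat_path, int klass1) {
  return flat_path ? (klass1 == FLAT_FREE_KLASS)
                   : true;  // known path: compare folds to a constant
}

int main() {
  printf("%d %d\n", check_below_phi(true, 42, 7), check_per_path(true, 42));
  return 0;
}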
1528 
1529 //------------------------------split_if_with_blocks_post----------------------
1530 // Do the real work in a non-recursive function.  CFG hackery wants to be
1531 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1532 // info.
1533 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1534 
1535   if (flat_array_element_type_check(n)) {
1536     return;
1537   }
1538 
1539   // Cloning Cmp through Phi's involves the split-if transform.
1540   // FastLock is not used by an If
1541   if (n->is_Cmp() && !n->is_FastLock()) {
1542     Node *n_ctrl = get_ctrl(n);
1543     // Determine if the Node has inputs from some local Phi.
1544     // Returns the block to clone thru.
1545     Node *n_blk = has_local_phi_input(n);
1546     if (n_blk != n_ctrl) {
1547       return;
1548     }
1549 
1550     if (!can_split_if(n_ctrl)) {
1551       return;
1552     }
1553 
1554     if (n->outcnt() != 1) {
1555       return; // Multiple bool's from 1 compare?
1556     }
1557     Node *bol = n->unique_out();
1558     assert(bol->is_Bool(), "expect a bool here");

1652           Node* out_le = is_inner_of_stripmined_loop(dom);
1653           if (out_le != nullptr) {
1654             prevdom = out_le;
1655           }
1656           // Replace the dominated test with an obvious true or false.
1657           // Place it on the IGVN worklist for later cleanup.
1658           C->set_major_progress();
1659           dominated_by(prevdom->as_IfProj(), n->as_If(), false, true);
1660           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1661           return;
1662         }
1663         prevdom = dom;
1664         dom = idom(prevdom);
1665       }
1666     }
1667   }
1668 
1669   try_sink_out_of_loop(n);
1670 
1671   try_move_store_after_loop(n);
1672 
1673   // Remove multiple allocations of the same inline type
1674   if (n->is_InlineType()) {
1675     n->as_InlineType()->remove_redundant_allocations(this);
1676   }
1677 }
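
The dominated_by call above replaces a test with an obvious constant when an identical test dominates it. In source terms, a minimal sketch:

#include <cstdio>

static int before_fold(int x) {
  if (x > 0) {
    if (x > 0) {  // dominated by the identical test above: always true here
      return 1;
    }
    return 2;     // dead once the inner test is replaced with 'true'
  }
  return 3;
}

static int after_fold(int x) {
  if (x > 0) {
    return 1;     // inner test folded; IGVN cleans up the dead branch
  }
  return 3;
}

int main() {
  printf("%d %d\n", before_fold(5), after_fold(5));
  return 0;
}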
1678 
1679 // Transform:
1680 //
1681 // if (some_condition) {
1682 //   // body 1
1683 // } else {
1684 //   // body 2
1685 // }
1686 // if (some_condition) {
1687 //   // body 3
1688 // } else {
1689 //   // body 4
1690 // }
1691 //
1692 // into:
1693 //
1694 //
1695 // if (some_condition) {
1696 //   // body 1

2116   uint i;
2117   for (i = 1; i < phi->req(); i++) {
2118     Node *b = phi->in(i);
2119     if (b->is_Phi()) {
2120       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2121     } else {
2122       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
2123     }
2124   }
2125 
2126   Node* n = phi->in(1);
2127   Node* sample_opaque = nullptr;
2128   Node *sample_bool = nullptr;
2129   if (n->Opcode() == Op_Opaque4) {
2130     sample_opaque = n;
2131     sample_bool = n->in(1);
2132     assert(sample_bool->is_Bool(), "wrong type");
2133   } else {
2134     sample_bool = n;
2135   }
2136   Node* sample_cmp = sample_bool->in(1);
2137   const Type* t = Type::TOP;
2138   const TypePtr* at = nullptr;
2139   if (sample_cmp->is_FlatArrayCheck()) {
2140     // The left input of a FlatArrayCheckNode is memory; set the (adr) type of the phi accordingly
2141     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2142     t = Type::MEMORY;
2143     at = TypeRawPtr::BOTTOM;
2144   }
2145 
2146   // Make Phis to merge the Cmp's inputs.
2147   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2148   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2149   for (i = 1; i < phi->req(); i++) {
2150     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2151     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2152     phi1->set_req(i, n1);
2153     phi2->set_req(i, n2);
2154     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2155     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2156   }
2157   // See if these Phis have been made before.
2158   // Register with optimizer
2159   Node *hit1 = _igvn.hash_find_insert(phi1);
2160   if (hit1) {                   // Hit, toss just made Phi
2161     _igvn.remove_dead_node(phi1); // Remove new phi
2162     assert(hit1->is_Phi(), "" );
2163     phi1 = (PhiNode*)hit1;      // Use existing phi
2164   } else {                      // Miss
2165     _igvn.register_new_node_with_optimizer(phi1);
2166   }
2167   Node *hit2 = _igvn.hash_find_insert(phi2);
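
What the phi1/phi2 construction above achieves, at the source level (a hedged sketch): rather than merging Bool results, the Cmp's inputs are merged, so a single compare survives above the merge point.

#include <cstdio>

// Before: each path computes its own compare and a Phi merges the Bools.
static bool bool_per_path(bool path, int a1, int b1, int a2, int b2) {
  return path ? (a1 < b1) : (a2 < b2);  // Phi(Bool(Cmp), Bool(Cmp))
}

// After clone_iff: phi1 merges the left inputs, phi2 the right inputs,
// and one shared Cmp/Bool is built on top of the two Phis.
static bool shared_cmp(bool path, int a1, int b1, int a2, int b2) {
  const int lhs = path ? a1 : a2;  // phi1
  const int rhs = path ? b1 : b2;  // phi2
  return lhs < rhs;                // single Cmp/Bool above the merge
}

int main() {
  printf("%d %d\n", bool_per_path(true, 1, 2, 5, 3),
                    shared_cmp(true, 1, 2, 5, 3));
  return 0;
}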