src/hotspot/share/opto/loopopts.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/matcher.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "opto/subtypenode.hpp"
  44 #include "opto/vectornode.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 //=============================================================================
  48 //------------------------------split_thru_phi---------------------------------
  49 // Split Node 'n' through merge point if there is enough win.
  50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  51   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  52     // ConvI2L may have type information on it which is unsafe to push up,
  53     // so disable this for now
  54     return nullptr;
  55   }
  56 
  57   // Splitting range check CastIIs through a loop induction Phi can
  58   // cause new Phis to be created that are left unrelated to the loop
  59   // induction Phi and prevent optimizations (vectorization)
  60   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  61       n->in(1) == region->as_CountedLoop()->phi()) {
  62     return nullptr;
  63   }
  64 
  65   if (cannot_split_division(n, region)) {
  66     return nullptr;
  67   }
  68 
  69   int wins = 0;
  70   assert(!n->is_CFG(), "");
  71   assert(region->is_Region(), "");
  72 
  73   const Type* type = n->bottom_type();
  74   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  75   Node* phi;
  76   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  77     int iid    = t_oop->instance_id();
  78     int index  = C->get_alias_index(t_oop);
  79     int offset = t_oop->offset();
  80     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  81   } else {
  82     phi = PhiNode::make_blank(region, n);
  83   }
  84   uint old_unique = C->unique();

 680       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 681       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 682       // have a Phi for the base here that we convert to a CMOVE all is well
 683       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 684       // the allocator will have to produce a base by creating a CMOVE of the
 685       // relevant bases.  This puts the allocator in the business of
 686       // manufacturing expensive instructions, generally a bad plan.
 687       // Just Say No to Conditionally-Moved Derived Pointers.
 688       if (tp && tp->offset() != 0)
 689         return nullptr;
 690       cost++;
 691       break;
 692     }
 693     default:
 694       return nullptr;              // In particular, can't do memory or I/O
 695     }
 696     // Add in the cost of any speculative ops
 697     for (uint j = 1; j < region->req(); j++) {
 698       Node *proj = region->in(j);
 699       Node *inp = phi->in(j);
 700       if (get_ctrl(inp) == proj) { // Found local op
 701         cost++;
 702         // Check for a chain of dependent ops; these will all become
 703         // speculative in a CMOV.
 704         for (uint k = 1; k < inp->req(); k++)
 705           if (get_ctrl(inp->in(k)) == proj)
 706             cost += ConditionalMoveLimit; // Too much speculative goo
 707       }
 708     }
 709     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 710     // This will likely Split-If, a higher-payoff operation.
 711     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 712       Node* use = phi->fast_out(k);
 713       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 714         cost += ConditionalMoveLimit;
 715       // Is there a use inside the loop?
 716       // Note: check only basic types since CMoveP is pinned.
 717       if (!used_inside_loop && is_java_primitive(bt)) {
 718         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 719         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

1005             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");
1006 
1007             // Move store out of the loop
1008             _igvn.replace_node(hook, n->in(MemNode::Memory));
1009             _igvn.replace_input_of(n, 0, lca);
1010             set_ctrl_and_loop(n, lca);
1011 
1012             // Disconnect the phi now. An empty phi can confuse other
 1013             // optimizations in this pass of loop opts.
1014             if (phi->in(LoopNode::LoopBackControl) == phi) {
1015               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1016               n_loop->_body.yank(phi);
1017             }
1018           }
1019         }
1020       }
1021     }
1022   }
1023 }
1024 
1025 //------------------------------split_if_with_blocks_pre-----------------------
1026 // Do the real work in a non-recursive function.  Data nodes want to be
1027 // cloned in the pre-order so they can feed each other nicely.
1028 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1029   // Cloning these guys is unlikely to win
1030   int n_op = n->Opcode();
1031   if (n_op == Op_MergeMem) {
1032     return n;
1033   }
1034   if (n->is_Proj()) {
1035     return n;
1036   }
1037   // Do not clone-up CmpFXXX variations, as these are always
1038   // followed by a CmpI
1039   if (n->is_Cmp()) {
1040     return n;
1041   }
1042   // Attempt to use a conditional move instead of a phi/branch
1043   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1044     Node *cmov = conditional_move( n );
1045     if (cmov) {
1046       return cmov;
1047     }
1048   }
1049   if (n->is_CFG() || n->is_LoadStore()) {
1050     return n;
1051   }
1052   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1053     if (!C->major_progress()) {   // If chance of no more loop opts...
1054       _igvn._worklist.push(n);  // maybe we'll remove them
1055     }
1056     return n;

1294 
1295   return true;
1296 }
1297 
 1298 // Detect if the node is the loop-end test of an inner strip-mined loop.
 1299 // Returns null if it is not, otherwise the exit of the outer strip-mined loop.
1300 static Node* is_inner_of_stripmined_loop(const Node* out) {
1301   Node* out_le = nullptr;
1302 
 1303   if (out->is_CountedLoopEnd()) {
 1304     const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
 1305 
 1306     if (loop != nullptr && loop->is_strip_mined()) {
 1307       out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
 1308     }
 1309   }
1310 
1311   return out_le;
1312 }
1313 
1314 //------------------------------split_if_with_blocks_post----------------------
1315 // Do the real work in a non-recursive function.  CFG hackery wants to be
1316 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1317 // info.
1318 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1319 
1320   // Cloning Cmp through Phi's involves the split-if transform.
1321   // FastLock is not used by an If
1322   if (n->is_Cmp() && !n->is_FastLock()) {
1323     Node *n_ctrl = get_ctrl(n);
1324     // Determine if the Node has inputs from some local Phi.
1325     // Returns the block to clone thru.
1326     Node *n_blk = has_local_phi_input(n);
1327     if (n_blk != n_ctrl) {
1328       return;
1329     }
1330 
1331     if (!can_split_if(n_ctrl)) {
1332       return;
1333     }
1334 
1335     if (n->outcnt() != 1) {
1336       return; // Multiple bool's from 1 compare?
1337     }
1338     Node *bol = n->unique_out();
1339     assert(bol->is_Bool(), "expect a bool here");

1432           Node* out_le = is_inner_of_stripmined_loop(dom);
1433           if (out_le != nullptr) {
1434             prevdom = out_le;
1435           }
1436           // Replace the dominated test with an obvious true or false.
1437           // Place it on the IGVN worklist for later cleanup.
1438           C->set_major_progress();
1439           dominated_by(prevdom->as_IfProj(), n->as_If(), false, true);
1440           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1441           return;
1442         }
1443         prevdom = dom;
1444         dom = idom(prevdom);
1445       }
1446     }
1447   }
1448 
1449   try_sink_out_of_loop(n);
1450 
1451   try_move_store_after_loop(n);
1452 }
1453 
1454 // Transform:
1455 //
1456 // if (some_condition) {
1457 //   // body 1
1458 // } else {
1459 //   // body 2
1460 // }
1461 // if (some_condition) {
1462 //   // body 3
1463 // } else {
1464 //   // body 4
1465 // }
1466 //
1467 // into:
1468 //
1469 //
1470 // if (some_condition) {
1471 //   // body 1

1859   uint i;
1860   for (i = 1; i < phi->req(); i++) {
1861     Node *b = phi->in(i);
1862     if (b->is_Phi()) {
1863       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
1864     } else {
1865       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
1866     }
1867   }
1868 
1869   Node* n = phi->in(1);
1870   Node* sample_opaque = nullptr;
1871   Node *sample_bool = nullptr;
1872   if (n->Opcode() == Op_Opaque4) {
1873     sample_opaque = n;
1874     sample_bool = n->in(1);
1875     assert(sample_bool->is_Bool(), "wrong type");
1876   } else {
1877     sample_bool = n;
1878   }
1879   Node *sample_cmp = sample_bool->in(1);
1880 
1881   // Make Phis to merge the Cmp's inputs.
1882   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
1883   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
1884   for (i = 1; i < phi->req(); i++) {
1885     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
1886     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
1887     phi1->set_req(i, n1);
1888     phi2->set_req(i, n2);
1889     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1890     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1891   }
1892   // See if these Phis have been made before.
1893   // Register with optimizer
1894   Node *hit1 = _igvn.hash_find_insert(phi1);
1895   if (hit1) {                   // Hit, toss just made Phi
1896     _igvn.remove_dead_node(phi1); // Remove new phi
1897     assert(hit1->is_Phi(), "" );
1898     phi1 = (PhiNode*)hit1;      // Use existing phi
1899   } else {                      // Miss
1900     _igvn.register_new_node_with_optimizer(phi1);
1901   }
1902   Node *hit2 = _igvn.hash_find_insert(phi2);

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/mulnode.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/subnode.hpp"
  44 #include "opto/subtypenode.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
  51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  52   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  53     // ConvI2L may have type information on it which is unsafe to push up,
  54     // so disable this for now
  55     return nullptr;
  56   }
  57 
  58   // Splitting range check CastIIs through a loop induction Phi can
  59   // cause new Phis to be created that are left unrelated to the loop
  60   // induction Phi and prevent optimizations (vectorization)
  61   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  62       n->in(1) == region->as_CountedLoop()->phi()) {
  63     return nullptr;
  64   }
  65 
  66   // Inline types should not be split through Phis because they cannot be merged
  67   // as a whole through a Phi node; each value input needs to be merged individually.
  68   if (n->is_InlineType()) {
  69     return nullptr;
  70   }
  71 
  72   if (cannot_split_division(n, region)) {
  73     return nullptr;
  74   }
  75 
  76   int wins = 0;
  77   assert(!n->is_CFG(), "");
  78   assert(region->is_Region(), "");
  79 
  80   const Type* type = n->bottom_type();
  81   const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  82   Node* phi;
  83   if (t_oop != nullptr && t_oop->is_known_instance_field()) {
  84     int iid    = t_oop->instance_id();
  85     int index  = C->get_alias_index(t_oop);
  86     int offset = t_oop->offset();
  87     phi = new PhiNode(region, type, nullptr, iid, index, offset);
  88   } else {
  89     phi = PhiNode::make_blank(region, n);
  90   }
  91   uint old_unique = C->unique();

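Conceptually, split_thru_phi clones the operation hanging off a Phi into each predecessor path, so the merge happens on the results instead of the inputs. A minimal before/after sketch in plain C++ (a hypothetical scalar example, not Ideal IR):

// Before: one add hangs off the value merged at the region.
int merged_then_op(bool c, int a, int b) {
  int merged = c ? a : b;   // the Phi at the merge point
  return merged + 42;       // the node 'n' fed by the Phi
}

// After split_thru_phi: the op is cloned into each path and the
// Phi merges the results, often letting each copy fold on its path.
int op_then_merged(bool c, int a, int b) {
  return c ? (a + 42) : (b + 42);
}

The policy/wins accounting in the hunk above commits the transform only when enough of the cloned copies are expected to simplify.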
 687       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 688       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 689       // have a Phi for the base here that we convert to a CMOVE all is well
 690       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 691       // the allocator will have to produce a base by creating a CMOVE of the
 692       // relevant bases.  This puts the allocator in the business of
 693       // manufacturing expensive instructions, generally a bad plan.
 694       // Just Say No to Conditionally-Moved Derived Pointers.
 695       if (tp && tp->offset() != 0)
 696         return nullptr;
 697       cost++;
 698       break;
 699     }
 700     default:
 701       return nullptr;              // In particular, can't do memory or I/O
 702     }
 703     // Add in the cost of any speculative ops
 704     for (uint j = 1; j < region->req(); j++) {
 705       Node *proj = region->in(j);
 706       Node *inp = phi->in(j);
 707       if (inp->isa_InlineType()) {
 708         // TODO 8302217 This prevents PhiNode::push_inline_types_through
 709         return nullptr;
 710       }
 711       if (get_ctrl(inp) == proj) { // Found local op
 712         cost++;
 713         // Check for a chain of dependent ops; these will all become
 714         // speculative in a CMOV.
 715         for (uint k = 1; k < inp->req(); k++)
 716           if (get_ctrl(inp->in(k)) == proj)
 717             cost += ConditionalMoveLimit; // Too much speculative goo
 718       }
 719     }
 720     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 721     // This will likely Split-If, a higher-payoff operation.
 722     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 723       Node* use = phi->fast_out(k);
 724       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 725         cost += ConditionalMoveLimit;
 726       // Is there a use inside the loop?
 727       // Note: check only basic types since CMoveP is pinned.
 728       if (!used_inside_loop && is_java_primitive(bt)) {
 729         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 730         if (r_loop == u_loop || r_loop->is_member(u_loop)) {

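The speculative-cost accounting above reflects that a conditional move evaluates both arms unconditionally, so any op local to one arm becomes unconditional work. A rough C++ illustration (hypothetical example, not HotSpot code):

// Branchy form: p * 3 executes only when c is true.
int branchy(bool c, int p, int q) {
  if (c) return p * 3;
  return q;
}

// CMOV form: both arms are computed, then one result is selected,
// so the multiply becomes speculative work on every execution.
int cmov_like(bool c, int p, int q) {
  int t = p * 3;           // always executed after the transform
  int f = q;
  return c ? t : f;        // single select instead of a branch
}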
1016             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");
1017 
1018             // Move store out of the loop
1019             _igvn.replace_node(hook, n->in(MemNode::Memory));
1020             _igvn.replace_input_of(n, 0, lca);
1021             set_ctrl_and_loop(n, lca);
1022 
1023             // Disconnect the phi now. An empty phi can confuse other
 1024             // optimizations in this pass of loop opts.
1025             if (phi->in(LoopNode::LoopBackControl) == phi) {
1026               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1027               n_loop->_body.yank(phi);
1028             }
1029           }
1030         }
1031       }
1032     }
1033   }
1034 }
1035 
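For context on the store-sinking hunk above: a store that runs on every iteration, but whose final value is the only one observable after the loop, is replaced by a single store past the loop exit, with the value carried by the loop's phi. A hedged source-level sketch in C++ (Holder and field are made up for illustration):

struct Holder { int field; };

// Before: the store to h->field happens on every iteration.
void store_in_loop(Holder* h, int n) {
  for (int i = 0; i < n; i++) {
    h->field = i;
  }
}

// After the transform: the loop carries the value; one store follows
// the loop (here done unconditionally, as a simplification).
void store_after_loop(Holder* h, int n) {
  int last = h->field;
  for (int i = 0; i < n; i++) {
    last = i;
  }
  h->field = last;
}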
 1036 // If UseArrayMarkWordCheck is enabled, we can't use immutable memory for the flat array check
 1037 // because we are loading the mark word, which is mutable. Although the bits we are interested in
 1038 // are immutable (we check for markWord::unlocked_value), we need to use raw memory so as not to
 1039 // break anti-dependency analysis. The code below still attempts to move flat array checks out of
 1040 // loops, mainly to enable loop unswitching.
1041 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1042   // Skip checks for more than one array
1043   if (n->req() > 3) {
1044     return;
1045   }
1046   Node* mem = n->in(FlatArrayCheckNode::Memory);
1047   Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1048   IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1049   IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1050 
1051   // Check if array is loop invariant
1052   if (!check_loop->is_member(ary_loop)) {
1053     // Walk up memory graph from the check until we leave the loop
1054     VectorSet wq;
1055     wq.set(mem->_idx);
1056     while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1057       if (mem->is_Phi()) {
1058         mem = mem->in(1);
1059       } else if (mem->is_MergeMem()) {
1060         mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1061       } else if (mem->is_Proj()) {
1062         mem = mem->in(0);
1063       } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1064         mem = mem->in(TypeFunc::Memory);
1065       } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1066         mem = mem->in(MemNode::Memory);
1067       } else {
1068 #ifdef ASSERT
1069         mem->dump();
1070 #endif
1071         ShouldNotReachHere();
1072       }
1073       if (wq.test_set(mem->_idx)) {
1074         return;
1075       }
1076     }
1077     // Replace memory input and re-compute ctrl to move the check out of the loop
1078     _igvn.replace_input_of(n, 1, mem);
1079     set_ctrl_and_loop(n, get_early_ctrl(n));
1080     Node* bol = n->unique_out();
1081     set_ctrl_and_loop(bol, get_early_ctrl(bol));
1082   }
1083 }
1084 
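The walk in move_flat_array_check_out_of_loop follows one memory edge at a time until control leaves the loop, using a VectorSet to bail out if a node repeats. A minimal standalone sketch of that traversal pattern (the types are hypothetical stand-ins, not HotSpot's):

#include <unordered_set>

struct MemState {
  int idx;            // unique node id, like Node::_idx
  MemState* up;       // the single memory input we would follow
  bool inside_loop;   // stand-in for check_loop->is_member(...)
};

// Walk up the memory chain until we are outside the loop; return null
// if we revisit a node (a cycle means there is no safe hoist point).
MemState* first_mem_outside_loop(MemState* mem) {
  std::unordered_set<int> visited;
  visited.insert(mem->idx);
  while (mem->inside_loop) {
    mem = mem->up;
    if (!visited.insert(mem->idx).second) {
      return nullptr;  // analogous to wq.test_set(mem->_idx) returning true
    }
  }
  return mem;          // first memory state outside the loop
}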
1085 //------------------------------split_if_with_blocks_pre-----------------------
1086 // Do the real work in a non-recursive function.  Data nodes want to be
1087 // cloned in the pre-order so they can feed each other nicely.
1088 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1089   // Cloning these guys is unlikely to win
1090   int n_op = n->Opcode();
1091   if (n_op == Op_MergeMem) {
1092     return n;
1093   }
1094   if (n->is_Proj()) {
1095     return n;
1096   }
1097 
1098   if (UseArrayMarkWordCheck && n->isa_FlatArrayCheck()) {
1099     move_flat_array_check_out_of_loop(n);
1100     return n;
1101   }
1102 
1103   // Do not clone-up CmpFXXX variations, as these are always
1104   // followed by a CmpI
1105   if (n->is_Cmp()) {
1106     return n;
1107   }
1108   // Attempt to use a conditional move instead of a phi/branch
1109   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1110     Node *cmov = conditional_move( n );
1111     if (cmov) {
1112       return cmov;
1113     }
1114   }
1115   if (n->is_CFG() || n->is_LoadStore()) {
1116     return n;
1117   }
1118   if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1119     if (!C->major_progress()) {   // If chance of no more loop opts...
1120       _igvn._worklist.push(n);  // maybe we'll remove them
1121     }
1122     return n;

1360 
1361   return true;
1362 }
1363 
 1364 // Detect if the node is the loop-end test of an inner strip-mined loop.
 1365 // Returns null if it is not, otherwise the exit of the outer strip-mined loop.
1366 static Node* is_inner_of_stripmined_loop(const Node* out) {
1367   Node* out_le = nullptr;
1368 
 1369   if (out->is_CountedLoopEnd()) {
 1370     const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
 1371 
 1372     if (loop != nullptr && loop->is_strip_mined()) {
 1373       out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
 1374     }
 1375   }
1376 
1377   return out_le;
1378 }
1379 
1380 bool PhaseIdealLoop::flatten_array_element_type_check(Node *n) {
 1381   // If the CmpP is a subtype check for a value that has just been
 1382   // loaded from an array, the subtype check guarantees that the value
 1383   // can't be stored in a flat array, and the load of the value
 1384   // happens with a flat array check, then push the type check
 1385   // through the phi of the flat array check. This needs special
 1386   // logic because the subtype check's input is not a phi but a
 1387   // LoadKlass that must first be cloned through the phi.
1388   if (n->Opcode() != Op_CmpP) {
1389     return false;
1390   }
1391 
1392   Node* klassptr = n->in(1);
1393   Node* klasscon = n->in(2);
1394 
1395   if (klassptr->is_DecodeNarrowPtr()) {
1396     klassptr = klassptr->in(1);
1397   }
1398 
1399   if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1400     return false;
1401   }
1402 
1403   if (!klasscon->is_Con()) {
1404     return false;
1405   }
1406 
1407   Node* addr = klassptr->in(MemNode::Address);
1408 
1409   if (!addr->is_AddP()) {
1410     return false;
1411   }
1412 
1413   intptr_t offset;
1414   Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1415 
 1416   if (obj == nullptr) {
 1417     return false;
 1418   }
 1419 
 1420   assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
1421   if (obj->Opcode() == Op_CastPP) {
1422     obj = obj->in(1);
1423   }
1424 
1425   if (!obj->is_Phi()) {
1426     return false;
1427   }
1428 
1429   Node* region = obj->in(0);
1430 
1431   Node* phi = PhiNode::make_blank(region, n->in(1));
1432   for (uint i = 1; i < region->req(); i++) {
1433     Node* in = obj->in(i);
1434     Node* ctrl = region->in(i);
1435     if (addr->in(AddPNode::Base) != obj) {
1436       Node* cast = addr->in(AddPNode::Base);
 1437       assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
1438       Node* cast_clone = cast->clone();
1439       cast_clone->set_req(0, ctrl);
1440       cast_clone->set_req(1, in);
1441       register_new_node(cast_clone, ctrl);
1442       const Type* tcast = cast_clone->Value(&_igvn);
1443       _igvn.set_type(cast_clone, tcast);
1444       cast_clone->as_Type()->set_type(tcast);
1445       in = cast_clone;
1446     }
1447     Node* addr_clone = addr->clone();
1448     addr_clone->set_req(AddPNode::Base, in);
1449     addr_clone->set_req(AddPNode::Address, in);
1450     register_new_node(addr_clone, ctrl);
1451     _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1452     Node* klassptr_clone = klassptr->clone();
1453     klassptr_clone->set_req(2, addr_clone);
1454     register_new_node(klassptr_clone, ctrl);
1455     _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1456     if (klassptr != n->in(1)) {
1457       Node* decode = n->in(1);
1458       assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1459       Node* decode_clone = decode->clone();
1460       decode_clone->set_req(1, klassptr_clone);
1461       register_new_node(decode_clone, ctrl);
1462       _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1463       klassptr_clone = decode_clone;
1464     }
1465     phi->set_req(i, klassptr_clone);
1466   }
1467   register_new_node(phi, region);
1468   Node* orig = n->in(1);
1469   _igvn.replace_input_of(n, 1, phi);
1470   split_if_with_blocks_post(n);
1471   if (n->outcnt() != 0) {
1472     _igvn.replace_input_of(n, 1, orig);
1473     _igvn.remove_dead_node(phi);
1474   }
1475   return true;
1476 }
1477 
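To see the shape flatten_array_element_type_check produces: rather than loading the klass of the merged object, the load (and any cast/decode above it) is cloned into each path and a new Phi merges the klass pointers, so the CmpP sees a Phi it can split through. A toy C++ analogue (fields and helpers invented for illustration):

struct Obj { intptr_t klass_id; };  // stand-in for the klass word

// Before: the klass load happens after the merge, hiding the Phi
// of the objects from the compare.
bool check_before(bool c, Obj* a, Obj* b, intptr_t con) {
  Obj* merged = c ? a : b;          // Phi of the two objects
  return merged->klass_id == con;   // LoadKlass + CmpP on the merged value
}

// After: the klass load is cloned into each path; the compare's input
// is now a merge of klass pointers, i.e. a Phi the optimizer can use.
bool check_after(bool c, Obj* a, Obj* b, intptr_t con) {
  intptr_t ka = a->klass_id;        // cloned LoadKlass, path 1
  intptr_t kb = b->klass_id;        // cloned LoadKlass, path 2
  return (c ? ka : kb) == con;
}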
1478 //------------------------------split_if_with_blocks_post----------------------
1479 // Do the real work in a non-recursive function.  CFG hackery wants to be
1480 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1481 // info.
1482 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1483 
1484   if (flatten_array_element_type_check(n)) {
1485     return;
1486   }
1487 
1488   // Cloning Cmp through Phi's involves the split-if transform.
1489   // FastLock is not used by an If
1490   if (n->is_Cmp() && !n->is_FastLock()) {
1491     Node *n_ctrl = get_ctrl(n);
1492     // Determine if the Node has inputs from some local Phi.
1493     // Returns the block to clone thru.
1494     Node *n_blk = has_local_phi_input(n);
1495     if (n_blk != n_ctrl) {
1496       return;
1497     }
1498 
1499     if (!can_split_if(n_ctrl)) {
1500       return;
1501     }
1502 
1503     if (n->outcnt() != 1) {
1504       return; // Multiple bool's from 1 compare?
1505     }
1506     Node *bol = n->unique_out();
1507     assert(bol->is_Bool(), "expect a bool here");

1600           Node* out_le = is_inner_of_stripmined_loop(dom);
1601           if (out_le != nullptr) {
1602             prevdom = out_le;
1603           }
1604           // Replace the dominated test with an obvious true or false.
1605           // Place it on the IGVN worklist for later cleanup.
1606           C->set_major_progress();
1607           dominated_by(prevdom->as_IfProj(), n->as_If(), false, true);
1608           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1609           return;
1610         }
1611         prevdom = dom;
1612         dom = idom(prevdom);
1613       }
1614     }
1615   }
1616 
1617   try_sink_out_of_loop(n);
1618 
1619   try_move_store_after_loop(n);
1620 
1621   // Remove multiple allocations of the same inline type
1622   if (n->is_InlineType()) {
1623     n->as_InlineType()->remove_redundant_allocations(this);
1624   }
1625 }
1626 
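The dominated_by call in the hunk above replaces a test that is dominated by an identical test with an obvious constant, leaving IGVN to clean up the dead branch. In source-level terms (a hedged illustration, not the IR):

// Before: the inner test repeats a condition already proven on this path.
int before(int x) {
  if (x > 0) {
    if (x > 0) {   // dominated by the identical test above
      return 1;
    }
    return 2;      // unreachable once the test folds
  }
  return 3;
}

// After dominated_by: the inner test folds to the constant 'true'.
int after(int x) {
  if (x > 0) {
    return 1;
  }
  return 3;
}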
1627 // Transform:
1628 //
1629 // if (some_condition) {
1630 //   // body 1
1631 // } else {
1632 //   // body 2
1633 // }
1634 // if (some_condition) {
1635 //   // body 3
1636 // } else {
1637 //   // body 4
1638 // }
1639 //
1640 // into:
1641 //
1642 //
1643 // if (some_condition) {
1644 //   // body 1

2032   uint i;
2033   for (i = 1; i < phi->req(); i++) {
2034     Node *b = phi->in(i);
2035     if (b->is_Phi()) {
2036       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2037     } else {
2038       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
2039     }
2040   }
2041 
2042   Node* n = phi->in(1);
2043   Node* sample_opaque = nullptr;
2044   Node *sample_bool = nullptr;
2045   if (n->Opcode() == Op_Opaque4) {
2046     sample_opaque = n;
2047     sample_bool = n->in(1);
2048     assert(sample_bool->is_Bool(), "wrong type");
2049   } else {
2050     sample_bool = n;
2051   }
2052   Node* sample_cmp = sample_bool->in(1);
2053   const Type* t = Type::TOP;
 2054   const TypePtr* at = nullptr;
 2055   if (sample_cmp->is_FlatArrayCheck()) {
 2056     // The left input of a FlatArrayCheckNode is memory; set the (adr) type of the phi accordingly
2057     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2058     t = Type::MEMORY;
2059     at = TypeRawPtr::BOTTOM;
2060   }
2061 
2062   // Make Phis to merge the Cmp's inputs.
2063   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2064   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2065   for (i = 1; i < phi->req(); i++) {
2066     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2067     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2068     phi1->set_req(i, n1);
2069     phi2->set_req(i, n2);
2070     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2071     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2072   }
2073   // See if these Phis have been made before.
2074   // Register with optimizer
2075   Node *hit1 = _igvn.hash_find_insert(phi1);
2076   if (hit1) {                   // Hit, toss just made Phi
2077     _igvn.remove_dead_node(phi1); // Remove new phi
2078     assert(hit1->is_Phi(), "" );
2079     phi1 = (PhiNode*)hit1;      // Use existing phi
2080   } else {                      // Miss
2081     _igvn.register_new_node_with_optimizer(phi1);
2082   }
2083   Node *hit2 = _igvn.hash_find_insert(phi2);
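The hash_find_insert pattern at the end of the hunk is plain value numbering: probe for an existing equivalent node before committing the freshly made one. A generic sketch of the idea (the container and key are stand-ins for the IGVN hash table, not its real API):

#include <string>
#include <unordered_map>

struct ToyNode { std::string key; };  // key plays the role of node identity

// Probe the table for an equivalent node; on a hit, discard the freshly
// made node and reuse the match, otherwise register the new node.
ToyNode* find_or_register(std::unordered_map<std::string, ToyNode*>& table,
                          ToyNode* cand) {
  auto it = table.find(cand->key);
  if (it != table.end()) {           // hit: toss the just-made node
    delete cand;
    return it->second;               // use the existing node
  }
  table.emplace(cand->key, cand);    // miss: register the new node
  return cand;
}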