< prev index next >

src/hotspot/share/opto/loopopts.cpp

Print this page

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"

  36 #include "opto/loopnode.hpp"
  37 #include "opto/matcher.hpp"
  38 #include "opto/mulnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/opaquenode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/subnode.hpp"
  43 #include "opto/subtypenode.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 //=============================================================================
  47 //------------------------------split_thru_phi---------------------------------
  48 // Split Node 'n' through merge point if there is enough win.
  49 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  50   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  51     // ConvI2L may have type information on it which is unsafe to push up
  52     // so disable this for now
  53     return NULL;
  54   }
  55 
  56   // Splitting range check CastIIs through a loop induction Phi can
  57   // cause new Phis to be created that are left unrelated to the loop
  58   // induction Phi and prevent optimizations (vectorization)
  59   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  60       n->in(1) == region->as_CountedLoop()->phi()) {
  61     return NULL;
  62   }
  63 






  64   // Bail out if 'n' is a Div or Mod node whose zero check was removed earlier (i.e. control is NULL) and its divisor is an induction variable
  65   // phi p of a trip-counted (integer) loop whose inputs could be zero (include zero in their type range). p could have a more precise type
  66   // range that does not necessarily include all values of its inputs. Since each of these inputs will be a divisor of the newly cloned nodes
  67 of 'n', we need to bail out if one of these divisors could be zero (zero in its type range).
  68   if ((n->Opcode() == Op_DivI || n->Opcode() == Op_ModI) && n->in(0) == NULL
  69       && region->is_CountedLoop() && n->in(2) == region->as_CountedLoop()->phi()) {
  70     Node* phi = region->as_CountedLoop()->phi();
  71     for (uint i = 1; i < phi->req(); i++) {
  72       if (_igvn.type(phi->in(i))->filter_speculative(TypeInt::ZERO) != Type::TOP) {
  73         // Zero could be a possible value but we already removed the zero check. Bail out to avoid a possible division by zero at a later point.
  74         return NULL;
  75       }
  76     }
  77   }
  78 
  79   int wins = 0;
  80   assert(!n->is_CFG(), "");
  81   assert(region->is_Region(), "");
  82 
  83   const Type* type = n->bottom_type();

 959             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->Opcode() == Op_NeverBranch, "must not be moved into inner loop");
 960 
 961             // Move store out of the loop
 962             _igvn.replace_node(hook, n->in(MemNode::Memory));
 963             _igvn.replace_input_of(n, 0, lca);
 964             set_ctrl_and_loop(n, lca);
 965 
 966             // Disconnect the phi now. An empty phi can confuse other
 967             // optimizations in this pass of loop opts..
 968             if (phi->in(LoopNode::LoopBackControl) == phi) {
 969               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
 970               n_loop->_body.yank(phi);
 971             }
 972           }
 973         }
 974       }
 975     }
 976   }
 977 }
 978 

















































 979 //------------------------------split_if_with_blocks_pre-----------------------
 980 // Do the real work in a non-recursive function.  Data nodes want to be
 981 // cloned in the pre-order so they can feed each other nicely.
 982 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
 983   // Cloning these guys is unlikely to win
 984   int n_op = n->Opcode();
 985   if (n_op == Op_MergeMem) {
 986     return n;
 987   }
 988   if (n->is_Proj()) {
 989     return n;
 990   }






 991   // Do not clone-up CmpFXXX variations, as these are always
 992   // followed by a CmpI
 993   if (n->is_Cmp()) {
 994     return n;
 995   }
 996   // Attempt to use a conditional move instead of a phi/branch
 997   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
 998     Node *cmov = conditional_move( n );
 999     if (cmov) {
1000       return cmov;
1001     }
1002   }
1003   if (n->is_CFG() || n->is_LoadStore()) {
1004     return n;
1005   }
1006   if (n->is_Opaque1() ||     // Opaque nodes cannot be mod'd
1007       n_op == Op_Opaque2) {
1008     if (!C->major_progress()) {   // If chance of no more loop opts...
1009       _igvn._worklist.push(n);  // maybe we'll remove them
1010     }

1243 
1244   return true;
1245 }
1246 
1247 // Detect if the node is the inner strip-mined loop
1248 // Return: NULL if it's not the case, or the exit of outer strip-mined loop
1249 static Node* is_inner_of_stripmined_loop(const Node* out) {
1250   Node* out_le = NULL;
1251 
1252   if (out->is_CountedLoopEnd()) {
1253       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1254 
1255       if (loop != NULL && loop->is_strip_mined()) {
1256         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1257       }
1258   }
1259 
1260   return out_le;
1261 }
1262 
































































































1263 //------------------------------split_if_with_blocks_post----------------------
1264 // Do the real work in a non-recursive function.  CFG hackery wants to be
1265 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1266 // info.
1267 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1268 




1269   // Cloning Cmp through Phi's involves the split-if transform.
1270   // FastLock is not used by an If
1271   if (n->is_Cmp() && !n->is_FastLock()) {
1272     Node *n_ctrl = get_ctrl(n);
1273     // Determine if the Node has inputs from some local Phi.
1274     // Returns the block to clone thru.
1275     Node *n_blk = has_local_phi_input(n);
1276     if (n_blk != n_ctrl) {
1277       return;
1278     }
1279 
1280     if (!can_split_if(n_ctrl)) {
1281       return;
1282     }
1283 
1284     if (n->outcnt() != 1) {
1285       return; // Multiple bool's from 1 compare?
1286     }
1287     Node *bol = n->unique_out();
1288     assert(bol->is_Bool(), "expect a bool here");

1404           }
1405           // Replace the dominated test with an obvious true or false.
1406           // Place it on the IGVN worklist for later cleanup.
1407           C->set_major_progress();
1408           dominated_by(prevdom, n, false, true);
1409 #ifndef PRODUCT
1410           if( VerifyLoopOptimizations ) verify();
1411 #endif
1412           return;
1413         }
1414         prevdom = dom;
1415         dom = idom(prevdom);
1416       }
1417     }
1418   }
1419 
1420   try_sink_out_of_loop(n);
1421 
1422   try_move_store_after_loop(n);
1423 





1424   // Check for Opaque2's who's loop has disappeared - who's input is in the
1425   // same loop nest as their output.  Remove 'em, they are no longer useful.
1426   if( n_op == Op_Opaque2 &&
1427       n->in(1) != NULL &&
1428       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
1429     _igvn.replace_node( n, n->in(1) );
1430   }
1431 }
1432 
1433 // See if a shared loop-varying computation has no loop-varying uses.
1434 // Happens if something is only used for JVM state in uncommon trap exits,
1435 // like various versions of induction variable+offset.  Clone the
1436 // computation per usage to allow it to sink out of the loop.
1437 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
1438   bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
1439                             n->in(1)->bottom_type()->isa_rawptr() &&
1440                             !n->bottom_type()->isa_rawptr();
1441   if (has_ctrl(n) &&
1442       !n->is_Phi() &&
1443       !n->is_Bool() &&

1712   uint i;
1713   for (i = 1; i < phi->req(); i++) {
1714     Node *b = phi->in(i);
1715     if (b->is_Phi()) {
1716       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi(), loop));
1717     } else {
1718       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
1719     }
1720   }
1721 
1722   Node* n = phi->in(1);
1723   Node* sample_opaque = NULL;
1724   Node *sample_bool = NULL;
1725   if (n->Opcode() == Op_Opaque4) {
1726     sample_opaque = n;
1727     sample_bool = n->in(1);
1728     assert(sample_bool->is_Bool(), "wrong type");
1729   } else {
1730     sample_bool = n;
1731   }
1732   Node *sample_cmp = sample_bool->in(1);








1733 
1734   // Make Phis to merge the Cmp's inputs.
1735   PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
1736   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
1737   for (i = 1; i < phi->req(); i++) {
1738     Node *n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
1739     Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
1740     phi1->set_req(i, n1);
1741     phi2->set_req(i, n2);
1742     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1743     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1744   }
1745   // See if these Phis have been made before.
1746   // Register with optimizer
1747   Node *hit1 = _igvn.hash_find_insert(phi1);
1748   if (hit1) {                   // Hit, toss just made Phi
1749     _igvn.remove_dead_node(phi1); // Remove new phi
1750     assert(hit1->is_Phi(), "" );
1751     phi1 = (PhiNode*)hit1;      // Use existing phi
1752   } else {                      // Miss
1753     _igvn.register_new_node_with_optimizer(phi1);
1754   }
1755   Node *hit2 = _igvn.hash_find_insert(phi2);

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/mulnode.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/subnode.hpp"
  44 #include "opto/subtypenode.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 //=============================================================================
  48 //------------------------------split_thru_phi---------------------------------
  49 // Split Node 'n' through merge point if there is enough win.
  50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  51   if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
  52     // ConvI2L may have type information on it which is unsafe to push up
  53     // so disable this for now
  54     return NULL;
  55   }
  56 
  57   // Splitting range check CastIIs through a loop induction Phi can
  58   // cause new Phis to be created that are left unrelated to the loop
  59   // induction Phi and prevent optimizations (vectorization)
  60   if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
  61       n->in(1) == region->as_CountedLoop()->phi()) {
  62     return NULL;
  63   }
  64 
  65   // Inline types should not be split through Phis because they cannot be merged
  66   // through Phi nodes but each value input needs to be merged individually.
  67   if (n->is_InlineType()) {
  68     return NULL;
  69   }
  70 
  71   // Bail out if 'n' is a Div or Mod node whose zero check was removed earlier (i.e. control is NULL) and its divisor is an induction variable
  72   // phi p of a trip-counted (integer) loop whose inputs could be zero (include zero in their type range). p could have a more precise type
  73   // range that does not necessarily include all values of its inputs. Since each of these inputs will be a divisor of the newly cloned nodes
  74 of 'n', we need to bail out if one of these divisors could be zero (zero in its type range).
  75   if ((n->Opcode() == Op_DivI || n->Opcode() == Op_ModI) && n->in(0) == NULL
  76       && region->is_CountedLoop() && n->in(2) == region->as_CountedLoop()->phi()) {
  77     Node* phi = region->as_CountedLoop()->phi();
  78     for (uint i = 1; i < phi->req(); i++) {
  79       if (_igvn.type(phi->in(i))->filter_speculative(TypeInt::ZERO) != Type::TOP) {
  80         // Zero could be a possible value but we already removed the zero check. Bail out to avoid a possible division by zero at a later point.
  81         return NULL;
  82       }
  83     }
  84   }
  85 
  86   int wins = 0;
  87   assert(!n->is_CFG(), "");
  88   assert(region->is_Region(), "");
  89 
  90   const Type* type = n->bottom_type();

 966             assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->Opcode() == Op_NeverBranch, "must not be moved into inner loop");
 967 
 968             // Move store out of the loop
 969             _igvn.replace_node(hook, n->in(MemNode::Memory));
 970             _igvn.replace_input_of(n, 0, lca);
 971             set_ctrl_and_loop(n, lca);
 972 
 973             // Disconnect the phi now. An empty phi can confuse other
 974             // optimizations in this pass of loop opts..
 975             if (phi->in(LoopNode::LoopBackControl) == phi) {
 976               _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
 977               n_loop->_body.yank(phi);
 978             }
 979           }
 980         }
 981       }
 982     }
 983   }
 984 }
 985 
 986 // If UseArrayMarkWordCheck is enabled, we can't use immutable memory for the flat array check
 987 // because we are loading the mark word which is mutable. Although the bits we are interested in
 988 // are immutable (we check for markWord::unlocked_value), we need to use raw memory to not break
 989 // anti dependency analysis. Below code will attempt to still move flat array checks out of loops,
 990 // mainly to enable loop unswitching.
 991 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
 992   // Skip checks for more than one array
 993   if (n->req() > 3) {
 994     return;
 995   }
 996   Node* mem = n->in(FlatArrayCheckNode::Memory);
 997   Node* array = n->in(FlatArrayCheckNode::Array)->uncast();
 998   IdealLoopTree* check_loop = get_loop(get_ctrl(n));
 999   IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1000 
1001   // Check if array is loop invariant
1002   if (!check_loop->is_member(ary_loop)) {
1003     // Walk up memory graph from the check until we leave the loop
1004     VectorSet wq;
1005     wq.set(mem->_idx);
1006     while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1007       if (mem->is_Phi()) {
1008         mem = mem->in(1);
1009       } else if (mem->is_MergeMem()) {
1010         mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1011       } else if (mem->is_Proj()) {
1012         mem = mem->in(0);
1013       } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1014         mem = mem->in(TypeFunc::Memory);
1015       } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1016         mem = mem->in(MemNode::Memory);
1017       } else {
1018 #ifdef ASSERT
1019         mem->dump();
1020 #endif
1021         ShouldNotReachHere();
1022       }
1023       if (wq.test_set(mem->_idx)) {
1024         return;
1025       }
1026     }
1027     // Replace memory input and re-compute ctrl to move the check out of the loop
1028     _igvn.replace_input_of(n, 1, mem);
1029     set_ctrl_and_loop(n, get_early_ctrl(n));
1030     Node* bol = n->unique_out();
1031     set_ctrl_and_loop(bol, get_early_ctrl(bol));
1032   }
1033 }
1034 
1035 //------------------------------split_if_with_blocks_pre-----------------------
1036 // Do the real work in a non-recursive function.  Data nodes want to be
1037 // cloned in the pre-order so they can feed each other nicely.
1038 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1039   // Cloning these guys is unlikely to win
1040   int n_op = n->Opcode();
1041   if (n_op == Op_MergeMem) {
1042     return n;
1043   }
1044   if (n->is_Proj()) {
1045     return n;
1046   }
1047 
1048   if (UseArrayMarkWordCheck && n->isa_FlatArrayCheck()) {
1049     move_flat_array_check_out_of_loop(n);
1050     return n;
1051   }
1052 
1053   // Do not clone-up CmpFXXX variations, as these are always
1054   // followed by a CmpI
1055   if (n->is_Cmp()) {
1056     return n;
1057   }
1058   // Attempt to use a conditional move instead of a phi/branch
1059   if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1060     Node *cmov = conditional_move( n );
1061     if (cmov) {
1062       return cmov;
1063     }
1064   }
1065   if (n->is_CFG() || n->is_LoadStore()) {
1066     return n;
1067   }
1068   if (n->is_Opaque1() ||     // Opaque nodes cannot be mod'd
1069       n_op == Op_Opaque2) {
1070     if (!C->major_progress()) {   // If chance of no more loop opts...
1071       _igvn._worklist.push(n);  // maybe we'll remove them
1072     }

1305 
1306   return true;
1307 }
1308 
1309 // Detect if the node is the inner strip-mined loop
1310 // Return: NULL if it's not the case, or the exit of outer strip-mined loop
1311 static Node* is_inner_of_stripmined_loop(const Node* out) {
1312   Node* out_le = NULL;
1313 
1314   if (out->is_CountedLoopEnd()) {
1315       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1316 
1317       if (loop != NULL && loop->is_strip_mined()) {
1318         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1319       }
1320   }
1321 
1322   return out_le;
1323 }
1324 
1325 bool PhaseIdealLoop::flatten_array_element_type_check(Node *n) {
1326   // If the CmpP is a subtype check for a value that has just been
1327   // loaded from an array, the subtype check guarantees the value
1328   // can't be stored in a flattened array and the load of the value
1329   // happens with a flattened array check then: push the type check
1330   // through the phi of the flattened array check. This needs special
1331   // logic because the subtype check's input is not a phi but a
1332   // LoadKlass that must first be cloned through the phi.
1333   if (n->Opcode() != Op_CmpP) {
1334     return false;
1335   }
1336 
1337   Node* klassptr = n->in(1);
1338   Node* klasscon = n->in(2);
1339 
1340   if (klassptr->is_DecodeNarrowPtr()) {
1341     klassptr = klassptr->in(1);
1342   }
1343 
1344   if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1345     return false;
1346   }
1347 
1348   if (!klasscon->is_Con()) {
1349     return false;
1350   }
1351 
1352   Node* addr = klassptr->in(MemNode::Address);
1353 
1354   if (!addr->is_AddP()) {
1355     return false;
1356   }
1357 
1358   intptr_t offset;
1359   Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1360 
1361   if (obj == NULL) {
1362     return false;
1363   }
1364 
1365   assert(obj != NULL && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
1366   if (obj->Opcode() == Op_CastPP) {
1367     obj = obj->in(1);
1368   }
1369 
1370   if (!obj->is_Phi()) {
1371     return false;
1372   }
1373 
1374   Node* region = obj->in(0);
1375 
1376   Node* phi = PhiNode::make_blank(region, n->in(1));
1377   for (uint i = 1; i < region->req(); i++) {
1378     Node* in = obj->in(i);
1379     Node* ctrl = region->in(i);
1380     if (addr->in(AddPNode::Base) != obj) {
1381       Node* cast = addr->in(AddPNode::Base);
1382       assert(cast->Opcode() == Op_CastPP && cast->in(0) != NULL, "inconsistent subgraph");
1383       Node* cast_clone = cast->clone();
1384       cast_clone->set_req(0, ctrl);
1385       cast_clone->set_req(1, in);
1386       register_new_node(cast_clone, ctrl);
1387       _igvn.set_type(cast_clone, cast_clone->Value(&_igvn));
1388       in = cast_clone;
1389     }
1390     Node* addr_clone = addr->clone();
1391     addr_clone->set_req(AddPNode::Base, in);
1392     addr_clone->set_req(AddPNode::Address, in);
1393     register_new_node(addr_clone, ctrl);
1394     _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1395     Node* klassptr_clone = klassptr->clone();
1396     klassptr_clone->set_req(2, addr_clone);
1397     register_new_node(klassptr_clone, ctrl);
1398     _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1399     if (klassptr != n->in(1)) {
1400       Node* decode = n->in(1);
1401       assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1402       Node* decode_clone = decode->clone();
1403       decode_clone->set_req(1, klassptr_clone);
1404       register_new_node(decode_clone, ctrl);
1405       _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1406       klassptr_clone = decode_clone;
1407     }
1408     phi->set_req(i, klassptr_clone);
1409   }
1410   register_new_node(phi, region);
1411   Node* orig = n->in(1);
1412   _igvn.replace_input_of(n, 1, phi);
1413   split_if_with_blocks_post(n);
1414   if (n->outcnt() != 0) {
1415     _igvn.replace_input_of(n, 1, orig);
1416     _igvn.remove_dead_node(phi);
1417   }
1418   return true;
1419 }
1420 
1421 //------------------------------split_if_with_blocks_post----------------------
1422 // Do the real work in a non-recursive function.  CFG hackery wants to be
1423 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1424 // info.
1425 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1426 
1427   if (flatten_array_element_type_check(n)) {
1428     return;
1429   }
1430 
1431   // Cloning Cmp through Phi's involves the split-if transform.
1432   // FastLock is not used by an If
1433   if (n->is_Cmp() && !n->is_FastLock()) {
1434     Node *n_ctrl = get_ctrl(n);
1435     // Determine if the Node has inputs from some local Phi.
1436     // Returns the block to clone thru.
1437     Node *n_blk = has_local_phi_input(n);
1438     if (n_blk != n_ctrl) {
1439       return;
1440     }
1441 
1442     if (!can_split_if(n_ctrl)) {
1443       return;
1444     }
1445 
1446     if (n->outcnt() != 1) {
1447       return; // Multiple bool's from 1 compare?
1448     }
1449     Node *bol = n->unique_out();
1450     assert(bol->is_Bool(), "expect a bool here");

1566           }
1567           // Replace the dominated test with an obvious true or false.
1568           // Place it on the IGVN worklist for later cleanup.
1569           C->set_major_progress();
1570           dominated_by(prevdom, n, false, true);
1571 #ifndef PRODUCT
1572           if( VerifyLoopOptimizations ) verify();
1573 #endif
1574           return;
1575         }
1576         prevdom = dom;
1577         dom = idom(prevdom);
1578       }
1579     }
1580   }
1581 
1582   try_sink_out_of_loop(n);
1583 
1584   try_move_store_after_loop(n);
1585 
1586   // Remove multiple allocations of the same inline type
1587   if (n->is_InlineType()) {
1588     n->as_InlineType()->remove_redundant_allocations(&_igvn, this);
1589   }
1590 
1591   // Check for Opaque2's who's loop has disappeared - who's input is in the
1592   // same loop nest as their output.  Remove 'em, they are no longer useful.
1593   if( n_op == Op_Opaque2 &&
1594       n->in(1) != NULL &&
1595       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
1596     _igvn.replace_node( n, n->in(1) );
1597   }
1598 }
1599 
1600 // See if a shared loop-varying computation has no loop-varying uses.
1601 // Happens if something is only used for JVM state in uncommon trap exits,
1602 // like various versions of induction variable+offset.  Clone the
1603 // computation per usage to allow it to sink out of the loop.
1604 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
1605   bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
1606                             n->in(1)->bottom_type()->isa_rawptr() &&
1607                             !n->bottom_type()->isa_rawptr();
1608   if (has_ctrl(n) &&
1609       !n->is_Phi() &&
1610       !n->is_Bool() &&

1879   uint i;
1880   for (i = 1; i < phi->req(); i++) {
1881     Node *b = phi->in(i);
1882     if (b->is_Phi()) {
1883       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi(), loop));
1884     } else {
1885       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
1886     }
1887   }
1888 
1889   Node* n = phi->in(1);
1890   Node* sample_opaque = NULL;
1891   Node *sample_bool = NULL;
1892   if (n->Opcode() == Op_Opaque4) {
1893     sample_opaque = n;
1894     sample_bool = n->in(1);
1895     assert(sample_bool->is_Bool(), "wrong type");
1896   } else {
1897     sample_bool = n;
1898   }
1899   Node* sample_cmp = sample_bool->in(1);
1900   const Type* t = Type::TOP;
1901   const TypePtr* at = NULL;
1902   if (sample_cmp->is_FlatArrayCheck()) {
1903     // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
1904     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
1905     t = Type::MEMORY;
1906     at = TypeRawPtr::BOTTOM;
1907   }
1908 
1909   // Make Phis to merge the Cmp's inputs.
1910   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
1911   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
1912   for (i = 1; i < phi->req(); i++) {
1913     Node *n1 = sample_opaque == NULL ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
1914     Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
1915     phi1->set_req(i, n1);
1916     phi2->set_req(i, n2);
1917     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
1918     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
1919   }
1920   // See if these Phis have been made before.
1921   // Register with optimizer
1922   Node *hit1 = _igvn.hash_find_insert(phi1);
1923   if (hit1) {                   // Hit, toss just made Phi
1924     _igvn.remove_dead_node(phi1); // Remove new phi
1925     assert(hit1->is_Phi(), "" );
1926     phi1 = (PhiNode*)hit1;      // Use existing phi
1927   } else {                      // Miss
1928     _igvn.register_new_node_with_optimizer(phi1);
1929   }
1930   Node *hit2 = _igvn.hash_find_insert(phi2);
< prev index next >