src/hotspot/share/opto/phaseX.cpp

1208     loop_count++;
1209   }
1210   NOT_PRODUCT(verify_PhaseIterGVN();)
1211 }
1212 
1213 
1214 /**
1215  * Register a new node with the optimizer. Update the types array and the
1216  * def-use info, and put the node on the worklist.
1217  */
1218 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1219   set_type_bottom(n);
1220   _worklist.push(n);
1221   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1222   return n;
1223 }
1224 
1225 //------------------------------transform--------------------------------------
1226 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1227 Node *PhaseIterGVN::transform( Node *n ) {
1228   if (_delay_transform) {
1229     // Register the node but don't optimize for now
1230     register_new_node_with_optimizer(n);
1231     return n;
1232   }
1233 
1234   // If brand new node, make space in type array, and give it a type.
1235   ensure_type_or_null(n);
1236   if (type_or_null(n) == NULL) {
1237     set_type_bottom(n);
1238   }
1239 
1240   return transform_old(n);
1241 }
1242 
1243 Node *PhaseIterGVN::transform_old(Node* n) {
1244   NOT_PRODUCT(set_transforms());
1245   // Remove 'n' from hash table in case it gets modified
1246   _table.hash_delete(n);
1247   if (VerifyIterativeGVN) {
1248     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1249   }
1250 
1251   // Apply the Ideal call in a loop until it no longer applies
1252   Node* k = n;
1253   DEBUG_ONLY(dead_loop_check(k);)
1254   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1255   C->remove_modified_node(k);
1256   Node* i = apply_ideal(k, /*can_reshape=*/true);
1257   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1258 #ifndef PRODUCT
1259   verify_step(k);

1482 
1483   // Smash all inputs to 'old', isolating him completely
1484   Node *temp = new Node(1);
1485   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1486   remove_dead_node( old );
1487   temp->del_req(0);         // Yank bogus edge
1488   if (nn != NULL && nn->outcnt() == 0) {
1489     _worklist.push(nn);
1490   }
1491 #ifndef PRODUCT
1492   if( VerifyIterativeGVN ) {
1493     for ( int i = 0; i < _verify_window_size; i++ ) {
1494       if ( _verify_window[i] == old )
1495         _verify_window[i] = nn;
1496     }
1497   }
1498 #endif
1499   temp->destruct(this);     // reuse the _idx of this little guy
1500 }
1501 
1502 //------------------------------add_users_to_worklist--------------------------
1503 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1504   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1505     _worklist.push(n->fast_out(i));  // Push on worklist
1506   }
1507 }
1508 
1509 // Return the counted loop Phi if, as part of a counted loop exit condition,
1510 // 'cmp' compares the induction variable with 'n'
1511 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1512   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1513     Node* bol = cmp->fast_out(i);
1514     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1515       Node* iff = bol->fast_out(i2);
1516       if (iff->is_BaseCountedLoopEnd()) {
1517         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1518         if (cle->limit() == n) {
1519           PhiNode* phi = cle->phi();
1520           if (phi != NULL) {
1521             return phi;

1582         }
1583         Node* in1 = use->in(1);
1584         for (uint i = 0; i < in1->outcnt(); i++) {
1585           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1586             Node* castii = in1->raw_out(i);
1587             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1588               Node* ifnode = castii->in(0)->in(0);
1589               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1590                 // Reprocess a CastII node that may depend on an
1591                 // opaque node's value when the opaque node is
1592                 // removed. If it carries a dependency, we can do a
1593                 // better job of computing its type.
1594                 _worklist.push(castii);
1595               }
1596             }
1597           }
1598         }
1599       }
1600     }
1601 
1602     // If changed Cast input, check Phi users for simple cycles
1603     if (use->is_ConstraintCast()) {
1604       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1605         Node* u = use->fast_out(i2);
1606         if (u->is_Phi())
1607           _worklist.push(u);
1608       }
1609     }
1610     // If changed LShift inputs, check RShift users for useless sign-ext
1611     if( use_op == Op_LShiftI ) {
1612       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1613         Node* u = use->fast_out(i2);
1614         if (u->Opcode() == Op_RShiftI)
1615           _worklist.push(u);
1616       }
1617     }
1618     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1619     if (use_op == Op_AddI || use_op == Op_SubI) {
1620       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1621         Node* u = use->fast_out(i2);

1637       InitializeNode* init = use->as_Allocate()->initialization();
1638       if (init != NULL) {
1639         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1640         if (imem != NULL)  add_users_to_worklist0(imem);
1641       }
1642     }
1643     // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1644     // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1645     // to guarantee the change is not missed.
1646     if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1647       Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1648       if (p != NULL) {
1649         add_users_to_worklist0(p);
1650       }
1651     }
1652 
1653     if (use_op == Op_Initialize) {
1654       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1655       if (imem != NULL)  add_users_to_worklist0(imem);
1656     }

1657     // Loading the java mirror from a Klass requires two loads and the type
1658     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1659     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1660     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1661     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1662 
1663     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1664       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1665         Node* u = use->fast_out(i2);
1666         const Type* ut = u->bottom_type();
1667         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1668           if (has_load_barrier_nodes) {
1669             // Search for load barriers behind the load
1670             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1671               Node* b = u->fast_out(i3);
1672               if (bs->is_gc_barrier_node(b)) {
1673                 _worklist.push(b);
1674               }
1675             }
1676           }
1677           _worklist.push(u);
1678         }
1679       }
1680     }
1681   }
1682 }
1683 
1684 /**
1685  * Remove the speculative part of all types that we know of
1686  */
1687 void PhaseIterGVN::remove_speculative_types()  {
1688   assert(UseTypeSpeculation, "speculation is off");
1689   for (uint i = 0; i < _types.Size(); i++)  {
1690     const Type* t = _types.fast_lookup(i);
1691     if (t != NULL) {
1692       _types.map(i, t->remove_speculative());
1693     }
1694   }
1695   _table.check_no_speculative_types();
1696 }
1697 
1698 // Check if the type of a divisor of a Div or Mod node includes zero.
1699 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1700   switch (n->Opcode()) {

1828   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1829     Node* use = n->fast_out(i);
1830     push_if_not_bottom_type(worklist, use);
1831     push_more_uses(worklist, n, use);
1832   }
1833 }
1834 
1835 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1836   if (n->bottom_type() != type(n)) {
1837     worklist.push(n);
1838   }
1839 }
1840 
1841 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1842 // Add them back to the worklist.
1843 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1844   push_phis(worklist, use);
1845   push_catch(worklist, use);
1846   push_cmpu(worklist, use);
1847   push_counted_loop_phi(worklist, parent, use);
1848   push_loadp(worklist, use);
1849   push_and(worklist, parent, use);
1850   push_cast_ii(worklist, parent, use);
1851 }
1852 
1853 
1854 // We must recheck Phis too if use is a Region.
1855 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1856   if (use->is_Region()) {
1857     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1858       push_if_not_bottom_type(worklist, use->fast_out(i));
1859     }
1860   }
1861 }
1862 
1863 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1864 // non-NULL receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1865 // The same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1866 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
1867   if (use->is_Call()) {

1888       if (cmpu->Opcode() == Op_CmpU) {
1889         // Got a CmpU which might need the new type information from node n.
1890         push_if_not_bottom_type(worklist, cmpu);
1891       }
1892     }
1893   }
1894 }
1895 
1896 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1897 // See PhiNode::Value().
1898 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1899   uint use_op = use->Opcode();
1900   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1901     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1902     if (phi != NULL) {
1903       worklist.push(phi);
1904     }
1905   }
1906 }
1907 
1908 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1909 // See LoadNode::Value().
1910 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
1911   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
1912   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
1913 
1914   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1915     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1916       Node* loadp = use->fast_out(i);
1917       const Type* ut = loadp->bottom_type();
1918       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
1919         if (has_load_barrier_nodes) {
1920           // Search for load barriers behind the load
1921           push_load_barrier(worklist, barrier_set, loadp);
1922         }
1923         worklist.push(loadp);
1924       }
1925     }
1926   }
1927 }

1208     loop_count++;
1209   }
1210   NOT_PRODUCT(verify_PhaseIterGVN();)
1211 }
1212 
1213 
1214 /**
1215  * Register a new node with the optimizer. Update the types array and the
1216  * def-use info, and put the node on the worklist.
1217  */
1218 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1219   set_type_bottom(n);
1220   _worklist.push(n);
1221   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1222   return n;
1223 }
1224 
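A minimal sketch of a typical call site for the helper above, e.g. from inside an Ideal()-style transform; the igvn pointer and the AddINode operands (a, b) are illustrative, not taken from this change:

    // Hypothetical: build a replacement node and hand it to IGVN.
    Node* sum = new AddINode(a, b);               // freshly allocated, untyped
    igvn->register_new_node_with_optimizer(sum);  // records a type, queues it
    // 'sum' now has an entry in the type array and sits on the worklist.
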
1225 //------------------------------transform--------------------------------------
1226 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1227 Node *PhaseIterGVN::transform( Node *n ) {
1228   // If brand new node, make space in type array, and give it a type.
1229   ensure_type_or_null(n);
1230   if (type_or_null(n) == NULL) {
1231     set_type_bottom(n);
1232   }
1233 
1234   if (_delay_transform) {
1235     // Add the node to the worklist but don't optimize for now
1236     _worklist.push(n);
1237     return n;
1238   }
1239 
1240   return transform_old(n);
1241 }
1242 
1243 Node *PhaseIterGVN::transform_old(Node* n) {
1244   NOT_PRODUCT(set_transforms());
1245   // Remove 'n' from hash table in case it gets modified
1246   _table.hash_delete(n);
1247   if (VerifyIterativeGVN) {
1248     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1249   }
1250 
1251   // Apply the Ideal call in a loop until it no longer applies
1252   Node* k = n;
1253   DEBUG_ONLY(dead_loop_check(k);)
1254   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1255   C->remove_modified_node(k);
1256   Node* i = apply_ideal(k, /*can_reshape=*/true);
1257   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1258 #ifndef PRODUCT
1259   verify_step(k);

1482 
1483   // Smash all inputs to 'old', isolating him completely
1484   Node *temp = new Node(1);
1485   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1486   remove_dead_node( old );
1487   temp->del_req(0);         // Yank bogus edge
1488   if (nn != NULL && nn->outcnt() == 0) {
1489     _worklist.push(nn);
1490   }
1491 #ifndef PRODUCT
1492   if( VerifyIterativeGVN ) {
1493     for ( int i = 0; i < _verify_window_size; i++ ) {
1494       if ( _verify_window[i] == old )
1495         _verify_window[i] = nn;
1496     }
1497   }
1498 #endif
1499   temp->destruct(this);     // reuse the _idx of this little guy
1500 }
1501 
1502 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1503   assert(n != NULL, "sanity");
1504   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1505     Node* u = n->fast_out(i);
1506     if (u != n) {
1507       rehash_node_delayed(u);
1508       int nb = u->replace_edge(n, m);
1509       --i, imax -= nb;
1510     }
1511   }
1512   assert(n->outcnt() == 0, "all uses must be deleted");
1513 }
1514 
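The `--i, imax -= nb;` step in replace_in_uses() is the usual DUIterator_Fast compensation idiom. An annotated restatement of the loop, as a sketch (the `u != n` self-use guard is omitted for brevity):

    // replace_edge() deletes 'nb' of n's out-edges; the out-array compacts,
    // so later uses slide into the current slot. Step the cursor back one
    // slot and shrink the cached bound by the number of deleted edges.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u = n->fast_out(i);
      int nb = u->replace_edge(n, m);  // may remove several n->u edges at once
      --i, imax -= nb;                 // revisit slot i with the shrunk bound
    }
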
1515 //------------------------------add_users_to_worklist--------------------------
1516 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1517   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1518     _worklist.push(n->fast_out(i));  // Push on worklist
1519   }
1520 }
1521 
1522 // Return the counted loop Phi if, as part of a counted loop exit condition,
1523 // 'cmp' compares the induction variable with 'n'
1524 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1525   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1526     Node* bol = cmp->fast_out(i);
1527     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1528       Node* iff = bol->fast_out(i2);
1529       if (iff->is_BaseCountedLoopEnd()) {
1530         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1531         if (cle->limit() == n) {
1532           PhiNode* phi = cle->phi();
1533           if (phi != NULL) {
1534             return phi;

1595         }
1596         Node* in1 = use->in(1);
1597         for (uint i = 0; i < in1->outcnt(); i++) {
1598           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1599             Node* castii = in1->raw_out(i);
1600             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1601               Node* ifnode = castii->in(0)->in(0);
1602               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1603                 // Reprocess a CastII node that may depend on an
1604                 // opaque node's value when the opaque node is
1605                 // removed. If it carries a dependency, we can do a
1606                 // better job of computing its type.
1607                 _worklist.push(castii);
1608               }
1609             }
1610           }
1611         }
1612       }
1613     }
1614 
1615     // Inline type nodes can have other inline types as users. If an input gets
1616     // updated, make sure that inline type users get a chance for optimization.
1617     if (use->is_InlineType()) {
1618       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1619         Node* u = use->fast_out(i2);
1620         if (u->is_InlineType())
1621           _worklist.push(u);
1622       }
1623     }
1624     // If changed Cast input, check Phi users for simple cycles
1625     if (use->is_ConstraintCast()) {
1626       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1627         Node* u = use->fast_out(i2);
1628         if (u->is_Phi())
1629           _worklist.push(u);
1630       }
1631     }
1632     // If changed LShift inputs, check RShift users for useless sign-ext
1633     if( use_op == Op_LShiftI ) {
1634       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1635         Node* u = use->fast_out(i2);
1636         if (u->Opcode() == Op_RShiftI)
1637           _worklist.push(u);
1638       }
1639     }
1640     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1641     if (use_op == Op_AddI || use_op == Op_SubI) {
1642       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1643         Node* u = use->fast_out(i2);

1659       InitializeNode* init = use->as_Allocate()->initialization();
1660       if (init != NULL) {
1661         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1662         if (imem != NULL)  add_users_to_worklist0(imem);
1663       }
1664     }
1665     // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1666     // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1667     // to guarantee the change is not missed.
1668     if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1669       Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1670       if (p != NULL) {
1671         add_users_to_worklist0(p);
1672       }
1673     }
1674 
1675     if (use_op == Op_Initialize) {
1676       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1677       if (imem != NULL)  add_users_to_worklist0(imem);
1678     }
1679     if (use_op == Op_CastP2X) {
1680       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1681         Node* u = use->fast_out(i2);
1682         if (u->Opcode() == Op_AndX) {
1683           _worklist.push(u);
1684         }
1685       }
1686     }
1687     // Loading the java mirror from a Klass requires two loads and the type
1688     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1689     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1690     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1691     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1692 
1693     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1694       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1695         Node* u = use->fast_out(i2);
1696         const Type* ut = u->bottom_type();
1697         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1698           if (has_load_barrier_nodes) {
1699             // Search for load barriers behind the load
1700             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1701               Node* b = u->fast_out(i3);
1702               if (bs->is_gc_barrier_node(b)) {
1703                 _worklist.push(b);
1704               }
1705             }
1706           }
1707           _worklist.push(u);
1708         }
1709       }
1710     }
1711 
1712     // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1713     if (use->is_Region()) {
1714       Node* c = use;
1715       do {
1716         c = c->unique_ctrl_out_or_null();
1717       } while (c != NULL && c->is_Region());
1718       if (c != NULL && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1719         _worklist.push(c);
1720       }
1721     }
1722   }
1723 }
1724 
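The new Region case at the end of add_users_to_worklist() walks a chain of Regions down to a potential uncommon-trap call. The control shape it targets, sketched:

    // Region --> Region --> ... --> CallStaticJava with a nonzero
    //                               uncommon_trap_request()
    // unique_ctrl_out_or_null() yields the sole control user at each step
    // (or NULL at a control split, which ends the walk), so the trap call
    // is re-enqueued and CallStaticJavaNode::remove_useless_allocation
    // gets another chance to run.
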
1725 /**
1726  * Remove the speculative part of all types that we know of
1727  */
1728 void PhaseIterGVN::remove_speculative_types()  {
1729   assert(UseTypeSpeculation, "speculation is off");
1730   for (uint i = 0; i < _types.Size(); i++)  {
1731     const Type* t = _types.fast_lookup(i);
1732     if (t != NULL) {
1733       _types.map(i, t->remove_speculative());
1734     }
1735   }
1736   _table.check_no_speculative_types();
1737 }
1738 
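remove_speculative() maps each type to the same type minus its speculative component, and is the identity for types that carry no speculation. A sketch of the postcondition (the speculative() accessor is assumed per type.hpp):

    // Illustrative only: after remove_speculative_types() has run,
    // no entry in the type table carries a speculative part.
    const Type* t  = _types.fast_lookup(i);     // may embed speculation
    const Type* nt = t->remove_speculative();   // speculation stripped
    assert(nt->speculative() == NULL, "no speculative part survives");
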
1739 // Check if the type of a divisor of a Div or Mod node includes zero.
1740 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1741   switch (n->Opcode()) {

1869   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1870     Node* use = n->fast_out(i);
1871     push_if_not_bottom_type(worklist, use);
1872     push_more_uses(worklist, n, use);
1873   }
1874 }
1875 
1876 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1877   if (n->bottom_type() != type(n)) {
1878     worklist.push(n);
1879   }
1880 }
1881 
1882 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1883 // Add them back to the worklist.
1884 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1885   push_phis(worklist, use);
1886   push_catch(worklist, use);
1887   push_cmpu(worklist, use);
1888   push_counted_loop_phi(worklist, parent, use);
1889   push_cast(worklist, use);
1890   push_loadp(worklist, use);
1891   push_and(worklist, parent, use);
1892   push_cast_ii(worklist, parent, use);
1893 }
1894 
1895 
1896 // We must recheck Phis too if use is a Region.
1897 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1898   if (use->is_Region()) {
1899     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1900       push_if_not_bottom_type(worklist, use->fast_out(i));
1901     }
1902   }
1903 }
1904 
1905 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1906 // non-NULL receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1907 // The same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1908 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
1909   if (use->is_Call()) {

1930       if (cmpu->Opcode() == Op_CmpU) {
1931         // Got a CmpU which might need the new type information from node n.
1932         push_if_not_bottom_type(worklist, cmpu);
1933       }
1934     }
1935   }
1936 }
1937 
1938 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1939 // See PhiNode::Value().
1940 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1941   uint use_op = use->Opcode();
1942   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1943     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1944     if (phi != NULL) {
1945       worklist.push(phi);
1946     }
1947   }
1948 }
1949 
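The shape walked by countedloop_phi_from_cmp(), sketched:

    // parent --> CmpI/CmpL --> Bool --> BaseCountedLoopEnd --> iv Phi
    // A narrowed type for 'parent' (the loop limit) can narrow the induction
    // variable's Phi via PhiNode::Value(), but the Phi is several edges away,
    // so it is pushed explicitly rather than relying on the one-step push.
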
1950 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
1951   uint use_op = use->Opcode();
1952   if (use_op == Op_CastP2X) {
1953     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1954       Node* u = use->fast_out(i2);
1955       if (u->Opcode() == Op_AndX) {
1956         worklist.push(u);
1957       }
1958     }
1959   }
1960 }
1961 
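push_cast() mirrors the Op_CastP2X case added to add_users_to_worklist() above. One plausible shape, sketched (what the mask tests is an assumption, not stated in this change):

    // p   : pointer whose type has just sharpened
    // x   = CastP2X(p)     // pointer bits reinterpreted as an integer
    // res = AndX(x, mask)  // bit test over those pointer bits
    // AndX sits two steps from p, outside the normal one-step use push,
    // so it is pushed explicitly in case its Value() can now fold.
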
1962 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1963 // See LoadNode::Value().
1964 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
1965   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
1966   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
1967 
1968   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1969     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1970       Node* loadp = use->fast_out(i);
1971       const Type* ut = loadp->bottom_type();
1972       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
1973         if (has_load_barrier_nodes) {
1974           // Search for load barriers behind the load
1975           push_load_barrier(worklist, barrier_set, loadp);
1976         }
1977         worklist.push(loadp);
1978       }
1979     }
1980   }
1981 }
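The two-load mirror shape referenced by the comment above, expanded slightly:

    // adr  = AddP(foo:Klass, #java_mirror)  // address of the mirror slot
    // use  = LoadP(adr)                     // raw-ptr load ('use' here)
    // load = LoadP(use)                     // instptr load; its type depends
    //                                       // on foo's type (LoadNode::Value)
    // When foo's type changes, 'load' and any GC load barrier behind it
    // must be revisited, hence the explicit pushes above.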