src/hotspot/share/opto/phaseX.cpp

1208     loop_count++;
1209   }
1210   NOT_PRODUCT(verify_PhaseIterGVN();)
1211 }
1212 
1213 
1214 /**
1215  * Register a new node with the optimizer.  Update the types array, the def-use
1216  * info.  Put on worklist.
1217  */
1218 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1219   set_type_bottom(n);
1220   _worklist.push(n);
1221   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1222   return n;
1223 }
1224 
1225 //------------------------------transform--------------------------------------
1226 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1227 Node *PhaseIterGVN::transform( Node *n ) {
1228   if (_delay_transform) {
1229     // Register the node but don't optimize for now
1230     register_new_node_with_optimizer(n);
1231     return n;
1232   }
1233 
1234   // If brand new node, make space in type array, and give it a type.
1235   ensure_type_or_null(n);
1236   if (type_or_null(n) == NULL) {
1237     set_type_bottom(n);
1238   }
1239 

1240   return transform_old(n);
1241 }
1242 
1243 Node *PhaseIterGVN::transform_old(Node* n) {
1244   NOT_PRODUCT(set_transforms());
1245   // Remove 'n' from hash table in case it gets modified
1246   _table.hash_delete(n);
1247   if (VerifyIterativeGVN) {
1248     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1249   }
1250 
1251   // Apply the Ideal call in a loop until it no longer applies
1252   Node* k = n;
1253   DEBUG_ONLY(dead_loop_check(k);)
1254   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1255   C->remove_modified_node(k);
1256   Node* i = apply_ideal(k, /*can_reshape=*/true);
1257   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1258 #ifndef PRODUCT
1259   verify_step(k);

1482 
1483   // Smash all inputs to 'old', isolating him completely
1484   Node *temp = new Node(1);
1485   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1486   remove_dead_node( old );
1487   temp->del_req(0);         // Yank bogus edge
1488   if (nn != NULL && nn->outcnt() == 0) {
1489     _worklist.push(nn);
1490   }
1491 #ifndef PRODUCT
1492   if( VerifyIterativeGVN ) {
1493     for ( int i = 0; i < _verify_window_size; i++ ) {
1494       if ( _verify_window[i] == old )
1495         _verify_window[i] = nn;
1496     }
1497   }
1498 #endif
1499   temp->destruct(this);     // reuse the _idx of this little guy
1500 }
1501 

1502 //------------------------------add_users_to_worklist--------------------------
1503 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1504   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1505     _worklist.push(n->fast_out(i));  // Push on worklist
1506   }
1507 }
1508 
1509 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1510 // compares the induction variable with n
1511 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1512   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1513     Node* bol = cmp->fast_out(i);
1514     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1515       Node* iff = bol->fast_out(i2);
1516       if (iff->is_BaseCountedLoopEnd()) {
1517         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1518         if (cle->limit() == n) {
1519           PhiNode* phi = cle->phi();
1520           if (phi != NULL) {
1521             return phi;

1582         }
1583         Node* in1 = use->in(1);
1584         for (uint i = 0; i < in1->outcnt(); i++) {
1585           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1586             Node* castii = in1->raw_out(i);
1587             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1588               Node* ifnode = castii->in(0)->in(0);
1589               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1590                 // Reprocess a CastII node that may depend on an
1591                 // opaque node value when the opaque node is
1592                 // removed. In case it carries a dependency we can do
1593                 // a better job of computing its type.
1594                 _worklist.push(castii);
1595               }
1596             }
1597           }
1598         }
1599       }
1600     }
1601 

1602     // If changed Cast input, check Phi users for simple cycles
1603     if (use->is_ConstraintCast()) {
1604       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1605         Node* u = use->fast_out(i2);
1606         if (u->is_Phi())
1607           _worklist.push(u);
1608       }
1609     }
1610     // If changed LShift inputs, check RShift users for useless sign-ext
1611     if( use_op == Op_LShiftI ) {
1612       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1613         Node* u = use->fast_out(i2);
1614         if (u->Opcode() == Op_RShiftI)
1615           _worklist.push(u);
1616       }
1617     }
1618     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1619     if (use_op == Op_AddI || use_op == Op_SubI) {
1620       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1621         Node* u = use->fast_out(i2);

1627     // If changed AddP inputs, check Stores for loop invariant
1628     if( use_op == Op_AddP ) {
1629       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1630         Node* u = use->fast_out(i2);
1631         if (u->is_Mem())
1632           _worklist.push(u);
1633       }
1634     }
1635     // If changed initialization activity, check dependent Stores
1636     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1637       InitializeNode* init = use->as_Allocate()->initialization();
1638       if (init != NULL) {
1639         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1640         if (imem != NULL)  add_users_to_worklist0(imem);
1641       }
1642     }
1643     if (use_op == Op_Initialize) {
1644       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1645       if (imem != NULL)  add_users_to_worklist0(imem);
1646     }


1647     // Loading the java mirror from a Klass requires two loads and the type
1648     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1649     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1650     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1651     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1652 
1653     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1654       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1655         Node* u = use->fast_out(i2);
1656         const Type* ut = u->bottom_type();
1657         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1658           if (has_load_barrier_nodes) {
1659             // Search for load barriers behind the load
1660             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1661               Node* b = u->fast_out(i3);
1662               if (bs->is_gc_barrier_node(b)) {
1663                 _worklist.push(b);
1664               }
1665             }
1666           }
1667           _worklist.push(u);
1668         }
1669       }
1670     }

1671   }
1672 }
1673 
1674 /**
1675  * Remove the speculative part of all types that we know of
1676  */
1677 void PhaseIterGVN::remove_speculative_types()  {
1678   assert(UseTypeSpeculation, "speculation is off");
1679   for (uint i = 0; i < _types.Size(); i++)  {
1680     const Type* t = _types.fast_lookup(i);
1681     if (t != NULL) {
1682       _types.map(i, t->remove_speculative());
1683     }
1684   }
1685   _table.check_no_speculative_types();
1686 }
1687 
1688 // Check if the type of a divisor of a Div or Mod node includes zero.
1689 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1690   switch (n->Opcode()) {

1818   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1819     Node* use = n->fast_out(i);
1820     push_if_not_bottom_type(worklist, use);
1821     push_more_uses(worklist, n, use);
1822   }
1823 }
1824 
1825 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1826   if (n->bottom_type() != type(n)) {
1827     worklist.push(n);
1828   }
1829 }
1830 
1831 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1832 // Add them back to the worklist.
1833 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1834   push_phis(worklist, use);
1835   push_catch(worklist, use);
1836   push_cmpu(worklist, use);
1837   push_counted_loop_phi(worklist, parent, use);

1838   push_loadp(worklist, use);
1839   push_and(worklist, parent, use);
1840 }
1841 
1842 
1843 // We must recheck Phis too if use is a Region.
1844 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1845   if (use->is_Region()) {
1846     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1847       push_if_not_bottom_type(worklist, use->fast_out(i));
1848     }
1849   }
1850 }
1851 
1852 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1853 // non-NULL receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1854 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
1855   if (use->is_Call()) {
1856     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1857       Node* proj = use->fast_out(i);

1876       if (cmpu->Opcode() == Op_CmpU) {
1877         // Got a CmpU which might need the new type information from node n.
1878         push_if_not_bottom_type(worklist, cmpu);
1879       }
1880     }
1881   }
1882 }
1883 
1884 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1885 // See PhiNode::Value().
1886 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1887   uint use_op = use->Opcode();
1888   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1889     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1890     if (phi != NULL) {
1891       worklist.push(phi);
1892     }
1893   }
1894 }
1895 

1896 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1897 // See LoadNode::Value().
1898 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
1899   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
1900   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
1901 
1902   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1903     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1904       Node* loadp = use->fast_out(i);
1905       const Type* ut = loadp->bottom_type();
1906       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
1907         if (has_load_barrier_nodes) {
1908           // Search for load barriers behind the load
1909           push_load_barrier(worklist, barrier_set, loadp);
1910         }
1911         worklist.push(loadp);
1912       }
1913     }
1914   }
1915 }

1208     loop_count++;
1209   }
1210   NOT_PRODUCT(verify_PhaseIterGVN();)
1211 }
1212 
1213 
1214 /**
1215  * Register a new node with the optimizer.  Update the types array, the def-use
1216  * info.  Put on worklist.
1217  */
1218 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1219   set_type_bottom(n);
1220   _worklist.push(n);
1221   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1222   return n;
1223 }
1224 
1225 //------------------------------transform--------------------------------------
1226 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1227 Node *PhaseIterGVN::transform( Node *n ) {

1228   // If brand new node, make space in type array, and give it a type.
1229   ensure_type_or_null(n);
1230   if (type_or_null(n) == NULL) {
1231     set_type_bottom(n);
1232   }
1233 
1234   if (_delay_transform) {
1235     // Add the node to the worklist but don't optimize for now
1236     _worklist.push(n);
1237     return n;
1238   }
1239 
1240   return transform_old(n);
1241 }
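
A note on the block above: the node is given its bottom type before the _delay_transform check, so even delayed nodes always have an entry in the types array when they come off the worklist later. A minimal standalone sketch of that discipline (illustrative names only, not HotSpot code):

    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    struct Node { int idx; };

    struct MiniIGVN {
      std::unordered_map<int, const char*> types;  // stands in for _types
      std::vector<Node*> worklist;                 // stands in for _worklist
      bool delay_transform = true;

      Node* transform(Node* n) {
        // Type the node first, mirroring the order in the new version...
        if (types.count(n->idx) == 0) {
          types[n->idx] = "bottom";
        }
        // ...then decide whether to queue it for later or optimize now.
        if (delay_transform) {
          worklist.push_back(n);
          return n;
        }
        return n;  // transform_old(n) would run here
      }
    };

    int main() {
      MiniIGVN igvn;
      Node a{1};
      igvn.transform(&a);
      std::printf("type=%s queued=%zu\n", igvn.types[a.idx], igvn.worklist.size());
      return 0;
    }
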
1242 
1243 Node *PhaseIterGVN::transform_old(Node* n) {
1244   NOT_PRODUCT(set_transforms());
1245   // Remove 'n' from hash table in case it gets modified
1246   _table.hash_delete(n);
1247   if (VerifyIterativeGVN) {
1248     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1249   }
1250 
1251   // Apply the Ideal call in a loop until it no longer applies
1252   Node* k = n;
1253   DEBUG_ONLY(dead_loop_check(k);)
1254   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1255   C->remove_modified_node(k);
1256   Node* i = apply_ideal(k, /*can_reshape=*/true);
1257   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1258 #ifndef PRODUCT
1259   verify_step(k);

1482 
1483   // Smash all inputs to 'old', isolating him completely
1484   Node *temp = new Node(1);
1485   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1486   remove_dead_node( old );
1487   temp->del_req(0);         // Yank bogus edge
1488   if (nn != NULL && nn->outcnt() == 0) {
1489     _worklist.push(nn);
1490   }
1491 #ifndef PRODUCT
1492   if( VerifyIterativeGVN ) {
1493     for ( int i = 0; i < _verify_window_size; i++ ) {
1494       if ( _verify_window[i] == old )
1495         _verify_window[i] = nn;
1496     }
1497   }
1498 #endif
1499   temp->destruct(this);     // reuse the _idx of this little guy
1500 }
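
The temp-node trick above gives nn an artificial use so it cannot be reclaimed as dead while old is being dismantled; the bogus edge is then yanked and temp destructed to recycle its _idx. A simplified standalone model of the keep-alive idea (illustrative, not HotSpot's node bookkeeping):

    #include <cassert>
    #include <vector>

    struct Node {
      std::vector<Node*> in;
      int outcnt = 0;                 // 0 means "dead, may be reclaimed"
      void init_req(size_t i, Node* d) {
        if (in.size() <= i) in.resize(i + 1, nullptr);
        in[i] = d;
        if (d != nullptr) d->outcnt++;
      }
      void del_req(size_t i) {
        if (in[i] != nullptr) in[i]->outcnt--;
        in[i] = nullptr;
      }
    };

    int main() {
      Node nn, temp;
      temp.init_req(0, &nn);          // artificial use: nn is pinned alive
      // ... dead-node removal may run here; nn.outcnt is 1, so nn is safe ...
      assert(nn.outcnt == 1);
      temp.del_req(0);                // yank the bogus edge
      assert(nn.outcnt == 0);         // nn may now be dead: push and recheck it
      return 0;
    }
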
1501 
1502 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1503   assert(n != NULL, "sanity");
1504   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1505     Node* u = n->fast_out(i);
1506     if (u != n) {
1507       rehash_node_delayed(u);
1508       int nb = u->replace_edge(n, m);
1509       --i, imax -= nb;
1510     }
1511   }
1512   assert(n->outcnt() == 0, "all uses must be deleted");
1513 }
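
replace_in_uses is new in this change. The `--i, imax -= nb` fix-up compensates for the out-array compacting underneath the iterator as edges are rewritten. A simplified standalone model of that behavior (the Node here is a stand-in, not HotSpot's):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Node {
      std::vector<Node*> in;    // def edges
      std::vector<Node*> out;   // use edges: one entry per input edge of a user
      // Rewire every input equal to `from` over to `to`; return edges changed.
      int replace_edge(Node* from, Node* to) {
        int nb = 0;
        for (Node*& e : in) {
          if (e == from) { e = to; to->out.push_back(this); ++nb; }
        }
        // Compact from->out in place, as HotSpot's out-array does.
        from->out.erase(std::remove(from->out.begin(), from->out.end(), this),
                        from->out.end());
        return nb;
      }
    };

    void replace_in_uses(Node* n, Node* m) {
      // Re-test the same slot instead of advancing: rewriting a user removes
      // its entries from n->out, which is what `--i, imax -= nb` accounts for.
      size_t i = 0;
      while (i < n->out.size()) {
        Node* u = n->out[i];
        if (u != n) {
          u->replace_edge(n, m);
        } else {
          ++i;                  // the real code skips self-uses the same way
        }
      }
      assert(n->out.empty() && "all uses must be deleted");
    }

    int main() {
      Node n, m, u;
      u.in  = { &n, &n };
      n.out = { &u, &u };
      replace_in_uses(&n, &m);
      assert(u.in[0] == &m && u.in[1] == &m && m.out.size() == 2);
      return 0;
    }
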
1514 
1515 //------------------------------add_users_to_worklist--------------------------
1516 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1517   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1518     _worklist.push(n->fast_out(i));  // Push on worklist
1519   }
1520 }
1521 
1522 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1523 // compares the induction variable with n
1524 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1525   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1526     Node* bol = cmp->fast_out(i);
1527     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1528       Node* iff = bol->fast_out(i2);
1529       if (iff->is_BaseCountedLoopEnd()) {
1530         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1531         if (cle->limit() == n) {
1532           PhiNode* phi = cle->phi();
1533           if (phi != NULL) {
1534             return phi;

1595         }
1596         Node* in1 = use->in(1);
1597         for (uint i = 0; i < in1->outcnt(); i++) {
1598           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1599             Node* castii = in1->raw_out(i);
1600             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1601               Node* ifnode = castii->in(0)->in(0);
1602               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1603                 // Reprocess a CastII node that may depend on an
1604                 // opaque node value when the opaque node is
1605                 // removed. In case it carries a dependency we can do
1606                 // a better job of computing its type.
1607                 _worklist.push(castii);
1608               }
1609             }
1610           }
1611         }
1612       }
1613     }
1614 
1615     // Inline type nodes can have other inline types as users. If an input gets
1616     // updated, make sure that inline type users get a chance for optimization.
1617     if (use->is_InlineTypeBase()) {
1618       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1619         Node* u = use->fast_out(i2);
1620         if (u->is_InlineTypeBase())
1621           _worklist.push(u);
1622       }
1623     }
1624     // If changed Cast input, check Phi users for simple cycles
1625     if (use->is_ConstraintCast()) {
1626       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1627         Node* u = use->fast_out(i2);
1628         if (u->is_Phi())
1629           _worklist.push(u);
1630       }
1631     }
1632     // If changed LShift inputs, check RShift users for useless sign-ext
1633     if( use_op == Op_LShiftI ) {
1634       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1635         Node* u = use->fast_out(i2);
1636         if (u->Opcode() == Op_RShiftI)
1637           _worklist.push(u);
1638       }
1639     }
1640     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1641     if (use_op == Op_AddI || use_op == Op_SubI) {
1642       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1643         Node* u = use->fast_out(i2);

1649     // If changed AddP inputs, check Stores for loop invariant
1650     if( use_op == Op_AddP ) {
1651       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1652         Node* u = use->fast_out(i2);
1653         if (u->is_Mem())
1654           _worklist.push(u);
1655       }
1656     }
1657     // If changed initialization activity, check dependent Stores
1658     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1659       InitializeNode* init = use->as_Allocate()->initialization();
1660       if (init != NULL) {
1661         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1662         if (imem != NULL)  add_users_to_worklist0(imem);
1663       }
1664     }
1665     if (use_op == Op_Initialize) {
1666       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1667       if (imem != NULL)  add_users_to_worklist0(imem);
1668     }
1669     if (use_op == Op_CastP2X) {
1670       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1671         Node* u = use->fast_out(i2);
1672         if (u->Opcode() == Op_AndX) {
1673           _worklist.push(u);
1674         }
1675       }
1676     }
1677     // Loading the java mirror from a Klass requires two loads and the type
1678     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1679     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1680     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1681     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1682 
1683     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1684       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1685         Node* u = use->fast_out(i2);
1686         const Type* ut = u->bottom_type();
1687         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1688           if (has_load_barrier_nodes) {
1689             // Search for load barriers behind the load
1690             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1691               Node* b = u->fast_out(i3);
1692               if (bs->is_gc_barrier_node(b)) {
1693                 _worklist.push(b);
1694               }
1695             }
1696           }
1697           _worklist.push(u);
1698         }
1699       }
1700     }
1701 
1702     // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1703     if (use->is_Region()) {
1704       Node* c = use;
1705       do {
1706         c = c->unique_ctrl_out_or_null();
1707       } while (c != NULL && c->is_Region());
1708       if (c != NULL && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1709         _worklist.push(c);
1710       }
1711     }
1712   }
1713 }
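
The function above encodes one recurring pattern: a changed node must requeue not just its direct users but specific grandchildren whose Value()/Ideal() read through the intermediate node (RShiftI behind LShiftI, Mem behind AddP, Phi behind ConstraintCast, and so on). A condensed standalone model of the two-level push (opcodes and structure are illustrative only):

    #include <cstdio>
    #include <vector>

    enum Op { OpLShiftI, OpRShiftI, OpOther };

    struct Node {
      Op op;
      std::vector<Node*> outs;  // users
    };

    // When n changes: push every direct user; for pattern-matched users,
    // also push the grandchildren that may now simplify.
    void add_users_to_worklist(Node* n, std::vector<Node*>& worklist) {
      for (Node* use : n->outs) {
        worklist.push_back(use);
        if (use->op == OpLShiftI) {
          // A changed (x << k) may make a following (... >> k) a useless
          // sign extension, so its RShiftI users need a second look.
          for (Node* u : use->outs) {
            if (u->op == OpRShiftI) worklist.push_back(u);
          }
        }
      }
    }

    int main() {
      Node rshift{OpRShiftI, {}};
      Node lshift{OpLShiftI, {&rshift}};
      Node x{OpOther, {&lshift}};
      std::vector<Node*> worklist;
      add_users_to_worklist(&x, worklist);
      std::printf("pushed %zu nodes\n", worklist.size());  // 2: the shift pair
      return 0;
    }
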
1714 
1715 /**
1716  * Remove the speculative part of all types that we know of
1717  */
1718 void PhaseIterGVN::remove_speculative_types()  {
1719   assert(UseTypeSpeculation, "speculation is off");
1720   for (uint i = 0; i < _types.Size(); i++)  {
1721     const Type* t = _types.fast_lookup(i);
1722     if (t != NULL) {
1723       _types.map(i, t->remove_speculative());
1724     }
1725   }
1726   _table.check_no_speculative_types();
1727 }
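
remove_speculative_types maps every live entry of the type table through Type::remove_speculative, discarding the profile-derived guess while keeping the proven part. A toy standalone illustration of the idea (this Type is a stand-in, not C2's lattice):

    #include <cassert>
    #include <string>
    #include <vector>

    struct Type {
      std::string proven;        // what the compiler can rely on
      std::string speculative;   // profile-based guess; may be empty
      Type remove_speculative() const { return Type{proven, ""}; }
    };

    int main() {
      // Stand-in for the _types array indexed by node _idx.
      std::vector<Type> types = {
        {"Object:NotNull", "MyClass:exact"},   // has a speculative part
        {"int", ""},                           // nothing to strip
      };
      for (Type& t : types) {
        t = t.remove_speculative();
      }
      assert(types[0].speculative.empty() && types[1].proven == "int");
      return 0;
    }
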
1728 
1729 // Check if the type of a divisor of a Div or Mod node includes zero.
1730 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1731   switch (n->Opcode()) {

1859   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1860     Node* use = n->fast_out(i);
1861     push_if_not_bottom_type(worklist, use);
1862     push_more_uses(worklist, n, use);
1863   }
1864 }
1865 
1866 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1867   if (n->bottom_type() != type(n)) {
1868     worklist.push(n);
1869   }
1870 }
1871 
1872 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1873 // Add them back to the worklist.
1874 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1875   push_phis(worklist, use);
1876   push_catch(worklist, use);
1877   push_cmpu(worklist, use);
1878   push_counted_loop_phi(worklist, parent, use);
1879   push_cast(worklist, use);
1880   push_loadp(worklist, use);
1881   push_and(worklist, parent, use);
1882 }
1883 
1884 
1885 // We must recheck Phis too if use is a Region.
1886 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1887   if (use->is_Region()) {
1888     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1889       push_if_not_bottom_type(worklist, use->fast_out(i));
1890     }
1891   }
1892 }
1893 
1894 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1895 // non-NULL receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1896 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
1897   if (use->is_Call()) {
1898     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1899       Node* proj = use->fast_out(i);

1918       if (cmpu->Opcode() == Op_CmpU) {
1919         // Got a CmpU which might need the new type information from node n.
1920         push_if_not_bottom_type(worklist, cmpu);
1921       }
1922     }
1923   }
1924 }
1925 
1926 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1927 // See PhiNode::Value().
1928 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1929   uint use_op = use->Opcode();
1930   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1931     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1932     if (phi != NULL) {
1933       worklist.push(phi);
1934     }
1935   }
1936 }
1937 
1938 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
1939   uint use_op = use->Opcode();
1940   if (use_op == Op_CastP2X) {
1941     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1942       Node* u = use->fast_out(i2);
1943       if (u->Opcode() == Op_AndX) {
1944         worklist.push(u);
1945       }
1946     }
1947   }
1948 }
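
push_cast is also new in this change: when a CastP2X input's type sharpens, AndX users (typically pointer masking in barrier code) may fold to a constant, so CCP has to revisit them. A hypothetical arithmetic illustration of why the AndX becomes foldable once alignment is known:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Suppose CCP proves the CastP2X input is a 16-byte-aligned pointer.
      uintptr_t p = 0x1000;        // any address with the low 4 bits clear
      uintptr_t masked = p & 0xF;  // the AndX node's computation
      assert(masked == 0);         // folds to the constant 0 under that fact
      return 0;
    }
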
1949 
1950 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1951 // See LoadNode::Value().
1952 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
1953   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
1954   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
1955 
1956   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1957     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1958       Node* loadp = use->fast_out(i);
1959       const Type* ut = loadp->bottom_type();
1960       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
1961         if (has_load_barrier_nodes) {
1962           // Search for load barriers behind the load
1963           push_load_barrier(worklist, barrier_set, loadp);
1964         }
1965         worklist.push(loadp);
1966       }
1967     }
1968   }
1969 }
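
The shape guarded by push_loadp is LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror)))): the mirror load's type is derived from the Klass two edges upstream, so sharpening the Klass must requeue the grandchild LoadP (and any GC load barriers behind it). A schematic standalone model of that transitive type dependency (all names illustrative):

    #include <cassert>
    #include <string>

    // Stand-in for LoadNode::Value(): the mirror load's type is a function of
    // the klass feeding the address two edges upstream.
    std::string mirror_type_for(const std::string& klass_type) {
      return "java.lang.Class mirror of " + klass_type;
    }

    int main() {
      // CCP sharpens the klass from a generic Klass* to an exact class...
      std::string before = mirror_type_for("Klass*");
      std::string after  = mirror_type_for("MyClass (exact)");
      // ...so the grandchild LoadP's type changes and it must be re-queued.
      assert(before != after);
      return 0;
    }
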