
src/hotspot/share/opto/phaseX.cpp

//================================ Old version ================================

 406 //=============================================================================
 407 //------------------------------PhaseRemoveUseless-----------------------------
 408 // 1) Use a breadth-first walk to collect useful nodes reachable from root.
 409 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List* worklist, PhaseNumber phase_num) : Phase(phase_num) {
 410   // Implementation requires an edge from root to each SafePointNode
 411   // at a backward branch. Inserted in add_safepoint().
 412 
 413   // Identify nodes that are reachable from below, useful.
 414   C->identify_useful_nodes(_useful);
 415   // Update dead node list
 416   C->update_dead_node_list(_useful);
 417 
 418   // Remove all useless nodes from PhaseValues' recorded types
 419 // Must be done before disconnecting nodes to preserve the hash-table invariant
 420   gvn->remove_useless_nodes(_useful.member_set());
 421 
 422   // Remove all useless nodes from future worklist
 423   worklist->remove_useless_nodes(_useful.member_set());
 424 
 425   // Disconnect 'useless' nodes that are adjacent to useful nodes
 426   C->remove_useless_nodes(_useful);
 427 }
 428 
 429 //=============================================================================
 430 //------------------------------PhaseRenumberLive------------------------------
 431 // First, remove useless nodes (equivalent to identifying live nodes).
 432 // Then, renumber live nodes.
 433 //
 434 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
 435 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
 436 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
 437 // value in the range [0, x).
 438 //
 439 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
 440 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
 441 //
 442 // The PhaseRenumberLive phase updates two data structures with the new node IDs.
 443 // (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
 444 // processed. A new worklist (with the updated node IDs) is returned in 'new_worklist'.
 445 // 'worklist' is cleared upon returning.
 446 // (2) Type information (the field PhaseGVN::_types) maps type information to each

1210     loop_count++;
1211   }
1212   NOT_PRODUCT(verify_PhaseIterGVN();)
1213 }
1214 
1215 
1216 /**
1217  * Register a new node with the optimizer.  Update the types array, the def-use
1218  * info.  Put on worklist.
1219  */
1220 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1221   set_type_bottom(n);
1222   _worklist.push(n);
1223   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1224   return n;
1225 }
1226 
1227 //------------------------------transform--------------------------------------
1228 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1229 Node *PhaseIterGVN::transform( Node *n ) {
1230   if (_delay_transform) {
1231     // Register the node but don't optimize for now
1232     register_new_node_with_optimizer(n);
1233     return n;
1234   }
1235 
1236   // If brand new node, make space in type array, and give it a type.
1237   ensure_type_or_null(n);
1238   if (type_or_null(n) == NULL) {
1239     set_type_bottom(n);
1240   }
1241 
1242   return transform_old(n);
1243 }
1244 
1245 Node *PhaseIterGVN::transform_old(Node* n) {
1246   NOT_PRODUCT(set_transforms());
1247   // Remove 'n' from hash table in case it gets modified
1248   _table.hash_delete(n);
1249   if (VerifyIterativeGVN) {
1250     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1251   }
1252 
1253   // Apply the Ideal call in a loop until it no longer applies
1254   Node* k = n;
1255   DEBUG_ONLY(dead_loop_check(k);)
1256   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1257   C->remove_modified_node(k);
1258   Node* i = apply_ideal(k, /*can_reshape=*/true);
1259   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1260 #ifndef PRODUCT
1261   verify_step(k);

1484 
1485   // Smash all inputs to 'old', isolating him completely
1486   Node *temp = new Node(1);
1487   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1488   remove_dead_node( old );
1489   temp->del_req(0);         // Yank bogus edge
1490   if (nn != NULL && nn->outcnt() == 0) {
1491     _worklist.push(nn);
1492   }
1493 #ifndef PRODUCT
1494   if( VerifyIterativeGVN ) {
1495     for ( int i = 0; i < _verify_window_size; i++ ) {
1496       if ( _verify_window[i] == old )
1497         _verify_window[i] = nn;
1498     }
1499   }
1500 #endif
1501   temp->destruct(this);     // reuse the _idx of this little guy
1502 }
1503 
1504 //------------------------------add_users_to_worklist--------------------------
1505 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1506   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1507     _worklist.push(n->fast_out(i));  // Push on worklist
1508   }
1509 }
1510 
1511 // Return the counted loop Phi if, as part of a counted loop exit condition,
1512 // 'cmp' compares the induction variable with 'n'
1513 static PhiNode* countedloop_phi_from_cmp(CmpINode* cmp, Node* n) {
1514   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1515     Node* bol = cmp->fast_out(i);
1516     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1517       Node* iff = bol->fast_out(i2);
1518       if (iff->is_CountedLoopEnd()) {
1519         CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
1520         if (cle->limit() == n) {
1521           PhiNode* phi = cle->phi();
1522           if (phi != NULL) {
1523             return phi;

1584         }
1585         Node* in1 = use->in(1);
1586         for (uint i = 0; i < in1->outcnt(); i++) {
1587           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1588             Node* castii = in1->raw_out(i);
1589             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1590               Node* ifnode = castii->in(0)->in(0);
1591               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1592                 // Reprocess a CastII node that may depend on an
1593                 // opaque node value when the opaque node is
1594                 // removed. In case it carries a dependency we can do
1595                 // a better job of computing its type.
1596                 _worklist.push(castii);
1597               }
1598             }
1599           }
1600         }
1601       }
1602     }
1603 
1604     // If changed Cast input, check Phi users for simple cycles
1605     if (use->is_ConstraintCast()) {
1606       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1607         Node* u = use->fast_out(i2);
1608         if (u->is_Phi())
1609           _worklist.push(u);
1610       }
1611     }
1612     // If changed LShift inputs, check RShift users for useless sign-ext
1613     if( use_op == Op_LShiftI ) {
1614       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1615         Node* u = use->fast_out(i2);
1616         if (u->Opcode() == Op_RShiftI)
1617           _worklist.push(u);
1618       }
1619     }
1620     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1621     if (use_op == Op_AddI || use_op == Op_SubI) {
1622       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1623         Node* u = use->fast_out(i2);

1629     // If changed AddP inputs, check Stores for loop invariant
1630     if( use_op == Op_AddP ) {
1631       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1632         Node* u = use->fast_out(i2);
1633         if (u->is_Mem())
1634           _worklist.push(u);
1635       }
1636     }
1637     // If changed initialization activity, check dependent Stores
1638     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1639       InitializeNode* init = use->as_Allocate()->initialization();
1640       if (init != NULL) {
1641         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1642         if (imem != NULL)  add_users_to_worklist0(imem);
1643       }
1644     }
1645     if (use_op == Op_Initialize) {
1646       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1647       if (imem != NULL)  add_users_to_worklist0(imem);
1648     }
1649     // Loading the java mirror from a Klass requires two loads and the type
1650     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1651     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1652     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1653     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1654 
1655     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1656       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1657         Node* u = use->fast_out(i2);
1658         const Type* ut = u->bottom_type();
1659         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1660           if (has_load_barrier_nodes) {
1661             // Search for load barriers behind the load
1662             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1663               Node* b = u->fast_out(i3);
1664               if (bs->is_gc_barrier_node(b)) {
1665                 _worklist.push(b);
1666               }
1667             }
1668           }
1669           _worklist.push(u);
1670         }
1671       }
1672     }
1673   }
1674 }
1675 
1676 /**
1677  * Remove the speculative part of all types that we know of
1678  */
1679 void PhaseIterGVN::remove_speculative_types()  {
1680   assert(UseTypeSpeculation, "speculation is off");
1681   for (uint i = 0; i < _types.Size(); i++)  {
1682     const Type* t = _types.fast_lookup(i);
1683     if (t != NULL) {
1684       _types.map(i, t->remove_speculative());
1685     }
1686   }
1687   _table.check_no_speculative_types();
1688 }
1689 
1690 // Check if the type of a divisor of a Div or Mod node includes zero.
1691 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1692   switch (n->Opcode()) {

1704     case Op_ModL: {
1705       // Type of divisor includes 0?
1706       if (n->in(2)->is_top()) {
1707         // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
1708         return false;
1709       }
1710       const TypeLong* type_divisor = type(n->in(2))->is_long();
1711       return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
1712     }
1713   }
1714   return true;
1715 }
1716 
1717 //=============================================================================
1718 #ifndef PRODUCT
1719 uint PhaseCCP::_total_invokes   = 0;
1720 uint PhaseCCP::_total_constants = 0;
1721 #endif
1722 //------------------------------PhaseCCP---------------------------------------
1723 // Conditional Constant Propagation, a la Wegman & Zadeck
1724 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1725   NOT_PRODUCT( clear_constants(); )
1726   assert( _worklist.size() == 0, "" );
1727   // Clear out _nodes from IterGVN.  Must be clear to transform call.
1728   _nodes.clear();               // Clear out from IterGVN
1729   analyze();
1730 }
1731 
1732 #ifndef PRODUCT
1733 //------------------------------~PhaseCCP--------------------------------------
1734 PhaseCCP::~PhaseCCP() {
1735   inc_invokes();
1736   _total_constants += count_constants();
1737 }
1738 #endif
1739 
1740 
1741 #ifdef ASSERT
1742 static bool ccp_type_widens(const Type* t, const Type* t0) {
1743   assert(t->meet(t0) == t, "Not monotonic");
1744   switch (t->base() == t0->base() ? t->base() : Type::Top) {

1758 //------------------------------analyze----------------------------------------
1759 void PhaseCCP::analyze() {
1760   // Initialize all types to TOP, optimistic analysis
1761   for (int i = C->unique() - 1; i >= 0; i--)  {
1762     _types.map(i,Type::TOP);
1763   }
1764 
1765   // Push root onto worklist
1766   Unique_Node_List worklist;
1767   worklist.push(C->root());
1768 
1769   // Pull from worklist; compute new value; push changes out.
1770   // This loop is the meat of CCP.
1771   while( worklist.size() ) {
1772     Node* n; // Node to be examined in this iteration
1773     if (StressCCP) {
1774       n = worklist.remove(C->random() % worklist.size());
1775     } else {
1776       n = worklist.pop();
1777     }
1778     const Type *t = n->Value(this);
1779     if (t != type(n)) {
1780       assert(ccp_type_widens(t, type(n)), "ccp type must widen");
1781 #ifndef PRODUCT
1782       if( TracePhaseCCP ) {
1783         t->dump();
1784         do { tty->print("\t"); } while (tty->position() < 16);
1785         n->dump();
1786       }
1787 #endif
1788       set_type(n, t);
1789       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1790         Node* m = n->fast_out(i);   // Get user
1791         if (m->is_Region()) {  // New path to Region?  Must recheck Phis too
1792           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1793             Node* p = m->fast_out(i2); // Propagate changes to uses
1794             if (p->bottom_type() != type(p)) { // If not already bottomed out
1795               worklist.push(p); // Propagate change to user
1796             }
1797           }

1823         if (m_op == Op_AddI || m_op == Op_SubI) {
1824           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1825             Node* p = m->fast_out(i2); // Propagate changes to uses
1826             if (p->Opcode() == Op_CmpU) {
1827               // Got a CmpU which might need the new type information from node n.
1828               if(p->bottom_type() != type(p)) { // If not already bottomed out
1829                 worklist.push(p); // Propagate change to user
1830               }
1831             }
1832           }
1833         }
1834         // If n is used in a counted loop exit condition then the type
1835         // of the counted loop's Phi depends on the type of n. See
1836         // PhiNode::Value().
1837         if (m_op == Op_CmpI) {
1838           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1839           if (phi != NULL) {
1840             worklist.push(phi);
1841           }
1842         }
1843         // Loading the java mirror from a Klass requires two loads and the type
1844         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1845         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1846         bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1847 
1848         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1849           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1850             Node* u = m->fast_out(i2);
1851             const Type* ut = u->bottom_type();
1852             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1853               if (has_load_barrier_nodes) {
1854                 // Search for load barriers behind the load
1855                 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1856                   Node* b = u->fast_out(i3);
1857                   if (bs->is_gc_barrier_node(b)) {
1858                     worklist.push(b);
1859                   }
1860                 }
1861               }
1862               worklist.push(u);

1869 }
1870 
1871 //------------------------------do_transform-----------------------------------
1872 // Top level driver for the recursive transformer
1873 void PhaseCCP::do_transform() {
1874   // Correct leaves of new-space Nodes; they point to old-space.
1875   C->set_root( transform(C->root())->as_Root() );
1876   assert( C->top(),  "missing TOP node" );
1877   assert( C->root(), "missing root" );
1878 }
1879 
1880 //------------------------------transform--------------------------------------
1881 // Given a Node in old-space, clone him into new-space.
1882 // Convert any of his old-space children into new-space children.
1883 Node *PhaseCCP::transform( Node *n ) {
1884   Node *new_node = _nodes[n->_idx]; // Check for transformed node
1885   if( new_node != NULL )
1886     return new_node;                // Been there, done that, return old answer
1887   new_node = transform_once(n);     // Check for constant
1888   _nodes.map( n->_idx, new_node );  // Flag as having been cloned
1889 
1890   // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
1891   GrowableArray <Node *> trstack(C->live_nodes() >> 1);
1892 
1893   trstack.push(new_node);           // Process children of cloned node
1894   while ( trstack.is_nonempty() ) {
1895     Node *clone = trstack.pop();
1896     uint cnt = clone->req();
1897     for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
1898       Node *input = clone->in(i);
1899       if( input != NULL ) {                    // Ignore NULLs
1900         Node *new_input = _nodes[input->_idx]; // Check for cloned input node
1901         if( new_input == NULL ) {
1902           new_input = transform_once(input);   // Check for constant
1903           _nodes.map( input->_idx, new_input );// Flag as having been cloned
1904           trstack.push(new_input);
1905         }
1906         assert( new_input == clone->in(i), "insanity check");
1907       }
1908     }
1909   }
1910   return new_node;
1911 }
1912 
1913 
1914 //------------------------------transform_once---------------------------------
1915 // For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
1916 Node *PhaseCCP::transform_once( Node *n ) {
1917   const Type *t = type(n);
1918   // Constant?  Use constant Node instead
1919   if( t->singleton() ) {
1920     Node *nn = n;               // Default is to return the original constant
1921     if( t == Type::TOP ) {
1922       // cache my top node on the Compile instance
1923       if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
1924         C->set_cached_top_node(ConNode::make(Type::TOP));
1925         set_type(C->top(), Type::TOP);
1926       }
1927       nn = C->top();
1928     }
1929     if( !n->is_Con() ) {

//================================ New version ================================
 406 //=============================================================================
 407 //------------------------------PhaseRemoveUseless-----------------------------
 408 // 1) Use a breadth-first walk to collect useful nodes reachable from root.
 409 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List* worklist, PhaseNumber phase_num) : Phase(phase_num) {
 410   // Implementation requires an edge from root to each SafePointNode
 411   // at a backward branch. Inserted in add_safepoint().
 412 
 413   // Identify nodes that are reachable from below, useful.
 414   C->identify_useful_nodes(_useful);
 415   // Update dead node list
 416   C->update_dead_node_list(_useful);
 417 
 418   // Remove all useless nodes from PhaseValues' recorded types
 419 // Must be done before disconnecting nodes to preserve the hash-table invariant
 420   gvn->remove_useless_nodes(_useful.member_set());
 421 
 422   // Remove all useless nodes from future worklist
 423   worklist->remove_useless_nodes(_useful.member_set());
 424 
 425   // Disconnect 'useless' nodes that are adjacent to useful nodes
 426   C->disconnect_useless_nodes(_useful, worklist);
 427 }
 428 
 429 //=============================================================================
 430 //------------------------------PhaseRenumberLive------------------------------
 431 // First, remove useless nodes (equivalent to identifying live nodes).
 432 // Then, renumber live nodes.
 433 //
 434 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
 435 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
 436 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
 437 // value in the range [0, x).
 438 //
 439 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
 440 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
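// For example (hypothetical numbers): if 1000 node IDs have been allocated but
// only 200 nodes are live, the live nodes are renumbered into [0, 200) and the
// compiler's unique-node count drops to 200.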
 441 //
 442 // The PhaseRenumberLive phase updates two data structures with the new node IDs.
 443 // (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
 444 // processed. A new worklist (with the updated node IDs) is returned in 'new_worklist'.
 445 // 'worklist' is cleared upon returning.
 446 // (2) Type information (the field PhaseGVN::_types) maps type information to each

1210     loop_count++;
1211   }
1212   NOT_PRODUCT(verify_PhaseIterGVN();)
1213 }
1214 
1215 
1216 /**
1217  * Register a new node with the optimizer.  Update the types array, the def-use
1218  * info.  Put on worklist.
1219  */
1220 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1221   set_type_bottom(n);
1222   _worklist.push(n);
1223   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1224   return n;
1225 }
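// A minimal call-site sketch (hypothetical names): an optimization that builds
// a replacement node hands it to IGVN so it is typed and queued for processing.
//
//   Node* sum = igvn->register_new_node_with_optimizer(new AddINode(a, b));
//   // 'sum' now has a recorded type and sits on the IGVN worklist.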
1226 
1227 //------------------------------transform--------------------------------------
1228 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1229 Node *PhaseIterGVN::transform( Node *n ) {
1230   // If brand new node, make space in type array, and give it a type.
1231   ensure_type_or_null(n);
1232   if (type_or_null(n) == NULL) {
1233     set_type_bottom(n);
1234   }
1235 
1236   if (_delay_transform) {
1237     // Add the node to the worklist but don't optimize for now
1238     _worklist.push(n);
1239     return n;
1240   }
1241 
1242   return transform_old(n);
1243 }
1244 
1245 Node *PhaseIterGVN::transform_old(Node* n) {
1246   NOT_PRODUCT(set_transforms());
1247   // Remove 'n' from hash table in case it gets modified
1248   _table.hash_delete(n);
1249   if (VerifyIterativeGVN) {
1250     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1251   }
1252 
1253   // Apply the Ideal call in a loop until it no longer applies
1254   Node* k = n;
1255   DEBUG_ONLY(dead_loop_check(k);)
1256   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1257   C->remove_modified_node(k);
1258   Node* i = apply_ideal(k, /*can_reshape=*/true);
1259   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1260 #ifndef PRODUCT
1261   verify_step(k);

1484 
1485   // Smash all inputs to 'old', isolating him completely
1486   Node *temp = new Node(1);
1487   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1488   remove_dead_node( old );
1489   temp->del_req(0);         // Yank bogus edge
1490   if (nn != NULL && nn->outcnt() == 0) {
1491     _worklist.push(nn);
1492   }
1493 #ifndef PRODUCT
1494   if( VerifyIterativeGVN ) {
1495     for ( int i = 0; i < _verify_window_size; i++ ) {
1496       if ( _verify_window[i] == old )
1497         _verify_window[i] = nn;
1498     }
1499   }
1500 #endif
1501   temp->destruct(this);     // reuse the _idx of this little guy
1502 }
1503 
1504 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1505   assert(n != NULL, "sanity");
1506   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1507     Node* u = n->fast_out(i);
1508     if (u != n) {
1509       rehash_node_delayed(u);
1510       int nb = u->replace_edge(n, m); // nb = number of input edges rewired from 'n' to 'm'
1511       --i, imax -= nb;                // 'u' left n's out-array nb times; adjust the iterator
1512     }
1513   }
1514   assert(n->outcnt() == 0, "all uses must be deleted");
1515 }
1516 
1517 //------------------------------add_users_to_worklist--------------------------
1518 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1519   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1520     _worklist.push(n->fast_out(i));  // Push on worklist
1521   }
1522 }
1523 
1524 // Return the counted loop Phi if, as part of a counted loop exit condition,
1525 // 'cmp' compares the induction variable with 'n'
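// Sketch of the matched subgraph:
//
//   n (limit) ---\
//                 CmpI --> Bool --> CountedLoopEnd cle
//   trip count --/                    |
//                                     +--> cle->phi()  (the value returned)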
1526 static PhiNode* countedloop_phi_from_cmp(CmpINode* cmp, Node* n) {
1527   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1528     Node* bol = cmp->fast_out(i);
1529     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1530       Node* iff = bol->fast_out(i2);
1531       if (iff->is_CountedLoopEnd()) {
1532         CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
1533         if (cle->limit() == n) {
1534           PhiNode* phi = cle->phi();
1535           if (phi != NULL) {
1536             return phi;

1597         }
1598         Node* in1 = use->in(1);
1599         for (uint i = 0; i < in1->outcnt(); i++) {
1600           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1601             Node* castii = in1->raw_out(i);
1602             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1603               Node* ifnode = castii->in(0)->in(0);
1604               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1605                 // Reprocess a CastII node that may depend on an
1606                 // opaque node value when the opaque node is
1607                 // removed. In case it carries a dependency we can do
1608                 // a better job of computing its type.
1609                 _worklist.push(castii);
1610               }
1611             }
1612           }
1613         }
1614       }
1615     }
1616 
1617     // Inline type nodes can have other inline types as users. If an input gets
1618     // updated, make sure that inline type users get a chance for optimization.
1619     if (use->is_InlineTypeBase()) {
1620       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1621         Node* u = use->fast_out(i2);
1622         if (u->is_InlineTypeBase())
1623           _worklist.push(u);
1624       }
1625     }
1626     // If changed Cast input, check Phi users for simple cycles
1627     if (use->is_ConstraintCast()) {
1628       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1629         Node* u = use->fast_out(i2);
1630         if (u->is_Phi())
1631           _worklist.push(u);
1632       }
1633     }
1634     // If changed LShift inputs, check RShift users for useless sign-ext
1635     if( use_op == Op_LShiftI ) {
1636       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1637         Node* u = use->fast_out(i2);
1638         if (u->Opcode() == Op_RShiftI)
1639           _worklist.push(u);
1640       }
1641     }
1642     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1643     if (use_op == Op_AddI || use_op == Op_SubI) {
1644       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1645         Node* u = use->fast_out(i2);

1651     // If changed AddP inputs, check Stores for loop invariant
1652     if( use_op == Op_AddP ) {
1653       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1654         Node* u = use->fast_out(i2);
1655         if (u->is_Mem())
1656           _worklist.push(u);
1657       }
1658     }
1659     // If changed initialization activity, check dependent Stores
1660     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1661       InitializeNode* init = use->as_Allocate()->initialization();
1662       if (init != NULL) {
1663         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1664         if (imem != NULL)  add_users_to_worklist0(imem);
1665       }
1666     }
1667     if (use_op == Op_Initialize) {
1668       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1669       if (imem != NULL)  add_users_to_worklist0(imem);
1670     }
1671     if (use_op == Op_CastP2X) {
1672       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1673         Node* u = use->fast_out(i2);
1674         if (u->Opcode() == Op_AndX) {
1675           _worklist.push(u);
1676         }
1677       }
1678     }
1679     // Loading the java mirror from a Klass requires two loads and the type
1680     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1681     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1682     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1683     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1684 
1685     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1686       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1687         Node* u = use->fast_out(i2);
1688         const Type* ut = u->bottom_type();
1689         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1690           if (has_load_barrier_nodes) {
1691             // Search for load barriers behind the load
1692             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1693               Node* b = u->fast_out(i3);
1694               if (bs->is_gc_barrier_node(b)) {
1695                 _worklist.push(b);
1696               }
1697             }
1698           }
1699           _worklist.push(u);
1700         }
1701       }
1702     }
1703 
1704     // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1705     if (use->is_Region()) {
1706       Node* c = use;
1707       do {
1708         c = c->unique_ctrl_out();
1709       } while (c != NULL && c->is_Region());
1710       if (c != NULL && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1711         _worklist.push(c);
1712       }
1713     }
1714   }
1715 }
1716 
1717 /**
1718  * Remove the speculative part of all types that we know of
1719  */
1720 void PhaseIterGVN::remove_speculative_types()  {
1721   assert(UseTypeSpeculation, "speculation is off");
1722   for (uint i = 0; i < _types.Size(); i++)  {
1723     const Type* t = _types.fast_lookup(i);
1724     if (t != NULL) {
1725       _types.map(i, t->remove_speculative());
1726     }
1727   }
1728   _table.check_no_speculative_types();
1729 }
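// Illustration: a pointer type refined by profiling to, say, "Object:NotNull
// (speculative: String:exact)" collapses back to plain "Object:NotNull". The
// hash table is then verified to contain no speculative types at all.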
1730 
1731 // Check if the type of a divisor of a Div or Mod node includes zero.
1732 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1733   switch (n->Opcode()) {

1745     case Op_ModL: {
1746       // Type of divisor includes 0?
1747       if (n->in(2)->is_top()) {
1748         // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
1749         return false;
1750       }
1751       const TypeLong* type_divisor = type(n->in(2))->is_long();
1752       return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
1753     }
1754   }
1755   return true;
1756 }
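// Example: a divisor typed as [1, 10] (_lo > 0) or as [-8, -1] (_hi < 0) cannot
// be zero, so the Div/Mod no longer depends on a zero check and true is
// returned; a divisor typed [-5, 5] may still be zero, so false is returned.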
1757 
1758 //=============================================================================
1759 #ifndef PRODUCT
1760 uint PhaseCCP::_total_invokes   = 0;
1761 uint PhaseCCP::_total_constants = 0;
1762 #endif
1763 //------------------------------PhaseCCP---------------------------------------
1764 // Conditional Constant Propagation, a la Wegman & Zadeck
1765 PhaseCCP::PhaseCCP(PhaseIterGVN* igvn) : PhaseIterGVN(igvn), _trstack(C->live_nodes() >> 1) {
1766   NOT_PRODUCT( clear_constants(); )
1767   assert( _worklist.size() == 0, "" );
1768   // Clear out _nodes from IterGVN.  Must be clear to transform call.
1769   _nodes.clear();               // Clear out from IterGVN
1770   analyze();
1771 }
1772 
1773 #ifndef PRODUCT
1774 //------------------------------~PhaseCCP--------------------------------------
1775 PhaseCCP::~PhaseCCP() {
1776   inc_invokes();
1777   _total_constants += count_constants();
1778 }
1779 #endif
1780 
1781 
1782 #ifdef ASSERT
1783 static bool ccp_type_widens(const Type* t, const Type* t0) {
1784   assert(t->meet(t0) == t, "Not monotonic");
1785   switch (t->base() == t0->base() ? t->base() : Type::Top) {

1799 //------------------------------analyze----------------------------------------
1800 void PhaseCCP::analyze() {
1801   // Initialize all types to TOP, optimistic analysis
1802   for (int i = C->unique() - 1; i >= 0; i--)  {
1803     _types.map(i,Type::TOP);
1804   }
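  // The analysis is optimistic: every type starts at TOP and can only widen
  // from there (enforced by the ccp_type_widens() assert below), so each node
  // is re-typed a bounded number of times and the worklist loop terminates.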
1805 
1806   // Push root onto worklist
1807   Unique_Node_List worklist;
1808   worklist.push(C->root());
1809 
1810   // Pull from worklist; compute new value; push changes out.
1811   // This loop is the meat of CCP.
1812   while( worklist.size() ) {
1813     Node* n; // Node to be examined in this iteration
1814     if (StressCCP) {
1815       n = worklist.remove(C->random() % worklist.size());
1816     } else {
1817       n = worklist.pop();
1818     }
1819     if (n->is_SafePoint()) {
1820       // Make sure safepoints are processed by PhaseCCP::transform even if they are
1821       // not reachable from the bottom. Otherwise, infinite loops would be removed.
1822       _trstack.push(n);
1823     }
1824     const Type *t = n->Value(this);
1825     if (t != type(n)) {
1826       assert(ccp_type_widens(t, type(n)), "ccp type must widen");
1827 #ifndef PRODUCT
1828       if( TracePhaseCCP ) {
1829         t->dump();
1830         do { tty->print("\t"); } while (tty->position() < 16);
1831         n->dump();
1832       }
1833 #endif
1834       set_type(n, t);
1835       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1836         Node* m = n->fast_out(i);   // Get user
1837         if (m->is_Region()) {  // New path to Region?  Must recheck Phis too
1838           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1839             Node* p = m->fast_out(i2); // Propagate changes to uses
1840             if (p->bottom_type() != type(p)) { // If not already bottomed out
1841               worklist.push(p); // Propagate change to user
1842             }
1843           }

1869         if (m_op == Op_AddI || m_op == Op_SubI) {
1870           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1871             Node* p = m->fast_out(i2); // Propagate changes to uses
1872             if (p->Opcode() == Op_CmpU) {
1873               // Got a CmpU which might need the new type information from node n.
1874               if(p->bottom_type() != type(p)) { // If not already bottomed out
1875                 worklist.push(p); // Propagate change to user
1876               }
1877             }
1878           }
1879         }
1880         // If n is used in a counted loop exit condition then the type
1881         // of the counted loop's Phi depends on the type of n. See
1882         // PhiNode::Value().
1883         if (m_op == Op_CmpI) {
1884           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1885           if (phi != NULL) {
1886             worklist.push(phi);
1887           }
1888         }
1889         if (m_op == Op_CastP2X) {
1890           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1891             Node* u = m->fast_out(i2);
1892             if (u->Opcode() == Op_AndX) {
1893               worklist.push(u);
1894             }
1895           }
1896         }
1897         // Loading the java mirror from a Klass requires two loads and the type
1898         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1899         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1900         bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1901 
1902         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1903           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1904             Node* u = m->fast_out(i2);
1905             const Type* ut = u->bottom_type();
1906             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1907               if (has_load_barrier_nodes) {
1908                 // Search for load barriers behind the load
1909                 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1910                   Node* b = u->fast_out(i3);
1911                   if (bs->is_gc_barrier_node(b)) {
1912                     worklist.push(b);
1913                   }
1914                 }
1915               }
1916               worklist.push(u);

1923 }
1924 
1925 //------------------------------do_transform-----------------------------------
1926 // Top level driver for the recursive transformer
1927 void PhaseCCP::do_transform() {
1928   // Correct leaves of new-space Nodes; they point to old-space.
1929   C->set_root( transform(C->root())->as_Root() );
1930   assert( C->top(),  "missing TOP node" );
1931   assert( C->root(), "missing root" );
1932 }
1933 
1934 //------------------------------transform--------------------------------------
1935 // Given a Node in old-space, clone him into new-space.
1936 // Convert any of his old-space children into new-space children.
1937 Node *PhaseCCP::transform( Node *n ) {
1938   Node *new_node = _nodes[n->_idx]; // Check for transformed node
1939   if( new_node != NULL )
1940     return new_node;                // Been there, done that, return old answer
1941   new_node = transform_once(n);     // Check for constant
1942   _nodes.map( n->_idx, new_node );  // Flag as having been cloned
1943   _useful.push(new_node); // Keep track of nodes that are reachable from the bottom
1944 
1945   _trstack.push(new_node);           // Process children of cloned node
1946   while (_trstack.is_nonempty()) {
1947     Node* clone = _trstack.pop();

1948     uint cnt = clone->req();
1949     for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
1950       Node *input = clone->in(i);
1951       if( input != NULL ) {                    // Ignore NULLs
1952         Node *new_input = _nodes[input->_idx]; // Check for cloned input node
1953         if( new_input == NULL ) {
1954           new_input = transform_once(input);   // Check for constant
1955           _nodes.map( input->_idx, new_input );// Flag as having been cloned
1956           _useful.push(new_input);
1957           _trstack.push(new_input);
1958         }
1959         assert( new_input == clone->in(i), "insanity check");
1960       }
1961     }
1962   }
1963 
1964   // The above transformation might lead to subgraphs becoming unreachable from the
1965   // bottom while still being reachable from the top. As a result, nodes in that
1966   // subgraph are not transformed and their bottom types are not updated, leading to
1967   // an inconsistency between bottom_type() and type(). In rare cases, LoadNodes in
1968   // such a subgraph, kept alive by InlineTypePtrNodes, might be re-enqueued for IGVN
1969   // indefinitely by MemNode::Ideal_common because their address type is inconsistent.
1970   // Therefore, we aggressively remove all useless nodes here even before
1971   // PhaseIdealLoop::build_loop_late gets a chance to remove them anyway.
1972   if (C->cached_top_node()) {
1973     _useful.push(C->cached_top_node());
1974   }
1975   C->update_dead_node_list(_useful);
1976   remove_useless_nodes(_useful.member_set());
1977   _worklist.remove_useless_nodes(_useful.member_set());
1978   C->disconnect_useless_nodes(_useful, &_worklist);
1979 
1980   return new_node;
1981 }
1982 
1983 
1984 //------------------------------transform_once---------------------------------
1985 // For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
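// A 'singleton' type denotes exactly one value: an integer constant like
// TypeInt::make(42), or Type::TOP. Only for such types is 'n' replaced by a
// constant node here.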
1986 Node *PhaseCCP::transform_once( Node *n ) {
1987   const Type *t = type(n);
1988   // Constant?  Use constant Node instead
1989   if( t->singleton() ) {
1990     Node *nn = n;               // Default is to return the original constant
1991     if( t == Type::TOP ) {
1992       // cache my top node on the Compile instance
1993       if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
1994         C->set_cached_top_node(ConNode::make(Type::TOP));
1995         set_type(C->top(), Type::TOP);
1996       }
1997       nn = C->top();
1998     }
1999     if( !n->is_Con() ) {