src/hotspot/share/opto/phaseX.cpp

 406 //=============================================================================
 407 //------------------------------PhaseRemoveUseless-----------------------------
 408 // 1) Use a breadth-first walk to collect useful nodes reachable from root.
 409 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List* worklist, PhaseNumber phase_num) : Phase(phase_num) {
 410   // Implementation requires an edge from root to each SafePointNode
 411   // at a backward branch. Inserted in add_safepoint().
 412 
 413   // Identify nodes that are reachable from below, i.e. useful.
 414   C->identify_useful_nodes(_useful);
 415   // Update dead node list
 416   C->update_dead_node_list(_useful);
 417 
 418   // Remove all useless nodes from PhaseValues' recorded types
 419   // Must be done before disconnecting nodes to preserve the hash table invariant
 420   gvn->remove_useless_nodes(_useful.member_set());
 421 
 422   // Remove all useless nodes from future worklist
 423   worklist->remove_useless_nodes(_useful.member_set());
 424 
 425   // Disconnect 'useless' nodes that are adjacent to useful nodes
 426   C->remove_useless_nodes(_useful);
 427 }
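
The constructor leans on identify_useful_nodes(), which walks input edges breadth-first starting at the root; every node the walk never reaches is useless. The following is a minimal standalone model of that walk, not HotSpot code -- ToyNode and identify_useful are invented names, and node 0 stands in for the root:

#include <cstddef>
#include <vector>

// Toy IR node: usefulness is discovered by walking input edges,
// opposite to the def-use direction used elsewhere in this file.
struct ToyNode {
    std::vector<std::size_t> inputs;
};

// Breadth-first walk from the root (index 0). The result list doubles
// as the worklist, the same shape a Unique_Node_List gives the real code.
std::vector<std::size_t> identify_useful(const std::vector<ToyNode>& graph) {
    std::vector<bool> on_list(graph.size(), false);
    std::vector<std::size_t> useful;
    useful.push_back(0);
    on_list[0] = true;
    for (std::size_t i = 0; i < useful.size(); i++) {  // list grows while scanned
        for (std::size_t in : graph[useful[i]].inputs) {
            if (!on_list[in]) {
                on_list[in] = true;
                useful.push_back(in);
            }
        }
    }
    return useful;  // everything absent from this list is useless
}
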
 428 
 429 //=============================================================================
 430 //------------------------------PhaseRenumberLive------------------------------
 431 // First, remove useless nodes (equivalent to identifying live nodes).
 432 // Then, renumber live nodes.
 433 //
 434 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
 435 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
 436 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
 437 // value in the range [0, x).
 438 //
 439 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
 440 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
 441 //
 442 // The PhaseRenumberLive phase updates two data structures with the new node IDs.
 443 // (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
 444 // processed. A new worklist (with the updated node IDs) is returned in 'new_worklist'.
 445 // 'worklist' is cleared upon returning.
 446 // (2) Type information (the field PhaseGVN::_types) maps type information to each

1208     loop_count++;
1209   }
1210   NOT_PRODUCT(verify_PhaseIterGVN();)
1211 }
1212 
1213 
1214 /**
1215  * Register a new node with the optimizer.  Update the types array, the def-use
1216  * info.  Put on worklist.
1217  */
1218 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1219   set_type_bottom(n);
1220   _worklist.push(n);
1221   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1222   return n;
1223 }
1224 
1225 //------------------------------transform--------------------------------------
1226 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1227 Node *PhaseIterGVN::transform( Node *n ) {
1228   if (_delay_transform) {
1229     // Register the node but don't optimize for now
1230     register_new_node_with_optimizer(n);
1231     return n;
1232   }
1233 
1234   // If brand new node, make space in type array, and give it a type.
1235   ensure_type_or_null(n);
1236   if (type_or_null(n) == NULL) {
1237     set_type_bottom(n);
1238   }
1239 
1240   return transform_old(n);
1241 }
1242 
1243 Node *PhaseIterGVN::transform_old(Node* n) {
1244   NOT_PRODUCT(set_transforms());
1245   // Remove 'n' from hash table in case it gets modified
1246   _table.hash_delete(n);
1247   if (VerifyIterativeGVN) {
1248     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1249   }
1250 
1251   // Apply the Ideal call in a loop until it no longer applies
1252   Node* k = n;
1253   DEBUG_ONLY(dead_loop_check(k);)
1254   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1255   C->remove_modified_node(k);
1256   Node* i = apply_ideal(k, /*can_reshape=*/true);
1257   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1258 #ifndef PRODUCT
1259   verify_step(k);

1482 
1483   // Smash all inputs to 'old', isolating him completely
1484   Node *temp = new Node(1);
1485   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1486   remove_dead_node( old );
1487   temp->del_req(0);         // Yank bogus edge
1488   if (nn != NULL && nn->outcnt() == 0) {
1489     _worklist.push(nn);
1490   }
1491 #ifndef PRODUCT
1492   if( VerifyIterativeGVN ) {
1493     for ( int i = 0; i < _verify_window_size; i++ ) {
1494       if ( _verify_window[i] == old )
1495         _verify_window[i] = nn;
1496     }
1497   }
1498 #endif
1499   temp->destruct(this);     // reuse the _idx of this little guy
1500 }
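
The temp-node dance deserves a gloss: remove_dead_node() cascades into inputs whose use count drops to zero, so if 'old' held the last use of 'nn', 'nn' would die with it. Lending 'nn' one artificial edge keeps it alive across the cascade, and destructing the throwaway node recycles its _idx. A toy model of just this mechanism, with invented RcNode/remove_dead/subsume names:

#include <vector>

// Use-counted toy node; remove_dead() cascades like remove_dead_node().
struct RcNode {
    std::vector<RcNode*> in;
    int outcnt = 0;
};

void remove_dead(RcNode* n) {
    for (RcNode* d : n->in) {
        if (d != nullptr && --d->outcnt == 0) {
            remove_dead(d);          // input lost its last use: cascade
        }
    }
    n->in.clear();
}

void subsume(RcNode* old_node, RcNode* nn) {
    RcNode temp;
    temp.in.push_back(nn);           // add a use to nn...
    nn->outcnt++;
    remove_dead(old_node);           // ...so this cascade spares it
    nn->outcnt--;                    // yank the bogus edge again
    temp.in.clear();                 // temp dies here, like destruct(this)
    // nn may now sit at zero uses; the real code re-queues it on the
    // worklist instead of killing it outright.
}
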
1501 
1502 //------------------------------add_users_to_worklist--------------------------
1503 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1504   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1505     _worklist.push(n->fast_out(i));  // Push on worklist
1506   }
1507 }
1508 
1509 // Return the counted loop Phi if, as a counted loop exit condition,
1510 // 'cmp' compares the induction variable with 'n'
1511 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1512   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1513     Node* bol = cmp->fast_out(i);
1514     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1515       Node* iff = bol->fast_out(i2);
1516       if (iff->is_BaseCountedLoopEnd()) {
1517         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1518         if (cle->limit() == n) {
1519           PhiNode* phi = cle->phi();
1520           if (phi != NULL) {
1521             return phi;

1582         }
1583         Node* in1 = use->in(1);
1584         for (uint i = 0; i < in1->outcnt(); i++) {
1585           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1586             Node* castii = in1->raw_out(i);
1587             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1588               Node* ifnode = castii->in(0)->in(0);
1589               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1590                 // Reprocess a CastII node that may depend on an
1591                 // opaque node value when the opaque node is
1592                 // removed. In case it carries a dependency we can do
1593                 // a better job of computing its type.
1594                 _worklist.push(castii);
1595               }
1596             }
1597           }
1598         }
1599       }
1600     }
1601 
1602     // If changed Cast input, check Phi users for simple cycles
1603     if (use->is_ConstraintCast()) {
1604       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1605         Node* u = use->fast_out(i2);
1606         if (u->is_Phi())
1607           _worklist.push(u);
1608       }
1609     }
1610     // If changed LShift inputs, check RShift users for useless sign-ext
1611     if( use_op == Op_LShiftI ) {
1612       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1613         Node* u = use->fast_out(i2);
1614         if (u->Opcode() == Op_RShiftI)
1615           _worklist.push(u);
1616       }
1617     }
1618     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1619     if (use_op == Op_AddI || use_op == Op_SubI) {
1620       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1621         Node* u = use->fast_out(i2);

1627     // If changed AddP inputs, check Stores for loop invariant
1628     if( use_op == Op_AddP ) {
1629       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1630         Node* u = use->fast_out(i2);
1631         if (u->is_Mem())
1632           _worklist.push(u);
1633       }
1634     }
1635     // If changed initialization activity, check dependent Stores
1636     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1637       InitializeNode* init = use->as_Allocate()->initialization();
1638       if (init != NULL) {
1639         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1640         if (imem != NULL)  add_users_to_worklist0(imem);
1641       }
1642     }
1643     if (use_op == Op_Initialize) {
1644       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1645       if (imem != NULL)  add_users_to_worklist0(imem);
1646     }
1647     // Loading the java mirror from a Klass requires two loads and the type
1648     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1649     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1650     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1651     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1652 
1653     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1654       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1655         Node* u = use->fast_out(i2);
1656         const Type* ut = u->bottom_type();
1657         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1658           if (has_load_barrier_nodes) {
1659             // Search for load barriers behind the load
1660             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1661               Node* b = u->fast_out(i3);
1662               if (bs->is_gc_barrier_node(b)) {
1663                 _worklist.push(b);
1664               }
1665             }
1666           }
1667           _worklist.push(u);
1668         }
1669       }
1670     }
1671   }
1672 }
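
Every special case in add_users_to_worklist is an instance of one pattern: when 'n' changes, its direct users are always re-run, and for a handful of known shapes (LShift/RShift, AddI-SubI feeding CmpU, AddP feeding stores, the double mirror loads) users-of-users are re-queued too, because their Value() or Ideal() looks through an intermediate node. A compilable caricature of that pattern, with invented WNode and opcode tags standing in for the real ones:

#include <vector>

struct WNode {
    int op = 0;
    std::vector<WNode*> outs;   // def-use edges
};

enum { OpLShiftI = 1, OpRShiftI = 2 };   // hypothetical tags for the sketch

// One second-order dependency, e.g. the LShift/RShift pair above: the
// RShift's value depends on the changed node two edges away.
static bool depends_two_deep(const WNode* use, const WNode* use_of_use) {
    return use->op == OpLShiftI && use_of_use->op == OpRShiftI;
}

void add_users(WNode* n, std::vector<WNode*>& worklist) {
    for (WNode* use : n->outs) {
        worklist.push_back(use);              // first-order users: always
        for (WNode* u : use->outs) {
            if (depends_two_deep(use, u)) {   // second-order: known shapes only
                worklist.push_back(u);
            }
        }
    }
}
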
1673 
1674 /**
1675  * Remove the speculative part of all types that we know of
1676  */
1677 void PhaseIterGVN::remove_speculative_types()  {
1678   assert(UseTypeSpeculation, "speculation is off");
1679   for (uint i = 0; i < _types.Size(); i++)  {
1680     const Type* t = _types.fast_lookup(i);
1681     if (t != NULL) {
1682       _types.map(i, t->remove_speculative());
1683     }
1684   }
1685   _table.check_no_speculative_types();
1686 }
1687 
1688 // Check whether the divisor of a Div or Mod node has a type that excludes zero, i.e. whether no dependent zero check is needed.
1689 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1690   switch (n->Opcode()) {

1702     case Op_ModL: {
1703       // Type of divisor includes 0?
1704       if (type(n->in(2)) == Type::TOP) {
1705         // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
1706         return false;
1707       }
1708       const TypeLong* type_divisor = type(n->in(2))->is_long();
1709       return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
1710     }
1711   }
1712   return true;
1713 }
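
The range test is easier to see in isolation: a divisor whose type is the interval [lo, hi] provably cannot be zero exactly when the interval lies entirely below or entirely above zero. A self-contained sketch (divisor_excludes_zero is an invented name; the real code reads _lo/_hi off TypeInt or TypeLong):

#include <cassert>
#include <cstdint>

static bool divisor_excludes_zero(std::int64_t lo, std::int64_t hi) {
    return hi < 0 || lo > 0;   // same test as type_divisor->_hi / _lo above
}

int main() {
    assert(divisor_excludes_zero(1, 7));    // [1,7]: the zero check can go
    assert(divisor_excludes_zero(-9, -2));  // [-9,-2]: likewise
    assert(!divisor_excludes_zero(-3, 5));  // [-3,5] contains 0: keep the check
    return 0;
}
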
1714 
1715 //=============================================================================
1716 #ifndef PRODUCT
1717 uint PhaseCCP::_total_invokes   = 0;
1718 uint PhaseCCP::_total_constants = 0;
1719 #endif
1720 //------------------------------PhaseCCP---------------------------------------
1721 // Conditional Constant Propagation, a la Wegman & Zadeck
1722 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1723   NOT_PRODUCT( clear_constants(); )
1724   assert( _worklist.size() == 0, "" );
1725   // Clear out _nodes from IterGVN.  Must be clear before the transform call.
1726   _nodes.clear();               // Clear out from IterGVN
1727   analyze();
1728 }
1729 
1730 #ifndef PRODUCT
1731 //------------------------------~PhaseCCP--------------------------------------
1732 PhaseCCP::~PhaseCCP() {
1733   inc_invokes();
1734   _total_constants += count_constants();
1735 }
1736 #endif
1737 
1738 
1739 #ifdef ASSERT
1740 static bool ccp_type_widens(const Type* t, const Type* t0) {
1741   assert(t->meet(t0) == t, "Not monotonic");
1742   switch (t->base() == t0->base() ? t->base() : Type::Top) {

1756 //------------------------------analyze----------------------------------------
1757 void PhaseCCP::analyze() {
1758   // Initialize all types to TOP, optimistic analysis
1759   for (int i = C->unique() - 1; i >= 0; i--)  {
1760     _types.map(i,Type::TOP);
1761   }
1762 
1763   // Push root onto worklist
1764   Unique_Node_List worklist;
1765   worklist.push(C->root());
1766 
1767   // Pull from worklist; compute new value; push changes out.
1768   // This loop is the meat of CCP.
1769   while( worklist.size() ) {
1770     Node* n; // Node to be examined in this iteration
1771     if (StressCCP) {
1772       n = worklist.remove(C->random() % worklist.size());
1773     } else {
1774       n = worklist.pop();
1775     }
1776     const Type *t = n->Value(this);
1777     if (t != type(n)) {
1778       assert(ccp_type_widens(t, type(n)), "ccp type must widen");
1779 #ifndef PRODUCT
1780       if( TracePhaseCCP ) {
1781         t->dump();
1782         do { tty->print("\t"); } while (tty->position() < 16);
1783         n->dump();
1784       }
1785 #endif
1786       set_type(n, t);
1787       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1788         Node* m = n->fast_out(i);   // Get user
1789         if (m->is_Region()) {  // New path to Region?  Must recheck Phis too
1790           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1791             Node* p = m->fast_out(i2); // Propagate changes to uses
1792             if (p->bottom_type() != type(p)) { // If not already bottomed out
1793               worklist.push(p); // Propagate change to user
1794             }
1795           }

1821         if (m_op == Op_AddI || m_op == Op_SubI) {
1822           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1823             Node* p = m->fast_out(i2); // Propagate changes to uses
1824             if (p->Opcode() == Op_CmpU) {
1825               // Got a CmpU which might need the new type information from node n.
1826               if(p->bottom_type() != type(p)) { // If not already bottomed out
1827                 worklist.push(p); // Propagate change to user
1828               }
1829             }
1830           }
1831         }
1832         // If n is used in a counted loop exit condition then the type
1833         // of the counted loop's Phi depends on the type of n. See
1834         // PhiNode::Value().
1835         if (m_op == Op_CmpI || m_op == Op_CmpL) {
1836           PhiNode* phi = countedloop_phi_from_cmp(m->as_Cmp(), n);
1837           if (phi != NULL) {
1838             worklist.push(phi);
1839           }
1840         }
1841         // Loading the java mirror from a Klass requires two loads and the type
1842         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1843         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1844         bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1845 
1846         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1847           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1848             Node* u = m->fast_out(i2);
1849             const Type* ut = u->bottom_type();
1850             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1851               if (has_load_barrier_nodes) {
1852                 // Search for load barriers behind the load
1853                 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1854                   Node* b = u->fast_out(i3);
1855                   if (bs->is_gc_barrier_node(b)) {
1856                     worklist.push(b);
1857                   }
1858                 }
1859               }
1860               worklist.push(u);

1867 }
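
analyze() is the classic optimistic fixpoint: every type starts at TOP ("assume constant until proven otherwise"), each pop recomputes Value(), and any change re-queues the users, with the extra pushes above covering users whose Value() depends on 'n' more than one edge away. Stripped of those special cases, the skeleton looks like the following minimal sketch -- all names are invented, the real lattice is C2's Type hierarchy, and monotonicity is what ccp_type_widens asserts:

#include <vector>

// Three-level toy lattice: TOP (optimistic) -> CONST(v) -> BOTTOM.
struct LatVal {
    enum Kind { TOP, CONST, BOTTOM } kind = TOP;
    long v = 0;
};

// Toy node: a literal constant, or an Add of two other nodes.
struct CcpNode {
    bool is_add = false;
    long literal = 0;
    int a = -1, b = -1;        // input indices when is_add
    std::vector<int> outs;     // def-use edges
};

static LatVal eval(const CcpNode& n, const std::vector<LatVal>& t) {
    if (!n.is_add) return {LatVal::CONST, n.literal};
    LatVal x = t[n.a], y = t[n.b];
    if (x.kind == LatVal::TOP || y.kind == LatVal::TOP) return {LatVal::TOP, 0};
    if (x.kind == LatVal::BOTTOM || y.kind == LatVal::BOTTOM) return {LatVal::BOTTOM, 0};
    return {LatVal::CONST, x.v + y.v};
}

// Start everything at TOP, then run the worklist to a fixpoint. Types
// only widen along TOP -> CONST -> BOTTOM, which bounds the number of
// changes per node and guarantees termination.
void ccp_analyze(const std::vector<CcpNode>& g, std::vector<LatVal>& t) {
    t.assign(g.size(), LatVal{});                      // all TOP
    std::vector<int> worklist;
    for (int i = 0; i < (int)g.size(); i++) worklist.push_back(i);
    while (!worklist.empty()) {
        int n = worklist.back(); worklist.pop_back();
        LatVal nv = eval(g[n], t);
        if (nv.kind != t[n].kind || nv.v != t[n].v) {  // type widened
            t[n] = nv;
            for (int u : g[n].outs) worklist.push_back(u);  // recheck users
        }
    }
}
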
1868 
1869 //------------------------------do_transform-----------------------------------
1870 // Top level driver for the recursive transformer
1871 void PhaseCCP::do_transform() {
1872   // Correct leaves of new-space Nodes; they point to old-space.
1873   C->set_root( transform(C->root())->as_Root() );
1874   assert( C->top(),  "missing TOP node" );
1875   assert( C->root(), "missing root" );
1876 }
1877 
1878 //------------------------------transform--------------------------------------
1879 // Given a Node in old-space, clone him into new-space.
1880 // Convert any of his old-space children into new-space children.
1881 Node *PhaseCCP::transform( Node *n ) {
1882   Node *new_node = _nodes[n->_idx]; // Check for transformed node
1883   if( new_node != NULL )
1884     return new_node;                // Been there, done that, return old answer
1885   new_node = transform_once(n);     // Check for constant
1886   _nodes.map( n->_idx, new_node );  // Flag as having been cloned
1887 
1888   // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
1889   GrowableArray <Node *> trstack(C->live_nodes() >> 1);
1890 
1891   trstack.push(new_node);           // Process children of cloned node
1892   while ( trstack.is_nonempty() ) {
1893     Node *clone = trstack.pop();
1894     uint cnt = clone->req();
1895     for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
1896       Node *input = clone->in(i);
1897       if( input != NULL ) {                    // Ignore NULLs
1898         Node *new_input = _nodes[input->_idx]; // Check for cloned input node
1899         if( new_input == NULL ) {
1900           new_input = transform_once(input);   // Check for constant
1901           _nodes.map( input->_idx, new_input );// Flag as having been cloned
1902           trstack.push(new_input);
1903         }
1904         assert( new_input == clone->in(i), "insanity check");
1905       }
1906     }
1907   }
1908   return new_node;
1909 }
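
transform() is a memoized graph copy driven by an explicit stack, so arbitrarily deep graphs cannot blow the C stack the way a recursive clone would. The control flow in isolation, as a sketch with invented names (the real code calls transform_once() where this just records the node):

#include <cstddef>
#include <vector>

struct GNode {
    std::vector<int> in;   // input indices; -1 for absent slots
};

// Visit everything reachable from 'root' via input edges exactly once.
// 'memo' plays the role of the _nodes mapping: a non-negative entry
// means "been there, done that".
std::vector<int> copy_reachable(const std::vector<GNode>& g, int root) {
    std::vector<int> memo(g.size(), -1);
    std::vector<int> order;    // discovery order, standing in for the clones
    std::vector<int> stack;
    memo[root] = root;         // transform_once() would run here
    order.push_back(root);
    stack.push_back(root);
    while (!stack.empty()) {
        int n = stack.back(); stack.pop_back();
        for (int in : g[n].in) {
            if (in >= 0 && memo[in] == -1) {
                memo[in] = in;           // flag as having been processed
                order.push_back(in);
                stack.push_back(in);     // its inputs get handled later
            }
        }
    }
    return order;
}
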
1910 
1911 
1912 //------------------------------transform_once---------------------------------
1913 // For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
1914 Node *PhaseCCP::transform_once( Node *n ) {
1915   const Type *t = type(n);
1916   // Constant?  Use constant Node instead
1917   if( t->singleton() ) {
1918     Node *nn = n;               // Default is to return the original constant
1919     if( t == Type::TOP ) {
1920       // cache my top node on the Compile instance
1921       if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
1922         C->set_cached_top_node(ConNode::make(Type::TOP));
1923         set_type(C->top(), Type::TOP);
1924       }
1925       nn = C->top();
1926     }
1927     if( !n->is_Con() ) {

 406 //=============================================================================
 407 //------------------------------PhaseRemoveUseless-----------------------------
 408 // 1) Use a breadth-first walk to collect useful nodes reachable from root.
 409 PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN* gvn, Unique_Node_List* worklist, PhaseNumber phase_num) : Phase(phase_num) {
 410   // Implementation requires an edge from root to each SafePointNode
 411   // at a backward branch. Inserted in add_safepoint().
 412 
 413   // Identify nodes that are reachable from below, i.e. useful.
 414   C->identify_useful_nodes(_useful);
 415   // Update dead node list
 416   C->update_dead_node_list(_useful);
 417 
 418   // Remove all useless nodes from PhaseValues' recorded types
 419   // Must be done before disconnecting nodes to preserve the hash table invariant
 420   gvn->remove_useless_nodes(_useful.member_set());
 421 
 422   // Remove all useless nodes from future worklist
 423   worklist->remove_useless_nodes(_useful.member_set());
 424 
 425   // Disconnect 'useless' nodes that are adjacent to useful nodes
 426   C->disconnect_useless_nodes(_useful, worklist);
 427 }
 428 
 429 //=============================================================================
 430 //------------------------------PhaseRenumberLive------------------------------
 431 // First, remove useless nodes (equivalent to identifying live nodes).
 432 // Then, renumber live nodes.
 433 //
 434 // The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
 435 // If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
 436 // PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
 437 // value in the range [0, x).
 438 //
 439 // At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
 440 // updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
 441 //
 442 // The PhaseRenumberLive phase updates two data structures with the new node IDs.
 443 // (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
 444 // processed. A new worklist (with the updated node IDs) is returned in 'new_worklist'.
 445 // 'worklist' is cleared upon returning.
 446 // (2) Type information (the field PhaseGVN::_types) maps type information to each

1208     loop_count++;
1209   }
1210   NOT_PRODUCT(verify_PhaseIterGVN();)
1211 }
1212 
1213 
1214 /**
1215  * Register a new node with the optimizer.  Update the types array, the def-use
1216  * info.  Put on worklist.
1217  */
1218 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1219   set_type_bottom(n);
1220   _worklist.push(n);
1221   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1222   return n;
1223 }
1224 
1225 //------------------------------transform--------------------------------------
1226 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1227 Node *PhaseIterGVN::transform( Node *n ) {
1228   // If brand new node, make space in type array, and give it a type.
1229   ensure_type_or_null(n);
1230   if (type_or_null(n) == NULL) {
1231     set_type_bottom(n);
1232   }
1233 
1234   if (_delay_transform) {
1235     // Add the node to the worklist but don't optimize for now
1236     _worklist.push(n);
1237     return n;
1238   }
1239 
1240   return transform_old(n);
1241 }
1242 
1243 Node *PhaseIterGVN::transform_old(Node* n) {
1244   NOT_PRODUCT(set_transforms());
1245   // Remove 'n' from hash table in case it gets modified
1246   _table.hash_delete(n);
1247   if (VerifyIterativeGVN) {
1248     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1249   }
1250 
1251   // Apply the Ideal call in a loop until it no longer applies
1252   Node* k = n;
1253   DEBUG_ONLY(dead_loop_check(k);)
1254   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1255   C->remove_modified_node(k);
1256   Node* i = apply_ideal(k, /*can_reshape=*/true);
1257   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1258 #ifndef PRODUCT
1259   verify_step(k);

1482 
1483   // Smash all inputs to 'old', isolating him completely
1484   Node *temp = new Node(1);
1485   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1486   remove_dead_node( old );
1487   temp->del_req(0);         // Yank bogus edge
1488   if (nn != NULL && nn->outcnt() == 0) {
1489     _worklist.push(nn);
1490   }
1491 #ifndef PRODUCT
1492   if( VerifyIterativeGVN ) {
1493     for ( int i = 0; i < _verify_window_size; i++ ) {
1494       if ( _verify_window[i] == old )
1495         _verify_window[i] = nn;
1496     }
1497   }
1498 #endif
1499   temp->destruct(this);     // reuse the _idx of this little guy
1500 }
1501 
1502 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1503   assert(n != NULL, "sanity");
1504   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1505     Node* u = n->fast_out(i);
1506     if (u != n) {
1507       rehash_node_delayed(u);
1508       int nb = u->replace_edge(n, m);
1509       --i, imax -= nb;
1510     }
1511   }
1512   assert(n->outcnt() == 0, "all uses must be deleted");
1513 }
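
The '--i, imax -= nb' adjustment is the subtle part of replace_in_uses: replace_edge() removes one entry from n's out-list per rewired input slot, so the fast iterator has to back up rather than march past entries that just disappeared. A toy version of the bookkeeping with plain vectors -- UNode and these helpers are invented, and updating m's out-list is elided:

#include <algorithm>
#include <cstddef>
#include <vector>

struct UNode {
    std::vector<UNode*> in;   // input slots
};

// Rewire every input slot of 'u' that points at 'n' to point at 'm';
// return how many slots changed (the 'nb' above).
static int replace_edge(UNode* u, UNode* n, UNode* m) {
    int nb = 0;
    for (UNode*& slot : u->in) {
        if (slot == n) { slot = m; nb++; }
    }
    return nb;
}

// n's out-list holds one entry per use edge; rewiring nb edges of user
// 'u' removes nb of those entries, so only advance when nothing moved.
void replace_in_uses_toy(UNode* n, UNode* m, std::vector<UNode*>& outs_of_n) {
    std::size_t i = 0;
    while (i < outs_of_n.size()) {
        UNode* u = outs_of_n[i];
        if (u == n) { i++; continue; }   // skip self-uses, like the real code
        int nb = replace_edge(u, n, m);
        for (int k = 0; k < nb; k++) {   // these entries now belong on m
            outs_of_n.erase(std::find(outs_of_n.begin(), outs_of_n.end(), u));
        }
        if (nb == 0) i++;
    }
}
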
1514 
1515 //------------------------------add_users_to_worklist--------------------------
1516 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1517   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1518     _worklist.push(n->fast_out(i));  // Push on worklist
1519   }
1520 }
1521 
1522 // Return the counted loop Phi if, as a counted loop exit condition,
1523 // 'cmp' compares the induction variable with 'n'
1524 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1525   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1526     Node* bol = cmp->fast_out(i);
1527     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1528       Node* iff = bol->fast_out(i2);
1529       if (iff->is_BaseCountedLoopEnd()) {
1530         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1531         if (cle->limit() == n) {
1532           PhiNode* phi = cle->phi();
1533           if (phi != NULL) {
1534             return phi;

1595         }
1596         Node* in1 = use->in(1);
1597         for (uint i = 0; i < in1->outcnt(); i++) {
1598           if (in1->raw_out(i)->Opcode() == Op_CastII) {
1599             Node* castii = in1->raw_out(i);
1600             if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1601               Node* ifnode = castii->in(0)->in(0);
1602               if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
1603                 // Reprocess a CastII node that may depend on an
1604                 // opaque node value when the opaque node is
1605                 // removed. In case it carries a dependency we can do
1606                 // a better job of computing its type.
1607                 _worklist.push(castii);
1608               }
1609             }
1610           }
1611         }
1612       }
1613     }
1614 
1615     // Inline type nodes can have other inline types as users. If an input gets
1616     // updated, make sure that inline type users get a chance for optimization.
1617     if (use->is_InlineTypeBase()) {
1618       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1619         Node* u = use->fast_out(i2);
1620         if (u->is_InlineTypeBase())
1621           _worklist.push(u);
1622       }
1623     }
1624     // If changed Cast input, check Phi users for simple cycles
1625     if (use->is_ConstraintCast()) {
1626       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1627         Node* u = use->fast_out(i2);
1628         if (u->is_Phi())
1629           _worklist.push(u);
1630       }
1631     }
1632     // If changed LShift inputs, check RShift users for useless sign-ext
1633     if( use_op == Op_LShiftI ) {
1634       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1635         Node* u = use->fast_out(i2);
1636         if (u->Opcode() == Op_RShiftI)
1637           _worklist.push(u);
1638       }
1639     }
1640     // If changed AddI/SubI inputs, check CmpU for range check optimization.
1641     if (use_op == Op_AddI || use_op == Op_SubI) {
1642       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1643         Node* u = use->fast_out(i2);

1649     // If changed AddP inputs, check Stores for loop invariant
1650     if( use_op == Op_AddP ) {
1651       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1652         Node* u = use->fast_out(i2);
1653         if (u->is_Mem())
1654           _worklist.push(u);
1655       }
1656     }
1657     // If changed initialization activity, check dependent Stores
1658     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1659       InitializeNode* init = use->as_Allocate()->initialization();
1660       if (init != NULL) {
1661         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1662         if (imem != NULL)  add_users_to_worklist0(imem);
1663       }
1664     }
1665     if (use_op == Op_Initialize) {
1666       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1667       if (imem != NULL)  add_users_to_worklist0(imem);
1668     }
1669     if (use_op == Op_CastP2X) {
1670       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1671         Node* u = use->fast_out(i2);
1672         if (u->Opcode() == Op_AndX) {
1673           _worklist.push(u);
1674         }
1675       }
1676     }
1677     // Loading the java mirror from a Klass requires two loads and the type
1678     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1679     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1680     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1681     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1682 
1683     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1684       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1685         Node* u = use->fast_out(i2);
1686         const Type* ut = u->bottom_type();
1687         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1688           if (has_load_barrier_nodes) {
1689             // Search for load barriers behind the load
1690             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1691               Node* b = u->fast_out(i3);
1692               if (bs->is_gc_barrier_node(b)) {
1693                 _worklist.push(b);
1694               }
1695             }
1696           }
1697           _worklist.push(u);
1698         }
1699       }
1700     }
1701 
1702     // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1703     if (use->is_Region()) {
1704       Node* c = use;
1705       do {
1706         c = c->unique_ctrl_out_or_null();
1707       } while (c != NULL && c->is_Region());
1708       if (c != NULL && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1709         _worklist.push(c);
1710       }
1711     }
1712   }
1713 }
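
The new Region case at the end walks the unique control successor chain to find an uncommon-trap call that might now be able to run CallStaticJavaNode::remove_useless_allocation. The shape of that walk, modeled standalone (CNode and Kind are invented; the real test is is_CallStaticJava() plus a non-zero uncommon_trap_request()):

#include <vector>

enum class Kind { Region, UncommonTrapCall, Other };

struct CNode {
    Kind kind = Kind::Other;
    std::vector<CNode*> ctrl_outs;   // control users
};

// Follow the unique control successor through any chain of Regions,
// like the do/while over unique_ctrl_out_or_null() above.
CNode* uncommon_trap_behind(CNode* region) {
    CNode* c = region;
    do {
        c = (c->ctrl_outs.size() == 1) ? c->ctrl_outs[0] : nullptr;
    } while (c != nullptr && c->kind == Kind::Region);
    return (c != nullptr && c->kind == Kind::UncommonTrapCall) ? c : nullptr;
}
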
1714 
1715 /**
1716  * Remove the speculative part of all types that we know of
1717  */
1718 void PhaseIterGVN::remove_speculative_types()  {
1719   assert(UseTypeSpeculation, "speculation is off");
1720   for (uint i = 0; i < _types.Size(); i++)  {
1721     const Type* t = _types.fast_lookup(i);
1722     if (t != NULL) {
1723       _types.map(i, t->remove_speculative());
1724     }
1725   }
1726   _table.check_no_speculative_types();
1727 }
1728 
1729 // Check whether the divisor of a Div or Mod node has a type that excludes zero, i.e. whether no dependent zero check is needed.
1730 bool PhaseIterGVN::no_dependent_zero_check(Node* n) const {
1731   switch (n->Opcode()) {

1743     case Op_ModL: {
1744       // Type of divisor includes 0?
1745       if (type(n->in(2)) == Type::TOP) {
1746         // 'n' is dead. Treat as if zero check is still there to avoid any further optimizations.
1747         return false;
1748       }
1749       const TypeLong* type_divisor = type(n->in(2))->is_long();
1750       return (type_divisor->_hi < 0 || type_divisor->_lo > 0);
1751     }
1752   }
1753   return true;
1754 }
1755 
1756 //=============================================================================
1757 #ifndef PRODUCT
1758 uint PhaseCCP::_total_invokes   = 0;
1759 uint PhaseCCP::_total_constants = 0;
1760 #endif
1761 //------------------------------PhaseCCP---------------------------------------
1762 // Conditional Constant Propagation, ala Wegman & Zadeck
1763 PhaseCCP::PhaseCCP(PhaseIterGVN* igvn) : PhaseIterGVN(igvn), _trstack(C->live_nodes() >> 1) {
1764   NOT_PRODUCT( clear_constants(); )
1765   assert( _worklist.size() == 0, "" );
1766   // Clear out _nodes from IterGVN.  Must be clear before the transform call.
1767   _nodes.clear();               // Clear out from IterGVN
1768   analyze();
1769 }
1770 
1771 #ifndef PRODUCT
1772 //------------------------------~PhaseCCP--------------------------------------
1773 PhaseCCP::~PhaseCCP() {
1774   inc_invokes();
1775   _total_constants += count_constants();
1776 }
1777 #endif
1778 
1779 
1780 #ifdef ASSERT
1781 static bool ccp_type_widens(const Type* t, const Type* t0) {
1782   assert(t->meet(t0) == t, "Not monotonic");
1783   switch (t->base() == t0->base() ? t->base() : Type::Top) {

1797 //------------------------------analyze----------------------------------------
1798 void PhaseCCP::analyze() {
1799   // Initialize all types to TOP, optimistic analysis
1800   for (int i = C->unique() - 1; i >= 0; i--)  {
1801     _types.map(i,Type::TOP);
1802   }
1803 
1804   // Push root onto worklist
1805   Unique_Node_List worklist;
1806   worklist.push(C->root());
1807 
1808   // Pull from worklist; compute new value; push changes out.
1809   // This loop is the meat of CCP.
1810   while( worklist.size() ) {
1811     Node* n; // Node to be examined in this iteration
1812     if (StressCCP) {
1813       n = worklist.remove(C->random() % worklist.size());
1814     } else {
1815       n = worklist.pop();
1816     }
1817     if (n->is_SafePoint()) {
1818       // Make sure safepoints are processed by PhaseCCP::transform even if they are
1819       // not reachable from the bottom. Otherwise, infinite loops would be removed.
1820       _trstack.push(n);
1821     }
1822     const Type *t = n->Value(this);
1823     if (t != type(n)) {
1824       assert(ccp_type_widens(t, type(n)), "ccp type must widen");
1825 #ifndef PRODUCT
1826       if( TracePhaseCCP ) {
1827         t->dump();
1828         do { tty->print("\t"); } while (tty->position() < 16);
1829         n->dump();
1830       }
1831 #endif
1832       set_type(n, t);
1833       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1834         Node* m = n->fast_out(i);   // Get user
1835         if (m->is_Region()) {  // New path to Region?  Must recheck Phis too
1836           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1837             Node* p = m->fast_out(i2); // Propagate changes to uses
1838             if (p->bottom_type() != type(p)) { // If not already bottomed out
1839               worklist.push(p); // Propagate change to user
1840             }
1841           }

1867         if (m_op == Op_AddI || m_op == Op_SubI) {
1868           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1869             Node* p = m->fast_out(i2); // Propagate changes to uses
1870             if (p->Opcode() == Op_CmpU) {
1871               // Got a CmpU which might need the new type information from node n.
1872               if(p->bottom_type() != type(p)) { // If not already bottomed out
1873                 worklist.push(p); // Propagate change to user
1874               }
1875             }
1876           }
1877         }
1878         // If n is used in a counted loop exit condition then the type
1879         // of the counted loop's Phi depends on the type of n. See
1880         // PhiNode::Value().
1881         if (m_op == Op_CmpI || m_op == Op_CmpL) {
1882           PhiNode* phi = countedloop_phi_from_cmp(m->as_Cmp(), n);
1883           if (phi != NULL) {
1884             worklist.push(phi);
1885           }
1886         }
1887         if (m_op == Op_CastP2X) {
1888           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1889             Node* u = m->fast_out(i2);
1890             if (u->Opcode() == Op_AndX) {
1891               worklist.push(u);
1892             }
1893           }
1894         }
1895         // Loading the java mirror from a Klass requires two loads and the type
1896         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1897         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1898         bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1899 
1900         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1901           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1902             Node* u = m->fast_out(i2);
1903             const Type* ut = u->bottom_type();
1904             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1905               if (has_load_barrier_nodes) {
1906                 // Search for load barriers behind the load
1907                 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1908                   Node* b = u->fast_out(i3);
1909                   if (bs->is_gc_barrier_node(b)) {
1910                     worklist.push(b);
1911                   }
1912                 }
1913               }
1914               worklist.push(u);

1921 }
1922 
1923 //------------------------------do_transform-----------------------------------
1924 // Top level driver for the recursive transformer
1925 void PhaseCCP::do_transform() {
1926   // Correct leaves of new-space Nodes; they point to old-space.
1927   C->set_root( transform(C->root())->as_Root() );
1928   assert( C->top(),  "missing TOP node" );
1929   assert( C->root(), "missing root" );
1930 }
1931 
1932 //------------------------------transform--------------------------------------
1933 // Given a Node in old-space, clone him into new-space.
1934 // Convert any of his old-space children into new-space children.
1935 Node *PhaseCCP::transform( Node *n ) {
1936   Node *new_node = _nodes[n->_idx]; // Check for transformed node
1937   if( new_node != NULL )
1938     return new_node;                // Been there, done that, return old answer
1939   new_node = transform_once(n);     // Check for constant
1940   _nodes.map( n->_idx, new_node );  // Flag as having been cloned
1941   _useful.push(new_node); // Keep track of nodes that are reachable from the bottom
1942 
1943   _trstack.push(new_node);           // Process children of cloned node
1944   while (_trstack.is_nonempty()) {
1945     Node* clone = _trstack.pop();
1946     uint cnt = clone->req();
1947     for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
1948       Node *input = clone->in(i);
1949       if( input != NULL ) {                    // Ignore NULLs
1950         Node *new_input = _nodes[input->_idx]; // Check for cloned input node
1951         if( new_input == NULL ) {
1952           new_input = transform_once(input);   // Check for constant
1953           _nodes.map( input->_idx, new_input );// Flag as having been cloned
1954           _useful.push(new_input);
1955           _trstack.push(new_input);
1956         }
1957         assert( new_input == clone->in(i), "insanity check");
1958       }
1959     }
1960   }
1961 
1962   // The above transformation might lead to subgraphs becoming unreachable from the
1963   // bottom while still being reachable from the top. As a result, nodes in that
1964   // subgraph are not transformed and their bottom types are not updated, leading to
1965   // an inconsistency between bottom_type() and type(). In rare cases, LoadNodes in
1966   // such a subgraph, kept alive by InlineTypePtrNodes, might be re-enqueued for IGVN
1967   // indefinitely by MemNode::Ideal_common because their address type is inconsistent.
1968   // Therefore, we aggressively remove all useless nodes here even before
1969   // PhaseIdealLoop::build_loop_late gets a chance to remove them anyway.
1970   if (C->cached_top_node()) {
1971     _useful.push(C->cached_top_node());
1972   }
1973   C->update_dead_node_list(_useful);
1974   remove_useless_nodes(_useful.member_set());
1975   _worklist.remove_useless_nodes(_useful.member_set());
1976   C->disconnect_useless_nodes(_useful, &_worklist);
1977 
1978   return new_node;
1979 }
1980 
1981 
1982 //------------------------------transform_once---------------------------------
1983 // For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
1984 Node *PhaseCCP::transform_once( Node *n ) {
1985   const Type *t = type(n);
1986   // Constant?  Use constant Node instead
1987   if( t->singleton() ) {
1988     Node *nn = n;               // Default is to return the original constant
1989     if( t == Type::TOP ) {
1990       // cache my top node on the Compile instance
1991       if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
1992         C->set_cached_top_node(ConNode::make(Type::TOP));
1993         set_type(C->top(), Type::TOP);
1994       }
1995       nn = C->top();
1996     }
1997     if( !n->is_Con() ) {