
src/share/vm/opto/phaseX.cpp (old version)

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "opto/block.hpp"
  28 #include "opto/callnode.hpp"
  29 #include "opto/cfgnode.hpp"
  30 #include "opto/connode.hpp"
  31 #include "opto/idealGraphPrinter.hpp"
  32 #include "opto/loopnode.hpp"
  33 #include "opto/machnode.hpp"
  34 #include "opto/opcodes.hpp"
  35 #include "opto/phaseX.hpp"
  36 #include "opto/regalloc.hpp"
  37 #include "opto/rootnode.hpp"
  38 
  39 //=============================================================================
  40 #define NODE_HASH_MINIMUM_SIZE    255
  41 //------------------------------NodeHash---------------------------------------
  42 NodeHash::NodeHash(uint est_max_size) :
  43   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  44   _a(Thread::current()->resource_area()),
  45   _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
  46   _inserts(0), _insert_limit( insert_limit() ),
  47   _look_probes(0), _lookup_hits(0), _lookup_misses(0),
  48   _total_insert_probes(0), _total_inserts(0),
  49   _insert_probes(0), _grows(0) {
  50   // _sentinel must be in the current node space
  51   _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  52   memset(_table,0,sizeof(Node*)*_max);
  53 }
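
// Note on sizing: _max is rounded to a power of two so probing can mask with
// (_max - 1) instead of dividing. A minimal sketch of the usual rounding
// idiom, assuming round_up() behaves this way (hypothetical stand-in, not
// necessarily the exact HotSpot body):
//
//   static uint round_up_pow2(uint x) {
//     x--;                         // so exact powers of two map to themselves
//     x |= x >> 1;  x |= x >> 2;   // smear the highest set bit rightward
//     x |= x >> 4;  x |= x >> 8;
//     x |= x >> 16;
//     return x + 1;                // next power of two >= the original x
//   }
//
// _insert_limit caps the load factor (assumed to be about 3/4 of _max) so
// open-addressed probe chains stay short before the table grows.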
  54 
  55 //------------------------------NodeHash---------------------------------------
  56 NodeHash::NodeHash(Arena *arena, uint est_max_size) :
  57   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),


1276             int nrep = dead->replace_edge(in, NULL);  // Kill edges
1277             assert((nrep > 0), "sanity");
1278             if (in->outcnt() == 0) { // Made input go dead?
1279               _stack.push(in, PROCESS_INPUTS); // Recursively remove
1280               recurse = true;
1281             } else if (in->outcnt() == 1 &&
1282                        in->has_special_unique_user()) {
1283               _worklist.push(in->unique_out());
1284             } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1285               if (in->Opcode() == Op_Region) {
1286                 _worklist.push(in);
1287               } else if (in->is_Store()) {
1288                 DUIterator_Fast imax, i = in->fast_outs(imax);
1289                 _worklist.push(in->fast_out(i));
1290                 i++;
1291                 if (in->outcnt() == 2) {
1292                   _worklist.push(in->fast_out(i));
1293                   i++;
1294                 }
1295                 assert(!(i < imax), "sanity");
1296               }
1297             }
1298             if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1299                 in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
1300               // A Load that directly follows an InitializeNode is
1301               // going away. The Stores that follow are candidates
1302               // again to be captured by the InitializeNode.
1303               for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1304                 Node *n = in->fast_out(j);
1305                 if (n->is_Store()) {
1306                   _worklist.push(n);
1307                 }
1308               }
1309             }
1310           } // if (in != NULL && in != C->top())
1311         } // for (uint i = 0; i < dead->req(); i++)
1312         if (recurse) {
1313           continue;
1314         }
1315       } // if (!dead->is_Con())
1316     } // if (progress_state == PROCESS_INPUTS)
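
// For orientation: remove_globally_dead_node() avoids native recursion by
// driving the deletion from an explicit (node, phase) stack. A schematic of
// the loop around the hunks shown here (details elided; the PROCESS_OUTPUTS
// name for the second phase is assumed from the PROCESS_INPUTS pushes above):
//
//   while (_stack.is_nonempty()) {
//     Node* dead = _stack.node();
//     if (_stack.index() == PROCESS_INPUTS) {
//       // cut input edges; pushing a newly-dead input and 'continue'-ing
//       // processes it before 'dead' advances to its output phase
//     } else {
//       // PROCESS_OUTPUTS: walk remaining uses, then _stack.pop()
//     }
//   }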


1324       _stack.push(dead->raw_out(0), PROCESS_INPUTS);
1325     } else {
1326       // Finished disconnecting all input and output edges.
1327       _stack.pop();
1328       // Remove dead node from iterative worklist
1329       _worklist.remove(dead);
1330       // A constant node that has no out-edges and only one in-edge from
1331       // root is usually dead. However, the reshaping walk sometimes makes
1332       // it reachable again by adding use edges. So we do NOT count Con
1333       // nodes as dead, to be conservative about the dead node count at
1334       // any given time.
1335       if (!dead->is_Con()) {
1336         C->record_dead_node(dead->_idx);
1337       }
1338       if (dead->is_macro()) {
1339         C->remove_macro_node(dead);
1340       }
1341       if (dead->is_expensive()) {
1342         C->remove_expensive_node(dead);
1343       }
1344       CastIINode* cast = dead->isa_CastII();
1345       if (cast != NULL && cast->has_range_check()) {
1346         C->remove_range_check_cast(cast);
1347       }
1348     }
1349   } // while (_stack.is_nonempty())
1350 }
1351 
1352 //------------------------------subsume_node-----------------------------------
1353 // Remove users from node 'old' and add them to node 'nn'.
1354 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1355   assert( old != hash_find(old), "should already have been removed" );
1356   assert( old != C->top(), "cannot subsume top node");
1357   // Copy debug or profile information to the new version:
1358   C->copy_node_notes_to(nn, old);
1359   // Move users of node 'old' to node 'nn'
1360   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1361     Node* use = old->last_out(i);  // for each use...
1362     // use might need re-hashing (but it won't if it's a new node)
1363     bool is_in_table = _table.hash_delete( use );
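
// Nodes are value-numbered by opcode and inputs, so a user must be taken out
// of the hash table before its input edge is rewired from 'old' to 'nn', and
// re-inserted afterwards; is_in_table records whether that re-insertion is
// needed (reading inferred from the surrounding code).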


1537     // If changed AddP inputs, check Stores for loop invariant
1538     if( use_op == Op_AddP ) {
1539       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1540         Node* u = use->fast_out(i2);
1541         if (u->is_Mem())
1542           _worklist.push(u);
1543       }
1544     }
1545     // If changed initialization activity, check dependent Stores
1546     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1547       InitializeNode* init = use->as_Allocate()->initialization();
1548       if (init != NULL) {
1549         Node* imem = init->proj_out(TypeFunc::Memory);
1550         if (imem != NULL)  add_users_to_worklist0(imem);
1551       }
1552     }
1553     if (use_op == Op_Initialize) {
1554       Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1555       if (imem != NULL)  add_users_to_worklist0(imem);
1556     }
1557   }
1558 }
1559 
1560 /**
1561  * Remove the speculative part of all types that we know of
1562  */
1563 void PhaseIterGVN::remove_speculative_types()  {
1564   assert(UseTypeSpeculation, "speculation is off");
1565   for (uint i = 0; i < _types.Size(); i++)  {
1566     const Type* t = _types.fast_lookup(i);
1567     if (t != NULL) {
1568       _types.map(i, t->remove_speculative());
1569     }
1570   }
1571   _table.check_no_speculative_types();
1572 }
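
// Context (inferred): the speculative part of a type records a profile-based
// guess rather than a proven fact. Stripping it from every entry in the type
// table, then asserting the value-numbering table holds no speculative types,
// keeps later phases from distinguishing nodes by profile data that is no
// longer meant to influence optimization.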
1573 
1574 //=============================================================================
1575 #ifndef PRODUCT
1576 uint PhaseCCP::_total_invokes   = 0;


1661         if (m->bottom_type() != type(m)) { // If not already bottomed out
1662           worklist.push(m);     // Propagate change to user
1663         }
1664 
1665         // CmpU nodes can get their type information from two nodes up in the
1666         // graph (instead of from the nodes immediately above). Make sure they
1667         // are added to the worklist if nodes they depend on are updated, since
1668         // they could be missed and get wrong types otherwise.
1669         uint m_op = m->Opcode();
1670         if (m_op == Op_AddI || m_op == Op_SubI) {
1671           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1672             Node* p = m->fast_out(i2); // Propagate changes to uses
1673             if (p->Opcode() == Op_CmpU) {
1674               // Got a CmpU which might need the new type information from node n.
1675               if(p->bottom_type() != type(p)) { // If not already bottomed out
1676                 worklist.push(p); // Propagate change to user
1677               }
1678             }
1679           }
1680         }
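
// Example of the shape involved (assumed typical source): a bounds check for
// a[i+1] lowers roughly to
//
//   CmpU(AddI(i, #1), length)
//
// CmpUNode::Value() can look through its AddI/SubI input at that input's own
// operands, so when the type of n changes, pushing only n's direct users
// would miss the CmpU one level further down; hence the explicit scan above.
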
1681         // If n is used in a counted loop exit condition then the type
1682         // of the counted loop's Phi depends on the type of n. See
1683         // PhiNode::Value().
1684         if (m_op == Op_CmpI) {
1685           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1686           if (phi != NULL) {
1687             worklist.push(phi);
1688           }
1689         }
1690       }
1691     }
1692   }
1693 }
1694 
1695 //------------------------------do_transform-----------------------------------
1696 // Top level driver for the recursive transformer
1697 void PhaseCCP::do_transform() {
1698   // Correct leaves of new-space Nodes; they point to old-space.
1699   C->set_root( transform(C->root())->as_Root() );
1700   assert( C->top(),  "missing TOP node" );


1762           Node* m = n->out(i);
1763           if( m->is_Phi() ) {
1764             assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
1765             replace_node(m, nn);
1766             --i; // deleted this phi; rescan starting with next position
1767           }
1768         }
1769       }
1770       replace_node(n,nn);       // Update DefUse edges for new constant
1771     }
1772     return nn;
1773   }
1774 
1775   // If n is a TypeNode, capture any more-precise type permanently into Node
1776   if (t != n->bottom_type()) {
1777     hash_delete(n);             // changing bottom type may force a rehash
1778     n->raise_bottom_type(t);
1779     _worklist.push(n);          // n re-enters the hash table via the worklist
1780   }
1781 
1782   // Idealize graph using DU info.  Must clone() into new-space.
1783   // DU info is generally used to show profitability, progress or safety
1784   // (but generally not needed for correctness).
1785   Node *nn = n->Ideal_DU_postCCP(this);
1786 
1787   // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
1788   switch( n->Opcode() ) {
1789   case Op_FastLock:      // Revisit FastLocks for lock coarsening
1790   case Op_If:
1791   case Op_CountedLoopEnd:
1792   case Op_Region:
1793   case Op_Loop:
1794   case Op_CountedLoop:
1795   case Op_Conv2B:
1796   case Op_Opaque1:
1797   case Op_Opaque2:
1798     _worklist.push(n);
1799     break;
1800   default:
1801     break;
1802   }
1803   if( nn ) {
1804     _worklist.push(n);
1805     // Put users of 'n' onto worklist for second igvn transform
1806     add_users_to_worklist(n);
1807     return nn;
1808   }
1809 
1810   return  n;
1811 }
1812 
1813 //---------------------------------saturate------------------------------------
1814 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1815                                const Type* limit_type) const {
1816   const Type* wide_type = new_type->widen(old_type, limit_type);
1817   if (wide_type != new_type) {          // did we widen?
1818     // If so, we may have widened beyond the limit type.  Clip it back down.
1819     new_type = wide_type->filter(limit_type);
1820   }
1821   return new_type;
1822 }
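
// Worked example (schematic): if old_type = int:[0..4] and this pass computes
// new_type = int:[0..5], widen() may jump ahead (say, toward int:[0..max]) so
// that a value growing on every pass converges in a bounded number of steps;
// filter(limit_type) then clips the jump back, e.g. to int:[0..10] when
// limit_type bounds the value. The precise widening policy is Type::widen's
// and is assumed here only in outline.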
1823 
1824 //------------------------------print_statistics-------------------------------
1825 #ifndef PRODUCT
1826 void PhaseCCP::print_statistics() {
1827   tty->print_cr("CCP: %d  constants found: %d", _total_invokes, _total_constants);
1828 }


1940         igvn->_worklist.push( old );
1941       break;
1942     case 1:
1943       if( old->is_Store() || old->has_special_unique_user() )
1944         igvn->add_users_to_worklist( old );
1945       break;
1946     case 2:
1947       if( old->is_Store() )
1948         igvn->add_users_to_worklist( old );
1949       if( old->Opcode() == Op_Region )
1950         igvn->_worklist.push(old);
1951       break;
1952     case 3:
1953       if( old->Opcode() == Op_Region ) {
1954         igvn->_worklist.push(old);
1955         igvn->add_users_to_worklist( old );
1956       }
1957       break;
1958     default:
1959       break;
1960     }
1961   }
1962 
1963 }
1964 
1965 //-------------------------------replace_by-----------------------------------
1966 // Using def-use info, substitute one node for another.  Follow the def-use
1967 // info to all users of the OLD node.  Then make all uses point to the NEW node.
1968 void Node::replace_by(Node *new_node) {
1969   assert(!is_top(), "top node has no DU info");
1970   for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
1971     Node* use = last_out(i);
1972     uint uses_found = 0;
1973     for (uint j = 0; j < use->len(); j++) {
1974       if (use->in(j) == this) {
1975         if (j < use->req())
1976               use->set_req(j, new_node);
1977         else  use->set_prec(j, new_node);
1978         uses_found++;
1979       }

src/share/vm/opto/phaseX.cpp (new version)

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "opto/block.hpp"
  28 #include "opto/callnode.hpp"
  29 #include "opto/cfgnode.hpp"
  30 #include "opto/connode.hpp"
  31 #include "opto/idealGraphPrinter.hpp"
  32 #include "opto/loopnode.hpp"
  33 #include "opto/machnode.hpp"
  34 #include "opto/opcodes.hpp"
  35 #include "opto/phaseX.hpp"
  36 #include "opto/regalloc.hpp"
  37 #include "opto/rootnode.hpp"
  38 #if INCLUDE_ALL_GCS
  39 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
  40 #endif
  41 
  42 //=============================================================================
  43 #define NODE_HASH_MINIMUM_SIZE    255
  44 //------------------------------NodeHash---------------------------------------
  45 NodeHash::NodeHash(uint est_max_size) :
  46   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  47   _a(Thread::current()->resource_area()),
  48   _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
  49   _inserts(0), _insert_limit( insert_limit() ),
  50   _look_probes(0), _lookup_hits(0), _lookup_misses(0),
  51   _total_insert_probes(0), _total_inserts(0),
  52   _insert_probes(0), _grows(0) {
  53   // _sentinel must be in the current node space
  54   _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  55   memset(_table,0,sizeof(Node*)*_max);
  56 }
  57 
  58 //------------------------------NodeHash---------------------------------------
  59 NodeHash::NodeHash(Arena *arena, uint est_max_size) :
  60   _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),


1279             int nrep = dead->replace_edge(in, NULL);  // Kill edges
1280             assert((nrep > 0), "sanity");
1281             if (in->outcnt() == 0) { // Made input go dead?
1282               _stack.push(in, PROCESS_INPUTS); // Recursively remove
1283               recurse = true;
1284             } else if (in->outcnt() == 1 &&
1285                        in->has_special_unique_user()) {
1286               _worklist.push(in->unique_out());
1287             } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1288               if (in->Opcode() == Op_Region) {
1289                 _worklist.push(in);
1290               } else if (in->is_Store()) {
1291                 DUIterator_Fast imax, i = in->fast_outs(imax);
1292                 _worklist.push(in->fast_out(i));
1293                 i++;
1294                 if (in->outcnt() == 2) {
1295                   _worklist.push(in->fast_out(i));
1296                   i++;
1297                 }
1298                 assert(!(i < imax), "sanity");
1299               }
1300             } else if (in->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(in)) {
1301               add_users_to_worklist(in);
1302             }
1303             if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1304                 in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
1305               // A Load that directly follows an InitializeNode is
1306               // going away. The Stores that follow are candidates
1307               // again to be captured by the InitializeNode.
1308               for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1309                 Node *n = in->fast_out(j);
1310                 if (n->is_Store()) {
1311                   _worklist.push(n);
1312                 }
1313               }
1314             }
1315           } // if (in != NULL && in != C->top())
1316         } // for (uint i = 0; i < dead->req(); i++)
1317         if (recurse) {
1318           continue;
1319         }
1320       } // if (!dead->is_Con())
1321     } // if (progress_state == PROCESS_INPUTS)
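
// The Op_AddP arm above is new relative to the old version: when a dying node
// releases an address (AddP) whose only remaining uses are g1_wb_pre runtime
// calls, those users are pushed so IGVN can fold the now-redundant pre-barrier
// away. A stand-alone sketch of an "all uses satisfy a predicate" check of the
// kind has_only_g1_wb_pre_uses() performs (the helper below is hypothetical,
// not HotSpot API):
//
//   static bool all_outs_match(Node* n, bool (*pred)(Node*)) {
//     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//       if (!pred(n->fast_out(i)))  return false; // a use that is not the barrier
//     }
//     return n->outcnt() > 0;  // require a use, so "only" is not vacuously true
//   }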


1329       _stack.push(dead->raw_out(0), PROCESS_INPUTS);
1330     } else {
1331       // Finished disconnecting all input and output edges.
1332       _stack.pop();
1333       // Remove dead node from iterative worklist
1334       _worklist.remove(dead);
1335       // A constant node that has no out-edges and only one in-edge from
1336       // root is usually dead. However, the reshaping walk sometimes makes
1337       // it reachable again by adding use edges. So we do NOT count Con
1338       // nodes as dead, to be conservative about the dead node count at
1339       // any given time.
1340       if (!dead->is_Con()) {
1341         C->record_dead_node(dead->_idx);
1342       }
1343       if (dead->is_macro()) {
1344         C->remove_macro_node(dead);
1345       }
1346       if (dead->is_expensive()) {
1347         C->remove_expensive_node(dead);
1348       }
1349       if (dead->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1350         C->remove_shenandoah_barrier(reinterpret_cast<ShenandoahLoadReferenceBarrierNode*>(dead));
1351       }
1352       CastIINode* cast = dead->isa_CastII();
1353       if (cast != NULL && cast->has_range_check()) {
1354         C->remove_range_check_cast(cast);
1355       }
1356     }
1357   } // while (_stack.is_nonempty())
1358 }
1359 
1360 //------------------------------subsume_node-----------------------------------
1361 // Remove users from node 'old' and add them to node 'nn'.
1362 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1363   assert( old != hash_find(old), "should already have been removed" );
1364   assert( old != C->top(), "cannot subsume top node");
1365   // Copy debug or profile information to the new version:
1366   C->copy_node_notes_to(nn, old);
1367   // Move users of node 'old' to node 'nn'
1368   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1369     Node* use = old->last_out(i);  // for each use...
1370     // use might need re-hashing (but it won't if it's a new node)
1371     bool is_in_table = _table.hash_delete( use );


1545     // If changed AddP inputs, check Stores for loop invariant
1546     if( use_op == Op_AddP ) {
1547       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1548         Node* u = use->fast_out(i2);
1549         if (u->is_Mem())
1550           _worklist.push(u);
1551       }
1552     }
1553     // If changed initialization activity, check dependent Stores
1554     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1555       InitializeNode* init = use->as_Allocate()->initialization();
1556       if (init != NULL) {
1557         Node* imem = init->proj_out(TypeFunc::Memory);
1558         if (imem != NULL)  add_users_to_worklist0(imem);
1559       }
1560     }
1561     if (use_op == Op_Initialize) {
1562       Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1563       if (imem != NULL)  add_users_to_worklist0(imem);
1564     }
1565 
1566     if (use->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1567       Node* cmp = use->find_out_with(Op_CmpP);
1568       if (cmp != NULL) {
1569         _worklist.push(cmp);
1570       }
1571     }
1572   }
1573 }
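
// The new hunk above re-queues the CmpP that tests a
// ShenandoahLoadReferenceBarrier result, since that compare can sharpen once
// the barrier's input changes. Assumed behavior of find_out_with() (sketch
// only, mirroring the fast_outs iteration used elsewhere in this file):
//
//   Node* first_out_with(Node* n, int opc) {
//     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//       Node* u = n->fast_out(i);
//       if (u->Opcode() == opc)  return u;  // first use with a matching opcode
//     }
//     return NULL;
//   }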
1574 
1575 /**
1576  * Remove the speculative part of all types that we know of
1577  */
1578 void PhaseIterGVN::remove_speculative_types()  {
1579   assert(UseTypeSpeculation, "speculation is off");
1580   for (uint i = 0; i < _types.Size(); i++)  {
1581     const Type* t = _types.fast_lookup(i);
1582     if (t != NULL) {
1583       _types.map(i, t->remove_speculative());
1584     }
1585   }
1586   _table.check_no_speculative_types();
1587 }
1588 
1589 //=============================================================================
1590 #ifndef PRODUCT
1591 uint PhaseCCP::_total_invokes   = 0;


1676         if (m->bottom_type() != type(m)) { // If not already bottomed out
1677           worklist.push(m);     // Propagate change to user
1678         }
1679 
1680         // CmpU nodes can get their type information from two nodes up in the
1681         // graph (instead of from the nodes immediately above). Make sure they
1682         // are added to the worklist if nodes they depend on are updated, since
1683         // they could be missed and get wrong types otherwise.
1684         uint m_op = m->Opcode();
1685         if (m_op == Op_AddI || m_op == Op_SubI) {
1686           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1687             Node* p = m->fast_out(i2); // Propagate changes to uses
1688             if (p->Opcode() == Op_CmpU) {
1689               // Got a CmpU which might need the new type information from node n.
1690               if(p->bottom_type() != type(p)) { // If not already bottomed out
1691                 worklist.push(p); // Propagate change to user
1692               }
1693             }
1694           }
1695         }
1696         if (m->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1697           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1698             Node* p = m->fast_out(i2);
1699             if (p->Opcode() == Op_CmpP) {
1700               if (p->bottom_type() != type(p)) {
1701                 worklist.push(p);
1702               }
1703             } else if (p->Opcode() == Op_AddP) {
1704               for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) {
1705                 Node* q = p->fast_out(i3);
1706                 if (q->is_Load()) {
1707                   if (q->bottom_type() != type(q)) {
1708                     worklist.push(q);
1709                   }
1710                 }
1711               }
1712             }
1713           }
1714         }
1715         // If n is used in a counted loop exit condition then the type
1716         // of the counted loop's Phi depends on the type of n. See
1717         // PhiNode::Value().
1718         if (m_op == Op_CmpI) {
1719           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1720           if (phi != NULL) {
1721             worklist.push(phi);
1722           }
1723         }
1724       }
1725     }
1726   }
1727 }
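
// The ShenandoahLoadReferenceBarrier arm added above mirrors the CmpU special
// case: the barrier's type reaches CmpP users in one hop and Loads in two hops
// (through an AddP off the barrier result), so both are re-pushed when the
// barrier's type changes; otherwise CCP could leave them at a stale, overly
// wide type.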
1728 
1729 //------------------------------do_transform-----------------------------------
1730 // Top level driver for the recursive transformer
1731 void PhaseCCP::do_transform() {
1732   // Correct leaves of new-space Nodes; they point to old-space.
1733   C->set_root( transform(C->root())->as_Root() );
1734   assert( C->top(),  "missing TOP node" );


1796           Node* m = n->out(i);
1797           if( m->is_Phi() ) {
1798             assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
1799             replace_node(m, nn);
1800             --i; // deleted this phi; rescan starting with next position
1801           }
1802         }
1803       }
1804       replace_node(n,nn);       // Update DefUse edges for new constant
1805     }
1806     return nn;
1807   }
1808 
1809   // If n is a TypeNode, capture any more-precise type permanently into Node
1810   if (t != n->bottom_type()) {
1811     hash_delete(n);             // changing bottom type may force a rehash
1812     n->raise_bottom_type(t);
1813     _worklist.push(n);          // n re-enters the hash table via the worklist
1814   }
1815 
1816   // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
1817   switch( n->Opcode() ) {
1818   case Op_FastLock:      // Revisit FastLocks for lock coarsening
1819   case Op_If:
1820   case Op_CountedLoopEnd:
1821   case Op_Region:
1822   case Op_Loop:
1823   case Op_CountedLoop:
1824   case Op_Conv2B:
1825   case Op_Opaque1:
1826   case Op_Opaque2:
1827     _worklist.push(n);
1828     break;
1829   default:
1830     break;
1831   }
1832 
1833   return  n;
1834 }
1835 
1836 //---------------------------------saturate------------------------------------
1837 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1838                                const Type* limit_type) const {
1839   const Type* wide_type = new_type->widen(old_type, limit_type);
1840   if (wide_type != new_type) {          // did we widen?
1841     // If so, we may have widened beyond the limit type.  Clip it back down.
1842     new_type = wide_type->filter(limit_type);
1843   }
1844   return new_type;
1845 }
1846 
1847 //------------------------------print_statistics-------------------------------
1848 #ifndef PRODUCT
1849 void PhaseCCP::print_statistics() {
1850   tty->print_cr("CCP: %d  constants found: %d", _total_invokes, _total_constants);
1851 }


1963         igvn->_worklist.push( old );
1964       break;
1965     case 1:
1966       if( old->is_Store() || old->has_special_unique_user() )
1967         igvn->add_users_to_worklist( old );
1968       break;
1969     case 2:
1970       if( old->is_Store() )
1971         igvn->add_users_to_worklist( old );
1972       if( old->Opcode() == Op_Region )
1973         igvn->_worklist.push(old);
1974       break;
1975     case 3:
1976       if( old->Opcode() == Op_Region ) {
1977         igvn->_worklist.push(old);
1978         igvn->add_users_to_worklist( old );
1979       }
1980       break;
1981     default:
1982       break;
1983     }
1984     if (old->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(old)) {
1985       igvn->add_users_to_worklist(old);
1986     }
1987   }
1988 
1989 }
1990 
1991 //-------------------------------replace_by-----------------------------------
1992 // Using def-use info, substitute one node for another.  Follow the def-use
1993 // info to all users of the OLD node.  Then make all uses point to the NEW node.
1994 void Node::replace_by(Node *new_node) {
1995   assert(!is_top(), "top node has no DU info");
1996   for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
1997     Node* use = last_out(i);
1998     uint uses_found = 0;
1999     for (uint j = 0; j < use->len(); j++) {
2000       if (use->in(j) == this) {
2001         if (j < use->req())
2002               use->set_req(j, new_node);
2003         else  use->set_prec(j, new_node);
2004         uses_found++;
2005       }
