18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "memory/allocation.inline.hpp"
27 #include "opto/block.hpp"
28 #include "opto/callnode.hpp"
29 #include "opto/cfgnode.hpp"
30 #include "opto/connode.hpp"
31 #include "opto/idealGraphPrinter.hpp"
32 #include "opto/loopnode.hpp"
33 #include "opto/machnode.hpp"
34 #include "opto/opcodes.hpp"
35 #include "opto/phaseX.hpp"
36 #include "opto/regalloc.hpp"
37 #include "opto/rootnode.hpp"
38
39 //=============================================================================
40 #define NODE_HASH_MINIMUM_SIZE 255
//------------------------------NodeHash---------------------------------------
// Construct a Node hash table (used for GVN value-numbering) backed by the
// current thread's resource area.  Capacity is est_max_size clamped up to
// NODE_HASH_MINIMUM_SIZE and then passed through round_up() (presumably to a
// power of two -- TODO confirm against round_up() in phaseX.hpp).
NodeHash::NodeHash(uint est_max_size) :
  _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  _a(Thread::current()->resource_area()),                 // all table storage comes from this arena
  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
  _inserts(0), _insert_limit( insert_limit() ),
  _look_probes(0), _lookup_hits(0), _lookup_misses(0),    // probe/hit counters start at zero
  _total_insert_probes(0), _total_inserts(0),
  _insert_probes(0), _grows(0) {
  // _sentinel must be in the current node space
  _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  // Start with every bucket empty (NULL).
  memset(_table,0,sizeof(Node*)*_max);
}
54
55 //------------------------------NodeHash---------------------------------------
56 NodeHash::NodeHash(Arena *arena, uint est_max_size) :
57 _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
1265 int nrep = dead->replace_edge(in, NULL); // Kill edges
1266 assert((nrep > 0), "sanity");
1267 if (in->outcnt() == 0) { // Made input go dead?
1268 _stack.push(in, PROCESS_INPUTS); // Recursively remove
1269 recurse = true;
1270 } else if (in->outcnt() == 1 &&
1271 in->has_special_unique_user()) {
1272 _worklist.push(in->unique_out());
1273 } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1274 if (in->Opcode() == Op_Region) {
1275 _worklist.push(in);
1276 } else if (in->is_Store()) {
1277 DUIterator_Fast imax, i = in->fast_outs(imax);
1278 _worklist.push(in->fast_out(i));
1279 i++;
1280 if (in->outcnt() == 2) {
1281 _worklist.push(in->fast_out(i));
1282 i++;
1283 }
1284 assert(!(i < imax), "sanity");
1285 }
1286 }
1287 if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1288 in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
1289 // A Load that directly follows an InitializeNode is
1290 // going away. The Stores that follow are candidates
1291 // again to be captured by the InitializeNode.
1292 for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1293 Node *n = in->fast_out(j);
1294 if (n->is_Store()) {
1295 _worklist.push(n);
1296 }
1297 }
1298 }
1299 } // if (in != NULL && in != C->top())
1300 } // for (uint i = 0; i < dead->req(); i++)
1301 if (recurse) {
1302 continue;
1303 }
1304 } // if (!dead->is_Con())
1305 } // if (progress_state == PROCESS_INPUTS)
1313 _stack.push(dead->raw_out(0), PROCESS_INPUTS);
1314 } else {
1315 // Finished disconnecting all input and output edges.
1316 _stack.pop();
1317 // Remove dead node from iterative worklist
1318 _worklist.remove(dead);
1319 // Constant node that has no out-edges and has only one in-edge from
1320 // root is usually dead. However, sometimes reshaping walk makes
1321 // it reachable by adding use edges. So, we will NOT count Con nodes
1322 // as dead to be conservative about the dead node count at any
1323 // given time.
1324 if (!dead->is_Con()) {
1325 C->record_dead_node(dead->_idx);
1326 }
1327 if (dead->is_macro()) {
1328 C->remove_macro_node(dead);
1329 }
1330 if (dead->is_expensive()) {
1331 C->remove_expensive_node(dead);
1332 }
1333 CastIINode* cast = dead->isa_CastII();
1334 if (cast != NULL && cast->has_range_check()) {
1335 C->remove_range_check_cast(cast);
1336 }
1337 }
1338 } // while (_stack.is_nonempty())
1339 }
1340
1341 //------------------------------subsume_node-----------------------------------
1342 // Remove users from node 'old' and add them to node 'nn'.
1343 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1344 if (old->Opcode() == Op_SafePoint) {
1345 old->as_SafePoint()->disconnect_from_root(this);
1346 }
1347 assert( old != hash_find(old), "should already been removed" );
1348 assert( old != C->top(), "cannot subsume top node");
1349 // Copy debug or profile information to the new version:
1350 C->copy_node_notes_to(nn, old);
1351 // Move users of node 'old' to node 'nn'
1352 for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1529 // If changed AddP inputs, check Stores for loop invariant
1530 if( use_op == Op_AddP ) {
1531 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1532 Node* u = use->fast_out(i2);
1533 if (u->is_Mem())
1534 _worklist.push(u);
1535 }
1536 }
1537 // If changed initialization activity, check dependent Stores
1538 if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1539 InitializeNode* init = use->as_Allocate()->initialization();
1540 if (init != NULL) {
1541 Node* imem = init->proj_out(TypeFunc::Memory);
1542 if (imem != NULL) add_users_to_worklist0(imem);
1543 }
1544 }
1545 if (use_op == Op_Initialize) {
1546 Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1547 if (imem != NULL) add_users_to_worklist0(imem);
1548 }
1549 }
1550 }
1551
1552 /**
1553 * Remove the speculative part of all types that we know of
1554 */
1555 void PhaseIterGVN::remove_speculative_types() {
1556 assert(UseTypeSpeculation, "speculation is off");
1557 for (uint i = 0; i < _types.Size(); i++) {
1558 const Type* t = _types.fast_lookup(i);
1559 if (t != NULL) {
1560 _types.map(i, t->remove_speculative());
1561 }
1562 }
1563 _table.check_no_speculative_types();
1564 }
1565
1566 //=============================================================================
1567 #ifndef PRODUCT
1568 uint PhaseCCP::_total_invokes = 0;
1656 if (m->bottom_type() != type(m)) { // If not already bottomed out
1657 worklist.push(m); // Propagate change to user
1658 }
1659
1660 // CmpU nodes can get their type information from two nodes up in the
1661 // graph (instead of from the nodes immediately above). Make sure they
1662 // are added to the worklist if nodes they depend on are updated, since
1663 // they could be missed and get wrong types otherwise.
1664 uint m_op = m->Opcode();
1665 if (m_op == Op_AddI || m_op == Op_SubI) {
1666 for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1667 Node* p = m->fast_out(i2); // Propagate changes to uses
1668 if (p->Opcode() == Op_CmpU) {
1669 // Got a CmpU which might need the new type information from node n.
1670 if(p->bottom_type() != type(p)) { // If not already bottomed out
1671 worklist.push(p); // Propagate change to user
1672 }
1673 }
1674 }
1675 }
1676 // If n is used in a counted loop exit condition then the type
1677 // of the counted loop's Phi depends on the type of n. See
1678 // PhiNode::Value().
1679 if (m_op == Op_CmpI) {
1680 PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1681 if (phi != NULL) {
1682 worklist.push(phi);
1683 }
1684 }
1685 }
1686 }
1687 }
1688 }
1689
1690 //------------------------------do_transform-----------------------------------
1691 // Top level driver for the recursive transformer
1692 void PhaseCCP::do_transform() {
1693 // Correct leaves of new-space Nodes; they point to old-space.
1694 C->set_root( transform(C->root())->as_Root() );
1695 assert( C->top(), "missing TOP node" );
1757 Node* m = n->out(i);
1758 if( m->is_Phi() ) {
1759 assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
1760 replace_node(m, nn);
1761 --i; // deleted this phi; rescan starting with next position
1762 }
1763 }
1764 }
1765 replace_node(n,nn); // Update DefUse edges for new constant
1766 }
1767 return nn;
1768 }
1769
1770 // If x is a TypeNode, capture any more-precise type permanently into Node
1771 if (t != n->bottom_type()) {
1772 hash_delete(n); // changing bottom type may force a rehash
1773 n->raise_bottom_type(t);
1774 _worklist.push(n); // n re-enters the hash table via the worklist
1775 }
1776
1777 // Idealize graph using DU info. Must clone() into new-space.
1778 // DU info is generally used to show profitability, progress or safety
1779 // (but generally not needed for correctness).
1780 Node *nn = n->Ideal_DU_postCCP(this);
1781
1782 // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
1783 switch( n->Opcode() ) {
1784 case Op_FastLock: // Revisit FastLocks for lock coarsening
1785 case Op_If:
1786 case Op_CountedLoopEnd:
1787 case Op_Region:
1788 case Op_Loop:
1789 case Op_CountedLoop:
1790 case Op_Conv2B:
1791 case Op_Opaque1:
1792 case Op_Opaque2:
1793 _worklist.push(n);
1794 break;
1795 default:
1796 break;
1797 }
1798 if( nn ) {
1799 _worklist.push(n);
1800 // Put users of 'n' onto worklist for second igvn transform
1801 add_users_to_worklist(n);
1802 return nn;
1803 }
1804
1805 return n;
1806 }
1807
1808 //---------------------------------saturate------------------------------------
1809 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1810 const Type* limit_type) const {
1811 const Type* wide_type = new_type->widen(old_type, limit_type);
1812 if (wide_type != new_type) { // did we widen?
1813 // If so, we may have widened beyond the limit type. Clip it back down.
1814 new_type = wide_type->filter(limit_type);
1815 }
1816 return new_type;
1817 }
1818
1819 //------------------------------print_statistics-------------------------------
1820 #ifndef PRODUCT
// Dump aggregate CCP counters to the tty: how many times CCP was invoked
// (per the _total_invokes counter) and the total constants found
// (non-product builds only; see the enclosing #ifndef PRODUCT).
void PhaseCCP::print_statistics() {
  tty->print_cr("CCP: %d constants found: %d", _total_invokes, _total_constants);
}
1935 igvn->_worklist.push( old );
1936 break;
1937 case 1:
1938 if( old->is_Store() || old->has_special_unique_user() )
1939 igvn->add_users_to_worklist( old );
1940 break;
1941 case 2:
1942 if( old->is_Store() )
1943 igvn->add_users_to_worklist( old );
1944 if( old->Opcode() == Op_Region )
1945 igvn->_worklist.push(old);
1946 break;
1947 case 3:
1948 if( old->Opcode() == Op_Region ) {
1949 igvn->_worklist.push(old);
1950 igvn->add_users_to_worklist( old );
1951 }
1952 break;
1953 default:
1954 break;
1955 }
1956 }
1957
1958 }
1959
1960 //-------------------------------replace_by-----------------------------------
1961 // Using def-use info, replace one node for another. Follow the def-use info
1962 // to all users of the OLD node. Then make all uses point to the NEW node.
1963 void Node::replace_by(Node *new_node) {
1964 assert(!is_top(), "top node has no DU info");
1965 for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
1966 Node* use = last_out(i);
1967 uint uses_found = 0;
1968 for (uint j = 0; j < use->len(); j++) {
1969 if (use->in(j) == this) {
1970 if (j < use->req())
1971 use->set_req(j, new_node);
1972 else use->set_prec(j, new_node);
1973 uses_found++;
1974 }
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "memory/allocation.inline.hpp"
27 #include "opto/block.hpp"
28 #include "opto/callnode.hpp"
29 #include "opto/cfgnode.hpp"
30 #include "opto/connode.hpp"
31 #include "opto/idealGraphPrinter.hpp"
32 #include "opto/loopnode.hpp"
33 #include "opto/machnode.hpp"
34 #include "opto/opcodes.hpp"
35 #include "opto/phaseX.hpp"
36 #include "opto/regalloc.hpp"
37 #include "opto/rootnode.hpp"
38 #if INCLUDE_ALL_GCS
39 #include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp"
40 #endif
41
42 //=============================================================================
43 #define NODE_HASH_MINIMUM_SIZE 255
//------------------------------NodeHash---------------------------------------
// Construct a Node hash table (used for GVN value-numbering) backed by the
// current thread's resource area.  Capacity is est_max_size clamped up to
// NODE_HASH_MINIMUM_SIZE and then passed through round_up() (presumably to a
// power of two -- TODO confirm against round_up() in phaseX.hpp).
NodeHash::NodeHash(uint est_max_size) :
  _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  _a(Thread::current()->resource_area()),                 // all table storage comes from this arena
  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
  _inserts(0), _insert_limit( insert_limit() ),
  _look_probes(0), _lookup_hits(0), _lookup_misses(0),    // probe/hit counters start at zero
  _total_insert_probes(0), _total_inserts(0),
  _insert_probes(0), _grows(0) {
  // _sentinel must be in the current node space
  _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  // Start with every bucket empty (NULL).
  memset(_table,0,sizeof(Node*)*_max);
}
57
58 //------------------------------NodeHash---------------------------------------
59 NodeHash::NodeHash(Arena *arena, uint est_max_size) :
60 _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
1268 int nrep = dead->replace_edge(in, NULL); // Kill edges
1269 assert((nrep > 0), "sanity");
1270 if (in->outcnt() == 0) { // Made input go dead?
1271 _stack.push(in, PROCESS_INPUTS); // Recursively remove
1272 recurse = true;
1273 } else if (in->outcnt() == 1 &&
1274 in->has_special_unique_user()) {
1275 _worklist.push(in->unique_out());
1276 } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1277 if (in->Opcode() == Op_Region) {
1278 _worklist.push(in);
1279 } else if (in->is_Store()) {
1280 DUIterator_Fast imax, i = in->fast_outs(imax);
1281 _worklist.push(in->fast_out(i));
1282 i++;
1283 if (in->outcnt() == 2) {
1284 _worklist.push(in->fast_out(i));
1285 i++;
1286 }
1287 assert(!(i < imax), "sanity");
1288 }
1289 } else if (in->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(in)) {
1290 add_users_to_worklist(in);
1291 }
1292 if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1293 in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
1294 // A Load that directly follows an InitializeNode is
1295 // going away. The Stores that follow are candidates
1296 // again to be captured by the InitializeNode.
1297 for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1298 Node *n = in->fast_out(j);
1299 if (n->is_Store()) {
1300 _worklist.push(n);
1301 }
1302 }
1303 }
1304 } // if (in != NULL && in != C->top())
1305 } // for (uint i = 0; i < dead->req(); i++)
1306 if (recurse) {
1307 continue;
1308 }
1309 } // if (!dead->is_Con())
1310 } // if (progress_state == PROCESS_INPUTS)
1318 _stack.push(dead->raw_out(0), PROCESS_INPUTS);
1319 } else {
1320 // Finished disconnecting all input and output edges.
1321 _stack.pop();
1322 // Remove dead node from iterative worklist
1323 _worklist.remove(dead);
1324 // Constant node that has no out-edges and has only one in-edge from
1325 // root is usually dead. However, sometimes reshaping walk makes
1326 // it reachable by adding use edges. So, we will NOT count Con nodes
1327 // as dead to be conservative about the dead node count at any
1328 // given time.
1329 if (!dead->is_Con()) {
1330 C->record_dead_node(dead->_idx);
1331 }
1332 if (dead->is_macro()) {
1333 C->remove_macro_node(dead);
1334 }
1335 if (dead->is_expensive()) {
1336 C->remove_expensive_node(dead);
1337 }
1338 if (dead->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1339 C->remove_shenandoah_barrier(reinterpret_cast<ShenandoahLoadReferenceBarrierNode*>(dead));
1340 }
1341 CastIINode* cast = dead->isa_CastII();
1342 if (cast != NULL && cast->has_range_check()) {
1343 C->remove_range_check_cast(cast);
1344 }
1345 }
1346 } // while (_stack.is_nonempty())
1347 }
1348
1349 //------------------------------subsume_node-----------------------------------
1350 // Remove users from node 'old' and add them to node 'nn'.
1351 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1352 if (old->Opcode() == Op_SafePoint) {
1353 old->as_SafePoint()->disconnect_from_root(this);
1354 }
1355 assert( old != hash_find(old), "should already been removed" );
1356 assert( old != C->top(), "cannot subsume top node");
1357 // Copy debug or profile information to the new version:
1358 C->copy_node_notes_to(nn, old);
1359 // Move users of node 'old' to node 'nn'
1360 for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1537 // If changed AddP inputs, check Stores for loop invariant
1538 if( use_op == Op_AddP ) {
1539 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1540 Node* u = use->fast_out(i2);
1541 if (u->is_Mem())
1542 _worklist.push(u);
1543 }
1544 }
1545 // If changed initialization activity, check dependent Stores
1546 if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1547 InitializeNode* init = use->as_Allocate()->initialization();
1548 if (init != NULL) {
1549 Node* imem = init->proj_out(TypeFunc::Memory);
1550 if (imem != NULL) add_users_to_worklist0(imem);
1551 }
1552 }
1553 if (use_op == Op_Initialize) {
1554 Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1555 if (imem != NULL) add_users_to_worklist0(imem);
1556 }
1557
1558 if (use->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1559 Node* cmp = use->find_out_with(Op_CmpP);
1560 if (cmp != NULL) {
1561 _worklist.push(cmp);
1562 }
1563 }
1564 }
1565 }
1566
1567 /**
1568 * Remove the speculative part of all types that we know of
1569 */
1570 void PhaseIterGVN::remove_speculative_types() {
1571 assert(UseTypeSpeculation, "speculation is off");
1572 for (uint i = 0; i < _types.Size(); i++) {
1573 const Type* t = _types.fast_lookup(i);
1574 if (t != NULL) {
1575 _types.map(i, t->remove_speculative());
1576 }
1577 }
1578 _table.check_no_speculative_types();
1579 }
1580
1581 //=============================================================================
1582 #ifndef PRODUCT
1583 uint PhaseCCP::_total_invokes = 0;
1671 if (m->bottom_type() != type(m)) { // If not already bottomed out
1672 worklist.push(m); // Propagate change to user
1673 }
1674
1675 // CmpU nodes can get their type information from two nodes up in the
1676 // graph (instead of from the nodes immediately above). Make sure they
1677 // are added to the worklist if nodes they depend on are updated, since
1678 // they could be missed and get wrong types otherwise.
1679 uint m_op = m->Opcode();
1680 if (m_op == Op_AddI || m_op == Op_SubI) {
1681 for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1682 Node* p = m->fast_out(i2); // Propagate changes to uses
1683 if (p->Opcode() == Op_CmpU) {
1684 // Got a CmpU which might need the new type information from node n.
1685 if(p->bottom_type() != type(p)) { // If not already bottomed out
1686 worklist.push(p); // Propagate change to user
1687 }
1688 }
1689 }
1690 }
1691 if (m->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
1692 for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1693 Node* p = m->fast_out(i2);
1694 if (p->Opcode() == Op_CmpP) {
1695 if(p->bottom_type() != type(p)) {
1696 worklist.push(p);
1697 }
1698 } else if (p->Opcode() == Op_AddP) {
1699 for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) {
1700 Node* q = p->fast_out(i3);
1701 if (q->is_Load()) {
1702 if(q->bottom_type() != type(q)) {
1703 worklist.push(q);
1704 }
1705 }
1706 }
1707 }
1708 }
1709 }
1710 // If n is used in a counted loop exit condition then the type
1711 // of the counted loop's Phi depends on the type of n. See
1712 // PhiNode::Value().
1713 if (m_op == Op_CmpI) {
1714 PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1715 if (phi != NULL) {
1716 worklist.push(phi);
1717 }
1718 }
1719 }
1720 }
1721 }
1722 }
1723
1724 //------------------------------do_transform-----------------------------------
1725 // Top level driver for the recursive transformer
1726 void PhaseCCP::do_transform() {
1727 // Correct leaves of new-space Nodes; they point to old-space.
1728 C->set_root( transform(C->root())->as_Root() );
1729 assert( C->top(), "missing TOP node" );
1791 Node* m = n->out(i);
1792 if( m->is_Phi() ) {
1793 assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
1794 replace_node(m, nn);
1795 --i; // deleted this phi; rescan starting with next position
1796 }
1797 }
1798 }
1799 replace_node(n,nn); // Update DefUse edges for new constant
1800 }
1801 return nn;
1802 }
1803
1804 // If x is a TypeNode, capture any more-precise type permanently into Node
1805 if (t != n->bottom_type()) {
1806 hash_delete(n); // changing bottom type may force a rehash
1807 n->raise_bottom_type(t);
1808 _worklist.push(n); // n re-enters the hash table via the worklist
1809 }
1810
1811 // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
1812 switch( n->Opcode() ) {
1813 case Op_FastLock: // Revisit FastLocks for lock coarsening
1814 case Op_If:
1815 case Op_CountedLoopEnd:
1816 case Op_Region:
1817 case Op_Loop:
1818 case Op_CountedLoop:
1819 case Op_Conv2B:
1820 case Op_Opaque1:
1821 case Op_Opaque2:
1822 _worklist.push(n);
1823 break;
1824 default:
1825 break;
1826 }
1827
1828 return n;
1829 }
1830
1831 //---------------------------------saturate------------------------------------
1832 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1833 const Type* limit_type) const {
1834 const Type* wide_type = new_type->widen(old_type, limit_type);
1835 if (wide_type != new_type) { // did we widen?
1836 // If so, we may have widened beyond the limit type. Clip it back down.
1837 new_type = wide_type->filter(limit_type);
1838 }
1839 return new_type;
1840 }
1841
1842 //------------------------------print_statistics-------------------------------
1843 #ifndef PRODUCT
// Dump aggregate CCP counters to the tty: how many times CCP was invoked
// (per the _total_invokes counter) and the total constants found
// (non-product builds only; see the enclosing #ifndef PRODUCT).
void PhaseCCP::print_statistics() {
  tty->print_cr("CCP: %d constants found: %d", _total_invokes, _total_constants);
}
1958 igvn->_worklist.push( old );
1959 break;
1960 case 1:
1961 if( old->is_Store() || old->has_special_unique_user() )
1962 igvn->add_users_to_worklist( old );
1963 break;
1964 case 2:
1965 if( old->is_Store() )
1966 igvn->add_users_to_worklist( old );
1967 if( old->Opcode() == Op_Region )
1968 igvn->_worklist.push(old);
1969 break;
1970 case 3:
1971 if( old->Opcode() == Op_Region ) {
1972 igvn->_worklist.push(old);
1973 igvn->add_users_to_worklist( old );
1974 }
1975 break;
1976 default:
1977 break;
1978 }
1979 if (old->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(old)) {
1980 igvn->add_users_to_worklist(old);
1981 }
1982 }
1983
1984 }
1985
1986 //-------------------------------replace_by-----------------------------------
1987 // Using def-use info, replace one node for another. Follow the def-use info
1988 // to all users of the OLD node. Then make all uses point to the NEW node.
1989 void Node::replace_by(Node *new_node) {
1990 assert(!is_top(), "top node has no DU info");
1991 for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
1992 Node* use = last_out(i);
1993 uint uses_found = 0;
1994 for (uint j = 0; j < use->len(); j++) {
1995 if (use->in(j) == this) {
1996 if (j < use->req())
1997 use->set_req(j, new_node);
1998 else use->set_prec(j, new_node);
1999 uses_found++;
2000 }
|