
src/hotspot/share/opto/phaseX.cpp (old version)

1225     }
1226   }
1227   NOT_PRODUCT(verify_PhaseIterGVN();)
1228 }
1229 
1230 
1231 /**
1232  * Register a new node with the optimizer.  Update the types array, the def-use
1233  * info.  Put on worklist.
1234  */
1235 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1236   set_type_bottom(n);
1237   _worklist.push(n);
1238   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1239   return n;
1240 }
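A usage sketch for the helper above; the AddINode and the inputs 'a' and 'b' are illustrative placeholders, not part of this code:

    // The new node gets a bottom type recorded and is pushed on the IGVN
    // worklist; node notes are copied over when 'orig' is supplied.
    Node* sum = igvn->register_new_node_with_optimizer(new AddINode(a, b));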
1241 
1242 //------------------------------transform--------------------------------------
1243 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1244 Node *PhaseIterGVN::transform( Node *n ) {
1245   if (_delay_transform) {
1246     // Register the node but don't optimize for now
1247     register_new_node_with_optimizer(n);
1248     return n;
1249   }
1250 
1251   // If brand new node, make space in type array, and give it a type.
1252   ensure_type_or_null(n);
1253   if (type_or_null(n) == NULL) {
1254     set_type_bottom(n);
1255   }
1256 






1257   return transform_old(n);
1258 }
1259 
1260 Node *PhaseIterGVN::transform_old(Node* n) {
1261   DEBUG_ONLY(uint loop_count = 0;);
1262   NOT_PRODUCT(set_transforms());
1263 
1264   // Remove 'n' from hash table in case it gets modified
1265   _table.hash_delete(n);
1266   if (VerifyIterativeGVN) {
1267    assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1268   }
1269 
1270   // Apply the Ideal call in a loop until it no longer applies
1271   Node* k = n;
1272   DEBUG_ONLY(dead_loop_check(k);)
1273   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1274   C->remove_modified_node(k);
1275   Node* i = apply_ideal(k, /*can_reshape=*/true);
1276   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");


1476       // root is usually dead. However, sometimes the reshaping walk makes
1477       // it reachable by adding use edges. So, we will NOT count Con nodes
1478       // as dead to be conservative about the dead node count at any
1479       // given time.
1480       if (!dead->is_Con()) {
1481         C->record_dead_node(dead->_idx);
1482       }
1483       if (dead->is_macro()) {
1484         C->remove_macro_node(dead);
1485       }
1486       if (dead->is_expensive()) {
1487         C->remove_expensive_node(dead);
1488       }
1489       CastIINode* cast = dead->isa_CastII();
1490       if (cast != NULL && cast->has_range_check()) {
1491         C->remove_range_check_cast(cast);
1492       }
1493       if (dead->Opcode() == Op_Opaque4) {
1494         C->remove_opaque4_node(dead);
1495       }



1496       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1497       bs->unregister_potential_barrier_node(dead);
1498     }
1499   } // while (_stack.is_nonempty())
1500 }
1501 
1502 //------------------------------subsume_node-----------------------------------
1503 // Remove users from node 'old' and add them to node 'nn'.
1504 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1505   if (old->Opcode() == Op_SafePoint) {
1506     old->as_SafePoint()->disconnect_from_root(this);
1507   }
1508   assert( old != hash_find(old), "should have already been removed" );
1509   assert( old != C->top(), "cannot subsume top node");
1510   // Copy debug or profile information to the new version:
1511   C->copy_node_notes_to(nn, old);
1512   // Move users of node 'old' to node 'nn'
1513   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1514     Node* use = old->last_out(i);  // for each use...
1515     // use might need re-hashing (but it won't if it's a new node)


1540     }
1541   }
1542 
1543   // Smash all inputs to 'old', isolating him completely
1544   Node *temp = new Node(1);
1545   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1546   remove_dead_node( old );
1547   temp->del_req(0);         // Yank bogus edge
1548 #ifndef PRODUCT
1549   if( VerifyIterativeGVN ) {
1550     for ( int i = 0; i < _verify_window_size; i++ ) {
1551       if ( _verify_window[i] == old )
1552         _verify_window[i] = nn;
1553     }
1554   }
1555 #endif
1556   _worklist.remove(temp);   // this can be necessary
1557   temp->destruct();         // reuse the _idx of this little guy
1558 }
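// Note on the temp node above: remove_dead_node(old) recursively reclaims
// any input whose use count drops to zero. If 'nn' happens to be an input
// of 'old' (e.g. old was subsumed by its own operand) and has no other
// users yet, it could be swept away along with 'old'; the throwaway edge
// from 'temp' pins it until 'old' is fully disconnected.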
1559 











1560 //------------------------------add_users_to_worklist--------------------------
1561 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1562   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1563     _worklist.push(n->fast_out(i));  // Push on worklist
1564   }
1565 }
1566 
1567 // Return the counted loop Phi if, as a counted loop exit condition,
1568 // cmp compares the induction variable with n
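// Sketch of the matched shape: n feeds a CmpI, the CmpI feeds a Bool, and
// the Bool controls a CountedLoopEnd whose limit() is n; the function then
// returns that loop's induction-variable Phi (cle->phi()).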
1569 static PhiNode* countedloop_phi_from_cmp(CmpINode* cmp, Node* n) {
1570   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1571     Node* bol = cmp->fast_out(i);
1572     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1573       Node* iff = bol->fast_out(i2);
1574       if (iff->is_CountedLoopEnd()) {
1575         CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
1576         if (cle->limit() == n) {
1577           PhiNode* phi = cle->phi();
1578           if (phi != NULL) {
1579             return phi;


1685     // If changed AddP inputs, check Stores for loop invariant
1686     if( use_op == Op_AddP ) {
1687       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1688         Node* u = use->fast_out(i2);
1689         if (u->is_Mem())
1690           _worklist.push(u);
1691       }
1692     }
1693     // If changed initialization activity, check dependent Stores
1694     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1695       InitializeNode* init = use->as_Allocate()->initialization();
1696       if (init != NULL) {
1697         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1698         if (imem != NULL)  add_users_to_worklist0(imem);
1699       }
1700     }
1701     if (use_op == Op_Initialize) {
1702       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1703       if (imem != NULL)  add_users_to_worklist0(imem);
1704     }








1705     // Loading the java mirror from a Klass requires two loads and the type
1706     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1707     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1708     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1709     bool has_load_barriers = bs->has_load_barriers();
1710 
1711     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1712       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1713         Node* u = use->fast_out(i2);
1714         const Type* ut = u->bottom_type();
1715         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1716           if (has_load_barriers) {
1717             // Search for load barriers behind the load
1718             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1719               Node* b = u->fast_out(i3);
1720               if (bs->is_gc_barrier_node(b)) {
1721                 _worklist.push(b);
1722               }
1723             }
1724           }


1844         // they could be missed and get wrong types otherwise.
1845         uint m_op = m->Opcode();
1846         if (m_op == Op_AddI || m_op == Op_SubI) {
1847           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1848             Node* p = m->fast_out(i2); // Propagate changes to uses
1849             if (p->Opcode() == Op_CmpU) {
1850               // Got a CmpU which might need the new type information from node n.
1851               if(p->bottom_type() != type(p)) { // If not already bottomed out
1852                 worklist.push(p); // Propagate change to user
1853               }
1854             }
1855           }
1856         }
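        // (CmpUNode::Value() looks through an AddI/SubI input to bound the
        // unsigned range, so a sharper type on 'n' can change the CmpU's
        // value even though the CmpU never uses 'n' directly.)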
1857         // If n is used in a counted loop exit condition then the type
1858         // of the counted loop's Phi depends on the type of n. See
1859         // PhiNode::Value().
1860         if (m_op == Op_CmpI) {
1861           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1862           if (phi != NULL) {
1863             worklist.push(phi);








1864           }
1865         }
1866         // Loading the java mirror from a Klass requires two loads and the type
1867         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
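        //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))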
1868         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1869         bool has_load_barriers = bs->has_load_barriers();
1870 
1871         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1872           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1873             Node* u = m->fast_out(i2);
1874             const Type* ut = u->bottom_type();
1875             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1876               if (has_load_barriers) {
1877                 // Search for load barriers behind the load
1878                 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1879                   Node* b = u->fast_out(i3);
1880                   if (bs->is_gc_barrier_node(b)) {
1881                     worklist.push(b);
1882                   }
1883                 }

src/hotspot/share/opto/phaseX.cpp (new version)

1225     }
1226   }
1227   NOT_PRODUCT(verify_PhaseIterGVN();)
1228 }
1229 
1230 
1231 /**
1232  * Register a new node with the optimizer.  Update the types array, the def-use
1233  * info.  Put on worklist.
1234  */
1235 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1236   set_type_bottom(n);
1237   _worklist.push(n);
1238   if (orig != NULL)  C->copy_node_notes_to(n, orig);
1239   return n;
1240 }
1241 
1242 //------------------------------transform--------------------------------------
1243 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1244 Node *PhaseIterGVN::transform( Node *n ) {






1245   // If brand new node, make space in type array, and give it a type.
1246   ensure_type_or_null(n);
1247   if (type_or_null(n) == NULL) {
1248     set_type_bottom(n);
1249   }
1250 
1251   if (_delay_transform) {
1252     // Add the node to the worklist but don't optimize for now
1253     _worklist.push(n);
1254     return n;
1255   }
1256 
1257   return transform_old(n);
1258 }
1259 
1260 Node *PhaseIterGVN::transform_old(Node* n) {
1261   DEBUG_ONLY(uint loop_count = 0;);
1262   NOT_PRODUCT(set_transforms());
1263 
1264   // Remove 'n' from hash table in case it gets modified
1265   _table.hash_delete(n);
1266   if (VerifyIterativeGVN) {
1267    assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1268   }
1269 
1270   // Apply the Ideal call in a loop until it no longer applies
1271   Node* k = n;
1272   DEBUG_ONLY(dead_loop_check(k);)
1273   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1274   C->remove_modified_node(k);
1275   Node* i = apply_ideal(k, /*can_reshape=*/true);
1276   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");


1476       // root is usually dead. However, sometimes the reshaping walk makes
1477       // it reachable by adding use edges. So, we will NOT count Con nodes
1478       // as dead to be conservative about the dead node count at any
1479       // given time.
1480       if (!dead->is_Con()) {
1481         C->record_dead_node(dead->_idx);
1482       }
1483       if (dead->is_macro()) {
1484         C->remove_macro_node(dead);
1485       }
1486       if (dead->is_expensive()) {
1487         C->remove_expensive_node(dead);
1488       }
1489       CastIINode* cast = dead->isa_CastII();
1490       if (cast != NULL && cast->has_range_check()) {
1491         C->remove_range_check_cast(cast);
1492       }
1493       if (dead->Opcode() == Op_Opaque4) {
1494         C->remove_opaque4_node(dead);
1495       }
1496       if (dead->is_ValueTypeBase()) {
1497         C->remove_value_type(dead);
1498       }
1499       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1500       bs->unregister_potential_barrier_node(dead);
1501     }
1502   } // while (_stack.is_nonempty())
1503 }
1504 
1505 //------------------------------subsume_node-----------------------------------
1506 // Remove users from node 'old' and add them to node 'nn'.
1507 void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1508   if (old->Opcode() == Op_SafePoint) {
1509     old->as_SafePoint()->disconnect_from_root(this);
1510   }
1511   assert( old != hash_find(old), "should have already been removed" );
1512   assert( old != C->top(), "cannot subsume top node");
1513   // Copy debug or profile information to the new version:
1514   C->copy_node_notes_to(nn, old);
1515   // Move users of node 'old' to node 'nn'
1516   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1517     Node* use = old->last_out(i);  // for each use...
1518     // use might need re-hashing (but it won't if it's a new node)


1543     }
1544   }
1545 
1546   // Smash all inputs to 'old', isolating him completely
1547   Node *temp = new Node(1);
1548   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1549   remove_dead_node( old );
1550   temp->del_req(0);         // Yank bogus edge
1551 #ifndef PRODUCT
1552   if( VerifyIterativeGVN ) {
1553     for ( int i = 0; i < _verify_window_size; i++ ) {
1554       if ( _verify_window[i] == old )
1555         _verify_window[i] = nn;
1556     }
1557   }
1558 #endif
1559   _worklist.remove(temp);   // this can be necessary
1560   temp->destruct();         // reuse the _idx of this little guy
1561 }
1562 
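// Replace every use of 'n' with 'm'. Each rewritten user is queued for
// delayed rehashing, since changing an input changes its hash; a self-use
// of 'n' (u == n) is deliberately left untouched.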
1563 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1564   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1565     Node* u = n->fast_out(i);
1566     if (u != n) {
1567       rehash_node_delayed(u);
1568       int nb = u->replace_edge(n, m);
1569       --i, imax -= nb;
1570     }
1571   }
1572 }
1573 
1574 //------------------------------add_users_to_worklist--------------------------
1575 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1576   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1577     _worklist.push(n->fast_out(i));  // Push on worklist
1578   }
1579 }
1580 
1581 // Return the counted loop Phi if, as a counted loop exit condition,
1582 // cmp compares the induction variable with n
1583 static PhiNode* countedloop_phi_from_cmp(CmpINode* cmp, Node* n) {
1584   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1585     Node* bol = cmp->fast_out(i);
1586     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1587       Node* iff = bol->fast_out(i2);
1588       if (iff->is_CountedLoopEnd()) {
1589         CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
1590         if (cle->limit() == n) {
1591           PhiNode* phi = cle->phi();
1592           if (phi != NULL) {
1593             return phi;


1699     // If changed AddP inputs, check Stores for loop invariant
1700     if( use_op == Op_AddP ) {
1701       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1702         Node* u = use->fast_out(i2);
1703         if (u->is_Mem())
1704           _worklist.push(u);
1705       }
1706     }
1707     // If changed initialization activity, check dependent Stores
1708     if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1709       InitializeNode* init = use->as_Allocate()->initialization();
1710       if (init != NULL) {
1711         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1712         if (imem != NULL)  add_users_to_worklist0(imem);
1713       }
1714     }
1715     if (use_op == Op_Initialize) {
1716       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1717       if (imem != NULL)  add_users_to_worklist0(imem);
1718     }
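    // If changed CastP2X input, check AndX users: masking bits out of the
    // raw pointer value may now constant fold (the AndX is presumably a
    // mark/flag-bit test; the type propagation loop below has the same case).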
1719     if (use_op == Op_CastP2X) {
1720       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1721         Node* u = use->fast_out(i2);
1722         if (u->Opcode() == Op_AndX) {
1723           _worklist.push(u);
1724         }
1725       }
1726     }
1727     // Loading the java mirror from a Klass requires two loads and the type
1728     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1729     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1730     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1731     bool has_load_barriers = bs->has_load_barriers();
1732 
1733     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1734       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1735         Node* u = use->fast_out(i2);
1736         const Type* ut = u->bottom_type();
1737         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1738           if (has_load_barriers) {
1739             // Search for load barriers behind the load
1740             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1741               Node* b = u->fast_out(i3);
1742               if (bs->is_gc_barrier_node(b)) {
1743                 _worklist.push(b);
1744               }
1745             }
1746           }


1866         // they could be missed and get wrong types otherwise.
1867         uint m_op = m->Opcode();
1868         if (m_op == Op_AddI || m_op == Op_SubI) {
1869           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1870             Node* p = m->fast_out(i2); // Propagate changes to uses
1871             if (p->Opcode() == Op_CmpU) {
1872               // Got a CmpU which might need the new type information from node n.
1873               if(p->bottom_type() != type(p)) { // If not already bottomed out
1874                 worklist.push(p); // Propagate change to user
1875               }
1876             }
1877           }
1878         }
1879         // If n is used in a counted loop exit condition then the type
1880         // of the counted loop's Phi depends on the type of n. See
1881         // PhiNode::Value().
1882         if (m_op == Op_CmpI) {
1883           PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
1884           if (phi != NULL) {
1885             worklist.push(phi);
1886           }
1887         }
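        // Same pattern as in add_users_to_worklist above: an AndX masking
        // the CastP2X result may fold once the pointer's type improves.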
1888         if (m_op == Op_CastP2X) {
1889           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1890             Node* u = m->fast_out(i2);
1891             if (u->Opcode() == Op_AndX) {
1892               worklist.push(u);
1893             }
1894           }
1895         }
1896         // Loading the java mirror from a Klass requires two loads and the type
1897         // of the mirror load depends on the type of 'n'. See LoadNode::Value().
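        //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))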
1898         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1899         bool has_load_barriers = bs->has_load_barriers();
1900 
1901         if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
1902           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1903             Node* u = m->fast_out(i2);
1904             const Type* ut = u->bottom_type();
1905             if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
1906               if (has_load_barriers) {
1907                 // Search for load barriers behind the load
1908                 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1909                   Node* b = u->fast_out(i3);
1910                   if (bs->is_gc_barrier_node(b)) {
1911                     worklist.push(b);
1912                   }
1913                 }

