src/hotspot/share/opto/phaseX.cpp

1118   // CmpPNode performs deep traversals if it compares oopptrs. CmpP is not notified of changes far away.
1119   if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1120     // SubNode::Value
1121     // CmpPNode::sub
1122     // MemNode::detect_ptr_independence
1123     // MemNode::all_controls_dominate
1124     // We find all controls of a pointer load and check whether they dominate the control
1125     // of an allocation. If they all dominate, we know the allocation comes after (is
1126     // independent of) the pointer load, and we can conclude that the pointers are
1127     // different. For this we call n->dominates(sub, nlist) to check whether the controls
1128     // n of the pointer load dominate the control sub of the allocation. The problem is
1129     // that dominates sometimes answers false conservatively and can only later determine
1130     // that the answer is indeed true. Loops with Region heads can make it give up,
1131     // whereas LoopNodes are easier to skip, so the traversal becomes more powerful. This
1132     // is difficult to remedy; we would have to notify the CmpP of CFG updates. Luckily,
1133     // we recompute CmpP::Value during CCP after loop-opts, so that should take care of many of these cases.
1134     return false;
1135   }
1136   tty->cr();
1137   tty->print_cr("Missed Value optimization:");
1138   n->dump_bfs(1, nullptr, "");
1139   tty->print_cr("Current type:");
1140   told->dump_on(tty);
1141   tty->cr();
1142   tty->print_cr("Optimized type:");
1143   tnew->dump_on(tty);
1144   tty->cr();
1145   return true;
1146 }
1147 #endif
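
A rough sketch of the dominance reasoning described in the comment above. The helper below is illustrative only: 'dominates' and the flat control list are simplified stand-ins, not the HotSpot API (the real check lives in MemNode::all_controls_dominate).

    // Sketch only; 'dominates' and the argument shapes are assumed stand-ins.
    bool pointers_provably_different(Node** load_ctrls, int n_ctrls, Node* alloc_ctrl) {
      for (int i = 0; i < n_ctrls; i++) {
        if (!dominates(load_ctrls[i], alloc_ctrl)) {
          return false;  // conservative: independence not proven (may flip to true later)
        }
      }
      return true;  // the allocation happens after the load, so the pointers differ
    }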
1148 
1149 /**
1150  * Register a new node with the optimizer.  Update the types array, the def-use
1151  * info.  Put on worklist.
1152  */
1153 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1154   set_type_bottom(n);
1155   _worklist.push(n);
1156   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1157   return n;
1158 }
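
A hypothetical call site for register_new_node_with_optimizer(): a freshly created node has no type yet, so it is given the bottom type and queued so IGVN will visit it.

    // Usage sketch; 'igvn', 'a' and 'b' are assumed context, not from this file.
    Node* sum = new AddINode(a, b);                     // brand-new node, untyped
    sum = igvn->register_new_node_with_optimizer(sum);  // typed bottom, on worklist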
1159 
1160 //------------------------------transform--------------------------------------
1161 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1162 Node *PhaseIterGVN::transform( Node *n ) {
1163   if (_delay_transform) {
1164     // Register the node but don't optimize for now
1165     register_new_node_with_optimizer(n);
1166     return n;
1167   }
1168 
1169   // If brand new node, make space in type array, and give it a type.
1170   ensure_type_or_null(n);
1171   if (type_or_null(n) == nullptr) {
1172     set_type_bottom(n);
1173   }
1174 






1175   return transform_old(n);
1176 }
1177 
1178 Node *PhaseIterGVN::transform_old(Node* n) {
1179   NOT_PRODUCT(set_transforms());
1180   // Remove 'n' from hash table in case it gets modified
1181   _table.hash_delete(n);
1182 #ifdef ASSERT
1183   if (is_verify_def_use()) {
1184     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1185   }
1186 #endif
1187 
1188   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1189   if (n->is_Cmp()) {
1190     add_users_to_worklist(n);
1191   }
1192 
1193   // Apply the Ideal call in a loop until it no longer applies
1194   Node* k = n;

1425 
1426   // Smash all inputs to 'old', isolating him completely
1427   Node *temp = new Node(1);
1428   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1429   remove_dead_node( old );
1430   temp->del_req(0);         // Yank bogus edge
1431   if (nn != nullptr && nn->outcnt() == 0) {
1432     _worklist.push(nn);
1433   }
1434 #ifndef PRODUCT
1435   if (is_verify_def_use()) {
1436     for ( int i = 0; i < _verify_window_size; i++ ) {
1437       if ( _verify_window[i] == old )
1438         _verify_window[i] = nn;
1439     }
1440   }
1441 #endif
1442   temp->destruct(this);     // reuse the _idx of this little guy
1443 }
1444 













1445 //------------------------------add_users_to_worklist--------------------------
1446 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
1447   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1448     worklist.push(n->fast_out(i));  // Push on worklist
1449   }
1450 }
1451 
1452 // Return the counted-loop Phi if, as a counted-loop exit condition, 'cmp'
1453 // compares the induction variable with 'n'.
1454 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1455   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1456     Node* bol = cmp->fast_out(i);
1457     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1458       Node* iff = bol->fast_out(i2);
1459       if (iff->is_BaseCountedLoopEnd()) {
1460         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1461         if (cle->limit() == n) {
1462           PhiNode* phi = cle->phi();
1463           if (phi != nullptr) {
1464             return phi;

1480     add_users_of_use_to_worklist(n, use, worklist);
1481   }
1482 }
1483 
1484 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
1485   if(use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1486       use->is_Store() )       // Enable store/load same address
1487     add_users_to_worklist0(use, worklist);
1488 
1489   // If we changed the receiver type of a call, we need to revisit
1490   // the Catch following the call.  It's looking for a non-null
1491   // receiver to know when to enable the regular fall-through path
1492   // in addition to the NullPtrException path.
1493   if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1494     Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
1495     if (p != nullptr) {
1496       add_users_to_worklist0(p, worklist);
1497     }
1498   }
1499 










1500   uint use_op = use->Opcode();
1501   if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1502     add_users_to_worklist0(use, worklist); // Put Bool on worklist
1503     if (use->outcnt() > 0) {
1504       Node* bol = use->raw_out(0);
1505       if (bol->outcnt() > 0) {
1506         Node* iff = bol->raw_out(0);
1507         if (iff->outcnt() == 2) {
1508           // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1509           // Phi merging either 0 or 1 onto the worklist
1510           Node* ifproj0 = iff->raw_out(0);
1511           Node* ifproj1 = iff->raw_out(1);
1512           if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1513             Node* region0 = ifproj0->raw_out(0);
1514             Node* region1 = ifproj1->raw_out(0);
1515             if( region0 == region1 )
1516               add_users_to_worklist0(region0, worklist);
1517           }
1518         }
1519       }

1577           assert(n == in2, "only in2 modified");
1578           // Find all CastII with input in1.
1579           for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1580             Node* castii = in1->fast_out(j);
1581             if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1582               // Find If.
1583               if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1584                 Node* ifnode = castii->in(0)->in(0);
1585                 // Check that the If connects to the cmp
1586                 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1587                   worklist.push(castii);
1588                 }
1589               }
1590             }
1591           }
1592         }
1593       }
1594     }
1595   }
1596 









1597   // If a Cast input changed, notify Phi, Sub, and Xor users down the chain - they all do "uncast"
1598   // Patterns:
1599   // ConstraintCast+ -> Sub
1600   // ConstraintCast+ -> Phi
1601   // ConstraintCast+ -> Xor
1602   if (use->is_ConstraintCast()) {
1603     auto push_the_uses_to_worklist = [&](Node* n){
1604       if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
1605         worklist.push(n);
1606       }
1607     };
1608     auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
1609     use->visit_uses(push_the_uses_to_worklist, is_boundary);
1610   }
1611   // If LShift inputs changed, check RShift users for useless sign-extension
1612   if( use_op == Op_LShiftI ) {
1613     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1614       Node* u = use->fast_out(i2);
1615       if (u->Opcode() == Op_RShiftI)
1616         worklist.push(u);

1653   // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1654   // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1655   // to guarantee the change is not missed.
1656   if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1657     Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1658     if (p != nullptr) {
1659       add_users_to_worklist0(p, worklist);
1660     }
1661   }
1662 
1663   if (use_op == Op_Initialize) {
1664     Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1665     if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1666   }
1667   // Loading the java mirror from a Klass requires two loads and the type
1668   // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1669   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1670   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1671   bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1672 








1673   if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1674     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1675       Node* u = use->fast_out(i2);
1676       const Type* ut = u->bottom_type();
1677       if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1678         if (has_load_barrier_nodes) {
1679           // Search for load barriers behind the load
1680           for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1681             Node* b = u->fast_out(i3);
1682             if (bs->is_gc_barrier_node(b)) {
1683               worklist.push(b);
1684             }
1685           }
1686         }
1687         worklist.push(u);
1688       }
1689     }
1690   }










1691   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1692     assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1693     if (use->outcnt() == 1) {
1694       Node* cmp = use->unique_out();
1695       worklist.push(cmp);
1696     }
1697   }
1698 }
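
A hedged illustration of the 'is_x2logic' shape targeted by the Cmp handling in the function above, with the hypothetical IR it produces:

    // Source shape:  int r = (x < y) ? 0 : 1;
    // IR sketch:     CmpI -> Bool -> If -> {IfTrue, IfFalse} -> Region -> Phi(0, 1)
    // When the Cmp changes, the Phi merging 0 and 1 may fold into plain boolean
    // arithmetic, which is why the users of the common Region are pushed.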
1699 
1700 /**
1701  * Remove the speculative part of all types that we know of
1702  */
1703 void PhaseIterGVN::remove_speculative_types()  {
1704   assert(UseTypeSpeculation, "speculation is off");
1705   for (uint i = 0; i < _types.Size(); i++)  {
1706     const Type* t = _types.fast_lookup(i);
1707     if (t != nullptr) {
1708       _types.map(i, t->remove_speculative());
1709     }
1710   }

1746 //------------------------------PhaseCCP---------------------------------------
1747 // Conditional Constant Propagation, a la Wegman & Zadeck
1748 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1749   NOT_PRODUCT( clear_constants(); )
1750   assert( _worklist.size() == 0, "" );
1751   analyze();
1752 }
1753 
1754 #ifndef PRODUCT
1755 //------------------------------~PhaseCCP--------------------------------------
1756 PhaseCCP::~PhaseCCP() {
1757   inc_invokes();
1758   _total_constants += count_constants();
1759 }
1760 #endif
1761 
1762 
1763 #ifdef ASSERT
1764 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
1765   if (tnew->meet(told) != tnew->remove_speculative()) {
1766     n->dump(1);
1767     tty->print("told = "); told->dump(); tty->cr();
1768     tty->print("tnew = "); tnew->dump(); tty->cr();
1769     fatal("Not monotonic");
1770   }
1771   assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
1772   assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
1773 }
1774 #endif //ASSERT
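
A worked example (with illustrative ranges) of the monotonicity invariant verify_type() checks: CCP may only move a type down the lattice, so meeting the old type into the new one must leave the new type unchanged.

    // OK:        told = TOP,         tnew = int:[0..5]   -> tnew->meet(told) == tnew
    // OK:        told = int:[0..5],  tnew = int:[0..10]  -> meet widens to [0..10] == tnew
    // VIOLATION: told = int:[0..10], tnew = int:[0..5]   -> meet is [0..10] != tnew,
    //            the type moved *up* the lattice and verify_type() calls fatal().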
1775 
1776 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph
1777 // until we reach a fixed point (i.e. no types change anymore). We start with a list that only contains the root
1778 // node. Each time a new type is set, we push all uses of that node back to the worklist (in some cases, we also
1779 // push grandchildren or nodes even further down, because their type could change as a result of the current
1780 // type change).
1781 void PhaseCCP::analyze() {
1782   // Initialize all types to TOP, optimistic analysis
1783   for (uint i = 0; i < C->unique(); i++)  {
1784     _types.map(i, Type::TOP);
1785   }
1786 

1863   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1864     Node* use = n->fast_out(i);
1865     push_if_not_bottom_type(worklist, use);
1866     push_more_uses(worklist, n, use);
1867   }
1868 }
1869 
1870 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1871   if (n->bottom_type() != type(n)) {
1872     worklist.push(n);
1873   }
1874 }
1875 
1876 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1877 // Add them back to the worklist.
1878 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1879   push_phis(worklist, use);
1880   push_catch(worklist, use);
1881   push_cmpu(worklist, use);
1882   push_counted_loop_phi(worklist, parent, use);

1883   push_loadp(worklist, use);
1884   push_and(worklist, parent, use);
1885   push_cast_ii(worklist, parent, use);
1886   push_opaque_zero_trip_guard(worklist, use);
1887 }
1888 
1889 
1890 // We must recheck Phis too if use is a Region.
1891 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1892   if (use->is_Region()) {
1893     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1894       push_if_not_bottom_type(worklist, use->fast_out(i));
1895     }
1896   }
1897 }
1898 
1899 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1900 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1901 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1902 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

1925       if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) {
1926         // Got a CmpU or CmpU3 which might need the new type information from node n.
1927         push_if_not_bottom_type(worklist, cmpu);
1928       }
1929     }
1930   }
1931 }
1932 
1933 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1934 // See PhiNode::Value().
1935 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1936   uint use_op = use->Opcode();
1937   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1938     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1939     if (phi != nullptr) {
1940       worklist.push(phi);
1941     }
1942   }
1943 }
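
A hedged sketch of the chain countedloop_phi_from_cmp() recognizes, for a loop like 'for (int i = 0; i < limit; i++)':

    // IR sketch:  iv = Phi(CountedLoop, #0, incr)
    //             exit test: CmpI(iv, limit) -> Bool -> CountedLoopEnd
    // PhiNode::Value() bounds the induction variable by type(limit), so when
    // the limit's type narrows, the Phi's type can narrow too and must be redone.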
1944 












1945 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1946 // See LoadNode::Value().
1947 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
1948   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
1949   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
1950 
1951   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1952     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1953       Node* loadp = use->fast_out(i);
1954       const Type* ut = loadp->bottom_type();
1955       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
1956         if (has_load_barrier_nodes) {
1957           // Search for load barriers behind the load
1958           push_load_barrier(worklist, barrier_set, loadp);
1959         }
1960         worklist.push(loadp);
1961       }
1962     }
1963   }
1964 }

1118   // CmpPNode performs deep traversals if it compares oopptrs. CmpP is not notified of changes far away.
1119   if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1120     // SubNode::Value
1121     // CmpPNode::sub
1122     // MemNode::detect_ptr_independence
1123     // MemNode::all_controls_dominate
1124     // We find all controls of a pointer load and check whether they dominate the control
1125     // of an allocation. If they all dominate, we know the allocation comes after (is
1126     // independent of) the pointer load, and we can conclude that the pointers are
1127     // different. For this we call n->dominates(sub, nlist) to check whether the controls
1128     // n of the pointer load dominate the control sub of the allocation. The problem is
1129     // that dominates sometimes answers false conservatively and can only later determine
1130     // that the answer is indeed true. Loops with Region heads can make it give up,
1131     // whereas LoopNodes are easier to skip, so the traversal becomes more powerful. This
1132     // is difficult to remedy; we would have to notify the CmpP of CFG updates. Luckily,
1133     // we recompute CmpP::Value during CCP after loop-opts, so that should take care of many of these cases.
1134     return false;
1135   }
1136   tty->cr();
1137   tty->print_cr("Missed Value optimization:");
1138   n->dump_bfs(3, nullptr, "");
1139   tty->print_cr("Current type:");
1140   told->dump_on(tty);
1141   tty->cr();
1142   tty->print_cr("Optimized type:");
1143   tnew->dump_on(tty);
1144   tty->cr();
1145   return true;
1146 }
1147 #endif
1148 
1149 /**
1150  * Register a new node with the optimizer.  Update the types array, the def-use
1151  * info.  Put on worklist.
1152  */
1153 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1154   set_type_bottom(n);
1155   _worklist.push(n);
1156   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1157   return n;
1158 }
1159 
1160 //------------------------------transform--------------------------------------
1161 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1162 Node *PhaseIterGVN::transform( Node *n ) {






1163   // If brand new node, make space in type array, and give it a type.
1164   ensure_type_or_null(n);
1165   if (type_or_null(n) == nullptr) {
1166     set_type_bottom(n);
1167   }
1168 
1169   if (_delay_transform) {
1170     // Add the node to the worklist but don't optimize for now
1171     _worklist.push(n);
1172     return n;
1173   }
1174 
1175   return transform_old(n);
1176 }
1177 
1178 Node *PhaseIterGVN::transform_old(Node* n) {
1179   NOT_PRODUCT(set_transforms());
1180   // Remove 'n' from hash table in case it gets modified
1181   _table.hash_delete(n);
1182 #ifdef ASSERT
1183   if (is_verify_def_use()) {
1184     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1185   }
1186 #endif
1187 
1188   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1189   if (n->is_Cmp()) {
1190     add_users_to_worklist(n);
1191   }
1192 
1193   // Apply the Ideal call in a loop until it no longer applies
1194   Node* k = n;

1425 
1426   // Smash all inputs to 'old', isolating him completely
1427   Node *temp = new Node(1);
1428   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1429   remove_dead_node( old );
1430   temp->del_req(0);         // Yank bogus edge
1431   if (nn != nullptr && nn->outcnt() == 0) {
1432     _worklist.push(nn);
1433   }
1434 #ifndef PRODUCT
1435   if (is_verify_def_use()) {
1436     for ( int i = 0; i < _verify_window_size; i++ ) {
1437       if ( _verify_window[i] == old )
1438         _verify_window[i] = nn;
1439     }
1440   }
1441 #endif
1442   temp->destruct(this);     // reuse the _idx of this little guy
1443 }
1444 
1445 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1446   assert(n != nullptr, "sanity");
1447   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1448     Node* u = n->fast_out(i);
1449     if (u != n) {
1450       rehash_node_delayed(u);
1451       int nb = u->replace_edge(n, m);
1452       --i, imax -= nb;
1453     }
1454   }
1455   assert(n->outcnt() == 0, "all uses must be deleted");
1456 }
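
A hypothetical usage of replace_in_uses(): once 'n' is known to be equivalent to 'm', all of n's users are rewired to m (each user is rehashed because its inputs changed), leaving n dead. Note that replace_edge() returns the number of edges replaced, which is why the iterator bounds above shrink by 'nb'.

    // Usage sketch; 'igvn', 'n' and 'm' are assumed context.
    igvn->replace_in_uses(n, m);   // afterwards n->outcnt() == 0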
1457 
1458 //------------------------------add_users_to_worklist--------------------------
1459 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
1460   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1461     worklist.push(n->fast_out(i));  // Push on worklist
1462   }
1463 }
1464 
1465 // Return the counted-loop Phi if, as a counted-loop exit condition, 'cmp'
1466 // compares the induction variable with 'n'.
1467 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1468   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1469     Node* bol = cmp->fast_out(i);
1470     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1471       Node* iff = bol->fast_out(i2);
1472       if (iff->is_BaseCountedLoopEnd()) {
1473         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1474         if (cle->limit() == n) {
1475           PhiNode* phi = cle->phi();
1476           if (phi != nullptr) {
1477             return phi;

1493     add_users_of_use_to_worklist(n, use, worklist);
1494   }
1495 }
1496 
1497 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
1498   if(use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1499       use->is_Store() )       // Enable store/load same address
1500     add_users_to_worklist0(use, worklist);
1501 
1502   // If we changed the receiver type of a call, we need to revisit
1503   // the Catch following the call.  It's looking for a non-null
1504   // receiver to know when to enable the regular fall-through path
1505   // in addition to the NullPtrException path.
1506   if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1507     Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
1508     if (p != nullptr) {
1509       add_users_to_worklist0(p, worklist);
1510     }
1511   }
1512 
1513   // AndLNode::Ideal folds GraphKit::mark_word_test patterns. Give it a chance to run.
1514   if (n->is_Load() && use->is_Phi()) {
1515     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1516       Node* u = use->fast_out(i);
1517       if (u->Opcode() == Op_AndL) {
1518         worklist.push(u);
1519       }
1520     }
1521   }
1522 
1523   uint use_op = use->Opcode();
1524   if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1525     add_users_to_worklist0(use, worklist); // Put Bool on worklist
1526     if (use->outcnt() > 0) {
1527       Node* bol = use->raw_out(0);
1528       if (bol->outcnt() > 0) {
1529         Node* iff = bol->raw_out(0);
1530         if (iff->outcnt() == 2) {
1531           // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1532           // Phi merging either 0 or 1 onto the worklist
1533           Node* ifproj0 = iff->raw_out(0);
1534           Node* ifproj1 = iff->raw_out(1);
1535           if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1536             Node* region0 = ifproj0->raw_out(0);
1537             Node* region1 = ifproj1->raw_out(0);
1538             if( region0 == region1 )
1539               add_users_to_worklist0(region0, worklist);
1540           }
1541         }
1542       }

1600           assert(n == in2, "only in2 modified");
1601           // Find all CastII with input in1.
1602           for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1603             Node* castii = in1->fast_out(j);
1604             if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1605               // Find If.
1606               if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1607                 Node* ifnode = castii->in(0)->in(0);
1608                 // Check that the If connects to the cmp
1609                 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1610                   worklist.push(castii);
1611                 }
1612               }
1613             }
1614           }
1615         }
1616       }
1617     }
1618   }
1619 
1620   // Inline type nodes can have other inline types as users. If an input gets
1621   // updated, make sure that inline type users get a chance for optimization.
1622   if (use->is_InlineType()) {
1623     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1624       Node* u = use->fast_out(i2);
1625       if (u->is_InlineType())
1626         worklist.push(u);
1627     }
1628   }
1629   // If a Cast input changed, notify Phi, Sub, and Xor users down the chain - they all do "uncast"
1630   // Patterns:
1631   // ConstraintCast+ -> Sub
1632   // ConstraintCast+ -> Phi
1633   // ConstraintCast+ -> Xor
1634   if (use->is_ConstraintCast()) {
1635     auto push_the_uses_to_worklist = [&](Node* n){
1636       if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
1637         worklist.push(n);
1638       }
1639     };
1640     auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
1641     use->visit_uses(push_the_uses_to_worklist, is_boundary);
1642   }
1643   // If LShift inputs changed, check RShift users for useless sign-extension
1644   if( use_op == Op_LShiftI ) {
1645     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1646       Node* u = use->fast_out(i2);
1647       if (u->Opcode() == Op_RShiftI)
1648         worklist.push(u);

1685   // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1686   // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1687   // to guarantee the change is not missed.
1688   if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1689     Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1690     if (p != nullptr) {
1691       add_users_to_worklist0(p, worklist);
1692     }
1693   }
1694 
1695   if (use_op == Op_Initialize) {
1696     Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1697     if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1698   }
1699   // Loading the java mirror from a Klass requires two loads and the type
1700   // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1701   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1702   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1703   bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1704 
1705   if (use_op == Op_CastP2X) {
1706     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1707       Node* u = use->fast_out(i2);
1708       if (u->Opcode() == Op_AndX) {
1709         worklist.push(u);
1710       }
1711     }
1712   }
1713   if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1714     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1715       Node* u = use->fast_out(i2);
1716       const Type* ut = u->bottom_type();
1717       if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1718         if (has_load_barrier_nodes) {
1719           // Search for load barriers behind the load
1720           for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1721             Node* b = u->fast_out(i3);
1722             if (bs->is_gc_barrier_node(b)) {
1723               worklist.push(b);
1724             }
1725           }
1726         }
1727         worklist.push(u);
1728       }
1729     }
1730   }
1731   // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1732   if (use->is_Region()) {
1733     Node* c = use;
1734     do {
1735       c = c->unique_ctrl_out_or_null();
1736     } while (c != nullptr && c->is_Region());
1737     if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1738       worklist.push(c);
1739     }
1740   }
1741   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1742     assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1743     if (use->outcnt() == 1) {
1744       Node* cmp = use->unique_out();
1745       worklist.push(cmp);
1746     }
1747   }
1748 }
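
A hedged example of the ConstraintCast notification in the function above: Phi, Sub, and Xor compare operands through uncast(), so a change below a cast chain can enable folding in those users.

    // IR sketch: SubI(CastII(x), x). SubNode compares its inputs via uncast(),
    // so the subtraction can fold to 0 even though the raw inputs differ.
    // That is why uses are pushed across the whole ConstraintCast chain.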
1749 
1750 /**
1751  * Remove the speculative part of all types that we know of
1752  */
1753 void PhaseIterGVN::remove_speculative_types()  {
1754   assert(UseTypeSpeculation, "speculation is off");
1755   for (uint i = 0; i < _types.Size(); i++)  {
1756     const Type* t = _types.fast_lookup(i);
1757     if (t != nullptr) {
1758       _types.map(i, t->remove_speculative());
1759     }
1760   }

1796 //------------------------------PhaseCCP---------------------------------------
1797 // Conditional Constant Propagation, a la Wegman & Zadeck
1798 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1799   NOT_PRODUCT( clear_constants(); )
1800   assert( _worklist.size() == 0, "" );
1801   analyze();
1802 }
1803 
1804 #ifndef PRODUCT
1805 //------------------------------~PhaseCCP--------------------------------------
1806 PhaseCCP::~PhaseCCP() {
1807   inc_invokes();
1808   _total_constants += count_constants();
1809 }
1810 #endif
1811 
1812 
1813 #ifdef ASSERT
1814 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
1815   if (tnew->meet(told) != tnew->remove_speculative()) {
1816     n->dump(3);
1817     tty->print("told = "); told->dump(); tty->cr();
1818     tty->print("tnew = "); tnew->dump(); tty->cr();
1819     fatal("Not monotonic");
1820   }
1821   assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
1822   assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
1823 }
1824 #endif //ASSERT
1825 
1826 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph
1827 // until we reach a fixed point (i.e. no types change anymore). We start with a list that only contains the root
1828 // node. Each time a new type is set, we push all uses of that node back to the worklist (in some cases, we also
1829 // push grandchildren or nodes even further down, because their type could change as a result of the current
1830 // type change).
1831 void PhaseCCP::analyze() {
1832   // Initialize all types to TOP, optimistic analysis
1833   for (uint i = 0; i < C->unique(); i++)  {
1834     _types.map(i, Type::TOP);
1835   }
1836 

1913   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1914     Node* use = n->fast_out(i);
1915     push_if_not_bottom_type(worklist, use);
1916     push_more_uses(worklist, n, use);
1917   }
1918 }
1919 
1920 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1921   if (n->bottom_type() != type(n)) {
1922     worklist.push(n);
1923   }
1924 }
1925 
1926 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1927 // Add them back to the worklist.
1928 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1929   push_phis(worklist, use);
1930   push_catch(worklist, use);
1931   push_cmpu(worklist, use);
1932   push_counted_loop_phi(worklist, parent, use);
1933   push_cast(worklist, use);
1934   push_loadp(worklist, use);
1935   push_and(worklist, parent, use);
1936   push_cast_ii(worklist, parent, use);
1937   push_opaque_zero_trip_guard(worklist, use);
1938 }
1939 
1940 
1941 // We must recheck Phis too if use is a Region.
1942 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1943   if (use->is_Region()) {
1944     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1945       push_if_not_bottom_type(worklist, use->fast_out(i));
1946     }
1947   }
1948 }
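
A hedged example of why every Phi on a changed Region is rechecked:

    // IR sketch: Phi(Region, v1, v2). If the Region's second input becomes
    // unreachable (TOP) during CCP, PhiNode::Value() can ignore v2 and the
    // Phi's type may sharpen to type(v1), so all Phis on the Region are pushed.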
1949 
1950 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1951 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1952 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1953 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

1976       if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) {
1977         // Got a CmpU or CmpU3 which might need the new type information from node n.
1978         push_if_not_bottom_type(worklist, cmpu);
1979       }
1980     }
1981   }
1982 }
1983 
1984 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1985 // See PhiNode::Value().
1986 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1987   uint use_op = use->Opcode();
1988   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1989     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1990     if (phi != nullptr) {
1991       worklist.push(phi);
1992     }
1993   }
1994 }
1995 
1996 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
1997   uint use_op = use->Opcode();
1998   if (use_op == Op_CastP2X) {
1999     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2000       Node* u = use->fast_out(i2);
2001       if (u->Opcode() == Op_AndX) {
2002         worklist.push(u);
2003       }
2004     }
2005   }
2006 }
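
A hedged sketch of the shape push_cast() appears to target (compare the GraphKit::mark_word_test note earlier in this file); the exact pattern here is an assumption:

    // Assumed IR shape: AndX(CastP2X(obj), #mask) -- a pointer viewed as a
    // machine word and masked. When type(obj) improves, the AndX may now
    // constant-fold, so AndX users of the CastP2X are pushed explicitly.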
2007 
2008 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
2009 // See LoadNode::Value().
2010 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2011   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2012   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2013 
2014   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2015     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2016       Node* loadp = use->fast_out(i);
2017       const Type* ut = loadp->bottom_type();
2018       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2019         if (has_load_barrier_nodes) {
2020           // Search for load barriers behind the load
2021           push_load_barrier(worklist, barrier_set, loadp);
2022         }
2023         worklist.push(loadp);
2024       }
2025     }
2026   }
2027 }
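
A hedged sketch of the two-load mirror pattern named in the comment above (see LoadNode::Value()):

    // IR sketch: mirror = LoadP(LoadP(AddP(klass, #java_mirror)))
    // The inner LoadP yields a raw pointer (the OopHandle slot in the Klass);
    // the outer LoadP yields the java.lang.Class instptr whose type depends on
    // the inner load. Improving the inner load therefore re-pushes the outer
    // load and any GC load barrier behind it.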