src/hotspot/share/opto/phaseX.cpp

1118   // CmpPNode performs deep traversals if it compares oopptrs. CmpP is not notified of changes far away.
1119   if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1120     // SubNode::Value
1121     // CmpPNode::sub
1122     // MemNode::detect_ptr_independence
1123     // MemNode::all_controls_dominate
1124     // We find all controls of a pointer load and check whether they dominate the
1125     // control of an allocation. If they all dominate, the allocation comes after
1126     // (is independent of) the pointer load, so the pointers must differ. For this
1127     // we call n->dominates(sub, nlist) to check if the controls n of the pointer
1128     // load dominate the control sub of the allocation. The problem is that
1129     // dominates sometimes answers false conservatively and only later can
1130     // determine that the answer is indeed true. Loops with Region heads can make
1131     // it give up, whereas LoopNodes can be skipped more easily, so the traversal
1132     // becomes more powerful after loop-opts. This is difficult to remedy: we would
1133     // have to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP after loop-opts, so that should take care of many of these cases.
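    // Illustrative shape (hypothetical example, not from the source):
    //   CmpP(LoadP(mem, adr), <result of Allocate>)
    // If every control of the LoadP dominates the Allocate's control, the loaded
    // pointer cannot be the new object, so the CmpP can constant-fold. When
    // dominates() gives up, e.g. on a Region-headed loop, the fold is missed
    // until CCP recomputes CmpP::Value.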
1134     return false;
1135   }
1136   tty->cr();
1137   tty->print_cr("Missed Value optimization:");
1138   n->dump_bfs(1, nullptr, "");
1139   tty->print_cr("Current type:");
1140   told->dump_on(tty);
1141   tty->cr();
1142   tty->print_cr("Optimized type:");
1143   tnew->dump_on(tty);
1144   tty->cr();
1145   return true;
1146 }
1147 #endif
1148 
1149 /**
1150  * Register a new node with the optimizer.  Update the types array, the def-use
1151  * info.  Put on worklist.
1152  */
1153 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1154   set_type_bottom(n);
1155   _worklist.push(n);
1156   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1157   return n;
1158 }
1159 
1160 //------------------------------transform--------------------------------------
1161 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1162 Node *PhaseIterGVN::transform( Node *n ) {
1163   if (_delay_transform) {
1164     // Register the node but don't optimize for now
1165     register_new_node_with_optimizer(n);
1166     return n;
1167   }
1168 
1169   // If brand new node, make space in type array, and give it a type.
1170   ensure_type_or_null(n);
1171   if (type_or_null(n) == nullptr) {
1172     set_type_bottom(n);
1173   }
1174 
1175   return transform_old(n);
1176 }
1177 
1178 Node *PhaseIterGVN::transform_old(Node* n) {
1179   NOT_PRODUCT(set_transforms());
1180   // Remove 'n' from hash table in case it gets modified
1181   _table.hash_delete(n);
1182 #ifdef ASSERT
1183   if (is_verify_def_use()) {
1184     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1185   }
1186 #endif
1187 
1188   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1189   if (n->is_Cmp()) {
1190     add_users_to_worklist(n);
1191   }
1192 
1193   // Apply the Ideal call in a loop until it no longer applies
1194   Node* k = n;

1427 
1428   // Smash all inputs to 'old', isolating him completely
1429   Node *temp = new Node(1);
1430   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1431   remove_dead_node( old );
1432   temp->del_req(0);         // Yank bogus edge
1433   if (nn != nullptr && nn->outcnt() == 0) {
1434     _worklist.push(nn);
1435   }
1436 #ifndef PRODUCT
1437   if (is_verify_def_use()) {
1438     for ( int i = 0; i < _verify_window_size; i++ ) {
1439       if ( _verify_window[i] == old )
1440         _verify_window[i] = nn;
1441     }
1442   }
1443 #endif
1444   temp->destruct(this);     // reuse the _idx of this little guy
1445 }
1446 
1447 //------------------------------add_users_to_worklist--------------------------
1448 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
1449   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1450     worklist.push(n->fast_out(i));  // Push on worklist
1451   }
1452 }
1453 
1454 // Return the counted loop Phi if, as a counted loop exit condition, 'cmp'
1455 // compares the induction variable with 'n'
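// Illustrative shape being matched:
//   CmpI/CmpL(iv, n) -> Bool -> BaseCountedLoopEnd   (n is the loop limit)
// The returned Phi is the loop's induction variable.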
1456 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1457   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1458     Node* bol = cmp->fast_out(i);
1459     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1460       Node* iff = bol->fast_out(i2);
1461       if (iff->is_BaseCountedLoopEnd()) {
1462         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1463         if (cle->limit() == n) {
1464           PhiNode* phi = cle->phi();
1465           if (phi != nullptr) {
1466             return phi;

1482     add_users_of_use_to_worklist(n, use, worklist);
1483   }
1484 }
1485 
1486 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
1487   if(use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1488       use->is_Store() )       // Enable store/load same address
1489     add_users_to_worklist0(use, worklist);
1490 
1491   // If we changed the receiver type of a call, we need to revisit
1492   // the Catch following the call.  It's looking for a non-null
1493   // receiver to know when to enable the regular fall-through path
1494   // in addition to the NullPtrException path.
1495   if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1496     Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
1497     if (p != nullptr) {
1498       add_users_to_worklist0(p, worklist);
1499     }
1500   }
1501 
1502   uint use_op = use->Opcode();
1503   if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1504     add_users_to_worklist0(use, worklist); // Put Bool on worklist
1505     if (use->outcnt() > 0) {
1506       Node* bol = use->raw_out(0);
1507       if (bol->outcnt() > 0) {
1508         Node* iff = bol->raw_out(0);
1509         if (iff->outcnt() == 2) {
1510           // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1511           // phi merging either 0 or 1 onto the worklist
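          // Illustrative shape: Phi(Region(IfTrue, IfFalse), 0, 1) where the If
          // tests Bool(use); PhiNode::Ideal can turn such a Phi back into logic
          // on the comparison, so the Region's Phis must be re-examined.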
1512           Node* ifproj0 = iff->raw_out(0);
1513           Node* ifproj1 = iff->raw_out(1);
1514           if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1515             Node* region0 = ifproj0->raw_out(0);
1516             Node* region1 = ifproj1->raw_out(0);
1517             if( region0 == region1 )
1518               add_users_to_worklist0(region0, worklist);
1519           }
1520         }
1521       }

1579           assert(n == in2, "only in2 modified");
1580           // Find all CastII with input in1.
1581           for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1582             Node* castii = in1->fast_out(j);
1583             if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1584               // Find If.
1585               if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1586                 Node* ifnode = castii->in(0)->in(0);
1587                 // Check that the If connects to the cmp
1588                 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1589                   worklist.push(castii);
1590                 }
1591               }
1592             }
1593           }
1594         }
1595       }
1596     }
1597   }
1598 
1599   // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
1600   // Patterns:
1601   // ConstraintCast+ -> Sub
1602   // ConstraintCast+ -> Phi
1603   // ConstraintCast+ -> Xor
1604   if (use->is_ConstraintCast()) {
1605     auto push_the_uses_to_worklist = [&](Node* n){
1606       if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
1607         worklist.push(n);
1608       }
1609     };
1610     auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
1611     use->visit_uses(push_the_uses_to_worklist, is_boundary);
1612   }
1613   // If changed LShift inputs, check RShift users for useless sign-ext
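  // (e.g. (x << 16) >> 16 sign-extends the low 16 bits; if the shift input's new
  //  type already fits in 16 bits, RShiftI::Ideal can drop the useless pair.)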
1614   if( use_op == Op_LShiftI ) {
1615     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1616       Node* u = use->fast_out(i2);
1617       if (u->Opcode() == Op_RShiftI)
1618         worklist.push(u);

1662   // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1663   // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1664   // to guarantee the change is not missed.
1665   if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1666     Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1667     if (p != nullptr) {
1668       add_users_to_worklist0(p, worklist);
1669     }
1670   }
1671 
1672   if (use_op == Op_Initialize) {
1673     Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1674     if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1675   }
1676   // Loading the java mirror from a Klass requires two loads and the type
1677   // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1678   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
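  //   (The mirror is reached through an OopHandle in the Klass, hence the two
  //    dependent LoadP nodes; this is, e.g., the shape Object.getClass() takes.)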
1679   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1680   bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1681 
1682   if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1683     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1684       Node* u = use->fast_out(i2);
1685       const Type* ut = u->bottom_type();
1686       if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1687         if (has_load_barrier_nodes) {
1688           // Search for load barriers behind the load
1689           for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1690             Node* b = u->fast_out(i3);
1691             if (bs->is_gc_barrier_node(b)) {
1692               worklist.push(b);
1693             }
1694           }
1695         }
1696         worklist.push(u);
1697       }
1698     }
1699   }
1700   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1701     assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1702     if (use->outcnt() == 1) {
1703       Node* cmp = use->unique_out();
1704       worklist.push(cmp);
1705     }
1706   }
1707   if (use->Opcode() == Op_AddX) {
1708     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1709       Node* u = use->fast_out(i2);
1710       if (u->Opcode() == Op_CastX2P) {
1711         worklist.push(u);
1712       }
1713     }
1714   }
1715 }
1716 
1717 /**
1718  * Remove the speculative part of all types that we know of
1719  */

1767 //------------------------------PhaseCCP---------------------------------------
1768 // Conditional Constant Propagation, ala Wegman & Zadeck
1769 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1770   NOT_PRODUCT( clear_constants(); )
1771   assert( _worklist.size() == 0, "" );
1772   analyze();
1773 }
1774 
1775 #ifndef PRODUCT
1776 //------------------------------~PhaseCCP--------------------------------------
1777 PhaseCCP::~PhaseCCP() {
1778   inc_invokes();
1779   _total_constants += count_constants();
1780 }
1781 #endif
1782 
1783 
1784 #ifdef ASSERT
1785 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
1786   if (tnew->meet(told) != tnew->remove_speculative()) {
1787     n->dump(1);
1788     tty->print("told = "); told->dump(); tty->cr();
1789     tty->print("tnew = "); tnew->dump(); tty->cr();
1790     fatal("Not monotonic");
1791   }
1792   assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
1793   assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
1794 }
1795 #endif //ASSERT
1796 
1797 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph until
1798 // we reach a fixed-point (i.e. no types change anymore). We start with a list that only contains the root node. Each time
1799 // a new type is set, we push all uses of that node back to the worklist (in some cases, we also push grandchildren
1800 // or nodes even further down back to the worklist because their type could change as a result of the current type
1801 // change).
1802 void PhaseCCP::analyze() {
1803   // Initialize all types to TOP, optimistic analysis
1804   for (uint i = 0; i < C->unique(); i++)  {
1805     _types.map(i, Type::TOP);
1806   }
1807 
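A sketch of the fixpoint loop that follows (simplified; the elided code below also
carries debug verification and its own worklist-ordering policy):

    worklist.push(C->root());
    while (worklist.size() > 0) {
      Node* n = worklist.pop();
      const Type* t = n->Value(this);    // recompute the type from input types
      if (t != type(n)) {                // type moved down, away from TOP
        set_type(n, t);
        push_child_nodes_to_worklist(worklist, n);  // uses, plus special cases
      }
    }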

1884   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1885     Node* use = n->fast_out(i);
1886     push_if_not_bottom_type(worklist, use);
1887     push_more_uses(worklist, n, use);
1888   }
1889 }
1890 
1891 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1892   if (n->bottom_type() != type(n)) {
1893     worklist.push(n);
1894   }
1895 }
1896 
1897 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1898 // Add them back to the worklist.
1899 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1900   push_phis(worklist, use);
1901   push_catch(worklist, use);
1902   push_cmpu(worklist, use);
1903   push_counted_loop_phi(worklist, parent, use);
1904   push_loadp(worklist, use);
1905   push_and(worklist, parent, use);
1906   push_cast_ii(worklist, parent, use);
1907   push_opaque_zero_trip_guard(worklist, use);
1908 }
1909 
1910 
1911 // We must recheck Phis too if use is a Region.
1912 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1913   if (use->is_Region()) {
1914     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1915       push_if_not_bottom_type(worklist, use->fast_out(i));
1916     }
1917   }
1918 }
1919 
1920 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1921 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1922 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1923 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

1946       if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) {
1947         // Got a CmpU or CmpU3 which might need the new type information from node n.
1948         push_if_not_bottom_type(worklist, cmpu);
1949       }
1950     }
1951   }
1952 }
1953 
1954 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1955 // See PhiNode::Value().
1956 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1957   uint use_op = use->Opcode();
1958   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1959     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1960     if (phi != nullptr) {
1961       worklist.push(phi);
1962     }
1963   }
1964 }
1965 
1966 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1967 // See LoadNode::Value().
1968 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
1969   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
1970   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
1971 
1972   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1973     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1974       Node* loadp = use->fast_out(i);
1975       const Type* ut = loadp->bottom_type();
1976       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
1977         if (has_load_barrier_nodes) {
1978           // Search for load barriers behind the load
1979           push_load_barrier(worklist, barrier_set, loadp);
1980         }
1981         worklist.push(loadp);
1982       }
1983     }
1984   }
1985 }

1118   // CmpPNode performs deep traversals if it compares oopptrs. CmpP is not notified of changes far away.
1119   if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1120     // SubNode::Value
1121     // CmpPNode::sub
1122     // MemNode::detect_ptr_independence
1123     // MemNode::all_controls_dominate
1124     // We find all controls of a pointer load and check whether they dominate the
1125     // control of an allocation. If they all dominate, the allocation comes after
1126     // (is independent of) the pointer load, so the pointers must differ. For this
1127     // we call n->dominates(sub, nlist) to check if the controls n of the pointer
1128     // load dominate the control sub of the allocation. The problem is that
1129     // dominates sometimes answers false conservatively and only later can
1130     // determine that the answer is indeed true. Loops with Region heads can make
1131     // it give up, whereas LoopNodes can be skipped more easily, so the traversal
1132     // becomes more powerful after loop-opts. This is difficult to remedy: we would
1133     // have to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP after loop-opts, so that should take care of many of these cases.
1134     return false;
1135   }
1136   tty->cr();
1137   tty->print_cr("Missed Value optimization:");
1138   n->dump_bfs(3, nullptr, "");
1139   tty->print_cr("Current type:");
1140   told->dump_on(tty);
1141   tty->cr();
1142   tty->print_cr("Optimized type:");
1143   tnew->dump_on(tty);
1144   tty->cr();
1145   return true;
1146 }
1147 #endif
1148 
1149 /**
1150  * Register a new node with the optimizer.  Update the types array, the def-use
1151  * info.  Put on worklist.
1152  */
1153 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1154   set_type_bottom(n);
1155   _worklist.push(n);
1156   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1157   return n;
1158 }
1159 
1160 //------------------------------transform--------------------------------------
1161 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1162 Node *PhaseIterGVN::transform( Node *n ) {
1163   // If brand new node, make space in type array, and give it a type.
1164   ensure_type_or_null(n);
1165   if (type_or_null(n) == nullptr) {
1166     set_type_bottom(n);
1167   }
1168 
1169   if (_delay_transform) {
1170     // Add the node to the worklist but don't optimize for now
1171     _worklist.push(n);
1172     return n;
1173   }
1174 
1175   return transform_old(n);
1176 }
1177 
1178 Node *PhaseIterGVN::transform_old(Node* n) {
1179   NOT_PRODUCT(set_transforms());
1180   // Remove 'n' from hash table in case it gets modified
1181   _table.hash_delete(n);
1182 #ifdef ASSERT
1183   if (is_verify_def_use()) {
1184     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1185   }
1186 #endif
1187 
1188   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1189   if (n->is_Cmp()) {
1190     add_users_to_worklist(n);
1191   }
1192 
1193   // Apply the Ideal call in a loop until it no longer applies
1194   Node* k = n;

1427 
1428   // Smash all inputs to 'old', isolating him completely
1429   Node *temp = new Node(1);
1430   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1431   remove_dead_node( old );
1432   temp->del_req(0);         // Yank bogus edge
1433   if (nn != nullptr && nn->outcnt() == 0) {
1434     _worklist.push(nn);
1435   }
1436 #ifndef PRODUCT
1437   if (is_verify_def_use()) {
1438     for ( int i = 0; i < _verify_window_size; i++ ) {
1439       if ( _verify_window[i] == old )
1440         _verify_window[i] = nn;
1441     }
1442   }
1443 #endif
1444   temp->destruct(this);     // reuse the _idx of this little guy
1445 }
1446 
1447 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1448   assert(n != nullptr, "sanity");
1449   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1450     Node* u = n->fast_out(i);
1451     if (u != n) {
1452       rehash_node_delayed(u);
1453       int nb = u->replace_edge(n, m);
1454       --i, imax -= nb;
1455     }
1456   }
1457   assert(n->outcnt() == 0, "all uses must be deleted");
1458 }
1459 
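A usage sketch for replace_in_uses (hypothetical caller, for illustration only;
'replacement' is a made-up name):

    Node* nn = igvn->register_new_node_with_optimizer(replacement);
    igvn->replace_in_uses(n, nn);   // every use of n now points at nn
    // n->outcnt() == 0 here, so n can be removed as a dead node
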
1460 //------------------------------add_users_to_worklist--------------------------
1461 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
1462   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1463     worklist.push(n->fast_out(i));  // Push on worklist
1464   }
1465 }
1466 
1467 // Return the counted loop Phi if, as a counted loop exit condition, 'cmp'
1468 // compares the induction variable with 'n'
1469 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1470   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1471     Node* bol = cmp->fast_out(i);
1472     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1473       Node* iff = bol->fast_out(i2);
1474       if (iff->is_BaseCountedLoopEnd()) {
1475         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1476         if (cle->limit() == n) {
1477           PhiNode* phi = cle->phi();
1478           if (phi != nullptr) {
1479             return phi;

1495     add_users_of_use_to_worklist(n, use, worklist);
1496   }
1497 }
1498 
1499 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
1500   if(use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1501       use->is_Store() )       // Enable store/load same address
1502     add_users_to_worklist0(use, worklist);
1503 
1504   // If we changed the receiver type of a call, we need to revisit
1505   // the Catch following the call.  It's looking for a non-null
1506   // receiver to know when to enable the regular fall-through path
1507   // in addition to the NullPtrException path.
1508   if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1509     Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
1510     if (p != nullptr) {
1511       add_users_to_worklist0(p, worklist);
1512     }
1513   }
1514 
1515   // AndLNode::Ideal folds GraphKit::mark_word_test patterns. Give it a chance to run.
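  // (Illustrative shape: AndL(Phi(..., LoadL(mark word), ...), mask); when the
  //  load's improved type flows through the Phi, the AndL may now fold.)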
1516   if (n->is_Load() && use->is_Phi()) {
1517     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1518       Node* u = use->fast_out(i);
1519       if (u->Opcode() == Op_AndL) {
1520         worklist.push(u);
1521       }
1522     }
1523   }
1524 
1525   uint use_op = use->Opcode();
1526   if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1527     add_users_to_worklist0(use, worklist); // Put Bool on worklist
1528     if (use->outcnt() > 0) {
1529       Node* bol = use->raw_out(0);
1530       if (bol->outcnt() > 0) {
1531         Node* iff = bol->raw_out(0);
1532         if (iff->outcnt() == 2) {
1533           // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1534           // phi merging either 0 or 1 onto the worklist
1535           Node* ifproj0 = iff->raw_out(0);
1536           Node* ifproj1 = iff->raw_out(1);
1537           if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1538             Node* region0 = ifproj0->raw_out(0);
1539             Node* region1 = ifproj1->raw_out(0);
1540             if( region0 == region1 )
1541               add_users_to_worklist0(region0, worklist);
1542           }
1543         }
1544       }

1602           assert(n == in2, "only in2 modified");
1603           // Find all CastII with input in1.
1604           for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1605             Node* castii = in1->fast_out(j);
1606             if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1607               // Find If.
1608               if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1609                 Node* ifnode = castii->in(0)->in(0);
1610                 // Check that the If connects to the cmp
1611                 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1612                   worklist.push(castii);
1613                 }
1614               }
1615             }
1616           }
1617         }
1618       }
1619     }
1620   }
1621 
1622   // Inline type nodes can have other inline types as users. If an input gets
1623   // updated, make sure that inline type users get a chance for optimization.
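  // (e.g. a value object with a flattened value-class field is represented as an
  //  InlineType whose field input is itself an InlineType; illustrative only.)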
1624   if (use->is_InlineType()) {
1625     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1626       Node* u = use->fast_out(i2);
1627       if (u->is_InlineType())
1628         worklist.push(u);
1629     }
1630   }
1631   // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
1632   // Patterns:
1633   // ConstraintCast+ -> Sub
1634   // ConstraintCast+ -> Phi
1635   // ConstraintCast+ -> Xor
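  // (e.g. CastPP -> CastPP -> CmpP: Sub/Cmp and Phi look through the cast chain
  //  via uncast(), so they may fold once the cast's input changes; illustrative.)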
1636   if (use->is_ConstraintCast()) {
1637     auto push_the_uses_to_worklist = [&](Node* n){
1638       if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
1639         worklist.push(n);
1640       }
1641     };
1642     auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
1643     use->visit_uses(push_the_uses_to_worklist, is_boundary);
1644   }
1645   // If changed LShift inputs, check RShift users for useless sign-ext
1646   if( use_op == Op_LShiftI ) {
1647     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1648       Node* u = use->fast_out(i2);
1649       if (u->Opcode() == Op_RShiftI)
1650         worklist.push(u);

1694   // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1695   // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1696   // to guarantee the change is not missed.
1697   if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1698     Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1699     if (p != nullptr) {
1700       add_users_to_worklist0(p, worklist);
1701     }
1702   }
1703 
1704   if (use_op == Op_Initialize) {
1705     Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1706     if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1707   }
1708   // Loading the java mirror from a Klass requires two loads and the type
1709   // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1710   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1711   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1712   bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1713 
  // If a CastP2X input changed, revisit its AndX users: they may now fold.
1714   if (use_op == Op_CastP2X) {
1715     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1716       Node* u = use->fast_out(i2);
1717       if (u->Opcode() == Op_AndX) {
1718         worklist.push(u);
1719       }
1720     }
1721   }
1722   if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1723     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1724       Node* u = use->fast_out(i2);
1725       const Type* ut = u->bottom_type();
1726       if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1727         if (has_load_barrier_nodes) {
1728           // Search for load barriers behind the load
1729           for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1730             Node* b = u->fast_out(i3);
1731             if (bs->is_gc_barrier_node(b)) {
1732               worklist.push(b);
1733             }
1734           }
1735         }
1736         worklist.push(u);
1737       }
1738     }
1739   }
1740   // Give CallStaticJavaNode::remove_useless_allocation a chance to run
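  // (Illustrative: the chain Region -> ... -> CallStaticJava(uncommon trap) is
  //  followed so the trap can re-check for allocations it can now remove.)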
1741   if (use->is_Region()) {
1742     Node* c = use;
1743     do {
1744       c = c->unique_ctrl_out_or_null();
1745     } while (c != nullptr && c->is_Region());
1746     if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1747       worklist.push(c);
1748     }
1749   }
1750   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1751     assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1752     if (use->outcnt() == 1) {
1753       Node* cmp = use->unique_out();
1754       worklist.push(cmp);
1755     }
1756   }
1757   if (use->Opcode() == Op_AddX) {
1758     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1759       Node* u = use->fast_out(i2);
1760       if (u->Opcode() == Op_CastX2P) {
1761         worklist.push(u);
1762       }
1763     }
1764   }
1765 }
1766 
1767 /**
1768  * Remove the speculative part of all types that we know of
1769  */

1817 //------------------------------PhaseCCP---------------------------------------
1818 // Conditional Constant Propagation, ala Wegman & Zadeck
1819 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1820   NOT_PRODUCT( clear_constants(); )
1821   assert( _worklist.size() == 0, "" );
1822   analyze();
1823 }
1824 
1825 #ifndef PRODUCT
1826 //------------------------------~PhaseCCP--------------------------------------
1827 PhaseCCP::~PhaseCCP() {
1828   inc_invokes();
1829   _total_constants += count_constants();
1830 }
1831 #endif
1832 
1833 
1834 #ifdef ASSERT
1835 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
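  // Monotonicity: during CCP a node's type may only move down the lattice, from
  // TOP toward BOTTOM, so meeting the old type into the new one must yield the
  // new type again (modulo the speculative part, which is ignored here).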
1836   if (tnew->meet(told) != tnew->remove_speculative()) {
1837     n->dump(3);
1838     tty->print("told = "); told->dump(); tty->cr();
1839     tty->print("tnew = "); tnew->dump(); tty->cr();
1840     fatal("Not monotonic");
1841   }
1842   assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
1843   assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
1844 }
1845 #endif //ASSERT
1846 
1847 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph until
1848 // we reach a fixed-point (i.e. no types change anymore). We start with a list that only contains the root node. Each time
1849 // a new type is set, we push all uses of that node back to the worklist (in some cases, we also push grandchildren
1850 // or nodes even further down back to the worklist because their type could change as a result of the current type
1851 // change).
1852 void PhaseCCP::analyze() {
1853   // Initialize all types to TOP, optimistic analysis
1854   for (uint i = 0; i < C->unique(); i++)  {
1855     _types.map(i, Type::TOP);
1856   }
1857 

1934   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1935     Node* use = n->fast_out(i);
1936     push_if_not_bottom_type(worklist, use);
1937     push_more_uses(worklist, n, use);
1938   }
1939 }
1940 
1941 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1942   if (n->bottom_type() != type(n)) {
1943     worklist.push(n);
1944   }
1945 }
1946 
1947 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1948 // Add them back to the worklist.
1949 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1950   push_phis(worklist, use);
1951   push_catch(worklist, use);
1952   push_cmpu(worklist, use);
1953   push_counted_loop_phi(worklist, parent, use);
1954   push_cast(worklist, use);
1955   push_loadp(worklist, use);
1956   push_and(worklist, parent, use);
1957   push_cast_ii(worklist, parent, use);
1958   push_opaque_zero_trip_guard(worklist, use);
1959 }
1960 
1961 
1962 // We must recheck Phis too if use is a Region.
1963 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1964   if (use->is_Region()) {
1965     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1966       push_if_not_bottom_type(worklist, use->fast_out(i));
1967     }
1968   }
1969 }
1970 
1971 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1972 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1973 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1974 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

1997       if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) {
1998         // Got a CmpU or CmpU3 which might need the new type information from node n.
1999         push_if_not_bottom_type(worklist, cmpu);
2000       }
2001     }
2002   }
2003 }
2004 
2005 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
2006 // See PhiNode::Value().
2007 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
2008   uint use_op = use->Opcode();
2009   if (use_op == Op_CmpI || use_op == Op_CmpL) {
2010     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
2011     if (phi != nullptr) {
2012       worklist.push(phi);
2013     }
2014   }
2015 }
2016 
// If a CastP2X input changes, its AndX users may be able to fold; this mirrors
// the CastP2X handling in PhaseIterGVN::add_users_of_use_to_worklist().
2017 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
2018   uint use_op = use->Opcode();
2019   if (use_op == Op_CastP2X) {
2020     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2021       Node* u = use->fast_out(i2);
2022       if (u->Opcode() == Op_AndX) {
2023         worklist.push(u);
2024       }
2025     }
2026   }
2027 }
2028 
2029 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
2030 // See LoadNode::Value().
2031 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2032   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2033   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2034 
2035   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2036     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2037       Node* loadp = use->fast_out(i);
2038       const Type* ut = loadp->bottom_type();
2039       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2040         if (has_load_barrier_nodes) {
2041           // Search for load barriers behind the load
2042           push_load_barrier(worklist, barrier_set, loadp);
2043         }
2044         worklist.push(loadp);
2045       }
2046     }
2047   }
2048 }