src/hotspot/share/opto/phaseX.cpp

1134   // CmpPNode performs deep traversals if it compares oopptrs. CmpP is not notified of changes far away.
1135   if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1136     // SubNode::Value
1137     // CmpPNode::sub
1138     // MemNode::detect_ptr_independence
1139     // MemNode::all_controls_dominate
1140     // We find all controls of a pointer load, and see if they dominate the control of
1141     // an allocation. If they all dominate, we know the allocation is after (independent)
1142     // of the pointer load, and we can say the pointers are different. For this we call
1143     // n->dominates(sub, nlist) to check if controls n of the pointer load dominate the
1144     // control sub of the allocation. The problem is that sometimes dominates answers
1145     // false conservatively, and only later can it determine that it is indeed true. Loops with
1146     // Region heads can lead to giving up, whereas LoopNodes can be skipped more easily, and
1147     // so the traversal becomes more powerful. This is difficult to remedy; we would have
1148     // to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP
1149     // after loop-opts, so that should take care of many of these cases.
1150     return false;
1151   }
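     // Illustrative sketch (shape and names assumed, simplified; not verbatim IR) of the
     // case the dominance traversal above decides:
     //   p = LoadP(ctrl, mem, adr)   // pointer load with control inputs c1, c2, ...
     //   q = Allocate(...)           // allocation whose control is 'sub'
     //   CmpP(p, q)                  // can fold to "not equal" only if every ci dominates
     //                               // 'sub' (MemNode::all_controls_dominate)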
1152   tty->cr();
1153   tty->print_cr("Missed Value optimization:");
1154   n->dump_bfs(1, nullptr, "");
1155   tty->print_cr("Current type:");
1156   told->dump_on(tty);
1157   tty->cr();
1158   tty->print_cr("Optimized type:");
1159   tnew->dump_on(tty);
1160   tty->cr();
1161   return true;
1162 }
1163 #endif
1164 
1165 /**
1166  * Register a new node with the optimizer.  Update the types array, the def-use
1167  * info.  Put on worklist.
1168  */
1169 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1170   set_type_bottom(n);
1171   _worklist.push(n);
1172   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1173   return n;
1174 }
1175 
1176 //------------------------------transform--------------------------------------
1177 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1178 Node *PhaseIterGVN::transform( Node *n ) {
1179   if (_delay_transform) {
1180     // Register the node but don't optimize for now
1181     register_new_node_with_optimizer(n);
1182     return n;
1183   }
1184 
1185   // If brand new node, make space in type array, and give it a type.
1186   ensure_type_or_null(n);
1187   if (type_or_null(n) == nullptr) {
1188     set_type_bottom(n);
1189   }
1190 






1191   return transform_old(n);
1192 }
1193 
1194 Node *PhaseIterGVN::transform_old(Node* n) {
1195   NOT_PRODUCT(set_transforms());
1196   // Remove 'n' from hash table in case it gets modified
1197   _table.hash_delete(n);
1198 #ifdef ASSERT
1199   if (is_verify_def_use()) {
1200     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1201   }
1202 #endif
1203 
1204   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1205   if (n->is_Cmp()) {
1206     add_users_to_worklist(n);
1207   }
1208 
1209   // Apply the Ideal call in a loop until it no longer applies
1210   Node* k = n;

1443 
1444   // Smash all inputs to 'old', isolating him completely
1445   Node *temp = new Node(1);
1446   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1447   remove_dead_node( old );
1448   temp->del_req(0);         // Yank bogus edge
1449   if (nn != nullptr && nn->outcnt() == 0) {
1450     _worklist.push(nn);
1451   }
1452 #ifndef PRODUCT
1453   if (is_verify_def_use()) {
1454     for ( int i = 0; i < _verify_window_size; i++ ) {
1455       if ( _verify_window[i] == old )
1456         _verify_window[i] = nn;
1457     }
1458   }
1459 #endif
1460   temp->destruct(this);     // reuse the _idx of this little guy
1461 }
1462 













1463 //------------------------------add_users_to_worklist--------------------------
1464 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
1465   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1466     worklist.push(n->fast_out(i));  // Push on worklist
1467   }
1468 }
1469 
1470 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1471 // compares the induction variable with n
1472 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1473   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1474     Node* bol = cmp->fast_out(i);
1475     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1476       Node* iff = bol->fast_out(i2);
1477       if (iff->is_BaseCountedLoopEnd()) {
1478         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1479         if (cle->limit() == n) {
1480           PhiNode* phi = cle->phi();
1481           if (phi != nullptr) {
1482             return phi;

1498     add_users_of_use_to_worklist(n, use, worklist);
1499   }
1500 }
1501 
1502 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
1503   if(use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1504       use->is_Store() )       // Enable store/load same address
1505     add_users_to_worklist0(use, worklist);
1506 
1507   // If we changed the type of the receiver of a call, we need to revisit
1508   // the Catch following the call.  It's looking for a non-null
1509   // receiver to know when to enable the regular fall-through path
1510   // in addition to the NullPtrException path.
1511   if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1512     Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
1513     if (p != nullptr) {
1514       add_users_to_worklist0(p, worklist);
1515     }
1516   }
1517 










1518   uint use_op = use->Opcode();
1519   if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1520     add_users_to_worklist0(use, worklist); // Put Bool on worklist
1521     if (use->outcnt() > 0) {
1522       Node* bol = use->raw_out(0);
1523       if (bol->outcnt() > 0) {
1524         Node* iff = bol->raw_out(0);
1525         if (iff->outcnt() == 2) {
1526           // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1527           // phi merging either 0 or 1 onto the worklist
1528           Node* ifproj0 = iff->raw_out(0);
1529           Node* ifproj1 = iff->raw_out(1);
1530           if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1531             Node* region0 = ifproj0->raw_out(0);
1532             Node* region1 = ifproj1->raw_out(0);
1533             if( region0 == region1 )
1534               add_users_to_worklist0(region0, worklist);
1535           }
1536         }
1537       }

1595           assert(n == in2, "only in2 modified");
1596           // Find all CastII with input in1.
1597           for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1598             Node* castii = in1->fast_out(j);
1599             if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1600               // Find If.
1601               if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1602                 Node* ifnode = castii->in(0)->in(0);
1603                 // Check that if connects to the cmp
1604                 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1605                   worklist.push(castii);
1606                 }
1607               }
1608             }
1609           }
1610         }
1611       }
1612     }
1613   }
1614 









1615   // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
1616   // Patterns:
1617   // ConstraintCast+ -> Sub
1618   // ConstraintCast+ -> Phi
1619   // ConstraintCast+ -> Xor
1620   if (use->is_ConstraintCast()) {
1621     auto push_the_uses_to_worklist = [&](Node* n){
1622       if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
1623         worklist.push(n);
1624       }
1625     };
1626     auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
1627     use->visit_uses(push_the_uses_to_worklist, is_boundary);
1628   }
1629   // If changed LShift inputs, check RShift users for useless sign-ext
1630   if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
1631     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1632       Node* u = use->fast_out(i2);
1633       if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL)
1634         worklist.push(u);

1678   // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1679   // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1680   // to guarantee the change is not missed.
1681   if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1682     Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1683     if (p != nullptr) {
1684       add_users_to_worklist0(p, worklist);
1685     }
1686   }
1687 
1688   if (use_op == Op_Initialize) {
1689     Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1690     if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1691   }
1692   // Loading the java mirror from a Klass requires two loads and the type
1693   // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1694   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1695   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1696   bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1697 








1698   if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1699     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1700       Node* u = use->fast_out(i2);
1701       const Type* ut = u->bottom_type();
1702       if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1703         if (has_load_barrier_nodes) {
1704           // Search for load barriers behind the load
1705           for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1706             Node* b = u->fast_out(i3);
1707             if (bs->is_gc_barrier_node(b)) {
1708               worklist.push(b);
1709             }
1710           }
1711         }
1712         worklist.push(u);
1713       }
1714     }
1715   }










1716   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1717     assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1718     if (use->outcnt() == 1) {
1719       Node* cmp = use->unique_out();
1720       worklist.push(cmp);
1721     }
1722   }
1723   if (use->Opcode() == Op_AddX) {
1724     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1725       Node* u = use->fast_out(i2);
1726       if (u->Opcode() == Op_CastX2P) {
1727         worklist.push(u);
1728       }
1729     }
1730   }
1731 
1732   /* AndNode has a special handling when one of the operands is a LShiftNode:
1733    * (LHS << s) & RHS
1734    * if RHS fits in less than s bits, the value of this expression is 0.
1735    * The difficulty is that there might be a conversion node (ConvI2L) between

1811 //------------------------------PhaseCCP---------------------------------------
1812 // Conditional Constant Propagation, ala Wegman & Zadeck
1813 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1814   NOT_PRODUCT( clear_constants(); )
1815   assert( _worklist.size() == 0, "" );
1816   analyze();
1817 }
1818 
1819 #ifndef PRODUCT
1820 //------------------------------~PhaseCCP--------------------------------------
1821 PhaseCCP::~PhaseCCP() {
1822   inc_invokes();
1823   _total_constants += count_constants();
1824 }
1825 #endif
1826 
1827 
1828 #ifdef ASSERT
1829 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
1830   if (tnew->meet(told) != tnew->remove_speculative()) {
1831     n->dump(1);
1832     tty->print("told = "); told->dump(); tty->cr();
1833     tty->print("tnew = "); tnew->dump(); tty->cr();
1834     fatal("Not monotonic");
1835   }
1836   assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
1837   assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
1838 }
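   // Worked example of the monotonicity check above (illustrative values, informal notation):
   //   told = int 5 (constant), tnew = int [0..10]  ->  tnew->meet(told) == [0..10] == tnew : ok,
   //     the type moved down the lattice, as CCP's lowering from TOP requires.
   //   told = int [0..10], tnew = int 5             ->  tnew->meet(told) == [0..10] != tnew : "Not monotonic".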
1839 #endif //ASSERT
1840 
1841 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph until
1842 // we reach a fixed-point (i.e. no types change anymore). We start with a list that only contains the root node. Each time
1843 // a new type is set, we push all uses of that node back to the worklist (in some cases, we also push grandchildren
1844 // or nodes even further down back to the worklist because their type could change as a result of the current type
1845 // change).
1846 void PhaseCCP::analyze() {
1847   // Initialize all types to TOP, optimistic analysis
1848   for (uint i = 0; i < C->unique(); i++)  {
1849     _types.map(i, Type::TOP);
1850   }
1851 
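   // A simplified sketch (assumed, not the verbatim remainder of this method) of the
   // fixed-point iteration described above:
   //   Unique_Node_List worklist;
   //   worklist.push(C->root());
   //   while (worklist.size() != 0) {
   //     Node* n = worklist.pop();
   //     const Type* tnew = n->Value(this);
   //     if (tnew != type(n)) {
   //       set_type(n, tnew);        // lower the type toward bottom
   //       // re-enqueue uses whose type may now change (see the push_* helpers below)
   //     }
   //   }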

1933   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1934     Node* use = n->fast_out(i);
1935     push_if_not_bottom_type(worklist, use);
1936     push_more_uses(worklist, n, use);
1937   }
1938 }
1939 
1940 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1941   if (n->bottom_type() != type(n)) {
1942     worklist.push(n);
1943   }
1944 }
1945 
1946 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1947 // Add them back to the worklist.
1948 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1949   push_phis(worklist, use);
1950   push_catch(worklist, use);
1951   push_cmpu(worklist, use);
1952   push_counted_loop_phi(worklist, parent, use);

1953   push_loadp(worklist, use);
1954   push_and(worklist, parent, use);
1955   push_cast_ii(worklist, parent, use);
1956   push_opaque_zero_trip_guard(worklist, use);
1957 }
1958 
1959 
1960 // We must recheck Phis too if use is a Region.
1961 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1962   if (use->is_Region()) {
1963     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1964       push_if_not_bottom_type(worklist, use->fast_out(i));
1965     }
1966   }
1967 }
1968 
1969 // If we changed the type of the receiver of a call, we need to revisit the Catch node following the call. It's looking for a
1970 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1971 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1972 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

1995       if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) {
1996         // Got a CmpU or CmpU3 which might need the new type information from node n.
1997         push_if_not_bottom_type(worklist, cmpu);
1998       }
1999     }
2000   }
2001 }
2002 
2003 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
2004 // See PhiNode::Value().
2005 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
2006   uint use_op = use->Opcode();
2007   if (use_op == Op_CmpI || use_op == Op_CmpL) {
2008     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
2009     if (phi != nullptr) {
2010       worklist.push(phi);
2011     }
2012   }
2013 }
2014 












2015 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
2016 // See LoadNode::Value().
2017 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2018   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2019   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2020 
2021   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2022     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2023       Node* loadp = use->fast_out(i);
2024       const Type* ut = loadp->bottom_type();
2025       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2026         if (has_load_barrier_nodes) {
2027           // Search for load barriers behind the load
2028           push_load_barrier(worklist, barrier_set, loadp);
2029         }
2030         worklist.push(loadp);
2031       }
2032     }
2033   }
2034 }

1134   // CmpPNode performs deep traversals if it compares oopptrs. CmpP is not notified of changes far away.
1135   if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
1136     // SubNode::Value
1137     // CmpPNode::sub
1138     // MemNode::detect_ptr_independence
1139     // MemNode::all_controls_dominate
1140     // We find all controls of a pointer load, and see if they dominate the control of
1141     // an allocation. If they all dominate, we know the allocation is after (independent)
1142     // of the pointer load, and we can say the pointers are different. For this we call
1143     // n->dominates(sub, nlist) to check if controls n of the pointer load dominate the
1144     // control sub of the allocation. The problem is that sometimes dominates answers
1145     // false conservatively, and only later can it determine that it is indeed true. Loops with
1146     // Region heads can lead to giving up, whereas LoopNodes can be skipped more easily, and
1147     // so the traversal becomes more powerful. This is difficult to remedy; we would have
1148     // to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP
1149     // after loop-opts, so that should take care of many of these cases.
1150     return false;
1151   }
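     // Illustrative sketch (shape and names assumed, simplified; not verbatim IR) of the
     // case the dominance traversal above decides:
     //   p = LoadP(ctrl, mem, adr)   // pointer load with control inputs c1, c2, ...
     //   q = Allocate(...)           // allocation whose control is 'sub'
     //   CmpP(p, q)                  // can fold to "not equal" only if every ci dominates
     //                               // 'sub' (MemNode::all_controls_dominate)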
1152   tty->cr();
1153   tty->print_cr("Missed Value optimization:");
1154   n->dump_bfs(3, nullptr, "");
1155   tty->print_cr("Current type:");
1156   told->dump_on(tty);
1157   tty->cr();
1158   tty->print_cr("Optimized type:");
1159   tnew->dump_on(tty);
1160   tty->cr();
1161   return true;
1162 }
1163 #endif
1164 
1165 /**
1166  * Register a new node with the optimizer.  Update the types array, the def-use
1167  * info.  Put on worklist.
1168  */
1169 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1170   set_type_bottom(n);
1171   _worklist.push(n);
1172   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1173   return n;
1174 }
1175 
1176 //------------------------------transform--------------------------------------
1177 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1178 Node *PhaseIterGVN::transform( Node *n ) {






1179   // If brand new node, make space in type array, and give it a type.
1180   ensure_type_or_null(n);
1181   if (type_or_null(n) == nullptr) {
1182     set_type_bottom(n);
1183   }
1184 
1185   if (_delay_transform) {
1186     // Add the node to the worklist but don't optimize for now
1187     _worklist.push(n);
1188     return n;
1189   }
1190 
1191   return transform_old(n);
1192 }
1193 
1194 Node *PhaseIterGVN::transform_old(Node* n) {
1195   NOT_PRODUCT(set_transforms());
1196   // Remove 'n' from hash table in case it gets modified
1197   _table.hash_delete(n);
1198 #ifdef ASSERT
1199   if (is_verify_def_use()) {
1200     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1201   }
1202 #endif
1203 
1204   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1205   if (n->is_Cmp()) {
1206     add_users_to_worklist(n);
1207   }
1208 
1209   // Apply the Ideal call in a loop until it no longer applies
1210   Node* k = n;

1443 
1444   // Smash all inputs to 'old', isolating him completely
1445   Node *temp = new Node(1);
1446   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1447   remove_dead_node( old );
1448   temp->del_req(0);         // Yank bogus edge
1449   if (nn != nullptr && nn->outcnt() == 0) {
1450     _worklist.push(nn);
1451   }
1452 #ifndef PRODUCT
1453   if (is_verify_def_use()) {
1454     for ( int i = 0; i < _verify_window_size; i++ ) {
1455       if ( _verify_window[i] == old )
1456         _verify_window[i] = nn;
1457     }
1458   }
1459 #endif
1460   temp->destruct(this);     // reuse the _idx of this little guy
1461 }
1462 
1463 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1464   assert(n != nullptr, "sanity");
1465   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1466     Node* u = n->fast_out(i);
1467     if (u != n) {
1468       rehash_node_delayed(u);
1469       int nb = u->replace_edge(n, m);
1470       --i, imax -= nb;
1471     }
1472   }
1473   assert(n->outcnt() == 0, "all uses must be deleted");
1474 }
1475 
1476 //------------------------------add_users_to_worklist--------------------------
1477 void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
1478   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1479     worklist.push(n->fast_out(i));  // Push on worklist
1480   }
1481 }
1482 
1483 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1484 // compares the induction variable with n
1485 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1486   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1487     Node* bol = cmp->fast_out(i);
1488     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1489       Node* iff = bol->fast_out(i2);
1490       if (iff->is_BaseCountedLoopEnd()) {
1491         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1492         if (cle->limit() == n) {
1493           PhiNode* phi = cle->phi();
1494           if (phi != nullptr) {
1495             return phi;

1511     add_users_of_use_to_worklist(n, use, worklist);
1512   }
1513 }
1514 
1515 void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_List& worklist) {
1516   if(use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1517       use->is_Store() )       // Enable store/load same address
1518     add_users_to_worklist0(use, worklist);
1519 
1520   // If we changed the type of the receiver of a call, we need to revisit
1521   // the Catch following the call.  It's looking for a non-null
1522   // receiver to know when to enable the regular fall-through path
1523   // in addition to the NullPtrException path.
1524   if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1525     Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control);
1526     if (p != nullptr) {
1527       add_users_to_worklist0(p, worklist);
1528     }
1529   }
1530 
1531   // AndLNode::Ideal folds GraphKit::mark_word_test patterns. Give it a chance to run.
1532   if (n->is_Load() && use->is_Phi()) {
1533     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1534       Node* u = use->fast_out(i);
1535       if (u->Opcode() == Op_AndL) {
1536         worklist.push(u);
1537       }
1538     }
1539   }
1540 
1541   uint use_op = use->Opcode();
1542   if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
1543     add_users_to_worklist0(use, worklist); // Put Bool on worklist
1544     if (use->outcnt() > 0) {
1545       Node* bol = use->raw_out(0);
1546       if (bol->outcnt() > 0) {
1547         Node* iff = bol->raw_out(0);
1548         if (iff->outcnt() == 2) {
1549           // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1550           // phi merging either 0 or 1 onto the worklist
1551           Node* ifproj0 = iff->raw_out(0);
1552           Node* ifproj1 = iff->raw_out(1);
1553           if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1554             Node* region0 = ifproj0->raw_out(0);
1555             Node* region1 = ifproj1->raw_out(0);
1556             if( region0 == region1 )
1557               add_users_to_worklist0(region0, worklist);
1558           }
1559         }
1560       }

1618           assert(n == in2, "only in2 modified");
1619           // Find all CastII with input in1.
1620           for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1621             Node* castii = in1->fast_out(j);
1622             if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1623               // Find If.
1624               if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1625                 Node* ifnode = castii->in(0)->in(0);
1626                 // Check that if connects to the cmp
1627                 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1628                   worklist.push(castii);
1629                 }
1630               }
1631             }
1632           }
1633         }
1634       }
1635     }
1636   }
1637 
1638   // Inline type nodes can have other inline types as users. If an input gets
1639   // updated, make sure that inline type users get a chance for optimization.
1640   if (use->is_InlineType()) {
1641     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1642       Node* u = use->fast_out(i2);
1643       if (u->is_InlineType())
1644         worklist.push(u);
1645     }
1646   }
1647   // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
1648   // Patterns:
1649   // ConstraintCast+ -> Sub
1650   // ConstraintCast+ -> Phi
1651   // ConstraintCast+ -> Xor
1652   if (use->is_ConstraintCast()) {
1653     auto push_the_uses_to_worklist = [&](Node* n){
1654       if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
1655         worklist.push(n);
1656       }
1657     };
1658     auto is_boundary = [](Node* n){ return !n->is_ConstraintCast(); };
1659     use->visit_uses(push_the_uses_to_worklist, is_boundary);
1660   }
1661   // If changed LShift inputs, check RShift users for useless sign-ext
1662   if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
1663     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1664       Node* u = use->fast_out(i2);
1665       if (u->Opcode() == Op_RShiftI || u->Opcode() == Op_RShiftL)
1666         worklist.push(u);

1710   // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1711   // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1712   // to guarantee the change is not missed.
1713   if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1714     Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1715     if (p != nullptr) {
1716       add_users_to_worklist0(p, worklist);
1717     }
1718   }
1719 
1720   if (use_op == Op_Initialize) {
1721     Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1722     if (imem != nullptr) add_users_to_worklist0(imem, worklist);
1723   }
1724   // Loading the java mirror from a Klass requires two loads and the type
1725   // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1726   //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1727   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1728   bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1729 
1730   if (use_op == Op_CastP2X) {
1731     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1732       Node* u = use->fast_out(i2);
1733       if (u->Opcode() == Op_AndX) {
1734         worklist.push(u);
1735       }
1736     }
1737   }
1738   if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1739     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1740       Node* u = use->fast_out(i2);
1741       const Type* ut = u->bottom_type();
1742       if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1743         if (has_load_barrier_nodes) {
1744           // Search for load barriers behind the load
1745           for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1746             Node* b = u->fast_out(i3);
1747             if (bs->is_gc_barrier_node(b)) {
1748               worklist.push(b);
1749             }
1750           }
1751         }
1752         worklist.push(u);
1753       }
1754     }
1755   }
1756   // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1757   if (use->is_Region()) {
1758     Node* c = use;
1759     do {
1760       c = c->unique_ctrl_out_or_null();
1761     } while (c != nullptr && c->is_Region());
1762     if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1763       worklist.push(c);
1764     }
1765   }
1766   if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1767     assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1768     if (use->outcnt() == 1) {
1769       Node* cmp = use->unique_out();
1770       worklist.push(cmp);
1771     }
1772   }
1773   if (use->Opcode() == Op_AddX) {
1774     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1775       Node* u = use->fast_out(i2);
1776       if (u->Opcode() == Op_CastX2P) {
1777         worklist.push(u);
1778       }
1779     }
1780   }
1781 
1782   /* AndNode has a special handling when one of the operands is a LShiftNode:
1783    * (LHS << s) & RHS
1784    * if RHS fits in less than s bits, the value of this expression is 0.
1785    * The difficulty is that there might be a conversion node (ConvI2L) between

1861 //------------------------------PhaseCCP---------------------------------------
1862 // Conditional Constant Propagation, ala Wegman & Zadeck
1863 PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1864   NOT_PRODUCT( clear_constants(); )
1865   assert( _worklist.size() == 0, "" );
1866   analyze();
1867 }
1868 
1869 #ifndef PRODUCT
1870 //------------------------------~PhaseCCP--------------------------------------
1871 PhaseCCP::~PhaseCCP() {
1872   inc_invokes();
1873   _total_constants += count_constants();
1874 }
1875 #endif
1876 
1877 
1878 #ifdef ASSERT
1879 void PhaseCCP::verify_type(Node* n, const Type* tnew, const Type* told) {
1880   if (tnew->meet(told) != tnew->remove_speculative()) {
1881     n->dump(3);
1882     tty->print("told = "); told->dump(); tty->cr();
1883     tty->print("tnew = "); tnew->dump(); tty->cr();
1884     fatal("Not monotonic");
1885   }
1886   assert(!told->isa_int() || !tnew->isa_int() || told->is_int()->_widen <= tnew->is_int()->_widen, "widen increases");
1887   assert(!told->isa_long() || !tnew->isa_long() || told->is_long()->_widen <= tnew->is_long()->_widen, "widen increases");
1888 }
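   // Worked example of the monotonicity check above (illustrative values, informal notation):
   //   told = int 5 (constant), tnew = int [0..10]  ->  tnew->meet(told) == [0..10] == tnew : ok,
   //     the type moved down the lattice, as CCP's lowering from TOP requires.
   //   told = int [0..10], tnew = int 5             ->  tnew->meet(told) == [0..10] != tnew : "Not monotonic".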
1889 #endif //ASSERT
1890 
1891 // In this analysis, all types are initially set to TOP. We iteratively call Value() on all nodes of the graph until
1892 // we reach a fixed-point (i.e. no types change anymore). We start with a list that only contains the root node. Each time
1893 // a new type is set, we push all uses of that node back to the worklist (in some cases, we also push grandchildren
1894 // or nodes even further down back to the worklist because their type could change as a result of the current type
1895 // change).
1896 void PhaseCCP::analyze() {
1897   // Initialize all types to TOP, optimistic analysis
1898   for (uint i = 0; i < C->unique(); i++)  {
1899     _types.map(i, Type::TOP);
1900   }
1901 
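   // A simplified sketch (assumed, not the verbatim remainder of this method) of the
   // fixed-point iteration described above:
   //   Unique_Node_List worklist;
   //   worklist.push(C->root());
   //   while (worklist.size() != 0) {
   //     Node* n = worklist.pop();
   //     const Type* tnew = n->Value(this);
   //     if (tnew != type(n)) {
   //       set_type(n, tnew);        // lower the type toward bottom
   //       // re-enqueue uses whose type may now change (see the push_* helpers below)
   //     }
   //   }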

1983   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1984     Node* use = n->fast_out(i);
1985     push_if_not_bottom_type(worklist, use);
1986     push_more_uses(worklist, n, use);
1987   }
1988 }
1989 
1990 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1991   if (n->bottom_type() != type(n)) {
1992     worklist.push(n);
1993   }
1994 }
1995 
1996 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1997 // Add them back to the worklist.
1998 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1999   push_phis(worklist, use);
2000   push_catch(worklist, use);
2001   push_cmpu(worklist, use);
2002   push_counted_loop_phi(worklist, parent, use);
2003   push_cast(worklist, use);
2004   push_loadp(worklist, use);
2005   push_and(worklist, parent, use);
2006   push_cast_ii(worklist, parent, use);
2007   push_opaque_zero_trip_guard(worklist, use);
2008 }
2009 
2010 
2011 // We must recheck Phis too if use is a Region.
2012 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
2013   if (use->is_Region()) {
2014     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2015       push_if_not_bottom_type(worklist, use->fast_out(i));
2016     }
2017   }
2018 }
2019 
2020 // If we changed the type of the receiver of a call, we need to revisit the Catch node following the call. It's looking for a
2021 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
2022 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
2023 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

2046       if (cmpu_opcode == Op_CmpU || cmpu_opcode == Op_CmpU3) {
2047         // Got a CmpU or CmpU3 which might need the new type information from node n.
2048         push_if_not_bottom_type(worklist, cmpu);
2049       }
2050     }
2051   }
2052 }
2053 
2054 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
2055 // See PhiNode::Value().
2056 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
2057   uint use_op = use->Opcode();
2058   if (use_op == Op_CmpI || use_op == Op_CmpL) {
2059     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
2060     if (phi != nullptr) {
2061       worklist.push(phi);
2062     }
2063   }
2064 }
2065 
2066 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
2067   uint use_op = use->Opcode();
2068   if (use_op == Op_CastP2X) {
2069     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2070       Node* u = use->fast_out(i2);
2071       if (u->Opcode() == Op_AndX) {
2072         worklist.push(u);
2073       }
2074     }
2075   }
2076 }
2077 
2078 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
2079 // See LoadNode::Value().
2080 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2081   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2082   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2083 
2084   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2085     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2086       Node* loadp = use->fast_out(i);
2087       const Type* ut = loadp->bottom_type();
2088       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2089         if (has_load_barrier_nodes) {
2090           // Search for load barriers behind the load
2091           push_load_barrier(worklist, barrier_set, loadp);
2092         }
2093         worklist.push(loadp);
2094       }
2095     }
2096   }
2097 }