
src/hotspot/share/opto/phaseX.cpp

(old version)

1148   tnew->dump_on(tty);
1149   tty->cr();
1150   return true;
1151 }
1152 #endif
1153 
1154 /**
1155  * Register a new node with the optimizer.  Update the types array, the def-use
1156  * info.  Put on worklist.
1157  */
1158 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1159   set_type_bottom(n);
1160   _worklist.push(n);
1161   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1162   return n;
1163 }
1164 
1165 //------------------------------transform--------------------------------------
1166 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1167 Node *PhaseIterGVN::transform( Node *n ) {
1168   if (_delay_transform) {
1169     // Register the node but don't optimize for now
1170     register_new_node_with_optimizer(n);
1171     return n;
1172   }
1173 
1174   // If brand new node, make space in type array, and give it a type.
1175   ensure_type_or_null(n);
1176   if (type_or_null(n) == nullptr) {
1177     set_type_bottom(n);
1178   }
1179 
1180   return transform_old(n);
1181 }
1182 
1183 Node *PhaseIterGVN::transform_old(Node* n) {
1184   NOT_PRODUCT(set_transforms());
1185   // Remove 'n' from hash table in case it gets modified
1186   _table.hash_delete(n);
1187 #ifdef ASSERT
1188   if (is_verify_def_use()) {
1189     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1190   }
1191 #endif
1192 
1193   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1194   if (n->is_Cmp()) {
1195     add_users_to_worklist(n);
1196   }
1197 
1198   // Apply the Ideal call in a loop until it no longer applies
1199   Node* k = n;

1430 
1431   // Smash all inputs to 'old', isolating him completely
1432   Node *temp = new Node(1);
1433   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1434   remove_dead_node( old );
1435   temp->del_req(0);         // Yank bogus edge
1436   if (nn != nullptr && nn->outcnt() == 0) {
1437     _worklist.push(nn);
1438   }
1439 #ifndef PRODUCT
1440   if (is_verify_def_use()) {
1441     for ( int i = 0; i < _verify_window_size; i++ ) {
1442       if ( _verify_window[i] == old )
1443         _verify_window[i] = nn;
1444     }
1445   }
1446 #endif
1447   temp->destruct(this);     // reuse the _idx of this little guy
1448 }
1449 
1450 //------------------------------add_users_to_worklist--------------------------
1451 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1452   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1453     _worklist.push(n->fast_out(i));  // Push on worklist
1454   }
1455 }
1456 
1457 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1458 // compares the induction variable with n
1459 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1460   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1461     Node* bol = cmp->fast_out(i);
1462     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1463       Node* iff = bol->fast_out(i2);
1464       if (iff->is_BaseCountedLoopEnd()) {
1465         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1466         if (cle->limit() == n) {
1467           PhiNode* phi = cle->phi();
1468           if (phi != nullptr) {
1469             return phi;

1577             assert(n == in2, "only in2 modified");
1578             // Find all CastII with input in1.
1579             for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1580               Node* castii = in1->fast_out(j);
1581               if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1582                 // Find If.
1583                 if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1584                   Node* ifnode = castii->in(0)->in(0);
1585                   // Check that the If connects to the cmp
1586                   if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1587                     _worklist.push(castii);
1588                   }
1589                 }
1590               }
1591             }
1592           }
1593         }
1594       }
1595     }
1596 
1597     // If changed Cast input, notify down for Phi and Sub - both do "uncast"
1598     // Patterns:
1599     // ConstraintCast+ -> Sub
1600     // ConstraintCast+ -> Phi
1601     if (use->is_ConstraintCast()) {
1602       auto push_phi_or_sub_uses_to_worklist = [&](Node* n){
1603         if (n->is_Phi() || n->is_Sub()) {
1604           _worklist.push(n);
1605         }
1606       };
1607       ConstraintCastNode::visit_uncasted_uses(use, push_phi_or_sub_uses_to_worklist);
1608     }
1609     // If changed LShift inputs, check RShift users for useless sign-ext
1610     if( use_op == Op_LShiftI ) {
1611       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1612         Node* u = use->fast_out(i2);
1613         if (u->Opcode() == Op_RShiftI)
1614           _worklist.push(u);
1615       }
1616     }

1645       InitializeNode* init = use->as_Allocate()->initialization();
1646       if (init != nullptr) {
1647         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1648         if (imem != nullptr)  add_users_to_worklist0(imem);
1649       }
1650     }
1651     // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1652     // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1653     // to guarantee the change is not missed.
1654     if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1655       Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1656       if (p != nullptr) {
1657         add_users_to_worklist0(p);
1658       }
1659     }
1660 
1661     if (use_op == Op_Initialize) {
1662       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1663       if (imem != nullptr)  add_users_to_worklist0(imem);
1664     }
1665     // Loading the java mirror from a Klass requires two loads and the type
1666     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1667     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1668     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1669     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1670 
1671     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1672       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1673         Node* u = use->fast_out(i2);
1674         const Type* ut = u->bottom_type();
1675         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1676           if (has_load_barrier_nodes) {
1677             // Search for load barriers behind the load
1678             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1679               Node* b = u->fast_out(i3);
1680               if (bs->is_gc_barrier_node(b)) {
1681                 _worklist.push(b);
1682               }
1683             }
1684           }
1685           _worklist.push(u);
1686         }
1687       }
1688     }
1689     if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1690       assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1691       if (use->outcnt() == 1) {
1692         Node* cmp = use->unique_out();
1693         _worklist.push(cmp);
1694       }
1695     }
1696   }
1697 }
1698 
1699 /**
1700  * Remove the speculative part of all types that we know of
1701  */
1702 void PhaseIterGVN::remove_speculative_types()  {
1703   assert(UseTypeSpeculation, "speculation is off");
1704   for (uint i = 0; i < _types.Size(); i++)  {
1705     const Type* t = _types.fast_lookup(i);
1706     if (t != nullptr) {
1707       _types.map(i, t->remove_speculative());
1708     }

1862   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1863     Node* use = n->fast_out(i);
1864     push_if_not_bottom_type(worklist, use);
1865     push_more_uses(worklist, n, use);
1866   }
1867 }
1868 
1869 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1870   if (n->bottom_type() != type(n)) {
1871     worklist.push(n);
1872   }
1873 }
1874 
1875 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1876 // Add them back to the worklist.
1877 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1878   push_phis(worklist, use);
1879   push_catch(worklist, use);
1880   push_cmpu(worklist, use);
1881   push_counted_loop_phi(worklist, parent, use);

1882   push_loadp(worklist, use);
1883   push_and(worklist, parent, use);
1884   push_cast_ii(worklist, parent, use);
1885   push_opaque_zero_trip_guard(worklist, use);
1886 }
1887 
1888 
1889 // We must recheck Phis too if use is a Region.
1890 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1891   if (use->is_Region()) {
1892     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1893       push_if_not_bottom_type(worklist, use->fast_out(i));
1894     }
1895   }
1896 }
1897 
1898 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1899 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1900 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1901 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

1923       if (cmpu->Opcode() == Op_CmpU) {
1924         // Got a CmpU which might need the new type information from node n.
1925         push_if_not_bottom_type(worklist, cmpu);
1926       }
1927     }
1928   }
1929 }
1930 
1931 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1932 // See PhiNode::Value().
1933 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1934   uint use_op = use->Opcode();
1935   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1936     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1937     if (phi != nullptr) {
1938       worklist.push(phi);
1939     }
1940   }
1941 }
1942 
1943 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1944 // See LoadNode::Value().
1945 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
1946   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
1947   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
1948 
1949   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1950     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1951       Node* loadp = use->fast_out(i);
1952       const Type* ut = loadp->bottom_type();
1953       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
1954         if (has_load_barrier_nodes) {
1955           // Search for load barriers behind the load
1956           push_load_barrier(worklist, barrier_set, loadp);
1957         }
1958         worklist.push(loadp);
1959       }
1960     }
1961   }
1962 }

(new version)

1148   tnew->dump_on(tty);
1149   tty->cr();
1150   return true;
1151 }
1152 #endif
1153 
1154 /**
1155  * Register a new node with the optimizer.  Update the types array, the def-use
1156  * info.  Put on worklist.
1157  */
1158 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1159   set_type_bottom(n);
1160   _worklist.push(n);
1161   if (orig != nullptr)  C->copy_node_notes_to(n, orig);
1162   return n;
1163 }
1164 
1165 //------------------------------transform--------------------------------------
1166 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1167 Node *PhaseIterGVN::transform( Node *n ) {
1168   // If brand new node, make space in type array, and give it a type.
1169   ensure_type_or_null(n);
1170   if (type_or_null(n) == nullptr) {
1171     set_type_bottom(n);
1172   }
1173 
1174   if (_delay_transform) {
1175     // Add the node to the worklist but don't optimize for now
1176     _worklist.push(n);
1177     return n;
1178   }
1179 
1180   return transform_old(n);
1181 }
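
Note (illustrative sketch, not part of this webrev): with this reordering, a brand-new
node handed to transform() gets a type-array entry (bottom type) before it is queued,
whether or not _delay_transform is set. A minimal sketch of the usual calling pattern
during IGVN; AddINode and the variable names are only illustrative:

    Node* sum = new AddINode(a, b);   // freshly allocated node, no type yet
    sum = igvn.transform(sum);        // ensure_type_or_null + set_type_bottom, then
                                      // idealize (or just queue if _delay_transform)
    // register_new_node_with_optimizer(sum) is the alternative when the node should
    // only be typed and pushed on the worklist, not idealized immediately.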
1182 
1183 Node *PhaseIterGVN::transform_old(Node* n) {
1184   NOT_PRODUCT(set_transforms());
1185   // Remove 'n' from hash table in case it gets modified
1186   _table.hash_delete(n);
1187 #ifdef ASSERT
1188   if (is_verify_def_use()) {
1189     assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1190   }
1191 #endif
1192 
1193   // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1194   if (n->is_Cmp()) {
1195     add_users_to_worklist(n);
1196   }
1197 
1198   // Apply the Ideal call in a loop until it no longer applies
1199   Node* k = n;

1430 
1431   // Smash all inputs to 'old', isolating him completely
1432   Node *temp = new Node(1);
1433   temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1434   remove_dead_node( old );
1435   temp->del_req(0);         // Yank bogus edge
1436   if (nn != nullptr && nn->outcnt() == 0) {
1437     _worklist.push(nn);
1438   }
1439 #ifndef PRODUCT
1440   if (is_verify_def_use()) {
1441     for ( int i = 0; i < _verify_window_size; i++ ) {
1442       if ( _verify_window[i] == old )
1443         _verify_window[i] = nn;
1444     }
1445   }
1446 #endif
1447   temp->destruct(this);     // reuse the _idx of this little guy
1448 }
1449 
1450 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1451   assert(n != nullptr, "sanity");
1452   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1453     Node* u = n->fast_out(i);
1454     if (u != n) {
1455       rehash_node_delayed(u);
1456       int nb = u->replace_edge(n, m);
1457       --i, imax -= nb;
1458     }
1459   }
1460   assert(n->outcnt() == 0, "all uses must be deleted");
1461 }
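
Note (illustrative sketch, not part of this webrev): the new replace_in_uses() reroutes
every out-edge of n to m. rehash_node_delayed(u) is needed because u's inputs, and
therefore its hash, change; the "--i, imax -= nb;" pair re-syncs the DUIterator_Fast
after replace_edge() has removed nb entries from n's out array. A hedged usage sketch
with illustrative names:

    igvn->replace_in_uses(old_node, new_node); // afterwards old_node->outcnt() == 0
    igvn->remove_dead_node(old_node);          // reclaim it once it has no uses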
1462 
1463 //------------------------------add_users_to_worklist--------------------------
1464 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1465   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1466     _worklist.push(n->fast_out(i));  // Push on worklist
1467   }
1468 }
1469 
1470 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1471 // compares the induction variable with n
1472 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
1473   for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1474     Node* bol = cmp->fast_out(i);
1475     for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1476       Node* iff = bol->fast_out(i2);
1477       if (iff->is_BaseCountedLoopEnd()) {
1478         BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1479         if (cle->limit() == n) {
1480           PhiNode* phi = cle->phi();
1481           if (phi != nullptr) {
1482             return phi;

1590             assert(n == in2, "only in2 modified");
1591             // Find all CastII with input in1.
1592             for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1593               Node* castii = in1->fast_out(j);
1594               if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1595                 // Find If.
1596                 if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1597                   Node* ifnode = castii->in(0)->in(0);
1598                   // Check that the If connects to the cmp
1599                   if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1600                     _worklist.push(castii);
1601                   }
1602                 }
1603               }
1604             }
1605           }
1606         }
1607       }
1608     }
1609 
1610     // Inline type nodes can have other inline types as users. If an input gets
1611     // updated, make sure that inline type users get a chance for optimization.
1612     if (use->is_InlineType()) {
1613       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1614         Node* u = use->fast_out(i2);
1615         if (u->is_InlineType())
1616           _worklist.push(u);
1617       }
1618     }
1619     // If changed Cast input, notify down for Phi and Sub - both do "uncast"
1620     // Patterns:
1621     // ConstraintCast+ -> Sub
1622     // ConstraintCast+ -> Phi
1623     if (use->is_ConstraintCast()) {
1624       auto push_phi_or_sub_uses_to_worklist = [&](Node* n){
1625         if (n->is_Phi() || n->is_Sub()) {
1626           _worklist.push(n);
1627         }
1628       };
1629       ConstraintCastNode::visit_uncasted_uses(use, push_phi_or_sub_uses_to_worklist);
1630     }
1631     // If changed LShift inputs, check RShift users for useless sign-ext
1632     if( use_op == Op_LShiftI ) {
1633       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1634         Node* u = use->fast_out(i2);
1635         if (u->Opcode() == Op_RShiftI)
1636           _worklist.push(u);
1637       }
1638     }

1667       InitializeNode* init = use->as_Allocate()->initialization();
1668       if (init != nullptr) {
1669         Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1670         if (imem != nullptr)  add_users_to_worklist0(imem);
1671       }
1672     }
1673     // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1674     // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1675     // to guarantee the change is not missed.
1676     if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1677       Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1678       if (p != nullptr) {
1679         add_users_to_worklist0(p);
1680       }
1681     }
1682 
1683     if (use_op == Op_Initialize) {
1684       Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1685       if (imem != nullptr)  add_users_to_worklist0(imem);
1686     }
1687     if (use_op == Op_CastP2X) {
1688       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1689         Node* u = use->fast_out(i2);
1690         if (u->Opcode() == Op_AndX) {
1691           _worklist.push(u);
1692         }
1693       }
1694     }
1695     // Loading the java mirror from a Klass requires two loads and the type
1696     // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1697     //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1698     BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1699     bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1700 
1701     if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1702       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1703         Node* u = use->fast_out(i2);
1704         const Type* ut = u->bottom_type();
1705         if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1706           if (has_load_barrier_nodes) {
1707             // Search for load barriers behind the load
1708             for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1709               Node* b = u->fast_out(i3);
1710               if (bs->is_gc_barrier_node(b)) {
1711                 _worklist.push(b);
1712               }
1713             }
1714           }
1715           _worklist.push(u);
1716         }
1717       }
1718     }
1719 
1720     // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1721     if (use->is_Region()) {
1722       Node* c = use;
1723       do {
1724         c = c->unique_ctrl_out_or_null();
1725       } while (c != nullptr && c->is_Region());
1726       if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1727         _worklist.push(c);
1728       }
1729     }
1730     if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1731       assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1732       if (use->outcnt() == 1) {
1733         Node* cmp = use->unique_out();
1734         _worklist.push(cmp);
1735       }
1736     }
1737   }
1738 }
1739 
1740 /**
1741  * Remove the speculative part of all types that we know of
1742  */
1743 void PhaseIterGVN::remove_speculative_types()  {
1744   assert(UseTypeSpeculation, "speculation is off");
1745   for (uint i = 0; i < _types.Size(); i++)  {
1746     const Type* t = _types.fast_lookup(i);
1747     if (t != nullptr) {
1748       _types.map(i, t->remove_speculative());
1749     }

1903   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1904     Node* use = n->fast_out(i);
1905     push_if_not_bottom_type(worklist, use);
1906     push_more_uses(worklist, n, use);
1907   }
1908 }
1909 
1910 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
1911   if (n->bottom_type() != type(n)) {
1912     worklist.push(n);
1913   }
1914 }
1915 
1916 // For some nodes, we need to propagate the type change to grandchildren or even further down.
1917 // Add them back to the worklist.
1918 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
1919   push_phis(worklist, use);
1920   push_catch(worklist, use);
1921   push_cmpu(worklist, use);
1922   push_counted_loop_phi(worklist, parent, use);
1923   push_cast(worklist, use);
1924   push_loadp(worklist, use);
1925   push_and(worklist, parent, use);
1926   push_cast_ii(worklist, parent, use);
1927   push_opaque_zero_trip_guard(worklist, use);
1928 }
1929 
1930 
1931 // We must recheck Phis too if use is a Region.
1932 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
1933   if (use->is_Region()) {
1934     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
1935       push_if_not_bottom_type(worklist, use->fast_out(i));
1936     }
1937   }
1938 }
1939 
1940 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
1941 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
1942 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
1943 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

1965       if (cmpu->Opcode() == Op_CmpU) {
1966         // Got a CmpU which might need the new type information from node n.
1967         push_if_not_bottom_type(worklist, cmpu);
1968       }
1969     }
1970   }
1971 }
1972 
1973 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
1974 // See PhiNode::Value().
1975 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
1976   uint use_op = use->Opcode();
1977   if (use_op == Op_CmpI || use_op == Op_CmpL) {
1978     PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
1979     if (phi != nullptr) {
1980       worklist.push(phi);
1981     }
1982   }
1983 }
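
Note (illustrative, not part of this webrev): PhiNode::Value() derives the induction
variable's range from the counted loop's init, stride and limit, so a sharper CCP type
on the limit can sharpen the Phi even though the Phi is not a direct user of n.
A rough illustration, assuming stride 1 and that the loop is entered:

    for (int i = 0; i < limit; i++) { /* ... */ }
    // If CCP narrows limit to int:[0..42], PhiNode::Value() can narrow the
    // iv Phi for i to roughly int:[0..41].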
1984 
1985 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
1986   uint use_op = use->Opcode();
1987   if (use_op == Op_CastP2X) {
1988     for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1989       Node* u = use->fast_out(i2);
1990       if (u->Opcode() == Op_AndX) {
1991         worklist.push(u);
1992       }
1993     }
1994   }
1995 }
1996 
1997 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
1998 // See LoadNode::Value().
1999 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2000   BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2001   bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2002 
2003   if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2004     for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2005       Node* loadp = use->fast_out(i);
2006       const Type* ut = loadp->bottom_type();
2007       if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2008         if (has_load_barrier_nodes) {
2009           // Search for load barriers behind the load
2010           push_load_barrier(worklist, barrier_set, loadp);
2011         }
2012         worklist.push(loadp);
2013       }
2014     }
2015   }
2016 }