1314 tnew->dump_on(tty);
1315 tty->cr();
1316 return true;
1317 }
1318 #endif
1319
1320 /**
1321 * Register a new node with the optimizer. Update the types array, the def-use
1322 * info. Put on worklist.
1323 */
1324 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1325 set_type_bottom(n);
1326 _worklist.push(n);
1327 if (orig != nullptr) C->copy_node_notes_to(n, orig);
1328 return n;
1329 }
1330
1331 //------------------------------transform--------------------------------------
1332 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1333 Node *PhaseIterGVN::transform( Node *n ) {
1334 if (_delay_transform) {
1335 // Register the node but don't optimize for now
1336 register_new_node_with_optimizer(n);
1337 return n;
1338 }
1339
1340 // If brand new node, make space in type array, and give it a type.
1341 ensure_type_or_null(n);
1342 if (type_or_null(n) == nullptr) {
1343 set_type_bottom(n);
1344 }
1345
1346 return transform_old(n);
1347 }
1348
1349 Node *PhaseIterGVN::transform_old(Node* n) {
1350 NOT_PRODUCT(set_transforms());
1351 // Remove 'n' from hash table in case it gets modified
1352 _table.hash_delete(n);
1353 #ifdef ASSERT
1354 if (is_verify_def_use()) {
1355 assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1356 }
1357 #endif
1358
1359 // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1360 if (n->is_Cmp()) {
1361 add_users_to_worklist(n);
1362 }
1363
1364 // Apply the Ideal call in a loop until it no longer applies
1365 Node* k = n;
1595
1596 // Smash all inputs to 'old', isolating him completely
1597 Node *temp = new Node(1);
1598 temp->init_req(0,nn); // Add a use to nn to prevent him from dying
1599 remove_dead_node( old );
1600 temp->del_req(0); // Yank bogus edge
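// If the replacement 'nn' is left without uses it is dead as well; enqueue it so IGVN can remove it.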
1601 if (nn != nullptr && nn->outcnt() == 0) {
1602 _worklist.push(nn);
1603 }
1604 #ifndef PRODUCT
1605 if (is_verify_def_use()) {
1606 for ( int i = 0; i < _verify_window_size; i++ ) {
1607 if ( _verify_window[i] == old )
1608 _verify_window[i] = nn;
1609 }
1610 }
1611 #endif
1612 temp->destruct(this); // reuse the _idx of this little guy
1613 }
1614
1615 //------------------------------add_users_to_worklist--------------------------
1616 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1617 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1618 _worklist.push(n->fast_out(i)); // Push on worklist
1619 }
1620 }
1621
1622 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1623 // compares the induction variable with n
1624 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
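// Walk Cmp -> Bool -> CountedLoopEnd: if 'n' is the limit of that loop exit test, return the loop's induction variable Phi.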
1625 for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1626 Node* bol = cmp->fast_out(i);
1627 for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1628 Node* iff = bol->fast_out(i2);
1629 if (iff->is_BaseCountedLoopEnd()) {
1630 BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1631 if (cle->limit() == n) {
1632 PhiNode* phi = cle->phi();
1633 if (phi != nullptr) {
1634 return phi;
1742 assert(n == in2, "only in2 modified");
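// A dependency-carrying CastII derives its type from the If test on this Cmp (see CastIINode::Value),
// so it must be re-examined when the Cmp's other input changes.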
1743 // Find all CastII with input in1.
1744 for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1745 Node* castii = in1->fast_out(j);
1746 if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1747 // Find If.
1748 if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1749 Node* ifnode = castii->in(0)->in(0);
1750 // Check that the If connects to the cmp
1751 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1752 _worklist.push(castii);
1753 }
1754 }
1755 }
1756 }
1757 }
1758 }
1759 }
1760 }
1761
1762 // If changed Cast input, notify down for Phi and Sub - both do "uncast"
1763 if (use->is_ConstraintCast()) {
1764 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1765 Node* u = use->fast_out(i2);
1766 if (u->is_Phi() || u->is_Sub()) {
1767 // Phi (.., CastII, ..) or Sub(Cast(x), x)
1768 _worklist.push(u);
1769 } else if (u->is_ConstraintCast()) {
1770 // Follow cast-chains down to Sub: Sub( CastII(CastII(x)), x)
1771 // This case is quite rare. Let's BFS-traverse casts, to find Subs:
1772 ResourceMark rm;
1773 Unique_Node_List casts;
1774 casts.push(u); // start traversal
1775 for (uint j = 0; j < casts.size(); ++j) {
1776 Node* cast = casts.at(j); // for every cast
1777 for (DUIterator_Fast kmax, k = cast->fast_outs(kmax); k < kmax; k++) {
1778 Node* cast_use = cast->fast_out(k);
1779 if (cast_use->is_ConstraintCast()) {
1780 casts.push(cast_use); // traverse this cast also
1781 } else if (cast_use->is_Sub()) {
1825 InitializeNode* init = use->as_Allocate()->initialization();
1826 if (init != nullptr) {
1827 Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1828 if (imem != nullptr) add_users_to_worklist0(imem);
1829 }
1830 }
1831 // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1832 // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1833 // to guarantee the change is not missed.
1834 if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1835 Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1836 if (p != nullptr) {
1837 add_users_to_worklist0(p);
1838 }
1839 }
1840
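// If an Initialize node changed, users of its memory projection may be affected as well.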
1841 if (use_op == Op_Initialize) {
1842 Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1843 if (imem != nullptr) add_users_to_worklist0(imem);
1844 }
1845 // Loading the java mirror from a Klass requires two loads and the type
1846 // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1847 // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1848 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1849 bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1850
1851 if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1852 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1853 Node* u = use->fast_out(i2);
1854 const Type* ut = u->bottom_type();
1855 if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1856 if (has_load_barrier_nodes) {
1857 // Search for load barriers behind the load
1858 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1859 Node* b = u->fast_out(i3);
1860 if (bs->is_gc_barrier_node(b)) {
1861 _worklist.push(b);
1862 }
1863 }
1864 }
1865 _worklist.push(u);
1866 }
1867 }
1868 }
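// If the guard's input changed, the Cmp consuming the OpaqueZeroTripGuard may now fold; enqueue it.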
1869 if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1870 assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1871 if (use->outcnt() == 1) {
1872 Node* cmp = use->unique_out();
1873 _worklist.push(cmp);
1874 }
1875 }
1876 }
1877 }
1878
1879 /**
1880 * Remove the speculative part of all types that we know of
1881 */
1882 void PhaseIterGVN::remove_speculative_types() {
1883 assert(UseTypeSpeculation, "speculation is off");
1884 for (uint i = 0; i < _types.Size(); i++) {
1885 const Type* t = _types.fast_lookup(i);
1886 if (t != nullptr) {
1887 _types.map(i, t->remove_speculative());
1888 }
2039 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2040 Node* use = n->fast_out(i);
2041 push_if_not_bottom_type(worklist, use);
2042 push_more_uses(worklist, n, use);
2043 }
2044 }
2045
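// Only revisit a node whose type has not yet dropped to its bottom (most conservative) type, i.e. whose type can still change.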
2046 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
2047 if (n->bottom_type() != type(n)) {
2048 worklist.push(n);
2049 }
2050 }
2051
2052 // For some nodes, we need to propagate the type change to grandchildren or even further down.
2053 // Add them back to the worklist.
2054 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
2055 push_phis(worklist, use);
2056 push_catch(worklist, use);
2057 push_cmpu(worklist, use);
2058 push_counted_loop_phi(worklist, parent, use);
2059 push_loadp(worklist, use);
2060 push_and(worklist, parent, use);
2061 push_cast_ii(worklist, parent, use);
2062 push_opaque_zero_trip_guard(worklist, use);
2063 }
2064
2065
2066 // We must recheck Phis too if use is a Region.
2067 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
2068 if (use->is_Region()) {
2069 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2070 push_if_not_bottom_type(worklist, use->fast_out(i));
2071 }
2072 }
2073 }
2074
2075 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
2076 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
2077 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
2078 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
2100 if (cmpu->Opcode() == Op_CmpU) {
2101 // Got a CmpU which might need the new type information from node n.
2102 push_if_not_bottom_type(worklist, cmpu);
2103 }
2104 }
2105 }
2106 }
2107
2108 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
2109 // See PhiNode::Value().
2110 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
2111 uint use_op = use->Opcode();
2112 if (use_op == Op_CmpI || use_op == Op_CmpL) {
2113 PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
2114 if (phi != nullptr) {
2115 worklist.push(phi);
2116 }
2117 }
2118 }
2119
2120 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
2121 // See LoadNode::Value().
2122 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2123 BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2124 bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2125
2126 if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2127 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2128 Node* loadp = use->fast_out(i);
2129 const Type* ut = loadp->bottom_type();
2130 if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2131 if (has_load_barrier_nodes) {
2132 // Search for load barriers behind the load
2133 push_load_barrier(worklist, barrier_set, loadp);
2134 }
2135 worklist.push(loadp);
2136 }
2137 }
2138 }
2139 }
|
1314 tnew->dump_on(tty);
1315 tty->cr();
1316 return true;
1317 }
1318 #endif
1319
1320 /**
1321 * Register a new node with the optimizer. Update the types array, the def-use
1322 * info. Put on worklist.
1323 */
1324 Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1325 set_type_bottom(n);
1326 _worklist.push(n);
1327 if (orig != nullptr) C->copy_node_notes_to(n, orig);
1328 return n;
1329 }
1330
1331 //------------------------------transform--------------------------------------
1332 // Non-recursive: idealize Node 'n' with respect to its inputs and its value
1333 Node *PhaseIterGVN::transform( Node *n ) {
1334 // If brand new node, make space in type array, and give it a type.
1335 ensure_type_or_null(n);
1336 if (type_or_null(n) == nullptr) {
1337 set_type_bottom(n);
1338 }
1339
1340 if (_delay_transform) {
1341 // Add the node to the worklist but don't optimize for now
1342 _worklist.push(n);
1343 return n;
1344 }
1345
1346 return transform_old(n);
1347 }
1348
1349 Node *PhaseIterGVN::transform_old(Node* n) {
1350 NOT_PRODUCT(set_transforms());
1351 // Remove 'n' from hash table in case it gets modified
1352 _table.hash_delete(n);
1353 #ifdef ASSERT
1354 if (is_verify_def_use()) {
1355 assert(!_table.find_index(n->_idx), "found duplicate entry in table");
1356 }
1357 #endif
1358
1359 // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
1360 if (n->is_Cmp()) {
1361 add_users_to_worklist(n);
1362 }
1363
1364 // Apply the Ideal call in a loop until it no longer applies
1365 Node* k = n;
1595
1596 // Smash all inputs to 'old', isolating him completely
1597 Node *temp = new Node(1);
1598 temp->init_req(0,nn); // Add a use to nn to prevent him from dying
1599 remove_dead_node( old );
1600 temp->del_req(0); // Yank bogus edge
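// If the replacement 'nn' is left without uses it is dead as well; enqueue it so IGVN can remove it.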
1601 if (nn != nullptr && nn->outcnt() == 0) {
1602 _worklist.push(nn);
1603 }
1604 #ifndef PRODUCT
1605 if (is_verify_def_use()) {
1606 for ( int i = 0; i < _verify_window_size; i++ ) {
1607 if ( _verify_window[i] == old )
1608 _verify_window[i] = nn;
1609 }
1610 }
1611 #endif
1612 temp->destruct(this); // reuse the _idx of this little guy
1613 }
1614
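// Replace 'n' by 'm' in all uses of 'n'. Each user is unhashed and re-enqueued (rehash_node_delayed) before its edges are updated.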
1615 void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
1616 assert(n != nullptr, "sanity");
1617 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1618 Node* u = n->fast_out(i);
1619 if (u != n) {
1620 rehash_node_delayed(u);
1621 int nb = u->replace_edge(n, m);
1622 --i, imax -= nb;
1623 }
1624 }
1625 assert(n->outcnt() == 0, "all uses must be deleted");
1626 }
1627
1628 //------------------------------add_users_to_worklist--------------------------
1629 void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1630 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1631 _worklist.push(n->fast_out(i)); // Push on worklist
1632 }
1633 }
1634
1635 // Return the counted loop Phi if, as a counted loop exit condition, cmp
1636 // compares the induction variable with n
1637 static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
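// Walk Cmp -> Bool -> CountedLoopEnd: if 'n' is the limit of that loop exit test, return the loop's induction variable Phi.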
1638 for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
1639 Node* bol = cmp->fast_out(i);
1640 for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
1641 Node* iff = bol->fast_out(i2);
1642 if (iff->is_BaseCountedLoopEnd()) {
1643 BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
1644 if (cle->limit() == n) {
1645 PhiNode* phi = cle->phi();
1646 if (phi != nullptr) {
1647 return phi;
1755 assert(n == in2, "only in2 modified");
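// A dependency-carrying CastII derives its type from the If test on this Cmp (see CastIINode::Value),
// so it must be re-examined when the Cmp's other input changes.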
1756 // Find all CastII with input in1.
1757 for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
1758 Node* castii = in1->fast_out(j);
1759 if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
1760 // Find If.
1761 if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
1762 Node* ifnode = castii->in(0)->in(0);
1763 // Check that the If connects to the cmp
1764 if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
1765 _worklist.push(castii);
1766 }
1767 }
1768 }
1769 }
1770 }
1771 }
1772 }
1773 }
1774
1775 // Inline type nodes can have other inline types as users. If an input gets
1776 // updated, make sure that inline type users get a chance for optimization.
1777 if (use->is_InlineType()) {
1778 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1779 Node* u = use->fast_out(i2);
1780 if (u->is_InlineType())
1781 _worklist.push(u);
1782 }
1783 }
1784 // If changed Cast input, notify down for Phi and Sub - both do "uncast"
1785 if (use->is_ConstraintCast()) {
1786 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1787 Node* u = use->fast_out(i2);
1788 if (u->is_Phi() || u->is_Sub()) {
1789 // Phi (.., CastII, ..) or Sub(Cast(x), x)
1790 _worklist.push(u);
1791 } else if (u->is_ConstraintCast()) {
1792 // Follow cast-chains down to Sub: Sub( CastII(CastII(x)), x)
1793 // This case is quite rare. Let's BFS-traverse casts, to find Subs:
1794 ResourceMark rm;
1795 Unique_Node_List casts;
1796 casts.push(u); // start traversal
1797 for (uint j = 0; j < casts.size(); ++j) {
1798 Node* cast = casts.at(j); // for every cast
1799 for (DUIterator_Fast kmax, k = cast->fast_outs(kmax); k < kmax; k++) {
1800 Node* cast_use = cast->fast_out(k);
1801 if (cast_use->is_ConstraintCast()) {
1802 casts.push(cast_use); // traverse this cast also
1803 } else if (cast_use->is_Sub()) {
1847 InitializeNode* init = use->as_Allocate()->initialization();
1848 if (init != nullptr) {
1849 Node* imem = init->proj_out_or_null(TypeFunc::Memory);
1850 if (imem != nullptr) add_users_to_worklist0(imem);
1851 }
1852 }
1853 // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
1854 // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
1855 // to guarantee the change is not missed.
1856 if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
1857 Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
1858 if (p != nullptr) {
1859 add_users_to_worklist0(p);
1860 }
1861 }
1862
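// If an Initialize node changed, users of its memory projection may be affected as well.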
1863 if (use_op == Op_Initialize) {
1864 Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
1865 if (imem != nullptr) add_users_to_worklist0(imem);
1866 }
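// An AndX user of a CastP2X may now be able to fold; enqueue it.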
1867 if (use_op == Op_CastP2X) {
1868 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1869 Node* u = use->fast_out(i2);
1870 if (u->Opcode() == Op_AndX) {
1871 _worklist.push(u);
1872 }
1873 }
1874 }
1875 // Loading the java mirror from a Klass requires two loads and the type
1876 // of the mirror load depends on the type of 'n'. See LoadNode::Value().
1877 // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
1878 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1879 bool has_load_barrier_nodes = bs->has_load_barrier_nodes();
1880
1881 if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
1882 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1883 Node* u = use->fast_out(i2);
1884 const Type* ut = u->bottom_type();
1885 if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
1886 if (has_load_barrier_nodes) {
1887 // Search for load barriers behind the load
1888 for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
1889 Node* b = u->fast_out(i3);
1890 if (bs->is_gc_barrier_node(b)) {
1891 _worklist.push(b);
1892 }
1893 }
1894 }
1895 _worklist.push(u);
1896 }
1897 }
1898 }
1899
1900 // Give CallStaticJavaNode::remove_useless_allocation a chance to run
1901 if (use->is_Region()) {
1902 Node* c = use;
1903 do {
1904 c = c->unique_ctrl_out_or_null();
1905 } while (c != nullptr && c->is_Region());
1906 if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
1907 _worklist.push(c);
1908 }
1909 }
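// If the guard's input changed, the Cmp consuming the OpaqueZeroTripGuard may now fold; enqueue it.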
1910 if (use->Opcode() == Op_OpaqueZeroTripGuard) {
1911 assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
1912 if (use->outcnt() == 1) {
1913 Node* cmp = use->unique_out();
1914 _worklist.push(cmp);
1915 }
1916 }
1917 }
1918 }
1919
1920 /**
1921 * Remove the speculative part of all types that we know of
1922 */
1923 void PhaseIterGVN::remove_speculative_types() {
1924 assert(UseTypeSpeculation, "speculation is off");
1925 for (uint i = 0; i < _types.Size(); i++) {
1926 const Type* t = _types.fast_lookup(i);
1927 if (t != nullptr) {
1928 _types.map(i, t->remove_speculative());
1929 }
2080 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2081 Node* use = n->fast_out(i);
2082 push_if_not_bottom_type(worklist, use);
2083 push_more_uses(worklist, n, use);
2084 }
2085 }
2086
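// Only revisit a node whose type has not yet dropped to its bottom (most conservative) type, i.e. whose type can still change.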
2087 void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
2088 if (n->bottom_type() != type(n)) {
2089 worklist.push(n);
2090 }
2091 }
2092
2093 // For some nodes, we need to propagate the type change to grandchildren or even further down.
2094 // Add them back to the worklist.
2095 void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
2096 push_phis(worklist, use);
2097 push_catch(worklist, use);
2098 push_cmpu(worklist, use);
2099 push_counted_loop_phi(worklist, parent, use);
2100 push_cast(worklist, use);
2101 push_loadp(worklist, use);
2102 push_and(worklist, parent, use);
2103 push_cast_ii(worklist, parent, use);
2104 push_opaque_zero_trip_guard(worklist, use);
2105 }
2106
2107
2108 // We must recheck Phis too if use is a Region.
2109 void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
2110 if (use->is_Region()) {
2111 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2112 push_if_not_bottom_type(worklist, use->fast_out(i));
2113 }
2114 }
2115 }
2116
2117 // If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
2118 // non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
2119 // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
2120 void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {
2142 if (cmpu->Opcode() == Op_CmpU) {
2143 // Got a CmpU which might need the new type information from node n.
2144 push_if_not_bottom_type(worklist, cmpu);
2145 }
2146 }
2147 }
2148 }
2149
2150 // If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
2151 // See PhiNode::Value().
2152 void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
2153 uint use_op = use->Opcode();
2154 if (use_op == Op_CmpI || use_op == Op_CmpL) {
2155 PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
2156 if (phi != nullptr) {
2157 worklist.push(phi);
2158 }
2159 }
2160 }
2161
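// AndX users of a CastP2X may fold once the pointer's type improves; push them.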
2162 void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
2163 uint use_op = use->Opcode();
2164 if (use_op == Op_CastP2X) {
2165 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
2166 Node* u = use->fast_out(i2);
2167 if (u->Opcode() == Op_AndX) {
2168 worklist.push(u);
2169 }
2170 }
2171 }
2172 }
2173
2174 // Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
2175 // See LoadNode::Value().
2176 void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
2177 BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
2178 bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();
2179
2180 if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
2181 for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
2182 Node* loadp = use->fast_out(i);
2183 const Type* ut = loadp->bottom_type();
2184 if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
2185 if (has_load_barrier_nodes) {
2186 // Search for load barriers behind the load
2187 push_load_barrier(worklist, barrier_set, loadp);
2188 }
2189 worklist.push(loadp);
2190 }
2191 }
2192 }
2193 }
|