  tnew->dump_on(tty);
  tty->cr();
  return true;
}
#endif

/**
 * Register a new node with the optimizer. Update the types array and the
 * def-use info, and put the node on the worklist.
 */
Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
  set_type_bottom(n);
  _worklist.push(n);
  if (orig != nullptr) C->copy_node_notes_to(n, orig);
  return n;
}
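
// Typical use (hypothetical sketch, assuming the usual 'orig = nullptr'
// default in the header): a transformation creates a helper node and
// registers it so IGVN will visit it, e.g.
//   Node* sum = igvn.register_new_node_with_optimizer(new AddINode(a, b));
// The node starts out at its bottom type until value analysis refines it.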

//------------------------------transform--------------------------------------
// Non-recursive: idealize Node 'n' with respect to its inputs and its value
Node* PhaseIterGVN::transform(Node* n) {
  // If brand new node, make space in type array, and give it a type.
  ensure_type_or_null(n);
  if (type_or_null(n) == nullptr) {
    set_type_bottom(n);
  }

  if (_delay_transform) {
    // Add the node to the worklist but don't optimize for now
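    // (the node will still be idealized later, once IGVN drains its worklist)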
    _worklist.push(n);
    return n;
  }

  return transform_old(n);
}

Node* PhaseIterGVN::transform_old(Node* n) {
  NOT_PRODUCT(set_transforms());
  // Remove 'n' from hash table in case it gets modified
  _table.hash_delete(n);
#ifdef ASSERT
  if (is_verify_def_use()) {
    assert(!_table.find_index(n->_idx), "found duplicate entry in table");
  }
#endif

  // Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
  if (n->is_Cmp()) {
    add_users_to_worklist(n);
  }

  // Apply the Ideal call in a loop until it no longer applies
  Node* k = n;

// ... (lines elided) ...

  // Smash all inputs to 'old', isolating him completely
  Node* temp = new Node(1);
  temp->init_req(0, nn);      // Add a use to nn to prevent him from dying
  remove_dead_node(old);
  temp->del_req(0);           // Yank bogus edge
  if (nn != nullptr && nn->outcnt() == 0) {
    _worklist.push(nn);
  }
#ifndef PRODUCT
  if (is_verify_def_use()) {
    for (int i = 0; i < _verify_window_size; i++) {
      if (_verify_window[i] == old) {
        _verify_window[i] = nn;
      }
    }
  }
#endif
  temp->destruct(this);       // reuse the _idx of this little guy
}

void PhaseIterGVN::replace_in_uses(Node* n, Node* m) {
  assert(n != nullptr, "sanity");
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (u != n) {
      rehash_node_delayed(u);
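      // replace_edge() rewires every input edge of u that pointed at n to
      // point at m and returns the number of edges changed; shrink the
      // DUIterator bounds accordingly, since those uses left n's out-array.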
      int nb = u->replace_edge(n, m);
      --i, imax -= nb;
    }
  }
  assert(n->outcnt() == 0, "all uses must be deleted");
}

//------------------------------add_users_to_worklist--------------------------
void PhaseIterGVN::add_users_to_worklist0(Node* n, Unique_Node_List& worklist) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    worklist.push(n->fast_out(i)); // Push on worklist
  }
}

// Return the counted loop Phi if 'cmp', used as a counted loop exit condition,
// compares the induction variable with 'n'
static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) {
  for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
    Node* bol = cmp->fast_out(i);
    for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
      Node* iff = bol->fast_out(i2);
      if (iff->is_BaseCountedLoopEnd()) {
        BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd();
        if (cle->limit() == n) {
          PhiNode* phi = cle->phi();
          if (phi != nullptr) {
            return phi;

// ... (lines elided) ...

          assert(n == in2, "only in2 modified");
          // Find all CastII with input in1.
          for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
            Node* castii = in1->fast_out(j);
            if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
              // Find If.
              if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
                Node* ifnode = castii->in(0)->in(0);
                // Check that the If connects to the cmp
                if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
                  worklist.push(castii);
                }
              }
            }
          }
        }
      }
    }
  }

  // Inline type nodes can have other inline types as users. If an input gets
  // updated, make sure that inline type users get a chance for optimization.
  if (use->is_InlineType()) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      if (u->is_InlineType()) {
        worklist.push(u);
      }
    }
  }
  // If changed Cast input, notify down for Phi, Sub, and Xor - all do "uncast"
  // Patterns:
  // ConstraintCast+ -> Sub
  // ConstraintCast+ -> Phi
  // ConstraintCast+ -> Xor
  if (use->is_ConstraintCast()) {
    auto push_the_uses_to_worklist = [&](Node* n) {
      if (n->is_Phi() || n->is_Sub() || n->Opcode() == Op_XorI || n->Opcode() == Op_XorL) {
        worklist.push(n);
      }
    };
    auto is_boundary = [](Node* n) { return !n->is_ConstraintCast(); };
    use->visit_uses(push_the_uses_to_worklist, is_boundary);
  }
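  // visit_uses() walks use's transitive out-edges but does not explore past
  // nodes where is_boundary is true; with the lambdas above it follows chains
  // of ConstraintCasts (the "ConstraintCast+" in the patterns) and enqueues
  // the Phi/Sub/Xor consumers found at the end of each chain.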
  // If changed LShift inputs, check RShift users for useless sign-ext
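  // (e.g. (x << 16) >> 16, the common idiom for sign-extending a 16-bit
  // value; after the input change the shift pair may now be removable)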
  if (use_op == Op_LShiftI) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      if (u->Opcode() == Op_RShiftI)
        worklist.push(u);

// ... (lines elided) ...

  // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead.
  // CatchNode::Value() is responsible for killing that path. The CatchNode has to be explicitly enqueued for igvn
  // to guarantee the change is not missed.
  if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) {
    Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control);
    if (p != nullptr) {
      add_users_to_worklist0(p, worklist);
    }
  }

  if (use_op == Op_Initialize) {
    Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory);
    if (imem != nullptr) add_users_to_worklist0(imem, worklist);
  }
  // Loading the java mirror from a Klass requires two loads and the type
  // of the mirror load depends on the type of 'n'. See LoadNode::Value().
  //   LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = bs->has_load_barrier_nodes();

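  // An AndX of a CastP2X is a bit-test on a pointer's raw representation; a
  // more precise pointer type may let the AndX constant-fold. (In Valhalla
  // such tests appear, e.g., when checking array properties via pointer bits.)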
  if (use_op == Op_CastP2X) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      if (u->Opcode() == Op_AndX) {
        worklist.push(u);
      }
    }
  }
  if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      const Type* ut = u->bottom_type();
      if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
            Node* b = u->fast_out(i3);
            if (bs->is_gc_barrier_node(b)) {
              worklist.push(b);
            }
          }
        }
        worklist.push(u);
      }
    }
  }
  // Give CallStaticJavaNode::remove_useless_allocation a chance to run
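  // (follow the unique control path below the Region, skipping intermediate
  // Regions, down to the uncommon-trap call, if there is one)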
  if (use->is_Region()) {
    Node* c = use;
    do {
      c = c->unique_ctrl_out_or_null();
    } while (c != nullptr && c->is_Region());
    if (c != nullptr && c->is_CallStaticJava() && c->as_CallStaticJava()->uncommon_trap_request() != 0) {
      worklist.push(c);
    }
  }
  if (use->Opcode() == Op_OpaqueZeroTripGuard) {
    assert(use->outcnt() <= 1, "OpaqueZeroTripGuard can't be shared");
    if (use->outcnt() == 1) {
      Node* cmp = use->unique_out();
      worklist.push(cmp);
    }
  }
}

/**
 * Remove the speculative part of all types that we know of
 */
void PhaseIterGVN::remove_speculative_types() {
  assert(UseTypeSpeculation, "speculation is off");
  for (uint i = 0; i < _types.Size(); i++) {
    const Type* t = _types.fast_lookup(i);
    if (t != nullptr) {
      _types.map(i, t->remove_speculative());
    }
  }

// ... (lines elided) ...

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    push_if_not_bottom_type(worklist, use);
    push_more_uses(worklist, n, use);
  }
}

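// A node whose CCP type is still above its bottom type can have its type
// lowered again by a later visit, so it stays eligible for the worklist; once
// at bottom, the type can fall no further and re-pushing is pointless.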
void PhaseCCP::push_if_not_bottom_type(Unique_Node_List& worklist, Node* n) const {
  if (n->bottom_type() != type(n)) {
    worklist.push(n);
  }
}

// For some nodes, we need to propagate the type change to grandchildren or even further down.
// Add them back to the worklist.
void PhaseCCP::push_more_uses(Unique_Node_List& worklist, Node* parent, const Node* use) const {
  push_phis(worklist, use);
  push_catch(worklist, use);
  push_cmpu(worklist, use);
  push_counted_loop_phi(worklist, parent, use);
  push_cast(worklist, use);
  push_loadp(worklist, use);
  push_and(worklist, parent, use);
  push_cast_ii(worklist, parent, use);
  push_opaque_zero_trip_guard(worklist, use);
}

// We must recheck Phis too if use is a Region.
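// A Phi's Value() depends on which control paths into its Region are alive,
// so when the Region's type changes the Phis' types can change as well.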
void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const {
  if (use->is_Region()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      push_if_not_bottom_type(worklist, use->fast_out(i));
    }
  }
}

// If we changed the receiver type of a call, we need to revisit the Catch node following the call. It's looking for a
// non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path.
// Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes.
void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) {

// ... (lines elided) ...

      if (cmpu->Opcode() == Op_CmpU) {
        // Got a CmpU which might need the new type information from node n.
        push_if_not_bottom_type(worklist, cmpu);
      }
    }
  }
}

// If n is used in a counted loop exit condition, then the type of the counted loop's Phi depends on the type of 'n'.
// See PhiNode::Value().
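// (The exit test is typically CmpI/CmpL(iv_phi, limit); when 'n' is the
// limit, the induction variable Phi's value range depends on n's type.)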
void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CmpI || use_op == Op_CmpL) {
    PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent);
    if (phi != nullptr) {
      worklist.push(phi);
    }
  }
}

void PhaseCCP::push_cast(Unique_Node_List& worklist, const Node* use) {
  uint use_op = use->Opcode();
  if (use_op == Op_CastP2X) {
    for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
      Node* u = use->fast_out(i2);
      if (u->Opcode() == Op_AndX) {
        worklist.push(u);
      }
    }
  }
}

// Loading the java mirror from a Klass requires two loads and the type of the mirror load depends on the type of 'n'.
// See LoadNode::Value().
void PhaseCCP::push_loadp(Unique_Node_List& worklist, const Node* use) const {
  BarrierSetC2* barrier_set = BarrierSet::barrier_set()->barrier_set_c2();
  bool has_load_barrier_nodes = barrier_set->has_load_barrier_nodes();

  if (use->Opcode() == Op_LoadP && use->bottom_type()->isa_rawptr()) {
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* loadp = use->fast_out(i);
      const Type* ut = loadp->bottom_type();
      if (loadp->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(loadp)) {
        if (has_load_barrier_nodes) {
          // Search for load barriers behind the load
          push_load_barrier(worklist, barrier_set, loadp);
        }
        worklist.push(loadp);
      }
    }
  }
}