14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/connode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/loopnode.hpp"
35 #include "opto/matcher.hpp"
36 #include "opto/movenode.hpp"
37 #include "opto/mulnode.hpp"
38 #include "opto/opaquenode.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/subnode.hpp"
41 #include "opto/subtypenode.hpp"
42 #include "opto/superword.hpp"
43 #include "opto/vectornode.hpp"
44 #include "utilities/checkedCast.hpp"
45 #include "utilities/macros.hpp"
46
47 //=============================================================================
48 //------------------------------split_thru_phi---------------------------------
49 // Split Node 'n' through merge point if there is enough win.
50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
51 if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
52 (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
53 // ConvI2L/ConvL2I may have type information on it which is unsafe to push up
54 // so disable this for now
55 return nullptr;
56 }
57
58 // Splitting range check CastIIs through a loop induction Phi can
59 // cause new Phis to be created that are left unrelated to the loop
60 // induction Phi and prevent optimizations (vectorization)
61 if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
62 n->in(1) == region->as_CountedLoop()->phi()) {
63 return nullptr;
64 }
65
66 if (cannot_split_division(n, region)) {
67 return nullptr;
68 }
69
70 SplitThruPhiWins wins(region);
71 assert(!n->is_CFG(), "");
72 assert(region->is_Region(), "");
73
74 const Type* type = n->bottom_type();
75 const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
76 Node* phi;
77 if (t_oop != nullptr && t_oop->is_known_instance_field()) {
78 int iid = t_oop->instance_id();
79 int index = C->get_alias_index(t_oop);
80 int offset = t_oop->offset();
81 phi = new PhiNode(region, type, nullptr, iid, index, offset);
82 } else {
83 phi = PhiNode::make_blank(region, n);
84 }
85 uint old_unique = C->unique();
774 // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
775 // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
776 // have a Phi for the base here that we convert to a CMOVE all is well
777 // and good. But if the base is dead, we'll not make a CMOVE. Later
778 // the allocator will have to produce a base by creating a CMOVE of the
779 // relevant bases. This puts the allocator in the business of
780 // manufacturing expensive instructions, generally a bad plan.
781 // Just Say No to Conditionally-Moved Derived Pointers.
782 if (tp && tp->offset() != 0)
783 return nullptr;
784 cost++;
785 break;
786 }
787 default:
788 return nullptr; // In particular, can't do memory or I/O
789 }
790 // Add in cost any speculative ops
791 for (uint j = 1; j < region->req(); j++) {
792 Node *proj = region->in(j);
793 Node *inp = phi->in(j);
794 if (get_ctrl(inp) == proj) { // Found local op
795 cost++;
796 // Check for a chain of dependent ops; these will all become
797 // speculative in a CMOV.
798 for (uint k = 1; k < inp->req(); k++)
799 if (get_ctrl(inp->in(k)) == proj)
800 cost += ConditionalMoveLimit; // Too much speculative goo
801 }
802 }
803 // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
804 // This will likely Split-If, a higher-payoff operation.
805 for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
806 Node* use = phi->fast_out(k);
807 if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
808 cost += ConditionalMoveLimit;
809 // Is there a use inside the loop?
810 // Note: check only basic types since CMoveP is pinned.
811 if (!used_inside_loop && is_java_primitive(bt)) {
812 IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
813 if (r_loop == u_loop || r_loop->is_member(u_loop)) {
1099 assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1100
1101 // Move store out of the loop
1102 _igvn.replace_node(hook, n->in(MemNode::Memory));
1103 _igvn.replace_input_of(n, 0, lca);
1104 set_ctrl_and_loop(n, lca);
1105
1106 // Disconnect the phi now. An empty phi can confuse other
1107 // optimizations in this pass of loop opts..
1108 if (phi->in(LoopNode::LoopBackControl) == phi) {
1109 _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1110 n_loop->_body.yank(phi);
1111 }
1112 }
1113 }
1114 }
1115 }
1116 }
1117 }
1118
1119 //------------------------------split_if_with_blocks_pre-----------------------
1120 // Do the real work in a non-recursive function. Data nodes want to be
1121 // cloned in the pre-order so they can feed each other nicely.
1122 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1123 // Cloning these guys is unlikely to win
1124 int n_op = n->Opcode();
1125 if (n_op == Op_MergeMem) {
1126 return n;
1127 }
1128 if (n->is_Proj()) {
1129 return n;
1130 }
1131 // Do not clone-up CmpFXXX variations, as these are always
1132 // followed by a CmpI
1133 if (n->is_Cmp()) {
1134 return n;
1135 }
1136 // Attempt to use a conditional move instead of a phi/branch
1137 if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1138 Node *cmov = conditional_move( n );
1139 if (cmov) {
1140 return cmov;
1141 }
1142 }
1143 if (n->is_CFG() || n->is_LoadStore()) {
1144 return n;
1145 }
1146 if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1147 if (!C->major_progress()) { // If chance of no more loop opts...
1148 _igvn._worklist.push(n); // maybe we'll remove them
1149 }
1150 return n;
1390
1391 return true;
1392 }
1393
1394 // Detect if the node is the inner strip-mined loop
1395 // Return: null if it's not the case, or the exit of outer strip-mined loop
1396 static Node* is_inner_of_stripmined_loop(const Node* out) {
1397 Node* out_le = nullptr;
1398
1399 if (out->is_CountedLoopEnd()) {
1400 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1401
1402 if (loop != nullptr && loop->is_strip_mined()) {
1403 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1404 }
1405 }
1406
1407 return out_le;
1408 }
1409
1410 //------------------------------split_if_with_blocks_post----------------------
1411 // Do the real work in a non-recursive function. CFG hackery wants to be
1412 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1413 // info.
1414 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1415
1416 // Cloning Cmp through Phi's involves the split-if transform.
1417 // FastLock is not used by an If
1418 if (n->is_Cmp() && !n->is_FastLock()) {
1419 Node *n_ctrl = get_ctrl(n);
1420 // Determine if the Node has inputs from some local Phi.
1421 // Returns the block to clone thru.
1422 Node *n_blk = has_local_phi_input(n);
1423 if (n_blk != n_ctrl) {
1424 return;
1425 }
1426
1427 if (!can_split_if(n_ctrl)) {
1428 return;
1429 }
1430
1431 if (n->outcnt() != 1) {
1432 return; // Multiple bool's from 1 compare?
1433 }
1434 Node *bol = n->unique_out();
1435 assert(bol->is_Bool(), "expect a bool here");
1544 // accesses would start to float, since we don't pin at that point.
1545 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1546 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1547 prevdom->in(0)->Opcode() != Op_RangeCheck;
1548 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1549 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1550 return;
1551 }
1552 prevdom = dom;
1553 dom = idom(prevdom);
1554 }
1555 }
1556 }
1557
1558 try_sink_out_of_loop(n);
1559 if (C->failing()) {
1560 return;
1561 }
1562
1563 try_move_store_after_loop(n);
1564 }
1565
1566 // Transform:
1567 //
1568 // if (some_condition) {
1569 // // body 1
1570 // } else {
1571 // // body 2
1572 // }
1573 // if (some_condition) {
1574 // // body 3
1575 // } else {
1576 // // body 4
1577 // }
1578 //
1579 // into:
1580 //
1581 //
1582 // if (some_condition) {
1583 // // body 1
2058 uint i;
2059 for (i = 1; i < phi->req(); i++) {
2060 Node* b = phi->in(i);
2061 if (b->is_Phi()) {
2062 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2063 } else {
2064 assert(b->is_Bool() || b->is_OpaqueConstantBool() || b->is_OpaqueInitializedAssertionPredicate(),
2065 "bool, non-null check with OpaqueConstantBool or Initialized Assertion Predicate with its Opaque node");
2066 }
2067 }
2068 Node* n = phi->in(1);
2069 Node* sample_opaque = nullptr;
2070 Node *sample_bool = nullptr;
2071 if (n->is_OpaqueConstantBool() || n->is_OpaqueInitializedAssertionPredicate()) {
2072 sample_opaque = n;
2073 sample_bool = n->in(1);
2074 assert(sample_bool->is_Bool(), "wrong type");
2075 } else {
2076 sample_bool = n;
2077 }
2078 Node *sample_cmp = sample_bool->in(1);
2079
2080 // Make Phis to merge the Cmp's inputs.
2081 PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
2082 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2083 for (i = 1; i < phi->req(); i++) {
2084 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2085 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2086 phi1->set_req(i, n1);
2087 phi2->set_req(i, n2);
2088 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2089 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2090 }
2091 // See if these Phis have been made before.
2092 // Register with optimizer
2093 Node *hit1 = _igvn.hash_find_insert(phi1);
2094 if (hit1) { // Hit, toss just made Phi
2095 _igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
2096 assert(hit1->is_Phi(), "" );
2097 phi1 = (PhiNode*)hit1; // Use existing phi
2098 } else { // Miss
2099 _igvn.register_new_node_with_optimizer(phi1);
2100 }
2101 Node *hit2 = _igvn.hash_find_insert(phi2);
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/connode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/inlinetypenode.hpp"
35 #include "opto/loopnode.hpp"
36 #include "opto/matcher.hpp"
37 #include "opto/movenode.hpp"
38 #include "opto/mulnode.hpp"
39 #include "opto/opaquenode.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/subnode.hpp"
42 #include "opto/subtypenode.hpp"
43 #include "opto/superword.hpp"
44 #include "opto/vectornode.hpp"
45 #include "utilities/checkedCast.hpp"
46 #include "utilities/macros.hpp"
47
48 //=============================================================================
49 //------------------------------split_thru_phi---------------------------------
50 // Split Node 'n' through merge point if there is enough win.
51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
52 if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
53 (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
54 // ConvI2L/ConvL2I may have type information on it which is unsafe to push up
55 // so disable this for now
56 return nullptr;
57 }
58
59 // Splitting range check CastIIs through a loop induction Phi can
60 // cause new Phis to be created that are left unrelated to the loop
61 // induction Phi and prevent optimizations (vectorization)
62 if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
63 n->in(1) == region->as_CountedLoop()->phi()) {
64 return nullptr;
65 }
66
67 // Inline types should not be split through Phis because they cannot be merged
68 // through Phi nodes but each value input needs to be merged individually.
69 if (n->is_InlineType()) {
70 return nullptr;
71 }
72
73 if (cannot_split_division(n, region)) {
74 return nullptr;
75 }
76
77 SplitThruPhiWins wins(region);
78 assert(!n->is_CFG(), "");
79 assert(region->is_Region(), "");
80
81 const Type* type = n->bottom_type();
82 const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
83 Node* phi;
84 if (t_oop != nullptr && t_oop->is_known_instance_field()) {
85 int iid = t_oop->instance_id();
86 int index = C->get_alias_index(t_oop);
87 int offset = t_oop->offset();
88 phi = new PhiNode(region, type, nullptr, iid, index, offset);
89 } else {
90 phi = PhiNode::make_blank(region, n);
91 }
92 uint old_unique = C->unique();
781 // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
782 // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
783 // have a Phi for the base here that we convert to a CMOVE all is well
784 // and good. But if the base is dead, we'll not make a CMOVE. Later
785 // the allocator will have to produce a base by creating a CMOVE of the
786 // relevant bases. This puts the allocator in the business of
787 // manufacturing expensive instructions, generally a bad plan.
788 // Just Say No to Conditionally-Moved Derived Pointers.
789 if (tp && tp->offset() != 0)
790 return nullptr;
791 cost++;
792 break;
793 }
794 default:
795 return nullptr; // In particular, can't do memory or I/O
796 }
797 // Add in cost any speculative ops
798 for (uint j = 1; j < region->req(); j++) {
799 Node *proj = region->in(j);
800 Node *inp = phi->in(j);
801 if (inp->isa_InlineType()) {
802 // TODO 8302217 This prevents PhiNode::push_inline_types_through
803 return nullptr;
804 }
805 if (get_ctrl(inp) == proj) { // Found local op
806 cost++;
807 // Check for a chain of dependent ops; these will all become
808 // speculative in a CMOV.
809 for (uint k = 1; k < inp->req(); k++)
810 if (get_ctrl(inp->in(k)) == proj)
811 cost += ConditionalMoveLimit; // Too much speculative goo
812 }
813 }
814 // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
815 // This will likely Split-If, a higher-payoff operation.
816 for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
817 Node* use = phi->fast_out(k);
818 if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
819 cost += ConditionalMoveLimit;
820 // Is there a use inside the loop?
821 // Note: check only basic types since CMoveP is pinned.
822 if (!used_inside_loop && is_java_primitive(bt)) {
823 IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
824 if (r_loop == u_loop || r_loop->is_member(u_loop)) {
1110 assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1111
1112 // Move store out of the loop
1113 _igvn.replace_node(hook, n->in(MemNode::Memory));
1114 _igvn.replace_input_of(n, 0, lca);
1115 set_ctrl_and_loop(n, lca);
1116
1117 // Disconnect the phi now. An empty phi can confuse other
1118 // optimizations in this pass of loop opts..
1119 if (phi->in(LoopNode::LoopBackControl) == phi) {
1120 _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1121 n_loop->_body.yank(phi);
1122 }
1123 }
1124 }
1125 }
1126 }
1127 }
1128 }
1129
1130 // We can't use immutable memory for the flat array check because we are loading the mark word which is
1131 // mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
1132 // we need to use raw memory to not break anti dependency analysis. Below code will attempt to still move
1133 // flat array checks out of loops, mainly to enable loop unswitching.
1134 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1135 // Skip checks for more than one array
1136 if (n->req() > 3) {
1137 return;
1138 }
1139 Node* mem = n->in(FlatArrayCheckNode::Memory);
1140 Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1141 IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1142 IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1143
1144 // Check if array is loop invariant
1145 if (!check_loop->is_member(ary_loop)) {
1146 // Walk up memory graph from the check until we leave the loop
1147 VectorSet wq;
1148 wq.set(mem->_idx);
1149 while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1150 if (mem->is_Phi()) {
1151 mem = mem->in(1);
1152 } else if (mem->is_MergeMem()) {
1153 mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1154 } else if (mem->is_Proj()) {
1155 mem = mem->in(0);
1156 } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1157 mem = mem->in(TypeFunc::Memory);
1158 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1159 mem = mem->in(MemNode::Memory);
1160 } else {
1161 #ifdef ASSERT
1162 mem->dump();
1163 #endif
1164 ShouldNotReachHere();
1165 }
1166 if (wq.test_set(mem->_idx)) {
1167 return;
1168 }
1169 }
1170 // Replace memory input and re-compute ctrl to move the check out of the loop
1171 _igvn.replace_input_of(n, 1, mem);
1172 set_ctrl_and_loop(n, get_early_ctrl(n));
1173 Node* bol = n->unique_out();
1174 set_ctrl_and_loop(bol, get_early_ctrl(bol));
1175 }
1176 }
1177
1178 //------------------------------split_if_with_blocks_pre-----------------------
1179 // Do the real work in a non-recursive function. Data nodes want to be
1180 // cloned in the pre-order so they can feed each other nicely.
1181 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1182 // Cloning these guys is unlikely to win
1183 int n_op = n->Opcode();
1184 if (n_op == Op_MergeMem) {
1185 return n;
1186 }
1187 if (n->is_Proj()) {
1188 return n;
1189 }
1190
1191 if (n->isa_FlatArrayCheck()) {
1192 move_flat_array_check_out_of_loop(n);
1193 return n;
1194 }
1195
1196 // Do not clone-up CmpFXXX variations, as these are always
1197 // followed by a CmpI
1198 if (n->is_Cmp()) {
1199 return n;
1200 }
1201 // Attempt to use a conditional move instead of a phi/branch
1202 if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1203 Node *cmov = conditional_move( n );
1204 if (cmov) {
1205 return cmov;
1206 }
1207 }
1208 if (n->is_CFG() || n->is_LoadStore()) {
1209 return n;
1210 }
1211 if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1212 if (!C->major_progress()) { // If chance of no more loop opts...
1213 _igvn._worklist.push(n); // maybe we'll remove them
1214 }
1215 return n;
1455
1456 return true;
1457 }
1458
1459 // Detect if the node is the inner strip-mined loop
1460 // Return: null if it's not the case, or the exit of outer strip-mined loop
1461 static Node* is_inner_of_stripmined_loop(const Node* out) {
1462 Node* out_le = nullptr;
1463
1464 if (out->is_CountedLoopEnd()) {
1465 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1466
1467 if (loop != nullptr && loop->is_strip_mined()) {
1468 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1469 }
1470 }
1471
1472 return out_le;
1473 }
1474
// Try to push a CmpP subtype check through the Phi that merges the paths of a
// flat array check. Returns true if the transformation was attempted (whether
// or not split_if_with_blocks_post ultimately consumed the compare), false if
// the pattern does not match.
bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
  // If the CmpP is a subtype check for a value that has just been
  // loaded from an array, the subtype check guarantees the value
  // can't be stored in a flat array and the load of the value
  // happens with a flat array check then: push the type check
  // through the phi of the flat array check. This needs special
  // logic because the subtype check's input is not a phi but a
  // LoadKlass that must first be cloned through the phi.
  if (n->Opcode() != Op_CmpP) {
    return false;
  }

  // Expected shape: CmpP(LoadKlass/LoadNKlass [possibly behind a DecodeN], constant klass).
  Node* klassptr = n->in(1);
  Node* klasscon = n->in(2);

  if (klassptr->is_DecodeNarrowPtr()) {
    klassptr = klassptr->in(1);
  }

  if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
    return false;
  }

  if (!klasscon->is_Con()) {
    return false;
  }

  Node* addr = klassptr->in(MemNode::Address);

  if (!addr->is_AddP()) {
    return false;
  }

  // Recover the base object the klass is loaded from.
  intptr_t offset;
  Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);

  if (obj == nullptr) {
    return false;
  }

  // TODO 8378077: The code below does not work anymore with off-heap accesses which set their bases to top with
  // JDK-8373343. Also: flat_array_element_type_check() was introduced with JDK-8228622 for a specific check to enable
  // split-if but JDK-8245729 changed how that check looks like. Is it still relevant? This should be revisited.
  if (addr->in(AddPNode::Base)->is_top()) {
    return false;
  }

  // Strip a CastPP so we can see the Phi of the flat array check underneath.
  if (obj->Opcode() == Op_CastPP) {
    obj = obj->in(1);
  }

  if (!obj->is_Phi()) {
    return false;
  }

  Node* region = obj->in(0);

  // Build a Phi of per-path klass loads: for each region input, clone the
  // (CastPP ->) AddP -> LoadKlass (-> DecodeN) chain onto that path.
  Node* phi = PhiNode::make_blank(region, n->in(1));
  for (uint i = 1; i < region->req(); i++) {
    Node* in = obj->in(i);
    Node* ctrl = region->in(i);
    // If the AddP's base is not the Phi itself, a CastPP sits in between
    // (the one stripped above); clone it per path and pin it to this path's
    // control. Note: this condition does not depend on 'i'.
    if (addr->in(AddPNode::Base) != obj) {
      Node* cast = addr->in(AddPNode::Base);
      assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
      Node* cast_clone = cast->clone();
      cast_clone->set_req(0, ctrl);
      cast_clone->set_req(1, in);
      register_new_node(cast_clone, ctrl);
      const Type* tcast = cast_clone->Value(&_igvn);
      _igvn.set_type(cast_clone, tcast);
      cast_clone->as_Type()->set_type(tcast);
      in = cast_clone;
    }
    // Clone the address computation with this path's object as both base and address.
    Node* addr_clone = addr->clone();
    addr_clone->set_req(AddPNode::Base, in);
    addr_clone->set_req(AddPNode::Address, in);
    register_new_node(addr_clone, ctrl);
    _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
    // Clone the klass load; input 2 is its address (matches the
    // MemNode::Address read above).
    Node* klassptr_clone = klassptr->clone();
    klassptr_clone->set_req(2, addr_clone);
    register_new_node(klassptr_clone, ctrl);
    _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
    // If the compare saw a narrow klass (DecodeN was stripped above), clone
    // the decode per path as well so the Phi's inputs match n's input type.
    if (klassptr != n->in(1)) {
      Node* decode = n->in(1);
      assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
      Node* decode_clone = decode->clone();
      decode_clone->set_req(1, klassptr_clone);
      register_new_node(decode_clone, ctrl);
      _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
      klassptr_clone = decode_clone;
    }
    phi->set_req(i, klassptr_clone);
  }
  register_new_node(phi, region);
  // Feed the new Phi into the compare and give split-if a chance to split it.
  Node* orig = n->in(1);
  _igvn.replace_input_of(n, 1, phi);
  split_if_with_blocks_post(n);
  // If the compare survived (split-if did not consume it), roll back to the
  // original input and discard the speculative Phi.
  if (n->outcnt() != 0) {
    _igvn.replace_input_of(n, 1, orig);
    _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Graph);
  }
  return true;
}
1578
1579 //------------------------------split_if_with_blocks_post----------------------
1580 // Do the real work in a non-recursive function. CFG hackery wants to be
1581 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1582 // info.
1583 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1584
1585 if (flat_array_element_type_check(n)) {
1586 return;
1587 }
1588
1589 // Cloning Cmp through Phi's involves the split-if transform.
1590 // FastLock is not used by an If
1591 if (n->is_Cmp() && !n->is_FastLock()) {
1592 Node *n_ctrl = get_ctrl(n);
1593 // Determine if the Node has inputs from some local Phi.
1594 // Returns the block to clone thru.
1595 Node *n_blk = has_local_phi_input(n);
1596 if (n_blk != n_ctrl) {
1597 return;
1598 }
1599
1600 if (!can_split_if(n_ctrl)) {
1601 return;
1602 }
1603
1604 if (n->outcnt() != 1) {
1605 return; // Multiple bool's from 1 compare?
1606 }
1607 Node *bol = n->unique_out();
1608 assert(bol->is_Bool(), "expect a bool here");
1717 // accesses would start to float, since we don't pin at that point.
1718 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1719 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1720 prevdom->in(0)->Opcode() != Op_RangeCheck;
1721 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1722 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1723 return;
1724 }
1725 prevdom = dom;
1726 dom = idom(prevdom);
1727 }
1728 }
1729 }
1730
1731 try_sink_out_of_loop(n);
1732 if (C->failing()) {
1733 return;
1734 }
1735
1736 try_move_store_after_loop(n);
1737
1738 // Remove multiple allocations of the same inline type
1739 if (n->is_InlineType()) {
1740 n->as_InlineType()->remove_redundant_allocations(this);
1741 }
1742 }
1743
1744 // Transform:
1745 //
1746 // if (some_condition) {
1747 // // body 1
1748 // } else {
1749 // // body 2
1750 // }
1751 // if (some_condition) {
1752 // // body 3
1753 // } else {
1754 // // body 4
1755 // }
1756 //
1757 // into:
1758 //
1759 //
1760 // if (some_condition) {
1761 // // body 1
2236 uint i;
2237 for (i = 1; i < phi->req(); i++) {
2238 Node* b = phi->in(i);
2239 if (b->is_Phi()) {
2240 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2241 } else {
2242 assert(b->is_Bool() || b->is_OpaqueConstantBool() || b->is_OpaqueInitializedAssertionPredicate(),
2243 "bool, non-null check with OpaqueConstantBool or Initialized Assertion Predicate with its Opaque node");
2244 }
2245 }
2246 Node* n = phi->in(1);
2247 Node* sample_opaque = nullptr;
2248 Node *sample_bool = nullptr;
2249 if (n->is_OpaqueConstantBool() || n->is_OpaqueInitializedAssertionPredicate()) {
2250 sample_opaque = n;
2251 sample_bool = n->in(1);
2252 assert(sample_bool->is_Bool(), "wrong type");
2253 } else {
2254 sample_bool = n;
2255 }
2256 Node* sample_cmp = sample_bool->in(1);
2257 const Type* t = Type::TOP;
2258 const TypePtr* at = nullptr;
2259 if (sample_cmp->is_FlatArrayCheck()) {
2260 // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
2261 assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2262 t = Type::MEMORY;
2263 at = TypeRawPtr::BOTTOM;
2264 }
2265
2266 // Make Phis to merge the Cmp's inputs.
2267 PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2268 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2269 for (i = 1; i < phi->req(); i++) {
2270 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2271 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2272 phi1->set_req(i, n1);
2273 phi2->set_req(i, n2);
2274 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2275 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2276 }
2277 // See if these Phis have been made before.
2278 // Register with optimizer
2279 Node *hit1 = _igvn.hash_find_insert(phi1);
2280 if (hit1) { // Hit, toss just made Phi
2281 _igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
2282 assert(hit1->is_Phi(), "" );
2283 phi1 = (PhiNode*)hit1; // Use existing phi
2284 } else { // Miss
2285 _igvn.register_new_node_with_optimizer(phi1);
2286 }
2287 Node *hit2 = _igvn.hash_find_insert(phi2);
|