/*
 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/subtypenode.hpp"
#include "opto/superword.hpp"
#include "opto/vectornode.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

//=============================================================================
//------------------------------split_thru_phi---------------------------------
// Split Node 'n' through merge point if there is enough win.
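//
// Illustrative sketch (not actual IR; names are for exposition only). For a
// two-way Region:
//
//   region = Region(p1, p2)             region = Region(p1, p2)
//   phi    = Phi(region, a, b)    ==>   phi'   = Phi(region, AddI(a, c), AddI(b, c))
//   n      = AddI(phi, c)
//
// Each clone is simplified along its own path (constant folding, Identity,
// commoning); such simplifications count as 'wins' toward the policy.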
Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
      (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
    // ConvI2L/ConvL2I may carry type information that is unsafe to push up
    // through the Phi, so disable this for now.
    return nullptr;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
      n->in(1) == region->as_CountedLoop()->phi()) {
    return nullptr;
  }

  // Inline types should not be split through Phis because they cannot be
  // merged through Phi nodes; instead, each value input needs to be merged individually.
  if (n->is_InlineType()) {
    return nullptr;
  }

  if (cannot_split_division(n, region)) {
    return nullptr;
  }

  SplitThruPhiWins wins(region);
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  Node* phi;
  if (t_oop != nullptr && t_oop->is_known_instance_field()) {
    int iid = t_oop->instance_id();
    int index = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new PhiNode(region, type, nullptr, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = nullptr;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path?  Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req( 0, region->in(i) );
      for (uint j = 1; j < n->req(); j++) {
        Node* in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type* t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      wins.add_win(i);
      x = makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node
      // otherwise it will be not updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node* y = x->Identity(&_igvn);
      if (y != x) {
        wins.add_win(i);
        x = y;
      } else {
        y = _igvn.hash_find(x);
        if (y == nullptr) {
          y = similar_subtype_check(x, region->in(i));
        }
        if (y) {
          wins.add_win(i);
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          _igvn._worklist.push(x);
        }
      }
    }

    phi->set_req( i, x );

    if (the_clone == nullptr) {
      continue;
    }

    if (the_clone != x) {
      _igvn.remove_dead_node(the_clone, PhaseIterGVN::NodeOrigin::Speculative);
    } else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
               n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
      // it is not a win if 'x' moved from an outer to an inner loop
      // this edge case can only happen for Load nodes
      wins.reset();
      break;
    }
  }
  // Too few wins?
  if (!wins.profitable(policy)) {
    _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Speculative);
    return nullptr;
  }

  // Record Phi
  register_new_node( phi, region );

  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node *x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use.  We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node *old_ctrl;
    IdealLoopTree *old_loop;

    if (x->is_Con()) {
      assert(get_ctrl(x) == C->root(), "constant control is not root");
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) {     // Found a new, unplaced node?
      old_ctrl = nullptr;
      old_loop = nullptr;            // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree *new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree *use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == nullptr || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x); // Collect body info
    }
  }

  split_thru_phi_yank_old_nodes(n, region);
  _igvn.replace_node(n, phi);

#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print_cr("Split %d %s through %d Phi in %d %s",
                  n->_idx, n->Name(), phi->_idx, region->_idx, region->Name());
  }
#endif // !PRODUCT

  return phi;
}

// If the region is a Loop, we are removing the old n,
// and need to yank it from the _body. If any phi we
// just split through now has no use any more, it also
// has to be removed.
void PhaseIdealLoop::split_thru_phi_yank_old_nodes(Node* n, Node* region) {
  IdealLoopTree* region_loop = get_loop(region);
  if (region->is_Loop() && region_loop->is_innermost()) {
    region_loop->_body.yank(n);
    for (uint j = 1; j < n->req(); j++) {
      PhiNode* phi = n->in(j)->isa_Phi();
      // Check that phi belongs to the region and only has n as a use.
      if (phi != nullptr &&
          phi->in(0) == region &&
          phi->unique_multiple_edges_out_or_null() == n) {
        assert(get_ctrl(phi) == region, "sanity");
        assert(get_ctrl(n) == region, "sanity");
        region_loop->_body.yank(phi);
      }
    }
  }
}

// Test whether node 'x' can move into an inner loop relative to node 'n'.
// Note: The test is not exact. It returns true if 'x' COULD end up in an inner
// loop, but it may also return true when 'x' in fact stays in the outer loop.
bool PhaseIdealLoop::can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x) {
  IdealLoopTree* n_loop_tree = get_loop(n_loop);
  IdealLoopTree* x_loop_tree = get_loop(get_early_ctrl(x));
  // x_loop_tree should be outer or same loop as n_loop_tree
  return !x_loop_tree->is_member(n_loop_tree);
}

// Subtype checks that carry profile data don't common up, so look for a replacement by following edges
Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
  if (x->is_SubTypeCheck()) {
    Node* in1 = x->in(1);
    for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
      Node* u = in1->fast_out(i);
      if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
        for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
          Node* bol = u->fast_out(j);
          for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
            Node* iff = bol->fast_out(k);
            // Only dominating subtype checks are interesting: otherwise we risk replacing a subtype check by another with
            // unrelated profile
            if (iff->is_If() && is_dominator(iff, r_in)) {
              return u;
            }
          }
        }
      }
    }
  }
  return nullptr;
}

// Return true if 'n' is a Div or Mod node (whose zero check If node was removed earlier) with a loop phi divisor
// of a trip-counted (integer or long) loop with a backedge input that could be zero (i.e., its type range includes
// zero). In this case, we cannot split the division to the backedge as it could freely float above the loop exit
// check, resulting in a division by zero. This situation is possible because the type of an increment node of an
// iv phi (trip-counter) could include zero while the iv phi does not (see PhiNode::Value() for trip-counted loops
// where we improve types of iv phis). We also need to check other loop phis as they could have been created in the
// same split-if pass when applying PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
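//
// Illustrative sketch (Java-level, names are for exposition only):
//
//   for (int i = 10; i != 0; i--) {   // iv phi type is [1..10], so 'x / i' needs no zero check
//     r += x / i;                     // but the backedge increment 'i-1' has type [0..9]
//   }
//
// Splitting the division through the iv phi would place a clone on the backedge
// that divides by 'i-1'; it could float above the 'i != 0' exit check and divide
// by zero, so we must refuse to split here.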
bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
  const Type* zero;
  switch (n->Opcode()) {
    case Op_DivI:
    case Op_ModI:
    case Op_UDivI:
    case Op_UModI:
      zero = TypeInt::ZERO;
      break;
    case Op_DivL:
    case Op_ModL:
    case Op_UDivL:
    case Op_UModL:
      zero = TypeLong::ZERO;
      break;
    default:
      return false;
  }

  if (n->in(0) != nullptr) {
    // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
    return true;
  }

  Node* divisor = n->in(2);
  return is_divisor_loop_phi(divisor, region) &&
         loop_phi_backedge_type_contains_zero(divisor, zero);
}

bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) {
  return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop;
}

bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
  return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
}

//------------------------------dominated_by------------------------------------
// Replace the dominated test with an obvious true or false.  Place it on the
// IGVN worklist for later cleanup.  Move control-dependent data Nodes on the
// live path up to the dominating control.
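//
// Illustrative sketch (not actual IR): given a dominating test and an
// identical dominated test,
//
//   if (c) { ... }      // dominating If, reached on the 'prevdom' projection
//   ...
//   if (c) { ... }      // dominated If: its condition is replaced by the
//                       // constant 1 (or 0 when flipped), so it folds away
//
// Data nodes that were control-dependent on the dominated test are rewired
// to the dominating projection so they cannot float above it.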
void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool prevdom_not_imply_this) {
  if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }

  // prevdom is the dominating projection of the dominating test.
  assert(iff->Opcode() == Op_If ||
         iff->Opcode() == Op_CountedLoopEnd ||
         iff->Opcode() == Op_LongCountedLoopEnd ||
         iff->Opcode() == Op_RangeCheck ||
         iff->Opcode() == Op_ParsePredicate,
         "Check this code when new subtype is added");

  int pop = prevdom->Opcode();
  assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
  if (flip) {
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node* con = makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I don't have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop.  In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) {
    return;
  }

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);

  if (dp == nullptr) {
    return;
  }

  rewire_safe_outputs_to_dominator(dp, prevdom, prevdom_not_imply_this);
}

void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool dominator_not_imply_source) {
  IdealLoopTree* old_loop = get_loop(source);

  for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
    Node* out = source->fast_out(i); // Control-dependent node
    if (out->depends_only_on_test()) {
      assert(out->in(0) == source, "must be control dependent on source");
      _igvn.replace_input_of(out, 0, dominator);
      if (dominator_not_imply_source) {
        // Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
        // check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
        // removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
        // in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
        // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
        // dominating check.
        Node* clone = out->pin_node_under_control();
        if (clone != nullptr) {
          clone = _igvn.register_new_node_with_optimizer(clone, out);
          _igvn.replace_node(out, clone);
          out = clone;
        }
      }
      set_early_ctrl(out, false);
      IdealLoopTree* new_loop = get_loop(get_ctrl(out));
      if (old_loop != new_loop) {
        if (!old_loop->_child) {
          old_loop->_body.yank(out);
        }
        if (!new_loop->_child) {
          new_loop->_body.push(out);
        }
      }
      --i;
      --imax;
    }
  }
}

//------------------------------has_local_phi_input----------------------------
// Return TRUE if 'n' has Phi inputs from its local block and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks)
Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
  Node *n_ctrl = get_ctrl(n);
  // See if some inputs come from a Phi in this block, or from before
  // this block.
  uint i;
  for( i = 1; i < n->req(); i++ ) {
    Node *phi = n->in(i);
    if( phi->is_Phi() && phi->in(0) == n_ctrl )
      break;
  }
  if( i >= n->req() )
    return nullptr;                // No Phi inputs; nowhere to clone thru

  // Check for inputs created between 'n' and the Phi input.  These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'.  Since this is unlikely we simply give up.
  for( i = 1; i < n->req(); i++ ) {
    Node *m = n->in(i);
    if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
      // We allow the special case of AddP's with no local inputs.
      // This allows us to split-up address expressions.
      if (m->is_AddP() &&
          get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
          get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
          get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
        // Move the AddP up to the dominating point. That's fine because control of m's inputs
        // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
        Node* c = find_non_split_ctrl(idom(n_ctrl));
        if (c->is_OuterStripMinedLoop()) {
          c->as_Loop()->verify_strip_mined(1);
          c = c->in(LoopNode::EntryControl);
        }
        set_ctrl_and_loop(m, c);
        continue;
      }
      return nullptr;
    }
    assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
  }

  return n_ctrl;
}

// Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
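//
// Illustrative sketch (not actual IR): with I loop-invariant and V
// loop-varying,
//
//   ((V + I) << 2)   ==>   (V << 2) + (I << 2)
//
// The invariant shift (I << 2) can then be hoisted out of the loop, and the
// varying shift often folds into the addressing mode of a memory access.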
Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
  assert(bt == T_INT || bt == T_LONG, "only for integers");
  int n_op = n->Opcode();

  if (n_op == Op_LShift(bt)) {
    // Scale is loop invariant
    Node* scale = n->in(2);
    Node* scale_ctrl = get_ctrl(scale);
    IdealLoopTree* scale_loop = get_loop(scale_ctrl);
    if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
      return nullptr;
    }
    const TypeInt* scale_t = scale->bottom_type()->isa_int();
    if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
      return nullptr;             // Don't bother with byte/short masking
    }
    // Add must vary with loop (else shift would be loop-invariant)
    Node* add = n->in(1);
    Node* add_ctrl = get_ctrl(add);
    IdealLoopTree* add_loop = get_loop(add_ctrl);
    if (n_loop != add_loop) {
      return nullptr;             // happens w/ evil ZKM loops
    }

    // Convert I-V into I+ (0-V); same for V-I
    if (add->Opcode() == Op_Sub(bt) &&
        _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
      assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
      Node* zero = integercon(0, bt);
      Node* neg = SubNode::make(zero, add->in(2), bt);
      register_new_node_with_ctrl_of(neg, add->in(2));
      add = AddNode::make(add->in(1), neg, bt);
      register_new_node(add, add_ctrl);
    }
    if (add->Opcode() != Op_Add(bt)) return nullptr;
    assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
    // See if one add input is loop invariant
    Node* add_var = add->in(1);
    Node* add_var_ctrl = get_ctrl(add_var);
    IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
    Node* add_invar = add->in(2);
    Node* add_invar_ctrl = get_ctrl(add_invar);
    IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
    if (add_invar_loop == n_loop) {
      // Swap to find the invariant part
      add_invar = add_var;
      add_invar_ctrl = add_var_ctrl;
      add_invar_loop = add_var_loop;
      add_var = add->in(2);
    } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
      return nullptr;
    }
    if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
      return nullptr;             // No invariant part of the add?
    }

    // Yes!  Reshape address expression!
    Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
    Node* inv_scale_ctrl =
            dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
            add_invar_ctrl : scale_ctrl;
    register_new_node(inv_scale, inv_scale_ctrl);
    Node* var_scale = LShiftNode::make(add_var, scale, bt);
    register_new_node(var_scale, n_ctrl);
    Node* var_add = AddNode::make(var_scale, inv_scale, bt);
    register_new_node(var_add, n_ctrl);
    _igvn.replace_node(n, var_add);
    return var_add;
  }
  return nullptr;
}

//------------------------------remix_address_expressions----------------------
// Rework addressing expressions to get the most loop-invariant stuff
// moved out.  We'd like to do all associative operators, but it's especially
// important (common) to do address expressions.
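//
// Illustrative sketch (not actual IR): with I1/I2 loop-invariant and V
// loop-varying, the AddP reshapes below turn
//
//   ((I1 +p V) +p I2)   and   (I1 +p (I2 + V))
//
// into ((I1 +p I2) +p V), so the invariant part (I1 +p I2) can be computed
// once in the loop preheader.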
Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
  if (!has_ctrl(n)) return nullptr;
  Node* n_ctrl = get_ctrl(n);
  IdealLoopTree* n_loop = get_loop(n_ctrl);

  // See if 'n' mixes loop-varying and loop-invariant inputs and
  // itself is loop-varying.

  // Only interested in binary ops (and AddP)
  if (n->req() < 3 || n->req() > 4) return nullptr;

  Node* n1_ctrl = get_ctrl(n->in( 1));
  Node* n2_ctrl = get_ctrl(n->in( 2));
  Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
  IdealLoopTree* n1_loop = get_loop(n1_ctrl);
  IdealLoopTree* n2_loop = get_loop(n2_ctrl);
  IdealLoopTree* n3_loop = get_loop(n3_ctrl);

  // Does one of my inputs spin in a tighter loop than self?
  if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
      (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
      (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
    return nullptr;                // Leave well enough alone
  }

  // Is at least one of my inputs loop-invariant?
  if (n1_loop == n_loop &&
      n2_loop == n_loop &&
      n3_loop == n_loop) {
    return nullptr;                // No loop-invariant inputs
  }

  Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
  if (res != nullptr) {
    return res;
  }
  res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
  if (res != nullptr) {
    return res;
  }

  int n_op = n->Opcode();
  // Replace (I+V) with (V+I)
  if (n_op == Op_AddI ||
      n_op == Op_AddL ||
      n_op == Op_AddF ||
      n_op == Op_AddD ||
      n_op == Op_MulI ||
      n_op == Op_MulL ||
      n_op == Op_MulF ||
      n_op == Op_MulD) {
    if (n2_loop == n_loop) {
      assert(n1_loop != n_loop, "");
      n->swap_edges(1, 2);
    }
  }

  // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
  // but not if I2 is a constant. Skip for irreducible loops.
  if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
    if (n2_loop == n_loop && n3_loop != n_loop) {
      if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
        Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
        Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
        IdealLoopTree* n22loop = get_loop(n22_ctrl);
        IdealLoopTree* n23_loop = get_loop(n23_ctrl);
        if (n22loop != n_loop && n22loop->is_member(n_loop) &&
            n23_loop == n_loop) {
          Node* add1 = AddPNode::make_with_base(n->in(1), n->in(2)->in(2), n->in(3));
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = AddPNode::make_with_base(n->in(1), add1, n->in(2)->in(3));
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }

    // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
    if (n2_loop != n_loop && n3_loop == n_loop) {
      if (n->in(3)->Opcode() == Op_AddX) {
        Node* V = n->in(3)->in(1);
        Node* I = n->in(3)->in(2);
        if (!ctrl_is_member(n_loop, V)) {
          // Ensure V is the loop-varying input and I the invariant one
          Node* tmp = V; V = I; I = tmp;
        }
        if (!ctrl_is_member(n_loop, I)) {
          Node* add1 = AddPNode::make_with_base(n->in(1), n->in(2), I);
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = AddPNode::make_with_base(n->in(1), add1, V);
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }
  }

  return nullptr;
}

// Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
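//
// Illustrative sketch (Java-level, names are for exposition only): the shape
// recognized here is the inner step of a short-typed dot product,
//
//   for (int i = 0; i < N / 2; i++) {
//     sum += in1[2*i] * in2[2*i] + in1[2*i+1] * in2[2*i+1];
//   }
//
// which is rewritten to a MulAddS2I node so SuperWord can later vectorize it
// with MulAddVS2VI.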
Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
  assert(n->Opcode() == Op_AddI, "sanity");
  Node * nn = nullptr;
  Node * in1 = n->in(1);
  Node * in2 = n->in(2);
  if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
    IdealLoopTree* loop_n = get_loop(get_ctrl(n));
    if (loop_n->is_counted() &&
        loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
        Matcher::match_rule_supported(Op_MulAddVS2VI) &&
        Matcher::match_rule_supported(Op_MulAddS2I)) {
      Node* mul_in1 = in1->in(1);
      Node* mul_in2 = in1->in(2);
      Node* mul_in3 = in2->in(1);
      Node* mul_in4 = in2->in(2);
      if (mul_in1->Opcode() == Op_LoadS &&
          mul_in2->Opcode() == Op_LoadS &&
          mul_in3->Opcode() == Op_LoadS &&
          mul_in4->Opcode() == Op_LoadS) {
        IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
        IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
        IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
        IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
        IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
        IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
        // All nodes should be in the same counted loop.
        if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
            loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
          Node* adr1 = mul_in1->in(MemNode::Address);
          Node* adr2 = mul_in2->in(MemNode::Address);
          Node* adr3 = mul_in3->in(MemNode::Address);
          Node* adr4 = mul_in4->in(MemNode::Address);
          if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
            if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
                (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
              register_new_node_with_ctrl_of(nn, n);
              _igvn.replace_node(n, nn);
              return nn;
            } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
                       (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
              nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
              register_new_node_with_ctrl_of(nn, n);
              _igvn.replace_node(n, nn);
              return nn;
            }
          }
        }
      }
    }
  }
  return nn;
}

//------------------------------conditional_move-------------------------------
// Attempt to replace a Phi with a conditional move.  We have some pretty
// strict profitability requirements.  All Phis at the merge point must
// be converted, so we can remove the control flow.  We need to limit the
// number of c-moves to a small handful.  All code that was in the side-arms
// of the CFG diamond is now speculatively executed.  This code has to be
// "cheap enough".  We are pretty much limited to CFG diamonds that merge
// 1 or 2 items with a total of 1 or 2 ops executed speculatively.
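//
// Illustrative sketch (Java-level):
//
//   if (c) { x = a; } else { x = b; }   // diamond with a Phi merging a and b
//
// becomes
//
//   x = c ? a : b;                      // CMove: both arms computed, one selected
//
// Both 'a' and 'b' are then executed speculatively, which is why the arms
// must be cheap and the branch must not be highly predictable.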
Node *PhaseIdealLoop::conditional_move( Node *region ) {

  assert(region->is_Region(), "sanity check");
  if (region->req() != 3) return nullptr;

  // Check for CFG diamond
  Node *lp = region->in(1);
  Node *rp = region->in(2);
  if (!lp || !rp) return nullptr;
  Node *lp_c = lp->in(0);
  if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
  IfNode *iff = lp_c->as_If();

  // Check for ops pinned in an arm of the diamond.
  // Can't remove the control flow in this case
  if (lp->outcnt() > 1) return nullptr;
  if (rp->outcnt() > 1) return nullptr;

  IdealLoopTree* r_loop = get_loop(region);
  assert(r_loop == get_loop(iff), "sanity");
  // Always convert to CMOVE if all results are used only outside this loop.
  bool used_inside_loop = (r_loop == _ltree_root);

  // Check profitability
  int cost = 0;
  int phis = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node *out = region->fast_out(i);
    if (!out->is_Phi()) continue; // Ignore other control edges, etc
    phis++;
    PhiNode* phi = out->as_Phi();
    BasicType bt = phi->type()->basic_type();
    switch (bt) {
      case T_DOUBLE:
      case T_FLOAT:
        if (C->use_cmove()) {
          continue; // TODO: maybe we want to add some cost
        }
        cost += Matcher::float_cmove_cost(); // Could be very expensive
        break;
      case T_LONG: {
        cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
      } // fall through: a long CMOV also pays the integer cost added below
      case T_INT:                 // These all CMOV fine
      case T_ADDRESS: {           // (RawPtr)
        cost++;
        break;
      }
      case T_NARROWOOP: // Fall through
      case T_OBJECT: {            // Base oops are OK, but not derived oops
        const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
        // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
        // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
        // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
        // have a Phi for the base here that we convert to a CMOVE all is well
        // and good.  But if the base is dead, we'll not make a CMOVE.  Later
        // the allocator will have to produce a base by creating a CMOVE of the
        // relevant bases.  This puts the allocator in the business of
        // manufacturing expensive instructions, generally a bad plan.
        // Just Say No to Conditionally-Moved Derived Pointers.
        if (tp && tp->offset() != 0)
          return nullptr;
        cost++;
        break;
      }
      default:
        return nullptr;           // In particular, can't do memory or I/O
    }
    // Add in cost any speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node *proj = region->in(j);
      Node *inp = phi->in(j);
      if (inp->isa_InlineType()) {
        // TODO 8302217 This prevents PhiNode::push_inline_types_through
        return nullptr;
      }
      if (get_ctrl(inp) == proj) { // Found local op
        cost++;
        // Check for a chain of dependent ops; these will all become
        // speculative in a CMOV.
        for (uint k = 1; k < inp->req(); k++)
          if (get_ctrl(inp->in(k)) == proj)
            cost += ConditionalMoveLimit; // Too much speculative goo
      }
    }
    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
    // This will likely Split-If, a higher-payoff operation.
    for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
      Node* use = phi->fast_out(k);
      if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
        cost += ConditionalMoveLimit;
      // Is there a use inside the loop?
      // Note: check only basic types since CMoveP is pinned.
      if (!used_inside_loop && is_java_primitive(bt)) {
        IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
        if (r_loop == u_loop || r_loop->is_member(u_loop)) {
          used_inside_loop = true;
        }
      }
    }
  } // for
  Node* bol = iff->in(1);
  assert(!bol->is_OpaqueInitializedAssertionPredicate(), "Initialized Assertion Predicates cannot form a diamond with Halt");
  if (bol->is_OpaqueTemplateAssertionPredicate()) {
    // Ignore Template Assertion Predicates with OpaqueTemplateAssertionPredicate nodes.
    return nullptr;
  }
  if (bol->is_OpaqueMultiversioning()) {
    assert(bol->as_OpaqueMultiversioning()->is_useless(), "Must be useless, i.e. fast main loop has already disappeared.");
    // Ignore multiversion_if that just lost its loops. The OpaqueMultiversioning is marked useless,
    // and will make the multiversion_if constant fold in the next IGVN round.
    return nullptr;
  }
  if (!bol->is_Bool()) {
    assert(false, "Expected Bool, but got %s", NodeClassNames[bol->Opcode()]);
    return nullptr;
  }
  int cmp_op = bol->in(1)->Opcode();
  if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
    return nullptr;
  }
  // It is expensive to generate flags from a float compare.
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;

  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo

    // The BlockLayoutByFrequency optimization moves the infrequent branch
    // off the hot path. No point in CMOV'ing in that case (110 is used
    // instead of 100 to account for the inexactness of the float value).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch.  No point in CMOV'ing if
  // we are going to predict accurately all the time.
  if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
    // keep going
  } else if (iff->_prob < infrequent_prob ||
             iff->_prob > (1.0f - infrequent_prob))
    return nullptr;

  // --------------
  // Now replace all Phis with CMOV's
  Node *cmov_ctrl = iff->in(0);
  uint flip = (lp->Opcode() == Op_IfTrue);
  Node_List wq;
  while (1) {
    PhiNode* phi = nullptr;
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node *out = region->fast_out(i);
      if (out->is_Phi()) {
        phi = out->as_Phi();
        break;
      }
    }
    if (phi == nullptr || _igvn.type(phi) == Type::TOP || !CMoveNode::supported(_igvn.type(phi))) {
      break;
    }
    // Move speculative ops
    wq.push(phi);
    while (wq.size() > 0) {
      Node *n = wq.pop();
      for (uint j = 1; j < n->req(); j++) {
        Node* m = n->in(j);
        if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
          set_ctrl(m, cmov_ctrl);
          wq.push(m);
        }
      }
    }
    Node* cmov = CMoveNode::make(iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
    register_new_node(cmov, cmov_ctrl);
    _igvn.replace_node(phi, cmov);
#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("CMOV ");
      r_loop->dump_head();
      if (Verbose) {
        bol->in(1)->dump(1);
        cmov->dump(1);
      }
    }
    DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
#endif
  }

  // The useless CFG diamond will fold up later; see the optimization in
  // RegionNode::Ideal.
  _igvn._worklist.push(region);

  return iff->in(1);
}

static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* u = m->fast_out(i);
    if (u->is_CFG()) {
      if (u->is_NeverBranch()) {
        u = u->as_NeverBranch()->proj_out(0);
        enqueue_cfg_uses(u, wq);
      } else {
        wq.push(u);
      }
    }
  }
}

// Try moving a store out of a loop, right before the loop
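//
// Illustrative sketch (Java-level, 'field' and 'v' are for exposition only):
//
//   for (int i = 0; i < N; i++) {
//     field = v;        // invariant address and value, first node in the body,
//     ...               // only store on its slice, memory Phi unobserved
//   }
//
// The store writes the same value every iteration, so it can be performed
// once before the loop instead.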
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
  // Store has to be first in the loop body
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  if (n->is_Store() && n_loop != _ltree_root &&
      n_loop->is_loop() && n_loop->_head->is_Loop() &&
      n->in(0) != nullptr) {
    Node* address = n->in(MemNode::Address);
    Node* value = n->in(MemNode::ValueIn);
    Node* mem = n->in(MemNode::Memory);

    // - address and value must be loop invariant
    // - memory must be a memory Phi for the loop
    // - Store must be the only store on this memory slice in the
    //   loop: if there's another store following this one then the value
    //   written at iteration i by the second store could be overwritten
    //   at iteration i+n by the first store: it's not safe to move the
    //   first store out of the loop
    // - nothing must observe the memory Phi: it guarantees no read
    //   before the store, and we are also guaranteed the store post
    //   dominates the loop head (ignoring a possible early
    //   exit). Otherwise there would be an extra Phi involved between the
    //   loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    //   (such an exit most of the time would be an extra use of the
    //   memory Phi but sometimes is a bottom memory Phi that takes the
    //   store as input).

    if (!ctrl_is_member(n_loop, address) &&
        !ctrl_is_member(n_loop, value) &&
        mem->is_Phi() && mem->in(0) == n_loop->_head &&
        mem->outcnt() == 1 &&
        mem->in(LoopNode::LoopBackControl) == n) {

      assert(n_loop->_tail != nullptr, "need a tail");
      assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

      // Verify that there's no early exit of the loop before the store.
      bool ctrl_ok = false;
      {
        // Follow control from loop head until n, we exit the loop or
        // we reach the tail
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(n_loop->_head);

        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          if (m == n->in(0)) {
            ctrl_ok = true;
            continue;
          }
          assert(!has_ctrl(m), "should be CFG");
          if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
            ctrl_ok = false;
            break;
          }
          enqueue_cfg_uses(m, wq);
          if (wq.size() > 10) {
            ctrl_ok = false;
            break;
          }
        }
      }
      if (ctrl_ok) {
        // move the Store
        _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
        _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
        _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
        // Disconnect the phi now. An empty phi can confuse other
        // optimizations in this pass of loop opts.
        _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
        n_loop->_body.yank(mem);

        set_ctrl_and_loop(n, n->in(0));

        return n;
      }
    }
  }
  return nullptr;
}

// Try moving a store out of a loop, right after the loop
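//
// Illustrative sketch (Java-level, 'field' and 'f' are for exposition only):
//
//   for (int i = 0; i < N; i++) {
//     ...
//     field = f(i);     // invariant address, last store on its slice,
//   }                   // nothing in the loop observes it
//
// Only the value stored by the final iteration is visible after the loop,
// so the store can be sunk below the loop exit.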
void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
  if (n->is_Store() && n->in(0) != nullptr) {
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    // Store must be in a loop
    if (n_loop != _ltree_root && !n_loop->_irreducible) {
      Node* address = n->in(MemNode::Address);
      Node* value = n->in(MemNode::ValueIn);
      // address must be loop invariant
      if (!ctrl_is_member(n_loop, address)) {
        // Store must be last on this memory slice in the loop and
        // nothing in the loop must observe it
        Node* phi = nullptr;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (has_ctrl(u)) { // control use?
            if (!ctrl_is_member(n_loop, u)) {
              continue;
            }
            if (u->is_Phi() && u->in(0) == n_loop->_head) {
              assert(_igvn.type(u) == Type::MEMORY, "bad phi");
              // multiple phis on the same slice are possible
              if (phi != nullptr) {
                return;
              }
              phi = u;
              continue;
            }
          }
          return;
        }
        if (phi != nullptr) {
          // Nothing in the loop before the store (next iteration)
          // must observe the stored value
          bool mem_ok = true;
          {
            ResourceMark rm;
            Unique_Node_List wq;
            wq.push(phi);
            for (uint next = 0; next < wq.size() && mem_ok; ++next) {
              Node *m = wq.at(next);
              for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
                Node* u = m->fast_out(i);
                if (u->is_Store() || u->is_Phi()) {
                  if (u != n) {
                    wq.push(u);
                    mem_ok = (wq.size() <= 10);
                  }
                } else {
                  mem_ok = false;
                  break;
                }
              }
            }
          }
          if (mem_ok) {
            // Move the store out of the loop if the LCA of all
            // users (except for the phi) is outside the loop.
            Node* hook = new Node(1);
            hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
            _igvn.rehash_node_delayed(phi);
            int count = phi->replace_edge(n, hook, &_igvn);
            assert(count > 0, "inconsistent phi");

            // Compute latest point this store can go
            Node* lca = get_late_ctrl(n, get_ctrl(n));
            if (lca->is_OuterStripMinedLoop()) {
              lca = lca->in(LoopNode::EntryControl);
            }
            if (n_loop->is_member(get_loop(lca))) {
              // LCA is in the loop - bail out
              _igvn.replace_node(hook, n);
              return;
            }
#ifdef ASSERT
            if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
              assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
              n_loop->_head->as_Loop()->verify_strip_mined(1);
              Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
              IdealLoopTree* outer_loop = get_loop(outer);
              assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consumes all memory state");
            }
#endif
            lca = place_outside_loop(lca, n_loop);
            assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
            assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");

            // Move store out of the loop
            _igvn.replace_node(hook, n->in(MemNode::Memory));
            _igvn.replace_input_of(n, 0, lca);
            set_ctrl_and_loop(n, lca);

            // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts.
            if (phi->in(LoopNode::LoopBackControl) == phi) {
              _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
              n_loop->_body.yank(phi);
            }
          }
        }
      }
    }
  }
}

// We can't use immutable memory for the flat array check because we are loading the mark word, which is
// mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
// we need to use raw memory to not break anti-dependency analysis. The code below still attempts to move
// flat array checks out of loops, mainly to enable loop unswitching.
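//
// Illustrative sketch (not actual IR): for a loop-invariant array, the check's
// raw-memory input is walked up through Phis, MergeMems, membars and stores
// until it leaves the loop,
//
//   FlatArrayCheck(mem_in_loop, array)  ==>  FlatArrayCheck(mem_before_loop, array)
//
// which is safe because the mark word bits being tested are immutable and so
// are unaffected by the memory states that get skipped.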
void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
  // Skip checks for more than one array
  if (n->req() > 3) {
    return;
  }
  Node* mem = n->in(FlatArrayCheckNode::Memory);
  Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
  IdealLoopTree* check_loop = get_loop(get_ctrl(n));
  IdealLoopTree* ary_loop = get_loop(get_ctrl(array));

  // Check if array is loop invariant
  if (!check_loop->is_member(ary_loop)) {
    // Walk up memory graph from the check until we leave the loop
    VectorSet wq;
    wq.set(mem->_idx);
    while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
      if (mem->is_Phi()) {
        mem = mem->in(1);
      } else if (mem->is_MergeMem()) {
        mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
      } else if (mem->is_Proj()) {
        mem = mem->in(0);
      } else if (mem->is_MemBar() || mem->is_SafePoint()) {
        mem = mem->in(TypeFunc::Memory);
      } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
        mem = mem->in(MemNode::Memory);
      } else {
#ifdef ASSERT
        mem->dump();
#endif
        ShouldNotReachHere();
      }
      if (wq.test_set(mem->_idx)) {
        return;
      }
    }
    // Replace memory input and re-compute ctrl to move the check out of the loop
    _igvn.replace_input_of(n, 1, mem);
    set_ctrl_and_loop(n, get_early_ctrl(n));
    Node* bol = n->unique_out();
    set_ctrl_and_loop(bol, get_early_ctrl(bol));
  }
}

//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function.  Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if (n_op == Op_MergeMem) {
    return n;
  }
  if (n->is_Proj()) {
    return n;
  }

  if (n->isa_FlatArrayCheck()) {
    move_flat_array_check_out_of_loop(n);
    return n;
  }

  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if (n->is_Cmp()) {
    return n;
  }
  // Attempt to use a conditional move instead of a phi/branch
  if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
    Node *cmov = conditional_move( n );
    if (cmov) {
      return cmov;
    }
  }
  if (n->is_CFG() || n->is_LoadStore()) {
    return n;
  }
  if (n->is_Opaque1()) {          // Opaque nodes cannot be mod'd
    if (!C->major_progress()) {   // If chance of no more loop opts...
      _igvn._worklist.push(n);    // maybe we'll remove them
    }
    return n;
  }

  if (n->is_Con()) {
    return n;   // No cloning for Con nodes
  }

  Node *n_ctrl = get_ctrl(n);
  if (!n_ctrl) {
    return n;  // Dead node
  }

  Node* res = try_move_store_before_loop(n, n_ctrl);
  if (res != nullptr) {
    return n;
  }

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  if (n_op == Op_AddI) {
    Node *nn = convert_add_to_muladd( n );
    if ( nn ) return nn;
  }

  if (n->is_ConstraintCast() && n->as_ConstraintCast()->dependency().narrows_type()) {
    Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
    // ConstraintCastNode::dominating_cast() uses the node's control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations that happened in between), thus an additional dominance check is needed
    // to keep loop info valid.
    if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
      _igvn.replace_node(n, dom_cast);
      return dom_cast;
    }
  }

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;

  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
      (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
    return n;
  }
  // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
  if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
      n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
    return n;
  }

  // Check for having no control input; not pinned.  Allow
  // dominating control.
  if (n->in(0)) {
    Node *dom = idom(n_blk);
    if (dom_lca(n->in(0), dom) != n->in(0)) {
      return n;
    }
  }
  // Policy: when is it profitable.  You must get more wins than
  // policy before it is considered profitable.  Policy is usually 0,
  // so 1 win is considered profitable.  Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
  if (n_blk->is_BaseCountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  if (must_throttle_split_if()) return n;

  // Split 'n' through the merge point if it is profitable, replacing it with a new phi.
  Node* phi = split_thru_phi(n, n_blk, policy);
  if (phi == nullptr) { return n; }

  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  return phi;
}

static bool merge_point_too_heavy(Compile* C, Node* region) {
  // Bail out if the region and its phis have too many users.
  int weight = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    weight += region->fast_out(i)->outcnt();
  }
  int nodes_left = C->max_node_limit() - C->live_nodes();
  if (weight * 8 > nodes_left) {
    if (PrintOpto) {
      tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
    }
    return true;
  } else {
    return false;
  }
}

static bool merge_point_safe(Node* region) {
  // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
  // having a PhiNode input. This sidesteps the dangerous case where the split
  // ConvI2LNode may become TOP if the input Value() does not
  // overlap the ConvI2L range, leaving a node which may not dominate its
  // uses.
  // A better fix for this problem can be found in the BugTraq entry, but
  // expediency for Mantis demands this hack.
#ifdef _LP64
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* n = region->fast_out(i);
    if (n->is_Phi()) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* m = n->fast_out(j);
        if (m->Opcode() == Op_ConvI2L)
          return false;
        if (m->is_CastII()) {
          return false;
        }
      }
    }
  }
#endif
  return true;
}


//------------------------------place_outside_loop---------------------------------
// Place some computation outside of this loop on the path to the use passed as argument
Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
  Node* head = loop->_head;
  assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
  if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
    loop = loop->_parent;
    assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
  }

  // Pick control right outside the loop
  for (;;) {
    Node* dom = idom(useblock);
    if (loop->is_member(get_loop(dom))) {
      break;
    }
    useblock = dom;
  }
  assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
  return useblock;
}


bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
  if (!n->is_If()) {
    return false;
  }
  if (n->outcnt() != n->as_If()->required_outcnt()) {
    assert(false, "malformed IfNode with %d outputs", n->outcnt());
    return false;
  }
  if (n->is_BaseCountedLoopEnd()) {
    return false;
  }
  if (!n->in(0)->is_Region()) {
    return false;
  }

  Node* region = n->in(0);
  Node* dom = idom(region);
  if (!dom->is_If() || !n->as_If()->same_condition(dom, &_igvn)) {
    return false;
  }
  IfNode* dom_if = dom->as_If();
  IfTrueNode* proj_true = dom_if->true_proj();
  IfFalseNode* proj_false = dom_if->false_proj();

  for (uint i = 1; i < region->req(); i++) {
    if (is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}


bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
  if (must_throttle_split_if()) {
    return false;
  }

  // Do not do 'split-if' if irreducible loops are present.
  if (_has_irreducible_loops) {
    return false;
  }

  if (merge_point_too_heavy(C, n_ctrl)) {
    return false;
  }

  // Do not do 'split-if' if some paths are dead.  First do dead code
  // elimination and then see if it's still profitable.
  for (uint i = 1; i < n_ctrl->req(); i++) {
    if (n_ctrl->in(i) == C->top()) {
      return false;
    }
  }

  // If trying to do a 'Split-If' at the loop head, it is only
  // profitable if the cmp folds up on BOTH paths.  Otherwise we
  // risk peeling a loop forever.

  // CNC - Disabled for now.  Requires careful handling of loop
  // body selection for the cloned code.  Also, make sure we check
  // for any input path not being in the same loop as n_ctrl.  For
  // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
  // because the alternative loop entry points won't be converted
  // into LoopNodes.
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  for (uint j = 1; j < n_ctrl->req(); j++) {
    if (get_loop(n_ctrl->in(j)) != n_loop) {
      return false;
    }
  }

  // Check for safety of the merge point.
  if (!merge_point_safe(n_ctrl)) {
    return false;
  }

  return true;
}

// Detect if the node is the inner strip-mined loop
// Returns null if that is not the case, otherwise the exit of the outer strip-mined loop
static Node* is_inner_of_stripmined_loop(const Node* out) {
  Node* out_le = nullptr;

  if (out->is_CountedLoopEnd()) {
    const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();

    if (loop != nullptr && loop->is_strip_mined()) {
      out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
    }
  }

  return out_le;
}

bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
  // If the CmpP is a subtype check for a value that has just been
  // loaded from an array, the subtype check guarantees that the value
  // can't be stored in a flat array, and the load of the value
  // happens with a flat array check, then push the type check
  // through the phi of the flat array check. This needs special
  // logic because the subtype check's input is not a phi but a
  // LoadKlass that must first be cloned through the phi.
  if (n->Opcode() != Op_CmpP) {
    return false;
  }

  Node* klassptr = n->in(1);
  Node* klasscon = n->in(2);

  if (klassptr->is_DecodeNarrowPtr()) {
    klassptr = klassptr->in(1);
  }

  if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
    return false;
  }

  if (!klasscon->is_Con()) {
    return false;
  }

  Node* addr = klassptr->in(MemNode::Address);

  if (!addr->is_AddP()) {
    return false;
  }

  intptr_t offset;
  Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);

  if (obj == nullptr) {
    return false;
  }

  // TODO 8378077: The code below no longer works with off-heap accesses, which set their bases to top with
  // JDK-8373343. Also: flat_array_element_type_check() was introduced with JDK-8228622 for a specific check to enable
  // split-if, but JDK-8245729 changed what that check looks like. Is it still relevant? This should be revisited.
  if (addr->in(AddPNode::Base)->is_top()) {
    return false;
  }

  if (obj->Opcode() == Op_CastPP) {
    obj = obj->in(1);
  }

  if (!obj->is_Phi()) {
    return false;
  }

  Node* region = obj->in(0);

  Node* phi = PhiNode::make_blank(region, n->in(1));
  for (uint i = 1; i < region->req(); i++) {
    Node* in = obj->in(i);
    Node* ctrl = region->in(i);
    if (addr->in(AddPNode::Base) != obj) {
      Node* cast = addr->in(AddPNode::Base);
      assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
      Node* cast_clone = cast->clone();
      cast_clone->set_req(0, ctrl);
      cast_clone->set_req(1, in);
      register_new_node(cast_clone, ctrl);
      const Type* tcast = cast_clone->Value(&_igvn);
      _igvn.set_type(cast_clone, tcast);
      cast_clone->as_Type()->set_type(tcast);
      in = cast_clone;
    }
    Node* addr_clone = addr->clone();
    addr_clone->set_req(AddPNode::Base, in);
    addr_clone->set_req(AddPNode::Address, in);
    register_new_node(addr_clone, ctrl);
    _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
    Node* klassptr_clone = klassptr->clone();
    klassptr_clone->set_req(2, addr_clone);
    register_new_node(klassptr_clone, ctrl);
    _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
    if (klassptr != n->in(1)) {
      Node* decode = n->in(1);
      assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
      Node* decode_clone = decode->clone();
      decode_clone->set_req(1, klassptr_clone);
      register_new_node(decode_clone, ctrl);
      _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
      klassptr_clone = decode_clone;
    }
    phi->set_req(i, klassptr_clone);
  }
  register_new_node(phi, region);
  Node* orig = n->in(1);
  _igvn.replace_input_of(n, 1, phi);
  split_if_with_blocks_post(n);
  if (n->outcnt() != 0) {
    _igvn.replace_input_of(n, 1, orig);
    _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Graph);
  }
  return true;
}

//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function.  CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
1583 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1584
1585 if (flat_array_element_type_check(n)) {
1586 return;
1587 }
1588
1589 // Cloning Cmp through Phi's involves the split-if transform.
1590 // FastLock is not used by an If
1591 if (n->is_Cmp() && !n->is_FastLock()) {
1592 Node *n_ctrl = get_ctrl(n);
1593 // Determine if the Node has inputs from some local Phi.
1594 // Returns the block to clone thru.
1595 Node *n_blk = has_local_phi_input(n);
1596 if (n_blk != n_ctrl) {
1597 return;
1598 }
1599
1600 if (!can_split_if(n_ctrl)) {
1601 return;
1602 }
1603
    if (n->outcnt() != 1) {
      return; // Multiple Bools from 1 compare?
    }
    Node* bol = n->unique_out();
    assert(bol->is_Bool(), "expect a bool here");
    if (bol->outcnt() != 1) {
      return; // Multiple branches from 1 compare?
1611 }
1612 Node *iff = bol->unique_out();
1613
1614 // Check some safety conditions
1615 if (iff->is_If()) { // Classic split-if?
1616 if (iff->outcnt() != iff->as_If()->required_outcnt()) {
1617 assert(false, "malformed IfNode with %d outputs", iff->outcnt());
1618 return;
1619 } else if (iff->in(0) != n_ctrl) {
1620 return; // Compare must be in same blk as if
1621 }
1622 } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1623 // Can't split CMove with different control.
1624 if (get_ctrl(iff) != n_ctrl) {
1625 return;
1626 }
1627 if (get_ctrl(iff->in(2)) == n_ctrl ||
1628 get_ctrl(iff->in(3)) == n_ctrl) {
1629 return; // Inputs not yet split-up
1630 }
1631 if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1632 return; // Loop-invar test gates loop-varying CMOVE
1633 }
1634 } else {
1635 return; // some other kind of node, such as an Allocate
1636 }
1637
    // When is split-if profitable? Every 'win' means some control flow
    // goes dead, so it's almost always a win.
1640 int policy = 0;
1641 // Split compare 'n' through the merge point if it is profitable
    Node* phi = split_thru_phi(n, n_ctrl, policy);
1643 if (!phi) {
1644 return;
1645 }
1646
1647 // Now split the bool up thru the phi
1648 Node* bolphi = split_thru_phi(bol, n_ctrl, -1);
1649 guarantee(bolphi != nullptr, "null boolean phi node");
1650 assert(iff->in(1) == bolphi, "");
1651
1652 if (bolphi->Value(&_igvn)->singleton()) {
1653 return;
1654 }
1655
    // Conditional-move? Must split up now
    if (!iff->is_If()) {
      Node* cmovphi = split_thru_phi(iff, n_ctrl, -1);
      _igvn.replace_node(iff, cmovphi);
      return;
    }
1661
1662 // Now split the IF
1663 C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff);
1664 #ifndef PRODUCT
1665 if (TraceLoopOpts || TraceSplitIf) {
1666 tty->print_cr("Split-If: %d %s", iff->_idx, iff->Name());
1667 }
1668 #endif
1669 do_split_if(iff);
1670 C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff);
1671 return;
1672 }
1673
1674 // Two identical ifs back to back can be merged
1675 if (try_merge_identical_ifs(n)) {
1676 return;
1677 }
1678
1679 // Check for an IF ready to split; one that has its
1680 // condition codes input coming from a Phi at the block start.
1681 int n_op = n->Opcode();
1682
  // Check for an IF being dominated by another IF with the same test
1684 if (n_op == Op_If ||
1685 n_op == Op_RangeCheck) {
1686 Node *bol = n->in(1);
1687 uint max = bol->outcnt();
1688 // Check for same test used more than once?
1689 if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
1690 // Search up IDOMs to see if this IF is dominated.
1691 Node* cmp = bol->in(1);
1692 Node *cutoff = cmp->is_SubTypeCheck() ? dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);
1693
1694 // Now search up IDOMs till cutoff, looking for a dominating test
1695 Node *prevdom = n;
1696 Node *dom = idom(prevdom);
1697 while (dom != cutoff) {
1698 if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
1699 safe_for_if_replacement(dom)) {
        // It's invalid to move control-dependent data nodes into the inner
        // strip-mined loop, because that would:
        // 1) break validation in LoopNode::verify_strip_mined()
        // 2) move code with side effects into the strip-mined loop
        // Move to the exit of the outer strip-mined loop in that case.
1705 Node* out_le = is_inner_of_stripmined_loop(dom);
1706 if (out_le != nullptr) {
1707 prevdom = out_le;
1708 }
1709 // Replace the dominated test with an obvious true or false.
1710 // Place it on the IGVN worklist for later cleanup.
1711 C->set_major_progress();
1712 // Split if: pin array accesses that are control dependent on a range check and moved to a regular if,
1713 // to prevent an array load from floating above its range check. There are three cases:
1714 // 1. Move from RangeCheck "a" to RangeCheck "b": don't need to pin. If we ever remove b, then we pin
1715 // all its array accesses at that point.
1716 // 2. We move from RangeCheck "a" to regular if "b": need to pin. If we ever remove b, then its array
1717 // accesses would start to float, since we don't pin at that point.
1718 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1719 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1720 prevdom->in(0)->Opcode() != Op_RangeCheck;
1721 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1722 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1723 return;
1724 }
1725 prevdom = dom;
1726 dom = idom(prevdom);
1727 }
1728 }
1729 }
1730
1731 try_sink_out_of_loop(n);
1732 if (C->failing()) {
1733 return;
1734 }
1735
1736 try_move_store_after_loop(n);
1737
1738 // Remove multiple allocations of the same inline type
1739 if (n->is_InlineType()) {
1740 n->as_InlineType()->remove_redundant_allocations(this);
1741 }
1742 }
1743
1744 // Transform:
1745 //
1746 // if (some_condition) {
1747 // // body 1
1748 // } else {
1749 // // body 2
1750 // }
1751 // if (some_condition) {
1752 // // body 3
1753 // } else {
1754 // // body 4
1755 // }
1756 //
1757 // into:
1758 //
1759 //
1760 // if (some_condition) {
1761 // // body 1
1762 // // body 3
1763 // } else {
1764 // // body 2
1765 // // body 4
1766 // }
1767 bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
1768 if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1769 Node *n_ctrl = n->in(0);
1770 IfNode* dom_if = idom(n_ctrl)->as_If();
1771 if (n->in(1) != dom_if->in(1)) {
1772 assert(n->in(1)->in(1)->is_SubTypeCheck() &&
1773 (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
1774 dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
1775 _igvn.replace_input_of(n, 1, dom_if->in(1));
1776 }
1777 IfTrueNode* dom_proj_true = dom_if->true_proj();
1778 IfFalseNode* dom_proj_false = dom_if->false_proj();
1779
1780 // Now split the IF
1781 RegionNode* new_false_region;
1782 RegionNode* new_true_region;
1783 #ifndef PRODUCT
1784 if (TraceLoopOpts || TraceSplitIf) {
1785 tty->print_cr("Split-If Merging Identical Ifs: Dom-If: %d %s, If: %d %s", dom_if->_idx, dom_if->Name(), n->_idx, n->Name());
1786 }
1787 #endif
1788 do_split_if(n, &new_false_region, &new_true_region);
1789 assert(new_false_region->req() == new_true_region->req(), "");
1790 #ifdef ASSERT
1791 for (uint i = 1; i < new_false_region->req(); ++i) {
1792 assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if");
1793 assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if");
1794 }
1795 #endif
1796 assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test");
1797
1798 // We now have:
1799 // if (some_condition) {
1800 // // body 1
1801 // if (some_condition) {
1802 // body3: // new_true_region
1803 // // body3
1804 // } else {
1805 // goto body4;
1806 // }
1807 // } else {
1808 // // body 2
1809 // if (some_condition) {
1810 // goto body3;
1811 // } else {
1812 // body4: // new_false_region
1813 // // body4;
1814 // }
1815 // }
1816 //
1817
1818 // clone pinned nodes thru the resulting regions
1819 push_pinned_nodes_thru_region(dom_if, new_true_region);
1820 push_pinned_nodes_thru_region(dom_if, new_false_region);
1821
1822 // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent
1823 // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an
1824 // unrelated control dependency.
1825 for (uint i = 1; i < new_false_region->req(); i++) {
1826 if (is_dominator(dom_proj_true, new_false_region->in(i))) {
1827 dominated_by(dom_proj_true, new_false_region->in(i)->in(0)->as_If());
1828 } else {
1829 assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if");
1830 dominated_by(dom_proj_false, new_false_region->in(i)->in(0)->as_If());
1831 }
1832 }
1833 return true;
1834 }
1835 return false;
1836 }
1837
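// Push nodes that are pinned at 'region' through it: for every control
// dependent user of 'region' whose other inputs all dominate 'dom_if', clone
// the user down each of the region's input paths and merge the clones with a
// Phi. This empties the region of pinned users so that the cloned ifs feeding
// it can be optimized out (see try_merge_identical_ifs() above).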
1838 void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) {
1839 for (DUIterator i = region->outs(); region->has_out(i); i++) {
1840 Node* u = region->out(i);
1841 if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test()) {
1842 continue;
1843 }
1844 assert(u->in(0) == region, "not a control dependent node?");
1845 uint j = 1;
1846 for (; j < u->req(); ++j) {
1847 Node* in = u->in(j);
1848 if (!is_dominator(ctrl_or_self(in), dom_if)) {
1849 break;
1850 }
1851 }
1852 if (j == u->req()) {
1853 Node *phi = PhiNode::make_blank(region, u);
1854 for (uint k = 1; k < region->req(); ++k) {
1855 Node* clone = u->clone();
1856 clone->set_req(0, region->in(k));
1857 register_new_node(clone, region->in(k));
1858 phi->init_req(k, clone);
1859 }
1860 register_new_node(phi, region);
1861 _igvn.replace_node(u, phi);
1862 --i;
1863 }
1864 }
1865 }
1866
1867 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const {
1868 if (!dom->is_CountedLoopEnd()) {
1869 return true;
1870 }
1871 CountedLoopEndNode* le = dom->as_CountedLoopEnd();
1872 CountedLoopNode* cl = le->loopnode();
1873 if (cl == nullptr) {
1874 return true;
1875 }
1876 if (!cl->is_main_loop()) {
1877 return true;
1878 }
1879 if (cl->is_canonical_loop_entry() == nullptr) {
1880 return true;
1881 }
1882 // Further unrolling is possible so loop exit condition might change
1883 return false;
1884 }
1885
1886 // See if a shared loop-varying computation has no loop-varying uses.
1887 // Happens if something is only used for JVM state in uncommon trap exits,
1888 // like various versions of induction variable+offset. Clone the
1889 // computation per usage to allow it to sink out of the loop.
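//
// A minimal sketch of the situation (Java-like pseudo code, names are
// illustrative):
//
//   for (int i = 0; i < n; i++) {
//     int v = i + offset;       // loop-varying, but...
//     if (unlikely_condition) {
//       deoptimize();           // ...only used by the trap's JVM state
//     }
//   }
//
// Cloning 'i + offset' per use lets each clone sink to the trap path out of
// the loop, removing the computation from the loop body.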
1890 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
1891 bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
1892 n->in(1)->bottom_type()->isa_rawptr() &&
1893 !n->bottom_type()->isa_rawptr();
1894
1895 if (has_ctrl(n) &&
1896 !n->is_Phi() &&
1897 !n->is_Bool() &&
1898 !n->is_Proj() &&
1899 !n->is_MergeMem() &&
1900 !n->is_CMove() &&
1901 !n->is_OpaqueConstantBool() &&
1902 !n->is_OpaqueInitializedAssertionPredicate() &&
1903 !n->is_OpaqueTemplateAssertionPredicate() &&
1904 !is_raw_to_oop_cast && // don't extend live ranges of raw oops
1905 n->Opcode() != Op_CreateEx &&
1906 (KillPathsReachableByDeadTypeNode || !n->is_Type())
1907 ) {
1908 Node *n_ctrl = get_ctrl(n);
1909 IdealLoopTree *n_loop = get_loop(n_ctrl);
1910
1911 if (n->in(0) != nullptr) {
1912 IdealLoopTree* loop_ctrl = get_loop(n->in(0));
1913 if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) {
        // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example,
        // for Div nodes inside a loop (control input inside the loop) without any use except for an UCT (outside the loop).
        // Rewire the control of n to just outside of the loop, regardless of whether its input(s) are later sunk or not.
1917 Node* maybe_pinned_n = n;
1918 Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl);
1919 if (!would_sink_below_pre_loop_exit(loop_ctrl, outside_ctrl)) {
1920 if (n->depends_only_on_test()) {
1921 // If this node depends_only_on_test, it will be rewired to a control input that is not
1922 // the correct test. As a result, it must be pinned otherwise it can be incorrectly
1923 // rewired to a dominating test equivalent to the new control.
1924 Node* pinned_clone = n->pin_node_under_control();
1925 if (pinned_clone != nullptr) {
1926 register_new_node(pinned_clone, n_ctrl);
1927 maybe_pinned_n = pinned_clone;
1928 _igvn.replace_node(n, pinned_clone);
1929 }
1930 }
1931 _igvn.replace_input_of(maybe_pinned_n, 0, outside_ctrl);
1932 }
1933 }
1934 }
1935 if (n_loop != _ltree_root && n->outcnt() > 1) {
1936 // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of
1937 // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us.
1938 Node* early_ctrl = compute_early_ctrl(n, n_ctrl);
1939 if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now
1940 ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops!
1941 if (n->is_Store() || n->is_LoadStore()) {
1942 assert(false, "no node with a side effect");
1943 C->record_failure("no node with a side effect");
1944 return;
1945 }
1946 Node* outer_loop_clone = nullptr;
1947 for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) {
1948 Node* u = n->last_out(j); // Clone private computation per use
1949 _igvn.rehash_node_delayed(u);
1950 Node* x = nullptr;
1951 if (n->in(0) != nullptr && n->depends_only_on_test()) {
1952 // If this node depends_only_on_test, it will be rewired to a control input that is not
1953 // the correct test. As a result, it must be pinned otherwise it can be incorrectly
1954 // rewired to a dominating test equivalent to the new control.
1955 x = n->pin_node_under_control();
1956 }
1957 if (x == nullptr) {
1958 x = n->clone();
1959 }
1960 Node* x_ctrl = nullptr;
1961 if (u->is_Phi()) {
1962 // Replace all uses of normal nodes. Replace Phi uses
1963 // individually, so the separate Nodes can sink down
1964 // different paths.
1965 uint k = 1;
1966 while (u->in(k) != n) k++;
1967 u->set_req(k, x);
1968 // x goes next to Phi input path
1969 x_ctrl = u->in(0)->in(k);
1970 // Find control for 'x' next to use but not inside inner loops.
1971 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1972 --j;
1973 } else { // Normal use
1974 if (has_ctrl(u)) {
1975 x_ctrl = get_ctrl(u);
1976 } else {
1977 x_ctrl = u->in(0);
1978 }
1979 // Find control for 'x' next to use but not inside inner loops.
1980 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1981 // Replace all uses
1982 if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) {
1983 // If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary
1984 // anymore now that we're going to pin n as well
1985 _igvn.replace_node(u, x);
1986 --j;
1987 } else {
1988 int nb = u->replace_edge(n, x, &_igvn);
1989 j -= nb;
1990 }
1991 }
1992
1993 if (n->is_Load()) {
1994 // For loads, add a control edge to a CFG node outside of the loop
1995 // to force them to not combine and return back inside the loop
1996 // during GVN optimization (4641526).
1997 assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked");
1998
1999 IdealLoopTree* x_loop = get_loop(x_ctrl);
2000 Node* x_head = x_loop->_head;
2001 if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) {
2002 // Do not add duplicate LoadNodes to the outer strip mined loop
2003 if (outer_loop_clone != nullptr) {
2004 _igvn.replace_node(x, outer_loop_clone);
2005 continue;
2006 }
2007 outer_loop_clone = x;
2008 }
2009 x->set_req(0, x_ctrl);
        } else if (n->in(0) != nullptr) {
2011 x->set_req(0, x_ctrl);
2012 }
2013 assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
2014 assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop");
2015 register_new_node(x, x_ctrl);
2016
2017 // Chain of AddP nodes: (AddP base (AddP base (AddP base )))
2018 // All AddP nodes must keep the same base after sinking so:
2019 // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk,
2020 // their bases remain the same.
2021 // (see 2- below)
2022 assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() ||
2023 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) ||
2024 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape");
2025 if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() &&
2026 !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) {
2027 assert(!x->is_Load(), "load should be pinned");
2028 // Use a cast node to pin clone out of loop
2029 Node* cast = nullptr;
2030 for (uint k = 0; k < x->req(); k++) {
2031 Node* in = x->in(k);
2032 if (in != nullptr && ctrl_is_member(n_loop, in)) {
2033 const Type* in_t = _igvn.type(in);
2034 cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
2035 ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr);
2036 }
2037 if (cast != nullptr) {
2038 Node* prev = _igvn.hash_find_insert(cast);
2039 if (prev != nullptr && get_ctrl(prev) == x_ctrl) {
2040 cast->destruct(&_igvn);
2041 cast = prev;
2042 } else {
2043 register_new_node(cast, x_ctrl);
2044 }
2045 x->replace_edge(in, cast);
2046 // Chain of AddP nodes:
2047 // 2- A CastPP of the base is only added now that all AddP nodes are sunk
2048 if (x->is_AddP() && k == AddPNode::Base) {
2049 update_addp_chain_base(x, n->in(AddPNode::Base), cast);
2050 }
2051 break;
2052 }
2053 }
2054 assert(cast != nullptr, "must have added a cast to pin the node");
2055 }
2056 }
2057 _igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph);
2058 }
2059 _dom_lca_tags_round = 0;
2060 }
2061 }
2062 }
2063
2064 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) {
2065 ResourceMark rm;
2066 Node_List wq;
2067 wq.push(x);
2068 while (wq.size() != 0) {
2069 Node* n = wq.pop();
2070 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2071 Node* u = n->fast_out(i);
2072 if (u->is_AddP() && u->in(AddPNode::Base) == old_base) {
2073 _igvn.replace_input_of(u, AddPNode::Base, new_base);
2074 wq.push(u);
2075 }
2076 }
2077 }
2078 }
2079
2080 // Compute the early control of a node by following its inputs until we reach
2081 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes.
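// For instance (a sketch; c1 and c2 are illustrative controls): for
// n = AddI(CastII(ctrl: c1, x), LoadI(ctrl: c2, ...)) where c1 dominates c2,
// the early control of n is c2, i.e. the deepest control among the pinned
// inputs and thus the earliest point where all of n's inputs are available.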
2082 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) {
2083 Node* early_ctrl = nullptr;
2084 ResourceMark rm;
2085 Unique_Node_List wq;
2086 wq.push(n);
2087 for (uint i = 0; i < wq.size(); i++) {
2088 Node* m = wq.at(i);
2089 Node* c = nullptr;
2090 if (m->is_CFG()) {
2091 c = m;
2092 } else if (m->pinned()) {
2093 c = m->in(0);
2094 } else {
2095 for (uint j = 0; j < m->req(); j++) {
2096 Node* in = m->in(j);
2097 if (in != nullptr) {
2098 wq.push(in);
2099 }
2100 }
2101 }
2102 if (c != nullptr) {
2103 assert(is_dominator(c, n_ctrl), "control input must dominate current control");
2104 if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) {
2105 early_ctrl = c;
2106 }
2107 }
2108 }
2109 assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control");
2110 return early_ctrl;
2111 }
2112
2113 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) {
2114 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2115 Node* u = n->fast_out(i);
2116 if (u->is_Opaque1()) {
2117 return false; // Found loop limit, bugfix for 4677003
2118 }
2119 if (u->is_Phi()) {
2120 for (uint j = 1; j < u->req(); ++j) {
2121 if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) {
2122 return false;
2123 }
2124 }
2125 } else {
2126 Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0);
2127 if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) {
2128 return false;
2129 }
2130 }
2131 }
2132 return true;
2133 }
2134
2135 // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input
2136 // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit
2137 // test of the pre loop above the point in the graph where it's pinned. This results in a broken graph. One way to avoid
2138 // it would be to not eliminate the check in the main loop. Instead, we prevent sinking of the node here so better code
2139 // is generated for the main loop.
2140 bool PhaseIdealLoop::would_sink_below_pre_loop_exit(IdealLoopTree* n_loop, Node* ctrl) {
2141 if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop()) {
2142 CountedLoopNode* pre_loop = n_loop->_head->as_CountedLoop();
2143 if (is_dominator(pre_loop->loopexit(), ctrl)) {
2144 return true;
2145 }
2146 }
2147 return false;
2148 }
2149
2150 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) {
2151 if (n->is_Load()) {
2152 // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure each call to
2153 // get_late_ctrl_with_anti_dep() uses its own tag
2154 _dom_lca_tags_round++;
2155 assert(_dom_lca_tags_round != 0, "shouldn't wrap around");
2156
2157 ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl);
2158 }
2159 IdealLoopTree *u_loop = get_loop(ctrl);
2160 if (u_loop == n_loop) {
2161 return false; // Found loop-varying use
2162 }
2163 if (n_loop->is_member(u_loop)) {
2164 return false; // Found use in inner loop
2165 }
2166 if (would_sink_below_pre_loop_exit(n_loop, ctrl)) {
2167 return false;
2168 }
2169 return true;
2170 }
2171
2172 //------------------------------split_if_with_blocks---------------------------
2173 // Check for aggressive application of 'split-if' optimization,
2174 // using basic block level info.
2175 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
2176 Node* root = C->root();
2177 visited.set(root->_idx); // first, mark root as visited
2178 // Do pre-visit work for root
2179 Node* n = split_if_with_blocks_pre(root);
2180 uint cnt = n->outcnt();
2181 uint i = 0;
2182
2183 while (true) {
2184 // Visit all children
2185 if (i < cnt) {
2186 Node* use = n->raw_out(i);
2187 ++i;
2188 if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
2189 // Now do pre-visit work for this use
2190 use = split_if_with_blocks_pre(use);
2191 nstack.push(n, i); // Save parent and next use's index.
2192 n = use; // Process all children of current use.
2193 cnt = use->outcnt();
2194 i = 0;
2195 }
2196 }
2197 else {
2198 // All of n's children have been processed, complete post-processing.
2199 if (cnt != 0 && !n->is_Con()) {
2200 assert(has_node(n), "no dead nodes");
2201 split_if_with_blocks_post(n);
2202 if (C->failing()) {
2203 return;
2204 }
2205 }
2206 if (must_throttle_split_if()) {
2207 nstack.clear();
2208 }
2209 if (nstack.is_empty()) {
2210 // Finished all nodes on stack.
2211 break;
2212 }
2213 // Get saved parent node and next use's index. Visit the rest of uses.
2214 n = nstack.node();
2215 cnt = n->outcnt();
2216 i = nstack.index();
2217 nstack.pop();
2218 }
2219 }
2220 }
2221
2222
2223 //=============================================================================
2224 //
2225 // C L O N E A L O O P B O D Y
2226 //
2227
2228 //------------------------------clone_iff--------------------------------------
2229 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2230 // "Nearly" because all Nodes have been cloned from the original in the loop,
2231 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
2232 // through the Phi recursively, and return a Bool.
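// Schematically (a sketch with two merge paths):
//
//   Phi(Bool1(Cmp1(a1, b1)), Bool2(Cmp2(a2, b2)))
//
// becomes
//
//   Bool(Cmp(Phi(a1, a2), Phi(b1, b2)))
//
// with the Opaque node cloned on top of the new Bool when the Phi merges
// OpaqueConstantBool/OpaqueInitializedAssertionPredicate nodes.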
2233 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
2234
2235 // Convert this Phi into a Phi merging Bools
2236 uint i;
2237 for (i = 1; i < phi->req(); i++) {
2238 Node* b = phi->in(i);
2239 if (b->is_Phi()) {
2240 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2241 } else {
2242 assert(b->is_Bool() || b->is_OpaqueConstantBool() || b->is_OpaqueInitializedAssertionPredicate(),
2243 "bool, non-null check with OpaqueConstantBool or Initialized Assertion Predicate with its Opaque node");
2244 }
2245 }
2246 Node* n = phi->in(1);
2247 Node* sample_opaque = nullptr;
2248 Node *sample_bool = nullptr;
2249 if (n->is_OpaqueConstantBool() || n->is_OpaqueInitializedAssertionPredicate()) {
2250 sample_opaque = n;
2251 sample_bool = n->in(1);
2252 assert(sample_bool->is_Bool(), "wrong type");
2253 } else {
2254 sample_bool = n;
2255 }
2256 Node* sample_cmp = sample_bool->in(1);
2257 const Type* t = Type::TOP;
2258 const TypePtr* at = nullptr;
2259 if (sample_cmp->is_FlatArrayCheck()) {
2260 // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
2261 assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2262 t = Type::MEMORY;
2263 at = TypeRawPtr::BOTTOM;
2264 }
2265
2266 // Make Phis to merge the Cmp's inputs.
2267 PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2268 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2269 for (i = 1; i < phi->req(); i++) {
2270 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2271 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2272 phi1->set_req(i, n1);
2273 phi2->set_req(i, n2);
2274 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2275 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2276 }
2277 // See if these Phis have been made before.
2278 // Register with optimizer
2279 Node *hit1 = _igvn.hash_find_insert(phi1);
2280 if (hit1) { // Hit, toss just made Phi
2281 _igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
2282 assert(hit1->is_Phi(), "" );
2283 phi1 = (PhiNode*)hit1; // Use existing phi
2284 } else { // Miss
2285 _igvn.register_new_node_with_optimizer(phi1);
2286 }
2287 Node *hit2 = _igvn.hash_find_insert(phi2);
2288 if (hit2) { // Hit, toss just made Phi
2289 _igvn.remove_dead_node(phi2, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
2290 assert(hit2->is_Phi(), "" );
2291 phi2 = (PhiNode*)hit2; // Use existing phi
2292 } else { // Miss
2293 _igvn.register_new_node_with_optimizer(phi2);
2294 }
2295 // Register Phis with loop/block info
2296 set_ctrl(phi1, phi->in(0));
2297 set_ctrl(phi2, phi->in(0));
2298 // Make a new Cmp
2299 Node *cmp = sample_cmp->clone();
2300 cmp->set_req(1, phi1);
2301 cmp->set_req(2, phi2);
2302 _igvn.register_new_node_with_optimizer(cmp);
2303 set_ctrl(cmp, phi->in(0));
2304
2305 // Make a new Bool
2306 Node *b = sample_bool->clone();
2307 b->set_req(1,cmp);
2308 _igvn.register_new_node_with_optimizer(b);
2309 set_ctrl(b, phi->in(0));
2310
2311 if (sample_opaque != nullptr) {
2312 Node* opaque = sample_opaque->clone();
2313 opaque->set_req(1, b);
2314 _igvn.register_new_node_with_optimizer(opaque);
2315 set_ctrl(opaque, phi->in(0));
2316 return opaque;
2317 }
2318
2319 assert(b->is_Bool(), "");
2320 return b;
2321 }
2322
2323 //------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone Cmps
// through the Phi recursively, and return a Cmp.
CmpNode* PhaseIdealLoop::clone_bool(PhiNode* phi) {
2329 uint i;
2330 // Convert this Phi into a Phi merging Bools
2331 for( i = 1; i < phi->req(); i++ ) {
2332 Node *b = phi->in(i);
2333 if( b->is_Phi() ) {
2334 _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi()));
2335 } else {
2336 assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
2337 }
2338 }
2339
2340 Node *sample_cmp = phi->in(1);
2341
2342 // Make Phis to merge the Cmp's inputs.
2343 PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
2344 PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
2345 for( uint j = 1; j < phi->req(); j++ ) {
2346 Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
2347 Node *n1, *n2;
2348 if( cmp_top->is_Cmp() ) {
2349 n1 = cmp_top->in(1);
2350 n2 = cmp_top->in(2);
2351 } else {
2352 n1 = n2 = cmp_top;
2353 }
2354 phi1->set_req( j, n1 );
2355 phi2->set_req( j, n2 );
2356 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2357 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2358 }
2359
2360 // See if these Phis have been made before.
2361 // Register with optimizer
2362 Node *hit1 = _igvn.hash_find_insert(phi1);
2363 if( hit1 ) { // Hit, toss just made Phi
2364 _igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
2365 assert( hit1->is_Phi(), "" );
2366 phi1 = (PhiNode*)hit1; // Use existing phi
2367 } else { // Miss
2368 _igvn.register_new_node_with_optimizer(phi1);
2369 }
2370 Node *hit2 = _igvn.hash_find_insert(phi2);
2371 if( hit2 ) { // Hit, toss just made Phi
2372 _igvn.remove_dead_node(phi2, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
2373 assert( hit2->is_Phi(), "" );
2374 phi2 = (PhiNode*)hit2; // Use existing phi
2375 } else { // Miss
2376 _igvn.register_new_node_with_optimizer(phi2);
2377 }
2378 // Register Phis with loop/block info
2379 set_ctrl(phi1, phi->in(0));
2380 set_ctrl(phi2, phi->in(0));
2381 // Make a new Cmp
2382 Node *cmp = sample_cmp->clone();
2383 cmp->set_req( 1, phi1 );
2384 cmp->set_req( 2, phi2 );
2385 _igvn.register_new_node_with_optimizer(cmp);
2386 set_ctrl(cmp, phi->in(0));
2387
2388 assert( cmp->is_Cmp(), "" );
2389 return (CmpNode*)cmp;
2390 }
2391
2392 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
2393 IdealLoopTree* loop, IdealLoopTree* outer_loop,
2394 Node_List*& split_if_set, Node_List*& split_bool_set,
2395 Node_List*& split_cex_set, Node_List& worklist,
2396 uint new_counter, CloneLoopMode mode) {
2397 Node* nnn = old_new[old->_idx];
2398 // Copy uses to a worklist, so I can munge the def-use info
2399 // with impunity.
2400 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2401 worklist.push(old->fast_out(j));
2402
2403 while( worklist.size() ) {
2404 Node *use = worklist.pop();
2405 if (!has_node(use)) continue; // Ignore dead nodes
2406 if (use->in(0) == C->top()) continue;
2407 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2408 // Check for data-use outside of loop - at least one of OLD or USE
2409 // must not be a CFG node.
2410 #ifdef ASSERT
2411 if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) {
2412 Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
2413 assert(mode != IgnoreStripMined, "incorrect cloning mode");
2414 assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
2415 }
2416 #endif
2417 if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
2418
      // If the Data use is an IF, that means we have an IF outside the
      // loop that is switching on a condition that is set inside the
      // loop. Happens if people set a loop-exit flag; then test the flag
      // in the loop to break the loop, then test it again outside the
      // loop to determine which way the loop exited.
      //
      // For several uses we need to make sure that there is no phi between
      // the use and the Bool/Cmp. We therefore clone the Bool/Cmp down here
      // to avoid such a phi in between.
      // For example, it is unexpected that there is a Phi between an
      // AllocateArray node and its ValidLengthTest input that could cause
      // split-if to break.
2431 assert(!use->is_OpaqueTemplateAssertionPredicate(),
2432 "should not clone a Template Assertion Predicate which should be removed once it's useless");
2433 if (use->is_If() || use->is_CMove() || use->is_OpaqueConstantBool() || use->is_OpaqueInitializedAssertionPredicate() ||
2434 (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) {
2435 // Since this code is highly unlikely, we lazily build the worklist
2436 // of such Nodes to go split.
2437 if (!split_if_set) {
2438 split_if_set = new Node_List();
2439 }
2440 split_if_set->push(use);
2441 }
2442 if (use->is_Bool()) {
2443 if (!split_bool_set) {
2444 split_bool_set = new Node_List();
2445 }
2446 split_bool_set->push(use);
2447 }
2448 if (use->Opcode() == Op_CreateEx) {
2449 if (!split_cex_set) {
2450 split_cex_set = new Node_List();
2451 }
2452 split_cex_set->push(use);
2453 }
2454
2455
2456 // Get "block" use is in
2457 uint idx = 0;
2458 while( use->in(idx) != old ) idx++;
2459 Node *prev = use->is_CFG() ? use : get_ctrl(use);
2460 assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
2461 Node* cfg = (prev->_idx >= new_counter && prev->is_Region())
2462 ? prev->in(2)
2463 : idom(prev);
2464 if( use->is_Phi() ) // Phi use is in prior block
2465 cfg = prev->in(idx); // NOT in block of Phi itself
2466 if (cfg->is_top()) { // Use is dead?
2467 _igvn.replace_input_of(use, idx, C->top());
2468 continue;
2469 }
2470
2471 // If use is referenced through control edge... (idx == 0)
2472 if (mode == IgnoreStripMined && idx == 0) {
2473 LoopNode *head = loop->_head->as_Loop();
2474 if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
2475 // That node is outside the inner loop, leave it outside the
2476 // outer loop as well to not confuse verification code.
2477 assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
2478 _igvn.replace_input_of(use, 0, head->outer_loop_exit());
2479 continue;
2480 }
2481 }
2482
2483 while(!outer_loop->is_member(get_loop(cfg))) {
2484 prev = cfg;
2485 cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg);
2486 }
2487 // If the use occurs after merging several exits from the loop, then
2488 // old value must have dominated all those exits. Since the same old
2489 // value was used on all those exits we did not need a Phi at this
2490 // merge point. NOW we do need a Phi here. Each loop exit value
2491 // is now merged with the peeled body exit; each exit gets its own
2492 // private Phi and those Phis need to be merged here.
2493 Node *phi;
2494 if( prev->is_Region() ) {
2495 if( idx == 0 ) { // Updating control edge?
2496 phi = prev; // Just use existing control
2497 } else { // Else need a new Phi
2498 phi = PhiNode::make( prev, old );
2499 // Now recursively fix up the new uses of old!
2500 for( uint i = 1; i < prev->req(); i++ ) {
2501 worklist.push(phi); // Onto worklist once for each 'old' input
2502 }
2503 }
2504 } else {
2505 // Get new RegionNode merging old and new loop exits
2506 prev = old_new[prev->_idx];
2507 assert( prev, "just made this in step 7" );
2508 if( idx == 0) { // Updating control edge?
2509 phi = prev; // Just use existing control
2510 } else { // Else need a new Phi
2511 // Make a new Phi merging data values properly
2512 phi = PhiNode::make( prev, old );
2513 phi->set_req( 1, nnn );
2514 }
2515 }
2516 // If inserting a new Phi, check for prior hits
2517 if( idx != 0 ) {
2518 Node *hit = _igvn.hash_find_insert(phi);
2519 if( hit == nullptr ) {
2520 _igvn.register_new_node_with_optimizer(phi); // Register new phi
2521 } else { // or
2522 // Remove the new phi from the graph and use the hit
2523 _igvn.remove_dead_node(phi, phi == prev ? PhaseIterGVN::NodeOrigin::Graph : PhaseIterGVN::NodeOrigin::Speculative);
2524 phi = hit; // Use existing phi
2525 }
2526 set_ctrl(phi, prev);
2527 }
2528 // Make 'use' use the Phi instead of the old loop body exit value
2529 assert(use->in(idx) == old, "old is still input of use");
2530 // We notify all uses of old, including use, and the indirect uses,
2531 // that may now be optimized because we have replaced old with phi.
2532 _igvn.add_users_to_worklist(old);
2533 if (idx == 0 && use->depends_only_on_test()) {
2534 // If this node depends_only_on_test, it will be rewired to a control input that is not the
2535 // correct test. As a result, it must be pinned otherwise it can be incorrectly rewired to
2536 // a dominating test equivalent to the new control.
2537 Node* pinned_clone = use->pin_node_under_control();
2538 if (pinned_clone != nullptr) {
2539 pinned_clone->set_req(0, phi);
2540 register_new_node_with_ctrl_of(pinned_clone, use);
2541 _igvn.replace_node(use, pinned_clone);
2542 continue;
2543 }
2544 }
2545 _igvn.replace_input_of(use, idx, phi);
2546 if( use->_idx >= new_counter ) { // If updating new phis
2547 // Not needed for correctness, but prevents a weak assert
2548 // in AddPNode from tripping (when we end up with different
2549 // base & derived Phis that will become the same after
2550 // IGVN does CSE).
2551 Node *hit = _igvn.hash_find_insert(use);
2552 if( hit ) // Go ahead and re-hash for hits.
2553 _igvn.replace_node( use, hit );
2554 }
2555 }
2556 }
2557 }
2558
2559 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
2560 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
2561 bool check_old_new) {
2562 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2563 Node* u = n->fast_out(j);
2564 assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned");
2565 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) {
2566 assert(!phase->ctrl_is_member(loop, u) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
2567 if (!phase->ctrl_is_member(loop, u)) {
2568 if (phase->ctrl_is_member(outer_loop, u)) {
2569 wq.push(u);
2570 } else {
2571 // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of
2572 // the outer loop too
2573 Node* u_c = u->in(0);
2574 if (u_c != nullptr) {
2575 IdealLoopTree* u_c_loop = phase->get_loop(u_c);
2576 if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) {
2577 wq.push(u);
2578 }
2579 }
2580 }
2581 }
2582 }
2583 }
2584 }
2585
2586 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
2587 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
2588 Node_List& extra_data_nodes) {
2589 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2590 CountedLoopNode* cl = head->as_CountedLoop();
2591 Node* l = cl->outer_loop();
2592 Node* tail = cl->outer_loop_tail();
2593 IfNode* le = cl->outer_loop_end();
2594 Node* sfpt = cl->outer_safepoint();
2595 CountedLoopEndNode* cle = cl->loopexit();
2596 CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
2597 CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
2598 IfFalseNode* cle_out = cle->false_proj();
2599
2600 Node* new_sfpt = nullptr;
2601 Node* new_cle_out = cle_out->clone();
2602 old_new.map(cle_out->_idx, new_cle_out);
2603 if (mode == CloneIncludesStripMined) {
2604 // clone outer loop body
2605 Node* new_l = l->clone();
2606 Node* new_tail = tail->clone();
2607 IfNode* new_le = le->clone()->as_If();
2608 new_sfpt = sfpt->clone();
2609
2610 set_loop(new_l, outer_loop->_parent);
2611 set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
2612 set_loop(new_cle_out, outer_loop->_parent);
2613 set_idom(new_cle_out, new_cle, dd);
2614 set_loop(new_sfpt, outer_loop->_parent);
2615 set_idom(new_sfpt, new_cle_out, dd);
2616 set_loop(new_le, outer_loop->_parent);
2617 set_idom(new_le, new_sfpt, dd);
2618 set_loop(new_tail, outer_loop->_parent);
2619 set_idom(new_tail, new_le, dd);
2620 set_idom(new_cl, new_l, dd);
2621
2622 old_new.map(l->_idx, new_l);
2623 old_new.map(tail->_idx, new_tail);
2624 old_new.map(le->_idx, new_le);
2625 old_new.map(sfpt->_idx, new_sfpt);
2626
2627 new_l->set_req(LoopNode::LoopBackControl, new_tail);
2628 new_l->set_req(0, new_l);
2629 new_tail->set_req(0, new_le);
2630 new_le->set_req(0, new_sfpt);
2631 new_sfpt->set_req(0, new_cle_out);
2632 new_cle_out->set_req(0, new_cle);
2633 new_cl->set_req(LoopNode::EntryControl, new_l);
2634
2635 _igvn.register_new_node_with_optimizer(new_l);
2636 _igvn.register_new_node_with_optimizer(new_tail);
2637 _igvn.register_new_node_with_optimizer(new_le);
2638 } else {
2639 Node *newhead = old_new[loop->_head->_idx];
2640 newhead->as_Loop()->clear_strip_mined();
2641 _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
2642 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2643 }
    // Look at data nodes that were assigned a control in the outer
    // loop: they are kept in the outer loop by the safepoint so start
    // from the safepoint node's inputs.
2647 IdealLoopTree* outer_loop = get_loop(l);
2648 Node_Stack stack(2);
2649 stack.push(sfpt, 1);
2650 uint new_counter = C->unique();
2651 while (stack.size() > 0) {
2652 Node* n = stack.node();
2653 uint i = stack.index();
2654 while (i < n->req() &&
2655 (n->in(i) == nullptr ||
2656 !has_ctrl(n->in(i)) ||
2657 get_loop(get_ctrl(n->in(i))) != outer_loop ||
2658 (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
2659 i++;
2660 }
2661 if (i < n->req()) {
2662 stack.set_index(i+1);
2663 stack.push(n->in(i), 0);
2664 } else {
2665 assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
2666 Node* m = n == sfpt ? new_sfpt : n->clone();
2667 if (m != nullptr) {
2668 for (uint i = 0; i < n->req(); i++) {
2669 if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) {
2670 m->set_req(i, old_new[m->in(i)->_idx]);
2671 }
2672 }
2673 } else {
2674 assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
2675 }
2676 if (n != sfpt) {
2677 extra_data_nodes.push(n);
2678 _igvn.register_new_node_with_optimizer(m);
2679 assert(get_ctrl(n) == cle_out, "what other control?");
2680 set_ctrl(m, new_cle_out);
2681 old_new.map(n->_idx, m);
2682 }
2683 stack.pop();
2684 }
2685 }
2686 if (mode == CloneIncludesStripMined) {
2687 _igvn.register_new_node_with_optimizer(new_sfpt);
2688 _igvn.register_new_node_with_optimizer(new_cle_out);
2689 }
2690 // Some other transformation may have pessimistically assigned some
2691 // data nodes to the outer loop. Set their control so they are out
2692 // of the outer loop.
2693 ResourceMark rm;
2694 Unique_Node_List wq;
2695 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2696 Node* old = extra_data_nodes.at(i);
2697 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2698 }
2699
2700 for (uint i = 0; i < loop->_body.size(); i++) {
2701 Node* old = loop->_body.at(i);
2702 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2703 }
2704
2705 Node* inner_out = sfpt->in(0);
2706 if (inner_out->outcnt() > 1) {
2707 collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true);
2708 }
2709
2710 Node* new_ctrl = cl->outer_loop_exit();
2711 assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
2712 for (uint i = 0; i < wq.size(); i++) {
2713 Node* n = wq.at(i);
2714 set_ctrl(n, new_ctrl);
2715 if (n->in(0) != nullptr) {
2716 _igvn.replace_input_of(n, 0, new_ctrl);
2717 }
2718 collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false);
2719 }
2720 } else {
2721 Node *newhead = old_new[loop->_head->_idx];
2722 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2723 }
2724 }
2725
2726 //------------------------------clone_loop-------------------------------------
2727 //
2728 // C L O N E A L O O P B O D Y
2729 //
2730 // This is the basic building block of the loop optimizations. It clones an
2731 // entire loop body. It makes an old_new loop body mapping; with this mapping
2732 // you can find the new-loop equivalent to an old-loop node. All new-loop
2733 // nodes are exactly equal to their old-loop counterparts, all edges are the
2734 // same. All exits from the old-loop now have a RegionNode that merges the
2735 // equivalent new-loop path. This is true even for the normal "loop-exit"
2736 // condition. All uses of loop-invariant old-loop values now come from (one
2737 // or more) Phis that merge their new-loop equivalents.
2738 //
2739 // This operation leaves the graph in an illegal state: there are two valid
2740 // control edges coming from the loop pre-header to both loop bodies. I'll
2741 // definitely have to hack the graph after running this transform.
2742 //
2743 // From this building block I will further edit edges to perform loop peeling
2744 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2745 //
// Parameter side_by_side_idom:
//   When side_by_side_idom is null, the dominator tree is constructed for
2748 // the clone loop to dominate the original. Used in construction of
2749 // pre-main-post loop sequence.
2750 // When nonnull, the clone and original are side-by-side, both are
2751 // dominated by the side_by_side_idom node. Used in construction of
2752 // unswitched loops.
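//
// Schematically (a sketch of the resulting dominator tree shape):
//
//   side_by_side_idom == nullptr:     side_by_side_idom != nullptr:
//
//     entry                             side_by_side_idom
//       |                                  /           \
//     clone loop                     clone loop    original loop
//       |
//     original loop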
2753 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2754 CloneLoopMode mode, Node* side_by_side_idom) {
2755
2756 LoopNode* head = loop->_head->as_Loop();
2757 head->verify_strip_mined(1);
2758
2759 if (C->do_vector_loop() && PrintOpto) {
2760 const char* mname = C->method()->name()->as_quoted_ascii();
2761 if (mname != nullptr) {
2762 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2763 }
2764 }
2765
2766 CloneMap& cm = C->clone_map();
2767 if (C->do_vector_loop()) {
2768 cm.set_clone_idx(cm.max_gen()+1);
2769 #ifndef PRODUCT
2770 if (PrintOpto) {
2771 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2772 loop->dump_head();
2773 }
2774 #endif
2775 }
2776
2777 // Step 1: Clone the loop body. Make the old->new mapping.
2778 clone_loop_body(loop->_body, old_new, &cm);
2779
2780 IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2781
2782 // Step 2: Fix the edges in the new body. If the old input is outside the
2783 // loop use it. If the old input is INside the loop, use the corresponding
2784 // new node instead.
2785 fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false);
2786
2787 Node_List extra_data_nodes; // data nodes in the outer strip mined loop
2788 clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2789
2790 // Step 3: Now fix control uses. Loop varying control uses have already
2791 // been fixed up (as part of all input edges in Step 2). Loop invariant
2792 // control uses must be either an IfFalse or an IfTrue. Make a merge
2793 // point to merge the old and new IfFalse/IfTrue nodes; make the use
2794 // refer to this.
2795 Node_List worklist;
2796 uint new_counter = C->unique();
2797 fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist);
2798
2799 // Step 4: If loop-invariant use is not control, it must be dominated by a
2800 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region
2801 // there if needed. Make a Phi there merging old and new used values.
2802 Node_List *split_if_set = nullptr;
2803 Node_List *split_bool_set = nullptr;
2804 Node_List *split_cex_set = nullptr;
2805 fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);
2806
2807 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2808 Node* old = extra_data_nodes.at(i);
2809 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2810 split_bool_set, split_cex_set, worklist, new_counter,
2811 mode);
2812 }
2813
2814 // Check for IFs that need splitting/cloning. Happens if an IF outside of
2815 // the loop uses a condition set in the loop. The original IF probably
2816 // takes control from one or more OLD Regions (which in turn get from NEW
2817 // Regions). In any case, there will be a set of Phis for each merge point
  // from the IF up to where the original BOOL def exits the loop.
2819 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
2820
2821 }
2822
2823 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) {
2824 if (split_if_set) {
2825 while (split_if_set->size()) {
2826 Node *iff = split_if_set->pop();
2827 uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1;
2828 if (iff->in(input)->is_Phi()) {
2829 Node *b = clone_iff(iff->in(input)->as_Phi());
2830 _igvn.replace_input_of(iff, input, b);
2831 }
2832 }
2833 }
2834 if (split_bool_set) {
2835 while (split_bool_set->size()) {
2836 Node *b = split_bool_set->pop();
2837 Node *phi = b->in(1);
2838 assert(phi->is_Phi(), "");
2839 CmpNode *cmp = clone_bool((PhiNode*) phi);
2840 _igvn.replace_input_of(b, 1, cmp);
2841 }
2842 }
2843 if (split_cex_set) {
2844 while (split_cex_set->size()) {
2845 Node *b = split_cex_set->pop();
2846 assert(b->in(0)->is_Region(), "");
2847 assert(b->in(1)->is_Phi(), "");
2848 assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2849 split_up(b, b->in(0), nullptr);
2850 }
2851 }
2852 }
2853
2854 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2855 uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set,
2856 Node_List*& split_bool_set, Node_List*& split_cex_set) {
2857 for(uint i = 0; i < body.size(); i++ ) {
2858 Node* old = body.at(i);
2859 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2860 split_bool_set, split_cex_set, worklist, new_counter,
2861 mode);
2862 }
2863 }
2864
2865 void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
2866 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) {
2867 LoopNode* head = loop->_head->as_Loop();
2868 for(uint i = 0; i < body.size(); i++ ) {
2869 Node* old = body.at(i);
2870 if( !old->is_CFG() ) continue;
2871
2872 // Copy uses to a worklist, so I can munge the def-use info
2873 // with impunity.
2874 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) {
2875 worklist.push(old->fast_out(j));
2876 }
2877
2878 while (worklist.size()) { // Visit all uses
2879 Node *use = worklist.pop();
2880 if (!has_node(use)) continue; // Ignore dead nodes
2881 IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use );
2882 if (!loop->is_member(use_loop) && use->is_CFG()) {
2883 // Both OLD and USE are CFG nodes here.
2884 assert(use->is_Proj(), "" );
2885 Node* nnn = old_new[old->_idx];
2886
2887 Node* newuse = nullptr;
2888 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2889 CountedLoopNode* cl = head->as_CountedLoop();
2890 CountedLoopEndNode* cle = cl->loopexit();
2891 // is use the projection that exits the loop from the CountedLoopEndNode?
2892 if (use->in(0) == cle) {
2893 IfFalseNode* cle_out = use->as_IfFalse();
2894 IfNode* le = cl->outer_loop_end();
2895 use = le->false_proj();
2896 use_loop = get_loop(use);
2897 if (mode == CloneIncludesStripMined) {
2898 nnn = old_new[le->_idx];
2899 } else {
2900 newuse = old_new[cle_out->_idx];
2901 }
2902 }
2903 }
2904 if (newuse == nullptr) {
2905 newuse = use->clone();
2906 }
2907
2908 // Clone the loop exit control projection
2909 if (C->do_vector_loop() && cm != nullptr) {
2910 cm->verify_insert_and_clone(use, newuse, cm->clone_idx());
2911 }
2912 newuse->set_req(0,nnn);
2913 _igvn.register_new_node_with_optimizer(newuse);
2914 set_loop(newuse, use_loop);
2915 set_idom(newuse, nnn, dom_depth(nnn) + 1 );
2916
2917 // We need a Region to merge the exit from the peeled body and the
2918 // exit from the old loop body.
2919 RegionNode *r = new RegionNode(3);
2920 uint dd_r = MIN2(dom_depth(newuse), dom_depth(use));
2921 assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" );
2922
2923 // The original user of 'use' uses 'r' instead.
2924 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
2925 Node* useuse = use->last_out(l);
2926 _igvn.rehash_node_delayed(useuse);
2927 uint uses_found = 0;
2928 if (useuse->in(0) == use) {
2929 useuse->set_req(0, r);
2930 uses_found++;
2931 if (useuse->is_CFG()) {
2932 // This is not a dom_depth > dd_r because when new
2933 // control flow is constructed by a loop opt, a node and
2934 // its dominator can end up at the same dom_depth
2935 assert(dom_depth(useuse) >= dd_r, "");
2936 set_idom(useuse, r, dom_depth(useuse));
2937 }
2938 }
2939 for (uint k = 1; k < useuse->req(); k++) {
2940 if( useuse->in(k) == use ) {
2941 useuse->set_req(k, r);
2942 uses_found++;
2943 if (useuse->is_Loop() && k == LoopNode::EntryControl) {
2944 // This is not a dom_depth > dd_r because when new
2945 // control flow is constructed by a loop opt, a node
2946 // and its dominator can end up at the same dom_depth
2947 assert(dom_depth(useuse) >= dd_r , "");
2948 set_idom(useuse, r, dom_depth(useuse));
2949 }
2950 }
2951 }
2952 l -= uses_found; // we deleted 1 or more copies of this edge
2953 }
2954
2955 assert(use->is_Proj(), "loop exit should be projection");
2956 // replace_node_and_forward_ctrl() below moves all nodes that are:
2957 // - control dependent on the loop exit or
2958 // - have control set to the loop exit
2959 // below the post-loop merge point.
2960 // replace_node_and_forward_ctrl() takes a dead control as first input.
2961 // To make it possible to use it, the loop exit projection is cloned and becomes the
2962 // new exit projection. The initial one becomes dead and is "replaced" by the region.
2963 Node* use_clone = use->clone();
2964 register_control(use_clone, use_loop, idom(use), dom_depth(use));
2965 // Now finish up 'r'
2966 r->set_req(1, newuse);
2967 r->set_req(2, use_clone);
2968 _igvn.register_new_node_with_optimizer(r);
2969 set_loop(r, use_loop);
2970 set_idom(r, (side_by_side_idom == nullptr) ? newuse->in(0) : side_by_side_idom, dd_r);
2971 replace_node_and_forward_ctrl(use, r);
2972 // Map the (cloned) old use to the new merge point
2973 old_new.map(use_clone->_idx, r);
2974 } // End of if a loop-exit test
2975 }
2976 }
2977 }
2978
2979 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
2980 IdealLoopTree* parent, bool partial) {
2981 for(uint i = 0; i < body.size(); i++ ) {
2982 Node *old = body.at(i);
2983 Node *nnn = old_new[old->_idx];
2984 // Fix CFG/Loop controlling the new node
2985 if (has_ctrl(old)) {
2986 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2987 } else {
2988 set_loop(nnn, parent);
2989 if (old->outcnt() > 0) {
2990 Node* dom = idom(old);
2991 if (old_new[dom->_idx] != nullptr) {
2992 dom = old_new[dom->_idx];
          set_idom(nnn, dom, dd);
2994 }
2995 }
2996 }
2997 // Correct edges to the new node
2998 for (uint j = 0; j < nnn->req(); j++) {
2999 Node *n = nnn->in(j);
3000 if (n != nullptr) {
3001 IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n);
3002 if (loop->is_member(old_in_loop)) {
3003 if (old_new[n->_idx] != nullptr) {
3004 nnn->set_req(j, old_new[n->_idx]);
3005 } else {
3006 assert(!body.contains(n), "");
3007 assert(partial, "node not cloned");
3008 }
3009 }
3010 }
3011 }
3012 _igvn.hash_find_insert(nnn);
3013 }
3014 }
3015
3016 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) {
3017 for (uint i = 0; i < body.size(); i++) {
3018 Node* old = body.at(i);
3019 Node* nnn = old->clone();
3020 old_new.map(old->_idx, nnn);
3021 if (C->do_vector_loop() && cm != nullptr) {
3022 cm->verify_insert_and_clone(old, nnn, cm->clone_idx());
3023 }
3024 _igvn.register_new_node_with_optimizer(nnn);
3025 }
3026 }
3027
3028
3029 //---------------------- stride_of_possible_iv -------------------------------------
3030 // Looks for an iff/bool/comp with one operand of the compare
3031 // being a cycle involving an add and a phi,
3032 // with an optional truncation (left-shift followed by a right-shift)
3033 // of the add. Returns zero if not an iv.
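//
// As an illustrative sketch (names hypothetical), the matched shape
// corresponds to source such as:
//
//   int i = init;
//   while (i < limit) {    // iff/bool/cmp with loop-invariant 'limit'
//     ...
//     i = i + 4;           // AddI in a cycle with the Phi; stride is 4
//   }
//
// A truncated increment such as 'i = (short) (i + 4)' appears as the AddI
// wrapped in a left-shift/right-shift pair, which TruncatedIncrement::build()
// below also recognizes.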
3034 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
3035 const TypeInteger* ttype = nullptr;
3036 if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) {
3037 return 0;
3038 }
3039 BoolNode* bl = iff->in(1)->as_Bool();
3040 Node* cmp = bl->in(1);
  if (cmp == nullptr || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
3042 return 0;
3043 }
3044 // Must have an invariant operand
3045 if (ctrl_is_member(get_loop(iff), cmp->in(2))) {
3046 return 0;
3047 }
3048 Node* add2 = nullptr;
3049 Node* cmp1 = cmp->in(1);
3050 if (cmp1->is_Phi()) {
3051 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
3052 Node* phi = cmp1;
3053 for (uint i = 1; i < phi->req(); i++) {
3054 Node* in = phi->in(i);
3055 CountedLoopConverter::TruncatedIncrement add(T_INT);
3056 add.build(in);
3057 if (add.is_valid() && add.incr()->in(1) == phi) {
3058 add2 = add.incr()->in(2);
3059 break;
3060 }
3061 }
3062 } else {
3063 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
3064 Node* addtrunc = cmp1;
3065 CountedLoopConverter::TruncatedIncrement add(T_INT);
3066 add.build(addtrunc);
3067 if (add.is_valid() && add.incr()->in(1)->is_Phi()) {
3068 Node* phi = add.incr()->in(1);
3069 for (uint i = 1; i < phi->req(); i++) {
3070 if (phi->in(i) == addtrunc) {
3071 add2 = add.incr()->in(2);
3072 break;
3073 }
3074 }
3075 }
3076 }
3077 if (add2 != nullptr) {
3078 const TypeInt* add2t = _igvn.type(add2)->is_int();
3079 if (add2t->is_con()) {
3080 return add2t->get_con();
3081 }
3082 }
3083 return 0;
3084 }
3085
3086
3087 //---------------------- stay_in_loop -------------------------------------
// Return the (unique) control output node that's in the loop (if it exists).
Node* PhaseIdealLoop::stay_in_loop(Node* n, IdealLoopTree* loop) {
  Node* unique = nullptr;
  if (n == nullptr) {
    return nullptr;
  }
3092 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3093 Node* use = n->fast_out(i);
3094 if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
3095 if (unique != nullptr) {
3096 return nullptr;
3097 }
3098 unique = use;
3099 }
3100 }
3101 return unique;
3102 }
3103
3104 //------------------------------ register_node -------------------------------------
3105 // Utility to register node "n" with PhaseIdealLoop
3106 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) {
3107 _igvn.register_new_node_with_optimizer(n);
3108 loop->_body.push(n);
3109 if (n->is_CFG()) {
3110 set_loop(n, loop);
3111 set_idom(n, pred, ddepth);
3112 } else {
3113 set_ctrl(n, pred);
3114 }
3115 }
3116
3117 //------------------------------ proj_clone -------------------------------------
3118 // Utility to create an if-projection
3119 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
3120 ProjNode* c = p->clone()->as_Proj();
3121 c->set_req(0, iff);
3122 return c;
3123 }
3124
3125 //------------------------------ short_circuit_if -------------------------------------
3126 // Force the iff control output to be the live_proj
3127 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
3128 guarantee(live_proj != nullptr, "null projection");
3129 int proj_con = live_proj->_con;
3130 assert(proj_con == 0 || proj_con == 1, "false or true projection");
3131 Node* con = intcon(proj_con);
  if (iff != nullptr) {
3133 iff->set_req(1, con);
3134 }
3135 return con;
3136 }
3137
3138 //------------------------------ insert_if_before_proj -------------------------------------
3139 // Insert a new if before an if projection (* - new node)
3140 //
3141 // before
3142 // if(test)
3143 // / \
3144 // v v
3145 // other-proj proj (arg)
3146 //
3147 // after
3148 // if(test)
3149 // / \
3150 // / v
3151 // | * proj-clone
3152 // v |
3153 // other-proj v
3154 // * new_if(relop(cmp[IU](left,right)))
3155 // / \
3156 // v v
3157 // * new-proj proj
3158 // (returned)
3159 //
3160 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
3161 IfNode* iff = proj->in(0)->as_If();
3162 IdealLoopTree *loop = get_loop(proj);
3163 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3164 uint ddepth = dom_depth(proj);
3165
3166 _igvn.rehash_node_delayed(iff);
3167 _igvn.rehash_node_delayed(proj);
3168
3169 proj->set_req(0, nullptr); // temporary disconnect
3170 ProjNode* proj2 = proj_clone(proj, iff);
3171 register_node(proj2, loop, iff, ddepth);
3172
3173 Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
3174 register_node(cmp, loop, proj2, ddepth);
3175
3176 BoolNode* bol = new BoolNode(cmp, relop);
3177 register_node(bol, loop, proj2, ddepth);
3178
3179 int opcode = iff->Opcode();
3180 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
3181 IfNode* new_if = IfNode::make_with_same_profile(iff, proj2, bol);
3182 register_node(new_if, loop, proj2, ddepth);
3183
3184 proj->set_req(0, new_if); // reattach
3185 set_idom(proj, new_if, ddepth);
3186
3187 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
3188 guarantee(new_exit != nullptr, "null exit node");
3189 register_node(new_exit, get_loop(other_proj), new_if, ddepth);
3190
3191 return new_exit;
3192 }
3193
3194 //------------------------------ insert_region_before_proj -------------------------------------
3195 // Insert a region before an if projection (* - new node)
3196 //
3197 // before
3198 // if(test)
3199 // / |
3200 // v |
3201 // proj v
3202 // other-proj
3203 //
3204 // after
3205 // if(test)
3206 // / |
3207 // v |
3208 // * proj-clone v
3209 // | other-proj
3210 // v
3211 // * new-region
3212 // |
3213 // v
3214 // * dum_if
3215 // / \
3216 // v \
3217 // * dum-proj v
3218 // proj
3219 //
3220 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
3221 IfNode* iff = proj->in(0)->as_If();
3222 IdealLoopTree *loop = get_loop(proj);
3223 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3224 uint ddepth = dom_depth(proj);
3225
3226 _igvn.rehash_node_delayed(iff);
3227 _igvn.rehash_node_delayed(proj);
3228
3229 proj->set_req(0, nullptr); // temporary disconnect
3230 ProjNode* proj2 = proj_clone(proj, iff);
3231 register_node(proj2, loop, iff, ddepth);
3232
3233 RegionNode* reg = new RegionNode(2);
3234 reg->set_req(1, proj2);
3235 register_node(reg, loop, iff, ddepth);
3236
3237 IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt);
3238 register_node(dum_if, loop, reg, ddepth);
3239
3240 proj->set_req(0, dum_if); // reattach
3241 set_idom(proj, dum_if, ddepth);
3242
3243 ProjNode* dum_proj = proj_clone(other_proj, dum_if);
3244 register_node(dum_proj, loop, dum_if, ddepth);
3245
3246 return reg;
3247 }
3248
3249 // Idea
3250 // ----
3251 // Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops
3252 // require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel
3253 // with. Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as new
3254 // loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we've used the unsigned
3255 // test alone instead:
3256 //
3257 // Before Partial Peeling:
3258 // Loop:
3259 // <peeled section>
3260 // Split off signed loop exit test
3261 // <-- CUT HERE -->
3262 // Unchanged unsigned loop exit test
3263 // <rest of unpeeled section>
3264 // goto Loop
3265 //
3266 // After Partial Peeling:
3267 // <cloned peeled section>
3268 // Cloned split off signed loop exit test
3269 // Loop:
3270 // Unchanged unsigned loop exit test
3271 // <rest of unpeeled section>
3272 // <peeled section>
3273 // Split off signed loop exit test
3274 // goto Loop
3275 //
3276 // Details
3277 // -------
3278 // Before:
3279 // if (i <u limit) Unsigned loop exit condition
3280 // / |
3281 // v v
3282 // exit-proj stay-in-loop-proj
3283 //
3284 // Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
3285 // before the CmpU on the stay-in-loop path and keep both tests:
3286 //
//          if (i < limit)     Signed loop exit test
3288 // / |
3289 // / if (i <u limit) Unsigned loop exit test
3290 // / / |
3291 // v v v
3292 // exit-region stay-in-loop-proj
3293 //
3294 // Implementation
3295 // --------------
3296 // We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
3297 // loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
3298 // exit tests is preserved, and their loop nesting is correct.
3299 //
3300 // To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit
// test above it and kill the original unsigned loop exit test by setting its condition to a constant
// (i.e. stay-in-loop-const in the graph below) such that IGVN can fold it later:
3303 //
3304 // if (stay-in-loop-const) Killed original unsigned loop exit test
3305 // / |
3306 // / v
3307 // / if (i < limit) Split off signed loop exit test
3308 // / / |
3309 // / / v
3310 // / / if (i <u limit) Cloned unsigned loop exit test
3311 // / / / |
3312 // v v v |
3313 // exit-region |
3314 // | |
3315 // dummy-if |
3316 // / | |
3317 // dead | |
3318 // v v
3319 // exit-proj stay-in-loop-proj
3320 //
// Note: The dummy-if is inserted so that a region can be placed between the unsigned loop exit test that is about to
// be killed and its exit projection, merging the loop exits while keeping the exit projection (also see
// insert_region_before_proj()).
3323 //
3324 // Requirements
3325 // ------------
3326 // Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
3327 // the same as before with only a single unsigned test. This is only possible if certain requirements are met.
3328 // Otherwise, we need to bail out (see comments in the code below).
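//
// As a source-level sketch (illustrative only), the unsigned loop exit test
// handled here can stem from code such as:
//
//   int i = init;
//   do {
//     ...
//     i += stride;
//   } while (Integer.compareUnsigned(i, limit) < 0);   // i <u limit
//
// For stride > 0 the split off signed test is 'i >= limit', for stride < 0 it
// is 'i < 0'; in both cases the signed test implies the unsigned one when
// limit >= 0, as shown below.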
3329 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
3330 const bool Signed = true;
3331 const bool Unsigned = false;
3332
3333 BoolNode* bol = if_cmpu->in(1)->as_Bool();
3334 if (bol->_test._test != BoolTest::lt) {
3335 return nullptr;
3336 }
3337 CmpNode* cmpu = bol->in(1)->as_Cmp();
3338 assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");
3339
3340 int stride = stride_of_possible_iv(if_cmpu);
3341 if (stride == 0) {
3342 return nullptr;
3343 }
3344
3345 Node* lp_proj = stay_in_loop(if_cmpu, loop);
3346 guarantee(lp_proj != nullptr, "null loop node");
3347
3348 ProjNode* lp_continue = lp_proj->as_Proj();
3349 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
3350 if (!lp_exit->is_IfFalse()) {
3351 // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit).
3352 // We therefore can't add a single exit condition.
3353 return nullptr;
3354 }
3355 // The unsigned loop exit condition is
3356 // !(i <u limit)
3357 // = i >=u limit
3358 //
3359 // First, we note that for any x for which
3360 // 0 <= x <= INT_MAX
3361 // we can convert x to an unsigned int and still get the same guarantee:
3362 // 0 <= (uint) x <= INT_MAX = (uint) INT_MAX
3363 // 0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX (LEMMA)
3364 //
3365 // With that in mind, if
3366 // limit >= 0 (COND)
3367 // then the unsigned loop exit condition
3368 // i >=u limit (ULE)
3369 // is equivalent to
3370 // i < 0 || i >= limit (SLE-full)
  // because either i is negative and therefore always greater than INT_MAX when converted to unsigned
  //   (uint) i >=u INT_MAX >= limit >= 0
3373 // or otherwise
3374 // i >= limit >= 0
3375 // holds due to (LEMMA).
3376 //
3377 // For completeness, a counterexample with limit < 0:
3378 // Assume i = -3 and limit = -2:
3379 // i < 0
  //   -3 < 0
3381 // is true and thus also "i < 0 || i >= limit". But
3382 // i >=u limit
3383 // -3 >=u -2
3384 // is false.
3385 Node* limit = cmpu->in(2);
3386 const TypeInt* type_limit = _igvn.type(limit)->is_int();
3387 if (type_limit->_lo < 0) {
3388 return nullptr;
3389 }
3390
3391 // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
3392 // stride < 0:
3393 // i < 0 (SLE = SLE-negative)
3394 // stride > 0:
3395 // i >= limit (SLE = SLE-positive)
3396 // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
3397 //
3398 // Loop:
3399 // <peeled section>
3400 // i >= limit (SLE-positive)
3401 // <-- CUT HERE -->
3402 // i >=u limit (ULE)
3403 // <rest of unpeeled section>
3404 // goto Loop
3405 //
3406 // We exit the loop if:
3407 // (SLE) is true OR (ULE) is true
3408 // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
3409 // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
3410 // (SLE) IMPLIES (ULE)
3411 // This indeed holds when (COND) is given:
3412 // - stride > 0:
3413 // i >= limit // (SLE = SLE-positive)
3414 // i >= limit >= 0 // (COND)
3415 // i >=u limit >= 0 // (LEMMA)
3416 // which is the unsigned loop exit condition (ULE).
3417 // - stride < 0:
3418 // i < 0 // (SLE = SLE-negative)
  //     (uint) i >u INT_MAX     // (NEG) all negative values are greater than INT_MAX when converted to unsigned
  //     INT_MAX >= limit >= 0   // (COND)
  //     INT_MAX >=u limit >= 0  // (LEMMA)
3422 // and thus from (NEG) and (LEMMA):
3423 // i >=u limit
3424 // which is the unsigned loop exit condition (ULE).
3425 //
3426 //
3427 // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0):
3428 // <cloned peeled section>
3429 // i >= limit (SLE-positive)
3430 // Loop:
3431 // i >=u limit (ULE)
3432 // <rest of unpeeled section>
3433 // <peeled section>
3434 // i >= limit (SLE-positive)
3435 // goto Loop
3436 Node* rhs_cmpi;
3437 if (stride > 0) {
3438 rhs_cmpi = limit; // For i >= limit
3439 } else {
3440 rhs_cmpi = makecon(TypeInt::ZERO); // For i < 0
3441 }
3442 // Create a new region on the exit path
3443 RegionNode* reg = insert_region_before_proj(lp_exit);
3444 guarantee(reg != nullptr, "null region node");
3445
3446 // Clone the if-cmpu-true-false using a signed compare
3447 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
3448 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue);
3449 reg->add_req(cmpi_exit);
3450
3451 // Clone the if-cmpu-true-false
3452 BoolTest::mask rel_u = bol->_test._test;
3453 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
3454 reg->add_req(cmpu_exit);
3455
3456 // Force original if to stay in loop.
3457 short_circuit_if(if_cmpu, lp_continue);
3458
3459 return cmpi_exit->in(0)->as_If();
3460 }
3461
3462 //------------------------------ remove_cmpi_loop_exit -------------------------------------
3463 // Remove a previously inserted signed compare loop exit.
3464 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
3465 Node* lp_proj = stay_in_loop(if_cmp, loop);
3466 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
3467 stay_in_loop(lp_proj, loop)->is_If() &&
3468 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
3469 Node* con = makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
3470 if_cmp->set_req(1, con);
3471 }
3472
3473 //------------------------------ scheduled_nodelist -------------------------------------
3474 // Create a post order schedule of nodes that are in the
3475 // "member" set. The list is returned in "sched".
3476 // The first node in "sched" is the loop head, followed by
3477 // nodes which have no inputs in the "member" set, and then
3478 // followed by the nodes that have an immediate input dependence
3479 // on a node in "sched".
void PhaseIdealLoop::scheduled_nodelist(IdealLoopTree* loop, VectorSet& member, Node_List& sched) {
3481
3482 assert(member.test(loop->_head->_idx), "loop head must be in member set");
3483 VectorSet visited;
3484 Node_Stack nstack(loop->_body.size());
3485
3486 Node* n = loop->_head; // top of stack is cached in "n"
3487 uint idx = 0;
3488 visited.set(n->_idx);
3489
3490 // Initially push all with no inputs from within member set
  for (uint i = 0; i < loop->_body.size(); i++) {
    Node* elt = loop->_body.at(i);
3493 if (member.test(elt->_idx)) {
3494 bool found = false;
3495 for (uint j = 0; j < elt->req(); j++) {
3496 Node* def = elt->in(j);
3497 if (def && member.test(def->_idx) && def != elt) {
3498 found = true;
3499 break;
3500 }
3501 }
3502 if (!found && elt != loop->_head) {
3503 nstack.push(n, idx);
3504 n = elt;
3505 assert(!visited.test(n->_idx), "not seen yet");
3506 visited.set(n->_idx);
3507 }
3508 }
3509 }
3510
3511 // traverse out's that are in the member set
3512 while (true) {
3513 if (idx < n->outcnt()) {
3514 Node* use = n->raw_out(idx);
3515 idx++;
3516 if (!visited.test_set(use->_idx)) {
3517 if (member.test(use->_idx)) {
3518 nstack.push(n, idx);
3519 n = use;
3520 idx = 0;
3521 }
3522 }
3523 } else {
3524 // All outputs processed
3525 sched.push(n);
3526 if (nstack.is_empty()) break;
3527 n = nstack.node();
3528 idx = nstack.index();
3529 nstack.pop();
3530 }
3531 }
3532 }
3533
3534
3535 //------------------------------ has_use_in_set -------------------------------------
3536 // Has a use in the vector set
bool PhaseIdealLoop::has_use_in_set(Node* n, VectorSet& vset) {
3538 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3539 Node* use = n->fast_out(j);
3540 if (vset.test(use->_idx)) {
3541 return true;
3542 }
3543 }
3544 return false;
3545 }
3546
3547
3548 //------------------------------ has_use_internal_to_set -------------------------------------
// Has a use internal to the vector set (i.e. not in a phi at the loop head)
bool PhaseIdealLoop::has_use_internal_to_set(Node* n, VectorSet& vset, IdealLoopTree* loop) {
3551 Node* head = loop->_head;
3552 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3553 Node* use = n->fast_out(j);
3554 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
3555 return true;
3556 }
3557 }
3558 return false;
3559 }
3560
3561
3562 //------------------------------ clone_for_use_outside_loop -------------------------------------
3563 // clone "n" for uses that are outside of loop
int PhaseIdealLoop::clone_for_use_outside_loop(IdealLoopTree* loop, Node* n, Node_List& worklist) {
3565 int cloned = 0;
3566 assert(worklist.size() == 0, "should be empty");
3567 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3568 Node* use = n->fast_out(j);
    if (!loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use))) {
3570 worklist.push(use);
3571 }
3572 }
3573
3574 if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor,
3575 "Too many clones required in clone_for_use_outside_loop in partial peeling")) {
3576 return -1;
3577 }
3578
  while (worklist.size() > 0) {
    Node* use = worklist.pop();
3581 if (!has_node(use) || use->in(0) == C->top()) continue;
3582 uint j;
3583 for (j = 0; j < use->req(); j++) {
3584 if (use->in(j) == n) break;
3585 }
3586 assert(j < use->req(), "must be there");
3587
3588 // clone "n" and insert it between the inputs of "n" and the use outside the loop
3589 Node* n_clone = n->clone();
3590 _igvn.replace_input_of(use, j, n_clone);
3591 cloned++;
3592 Node* use_c;
3593 if (!use->is_Phi()) {
3594 use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
3595 } else {
3596 // Use in a phi is considered a use in the associated predecessor block
3597 use_c = use->in(0)->in(j);
3598 }
3599 set_ctrl(n_clone, use_c);
3600 assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
3601 get_loop(use_c)->_body.push(n_clone);
3602 _igvn.register_new_node_with_optimizer(n_clone);
3603 #ifndef PRODUCT
3604 if (TracePartialPeeling) {
3605 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
3606 }
3607 #endif
3608 }
3609 return cloned;
3610 }
3611
3612
3613 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
3614 // clone "n" for special uses that are in the not_peeled region.
3615 // If these def-uses occur in separate blocks, the code generator
3616 // marks the method as not compilable. For example, if a "BoolNode"
3617 // is in a different basic block than the "IfNode" that uses it, then
3618 // the compilation is aborted in the code generator.
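// Illustrative case: a Bool in the peel region feeding an If in the not_peel
// region would end up in a different basic block than its If after peeling.
// The clone created here is placed in the not_peel region next to such uses,
// so every bool->if (and similar) pairing stays within one block.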
void PhaseIdealLoop::clone_for_special_use_inside_loop(IdealLoopTree* loop, Node* n,
                                                       VectorSet& not_peel, Node_List& sink_list, Node_List& worklist) {
3621 if (n->is_Phi() || n->is_Load()) {
3622 return;
3623 }
3624 assert(worklist.size() == 0, "should be empty");
3625 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3626 Node* use = n->fast_out(j);
3627 if ( not_peel.test(use->_idx) &&
3628 (use->is_If() || use->is_CMove() || use->is_Bool() || use->is_OpaqueInitializedAssertionPredicate()) &&
3629 use->in(1) == n) {
3630 worklist.push(use);
3631 }
3632 }
3633 if (worklist.size() > 0) {
3634 // clone "n" and insert it between inputs of "n" and the use
3635 Node* n_clone = n->clone();
3636 loop->_body.push(n_clone);
3637 _igvn.register_new_node_with_optimizer(n_clone);
3638 set_ctrl(n_clone, get_ctrl(n));
3639 sink_list.push(n_clone);
3640 not_peel.set(n_clone->_idx);
3641 #ifndef PRODUCT
3642 if (TracePartialPeeling) {
3643 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
3644 }
3645 #endif
    while (worklist.size() > 0) {
      Node* use = worklist.pop();
3648 _igvn.rehash_node_delayed(use);
3649 for (uint j = 1; j < use->req(); j++) {
3650 if (use->in(j) == n) {
3651 use->set_req(j, n_clone);
3652 }
3653 }
3654 }
3655 }
3656 }
3657
3658
3659 //------------------------------ insert_phi_for_loop -------------------------------------
3660 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
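// Sketch of the result (assuming "lp" is a two-entry LoopNode): use->in(idx)
// is rewired to Phi(lp, lp_entry_val, back_edge_val); an identical existing
// phi is reused via the IGVN hash table if one is already present.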
void PhaseIdealLoop::insert_phi_for_loop(Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp) {
  Node* phi = PhiNode::make(lp, back_edge_val);
3663 phi->set_req(LoopNode::EntryControl, lp_entry_val);
3664 // Use existing phi if it already exists
3665 Node *hit = _igvn.hash_find_insert(phi);
  if (hit == nullptr) {
3667 _igvn.register_new_node_with_optimizer(phi);
3668 set_ctrl(phi, lp);
3669 } else {
3670 // Remove the new phi from the graph and use the hit
3671 _igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Speculative);
3672 phi = hit;
3673 }
3674 _igvn.replace_input_of(use, idx, phi);
3675 }
3676
3677 #ifdef ASSERT
3678 //------------------------------ is_valid_loop_partition -------------------------------------
3679 // Validate the loop partition sets: peel and not_peel
bool PhaseIdealLoop::is_valid_loop_partition(IdealLoopTree* loop, VectorSet& peel, Node_List& peel_list,
                                             VectorSet& not_peel) {
3682 uint i;
3683 // Check that peel_list entries are in the peel set
3684 for (i = 0; i < peel_list.size(); i++) {
3685 if (!peel.test(peel_list.at(i)->_idx)) {
3686 return false;
3687 }
3688 }
  // Check that all loop members are in either the peel set or the not_peel set
3690 for (i = 0; i < loop->_body.size(); i++ ) {
3691 Node *def = loop->_body.at(i);
3692 uint di = def->_idx;
3693 // Check that peel set elements are in peel_list
3694 if (peel.test(di)) {
3695 if (not_peel.test(di)) {
3696 return false;
3697 }
3698 // Must be in peel_list also
3699 bool found = false;
3700 for (uint j = 0; j < peel_list.size(); j++) {
3701 if (peel_list.at(j)->_idx == di) {
3702 found = true;
3703 break;
3704 }
3705 }
3706 if (!found) {
3707 return false;
3708 }
3709 } else if (not_peel.test(di)) {
3710 if (peel.test(di)) {
3711 return false;
3712 }
3713 } else {
3714 return false;
3715 }
3716 }
3717 return true;
3718 }
3719
3720 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
3721 // Ensure a use outside of loop is of the right form
bool PhaseIdealLoop::is_valid_clone_loop_exit_use(IdealLoopTree* loop, Node* use, uint exit_idx) {
  Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
3724 return (use->is_Phi() &&
3725 use_c->is_Region() && use_c->req() == 3 &&
3726 (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
3727 use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
3728 use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
3729 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
3730 }
3731
3732 //------------------------------ is_valid_clone_loop_form -------------------------------------
3733 // Ensure that all uses outside of loop are of the right form
bool PhaseIdealLoop::is_valid_clone_loop_form(IdealLoopTree* loop, Node_List& peel_list,
3735 uint orig_exit_idx, uint clone_exit_idx) {
3736 uint len = peel_list.size();
3737 for (uint i = 0; i < len; i++) {
3738 Node *def = peel_list.at(i);
3739
3740 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3741 Node *use = def->fast_out(j);
3742 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3743 if (!loop->is_member(get_loop(use_c))) {
3744 // use is not in the loop, check for correct structure
3745 if (use->in(0) == def) {
3746 // Okay
3747 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
3748 return false;
3749 }
3750 }
3751 }
3752 }
3753 return true;
3754 }
3755 #endif
3756
3757 //------------------------------ partial_peel -------------------------------------
3758 // Partially peel (aka loop rotation) the top portion of a loop (called
3759 // the peel section below) by cloning it and placing one copy just before
3760 // the new loop head and the other copy at the bottom of the new loop.
3761 //
3762 // before after where it came from
3763 //
3764 // stmt1 stmt1
3765 // loop: stmt2 clone
3766 // stmt2 if condA goto exitA clone
3767 // if condA goto exitA new_loop: new
3768 // stmt3 stmt3 clone
3769 // if !condB goto loop if condB goto exitB clone
3770 // exitB: stmt2 orig
3771 // stmt4 if !condA goto new_loop orig
3772 // exitA: goto exitA
3773 // exitB:
3774 // stmt4
3775 // exitA:
3776 //
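// As a source-level sketch of the same rotation (illustrative only):
//
//   while (true) {
//     stmt2;
//     if (condA) break;    // exit test on probable iv: the cut point
//     stmt3;
//     if (!condB) break;
//   }
//
// becomes a loop that is entered at stmt3 and whose bottom test is the
// original condA test, i.e. a bottom-tested shape that later loop opts can
// turn into a counted loop.
//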
// Step 1: find the cut point: an exit test on a probable
//         induction variable.
3779 // Step 2: schedule (with cloning) operations in the peel
3780 // section that can be executed after the cut into
3781 // the section that is not peeled. This may need
3782 // to clone operations into exit blocks. For
3783 // instance, a reference to A[i] in the not-peel
3784 // section and a reference to B[i] in an exit block
3785 // may cause a left-shift of i by 2 to be placed
3786 // in the peel block. This step will clone the left
3787 // shift into the exit block and sink the left shift
3788 // from the peel to the not-peel section.
3789 // Step 3: clone the loop, retarget the control, and insert
3790 // phis for values that are live across the new loop
3791 // head. This is very dependent on the graph structure
3792 // from clone_loop. It creates region nodes for
3793 // exit control and associated phi nodes for values
3794 // flow out of the loop through that exit. The region
3795 // node is dominated by the clone's control projection.
3796 // So the clone's peel section is placed before the
//         new loop head, and the clone's not-peel section
//         forms the top part of the new loop. The original
3799 // peel section forms the tail of the new loop.
3800 // Step 4: update the dominator tree and recompute the
3801 // dominator depth.
3802 //
3803 // orig
3804 //
3805 // stmt1
3806 // |
3807 // v
3808 // predicates
3809 // |
3810 // v
3811 // loop<----+
3812 // | |
3813 // stmt2 |
3814 // | |
3815 // v |
3816 // ifA |
3817 // / | |
3818 // v v |
3819 // false true ^ <-- last_peel
3820 // / | |
3821 // / ===|==cut |
3822 // / stmt3 | <-- first_not_peel
3823 // / | |
3824 // | v |
3825 // v ifB |
3826 // exitA: / \ |
3827 // / \ |
3828 // v v |
3829 // false true |
3830 // / \ |
3831 // / ----+
3832 // |
3833 // v
3834 // exitB:
3835 // stmt4
3836 //
3837 //
3838 // after clone loop
3839 //
3840 // stmt1
3841 // |
3842 // v
3843 // predicates
3844 // / \
3845 // clone / \ orig
3846 // / \
3847 // / \
3848 // v v
3849 // +---->loop loop<----+
3850 // | | | |
3851 // | stmt2 stmt2 |
3852 // | | | |
3853 // | v v |
3854 // | ifA ifA |
3855 // | | \ / | |
3856 // | v v v v |
3857 // ^ true false false true ^ <-- last_peel
3858 // | | ^ \ / | |
3859 // | cut==|== \ \ / ===|==cut |
3860 // | stmt3 \ \ / stmt3 | <-- first_not_peel
3861 // | | dom | | | |
3862 // | v \ 1v v2 v |
3863 // | ifB regionA ifB |
3864 // | / \ | / \ |
3865 // | / \ v / \ |
3866 // | v v exitA: v v |
3867 // | true false false true |
3868 // | / ^ \ / \ |
3869 // +---- \ \ / ----+
3870 // dom \ /
3871 // \ 1v v2
3872 // regionB
3873 // |
3874 // v
3875 // exitB:
3876 // stmt4
3877 //
3878 //
3879 // after partial peel
3880 //
3881 // stmt1
3882 // |
3883 // v
3884 // predicates
3885 // /
3886 // clone / orig
3887 // / TOP
3888 // / \
3889 // v v
3890 // TOP->loop loop----+
3891 // | | |
3892 // stmt2 stmt2 |
3893 // | | |
3894 // v v |
3895 // ifA ifA |
3896 // | \ / | |
3897 // v v v v |
3898 // true false false true | <-- last_peel
3899 // | ^ \ / +------|---+
3900 // +->newloop \ \ / === ==cut | |
3901 // | stmt3 \ \ / TOP | |
3902 // | | dom | | stmt3 | | <-- first_not_peel
3903 // | v \ 1v v2 v | |
3904 // | ifB regionA ifB ^ v
3905 // | / \ | / \ | |
3906 // | / \ v / \ | |
3907 // | v v exitA: v v | |
3908 // | true false false true | |
3909 // | / ^ \ / \ | |
3910 // | | \ \ / v | |
3911 // | | dom \ / TOP | |
3912 // | | \ 1v v2 | |
3913 // ^ v regionB | |
3914 // | | | | |
3915 // | | v ^ v
3916 // | | exitB: | |
3917 // | | stmt4 | |
3918 // | +------------>-----------------+ |
3919 // | |
3920 // +-----------------<---------------------+
3921 //
3922 //
3923 // final graph
3924 //
3925 // stmt1
3926 // |
3927 // v
3928 // predicates
3929 // |
3930 // v
3931 // stmt2 clone
3932 // |
3933 // v
3934 // ........> ifA clone
3935 // : / |
3936 // dom / |
3937 // : v v
3938 // : false true
3939 // : | |
3940 // : | v
3941 // : | newloop<-----+
3942 // : | | |
3943 // : | stmt3 clone |
3944 // : | | |
3945 // : | v |
3946 // : | ifB |
3947 // : | / \ |
3948 // : | v v |
3949 // : | false true |
3950 // : | | | |
3951 // : | v stmt2 |
3952 // : | exitB: | |
3953 // : | stmt4 v |
3954 // : | ifA orig |
3955 // : | / \ |
3956 // : | / \ |
3957 // : | v v |
3958 // : | false true |
3959 // : | / \ |
3960 // : v v -----+
3961 // RegionA
3962 // |
3963 // v
3964 // exitA
3965 //
bool PhaseIdealLoop::partial_peel(IdealLoopTree* loop, Node_List& old_new) {
3967
3968 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3969 if (!loop->_head->is_Loop()) {
3970 return false;
3971 }
3972 LoopNode *head = loop->_head->as_Loop();
3973
3974 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3975 return false;
3976 }
3977
3978 // Check for complex exit control
3979 for (uint ii = 0; ii < loop->_body.size(); ii++) {
3980 Node *n = loop->_body.at(ii);
3981 int opc = n->Opcode();
3982 if (n->is_Call() ||
3983 opc == Op_Catch ||
3984 opc == Op_CatchProj ||
3985 opc == Op_Jump ||
3986 opc == Op_JumpProj) {
3987 #ifndef PRODUCT
3988 if (TracePartialPeeling) {
3989 tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3990 }
3991 #endif
3992 return false;
3993 }
3994 }
3995
3996 int dd = dom_depth(head);
3997
3998 // Step 1: find cut point
3999
4000 // Walk up dominators to loop head looking for first loop exit
4001 // which is executed on every path thru loop.
4002 IfNode *peel_if = nullptr;
4003 IfNode *peel_if_cmpu = nullptr;
4004
4005 Node *iff = loop->tail();
4006 while (iff != head) {
4007 if (iff->is_If()) {
4008 Node *ctrl = get_ctrl(iff->in(1));
4009 if (ctrl->is_top()) return false; // Dead test on live IF.
4010 // If loop-varying exit-test, check for induction variable
4011 if (loop->is_member(get_loop(ctrl)) &&
4012 loop->is_loop_exit(iff) &&
4013 is_possible_iv_test(iff)) {
4014 Node* cmp = iff->in(1)->in(1);
4015 if (cmp->Opcode() == Op_CmpI) {
4016 peel_if = iff->as_If();
4017 } else {
4018 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
4019 peel_if_cmpu = iff->as_If();
4020 }
4021 }
4022 }
4023 iff = idom(iff);
4024 }
4025
4026 // Prefer signed compare over unsigned compare.
4027 IfNode* new_peel_if = nullptr;
4028 if (peel_if == nullptr) {
4029 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) {
4030 return false; // No peel point found
4031 }
4032 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
4033 if (new_peel_if == nullptr) {
4034 return false; // No peel point found
4035 }
4036 peel_if = new_peel_if;
4037 }
4038 Node* last_peel = stay_in_loop(peel_if, loop);
4039 Node* first_not_peeled = stay_in_loop(last_peel, loop);
4040 if (first_not_peeled == nullptr || first_not_peeled == head) {
4041 return false;
4042 }
4043
4044 #ifndef PRODUCT
4045 if (TraceLoopOpts) {
4046 tty->print("PartialPeel ");
4047 loop->dump_head();
4048 }
4049
4050 if (TracePartialPeeling) {
4051 tty->print_cr("before partial peel one iteration");
4052 Node_List wl;
4053 Node* t = head->in(2);
4054 while (true) {
4055 wl.push(t);
4056 if (t == head) break;
4057 t = idom(t);
4058 }
4059 while (wl.size() > 0) {
4060 Node* tt = wl.pop();
4061 tt->dump();
4062 if (tt == last_peel) tty->print_cr("-- cut --");
4063 }
4064 }
4065 #endif
4066
4067 C->print_method(PHASE_BEFORE_PARTIAL_PEELING, 4, head);
4068
4069 VectorSet peel;
4070 VectorSet not_peel;
4071 Node_List peel_list;
4072 Node_List worklist;
4073 Node_List sink_list;
4074
4075 uint estimate = loop->est_loop_clone_sz(1);
4076 if (exceeding_node_budget(estimate)) {
4077 return false;
4078 }
4079
4080 // Set of cfg nodes to peel are those that are executable from
4081 // the head through last_peel.
4082 assert(worklist.size() == 0, "should be empty");
4083 worklist.push(head);
4084 peel.set(head->_idx);
4085 while (worklist.size() > 0) {
4086 Node *n = worklist.pop();
4087 if (n != last_peel) {
4088 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
4089 Node* use = n->fast_out(j);
4090 if (use->is_CFG() &&
4091 loop->is_member(get_loop(use)) &&
4092 !peel.test_set(use->_idx)) {
4093 worklist.push(use);
4094 }
4095 }
4096 }
4097 }
4098
4099 // Set of non-cfg nodes to peel are those that are control
4100 // dependent on the cfg nodes.
4101 for (uint i = 0; i < loop->_body.size(); i++) {
4102 Node *n = loop->_body.at(i);
4103 Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
4104 if (peel.test(n_c->_idx)) {
4105 peel.set(n->_idx);
4106 } else {
4107 not_peel.set(n->_idx);
4108 }
4109 }
4110
4111 // Step 2: move operations from the peeled section down into the
4112 // not-peeled section
4113
4114 // Get a post order schedule of nodes in the peel region
4115 // Result in right-most operand.
4116 scheduled_nodelist(loop, peel, peel_list);
4117
4118 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4119
4120 // For future check for too many new phis
4121 uint old_phi_cnt = 0;
4122 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
4123 Node* use = head->fast_out(j);
4124 if (use->is_Phi()) old_phi_cnt++;
4125 }
4126
4127 #ifndef PRODUCT
4128 if (TracePartialPeeling) {
4129 tty->print_cr("\npeeled list");
4130 }
4131 #endif
4132
4133 // Evacuate nodes in peel region into the not_peeled region if possible
4134 bool too_many_clones = false;
4135 uint new_phi_cnt = 0;
4136 uint cloned_for_outside_use = 0;
4137 for (uint i = 0; i < peel_list.size();) {
4138 Node* n = peel_list.at(i);
4139 #ifndef PRODUCT
4140 if (TracePartialPeeling) n->dump();
4141 #endif
4142 bool incr = true;
4143 if (!n->is_CFG()) {
4144 if (has_use_in_set(n, not_peel)) {
4145 // If not used internal to the peeled region,
4146 // move "n" from peeled to not_peeled region.
4147 if (!has_use_internal_to_set(n, peel, loop)) {
          // if not pinned and not a load (which may be anti-dependent on a store)
4149 // and not a CMove (Matcher expects only bool->cmove).
4150 if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) {
4151 int new_clones = clone_for_use_outside_loop(loop, n, worklist);
4152 if (C->failing()) return false;
4153 if (new_clones == -1) {
4154 too_many_clones = true;
4155 break;
4156 }
4157 cloned_for_outside_use += new_clones;
4158 sink_list.push(n);
4159 peel.remove(n->_idx);
4160 not_peel.set(n->_idx);
4161 peel_list.remove(i);
4162 incr = false;
4163 #ifndef PRODUCT
4164 if (TracePartialPeeling) {
4165 tty->print_cr("sink to not_peeled region: %d newbb: %d",
4166 n->_idx, get_ctrl(n)->_idx);
4167 }
4168 #endif
4169 }
4170 } else {
4171 // Otherwise check for special def-use cases that span
4172 // the peel/not_peel boundary such as bool->if
4173 clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
4174 new_phi_cnt++;
4175 }
4176 }
4177 }
4178 if (incr) i++;
4179 }
4180
4181 estimate += cloned_for_outside_use + new_phi_cnt;
4182 bool exceed_node_budget = !may_require_nodes(estimate);
4183 bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;
4184
4185 if (too_many_clones || exceed_node_budget || exceed_phi_limit) {
4186 #ifndef PRODUCT
4187 if (TracePartialPeeling && exceed_phi_limit) {
4188 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
4189 new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F');
4190 }
4191 #endif
4192 if (new_peel_if != nullptr) {
4193 remove_cmpi_loop_exit(new_peel_if, loop);
4194 }
4195 // Inhibit more partial peeling on this loop
4196 assert(!head->is_partial_peel_loop(), "not partial peeled");
4197 head->mark_partial_peel_failed();
4198 if (cloned_for_outside_use > 0) {
4199 // Terminate this round of loop opts because
4200 // the graph outside this loop was changed.
4201 C->set_major_progress();
4202 return true;
4203 }
4204 return false;
4205 }
4206
4207 // Step 3: clone loop, retarget control, and insert new phis
4208
4209 // Create new loop head for new phis and to hang
  // the nodes being moved (sunk) from the peel region.
4211 LoopNode* new_head = new LoopNode(last_peel, last_peel);
4212 new_head->set_unswitch_count(head->unswitch_count()); // Preserve
4213 _igvn.register_new_node_with_optimizer(new_head);
4214 assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
4215 _igvn.replace_input_of(first_not_peeled, 0, new_head);
4216 set_loop(new_head, loop);
4217 loop->_body.push(new_head);
4218 not_peel.set(new_head->_idx);
4219 set_idom(new_head, last_peel, dom_depth(first_not_peeled));
4220 set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
4221
4222 while (sink_list.size() > 0) {
4223 Node* n = sink_list.pop();
4224 set_ctrl(n, new_head);
4225 }
4226
4227 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4228
4229 clone_loop(loop, old_new, dd, IgnoreStripMined);
4230
4231 const uint clone_exit_idx = 1;
4232 const uint orig_exit_idx = 2;
4233 assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");
4234
4235 Node* head_clone = old_new[head->_idx];
4236 LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
4237 Node* orig_tail_clone = head_clone->in(2);
4238
4239 // Add phi if "def" node is in peel set and "use" is not
4240
4241 for (uint i = 0; i < peel_list.size(); i++) {
4242 Node *def = peel_list.at(i);
4243 if (!def->is_CFG()) {
4244 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
4245 Node *use = def->fast_out(j);
4246 if (has_node(use) && use->in(0) != C->top() &&
4247 (!peel.test(use->_idx) ||
4248 (use->is_Phi() && use->in(0) == head)) ) {
4249 worklist.push(use);
4250 }
4251 }
      while (worklist.size() > 0) {
        Node* use = worklist.pop();
4254 for (uint j = 1; j < use->req(); j++) {
4255 Node* n = use->in(j);
4256 if (n == def) {
4257
4258 // "def" is in peel set, "use" is not in peel set
4259 // or "use" is in the entry boundary (a phi) of the peel set
4260
4261 Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
4262
            if (loop->is_member(get_loop(use_c))) {
4264 // use is in loop
4265 if (old_new[use->_idx] != nullptr) { // null for dead code
4266 Node* use_clone = old_new[use->_idx];
4267 _igvn.replace_input_of(use, j, C->top());
                insert_phi_for_loop(use_clone, j, old_new[def->_idx], def, new_head_clone);
4269 }
4270 } else {
4271 assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
4272 // use is not in the loop, check if the live range includes the cut
4273 Node* lp_if = use_c->in(orig_exit_idx)->in(0);
4274 if (not_peel.test(lp_if->_idx)) {
4275 assert(j == orig_exit_idx, "use from original loop");
                insert_phi_for_loop(use, clone_exit_idx, old_new[def->_idx], def, new_head_clone);
4277 }
4278 }
4279 }
4280 }
4281 }
4282 }
4283 }
4284
4285 // Step 3b: retarget control
4286
4287 // Redirect control to the new loop head if a cloned node in
4288 // the not_peeled region has control that points into the peeled region.
  // This is necessary because the cloned peeled region will be outside
4290 // the loop.
4291 // from to
4292 // cloned-peeled <---+
4293 // new_head_clone: | <--+
4294 // cloned-not_peeled in(0) in(0)
4295 // orig-peeled
4296
4297 for (uint i = 0; i < loop->_body.size(); i++) {
4298 Node *n = loop->_body.at(i);
4299 if (!n->is_CFG() && n->in(0) != nullptr &&
4300 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
4301 Node* n_clone = old_new[n->_idx];
4302 if (n_clone->depends_only_on_test()) {
        // If this node depends_only_on_test, it will be rewired to the loop head, which is not the
        // correct test
4305 Node* pinned_clone = n_clone->pin_node_under_control();
4306 if (pinned_clone != nullptr) {
4307 register_new_node_with_ctrl_of(pinned_clone, n_clone);
4308 old_new.map(n->_idx, pinned_clone);
4309 _igvn.replace_node(n_clone, pinned_clone);
4310 n_clone = pinned_clone;
4311 }
4312 }
4313 _igvn.replace_input_of(n_clone, 0, new_head_clone);
4314 }
4315 }
4316
4317 // Backedge of the surviving new_head (the clone) is original last_peel
4318 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
4319
4320 // Cut first node in original not_peel set
4321 _igvn.rehash_node_delayed(new_head); // Multiple edge updates:
4322 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of
4323 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls
4324
4325 // Copy head_clone back-branch info to original head
4326 // and remove original head's loop entry and
4327 // clone head's back-branch
4328 _igvn.rehash_node_delayed(head); // Multiple edge updates
4329 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
4330 head->set_req(LoopNode::LoopBackControl, C->top());
4331 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
4332
4333 // Similarly modify the phis
4334 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
4335 Node* use = head->fast_out(k);
4336 if (use->is_Phi() && use->outcnt() > 0) {
4337 Node* use_clone = old_new[use->_idx];
4338 _igvn.rehash_node_delayed(use); // Multiple edge updates
4339 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
4340 use->set_req(LoopNode::LoopBackControl, C->top());
4341 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
4342 }
4343 }
4344
4345 // Step 4: update dominator tree and dominator depth
4346
4347 set_idom(head, orig_tail_clone, dd);
4348 recompute_dom_depth();
4349
4350 // Inhibit more partial peeling on this loop
4351 new_head_clone->set_partial_peel_loop();
4352 C->set_major_progress();
4353 loop->record_for_igvn();
4354
4355 #ifndef PRODUCT
4356 if (TracePartialPeeling) {
4357 tty->print_cr("\nafter partial peel one iteration");
4358 Node_List wl;
4359 Node* t = last_peel;
4360 while (true) {
4361 wl.push(t);
4362 if (t == head_clone) break;
4363 t = idom(t);
4364 }
4365 while (wl.size() > 0) {
4366 Node* tt = wl.pop();
4367 if (tt == head) tty->print_cr("orig head");
4368 else if (tt == new_head_clone) tty->print_cr("new head");
4369 else if (tt == head_clone) tty->print_cr("clone head");
4370 tt->dump();
4371 }
4372 }
4373 #endif
4374
4375 C->print_method(PHASE_AFTER_PARTIAL_PEELING, 4, new_head_clone);
4376
4377 return true;
4378 }
4379
4380 #ifdef ASSERT
4381
4382 // Moves Template Assertion Predicates to a target loop by cloning and killing the old ones. The target loop is the
// original, not-cloned loop. This is currently only used with StressDuplicateBackedge which is a develop flag only and
// false in product builds. We can therefore guard it with an ifdef. More details can be found at the use-site.
4385 class MoveAssertionPredicatesVisitor : public PredicateVisitor {
4386 ClonePredicateToTargetLoop _clone_predicate_to_loop;
4387 PhaseIdealLoop* const _phase;
4388
4389 public:
4390 MoveAssertionPredicatesVisitor(LoopNode* target_loop_head,
4391 const NodeInSingleLoopBody &node_in_loop_body,
4392 PhaseIdealLoop* phase)
4393 : _clone_predicate_to_loop(target_loop_head, node_in_loop_body, phase),
4394 _phase(phase) {
4395 }
4396 NONCOPYABLE(MoveAssertionPredicatesVisitor);
4397
4398 using PredicateVisitor::visit;
4399
4400 void visit(const TemplateAssertionPredicate& template_assertion_predicate) override {
4401 _clone_predicate_to_loop.clone_template_assertion_predicate(template_assertion_predicate);
4402 template_assertion_predicate.kill(_phase->igvn());
4403 }
4404 };
4405 #endif // ASSERT
4406
4407 // Transform:
4408 //
4409 // loop<-----------------+
4410 // | |
4411 // stmt1 stmt2 .. stmtn |
4412 // | | | |
4413 // \ | / |
4414 // v v v |
4415 // region |
4416 // | |
4417 // shared_stmt |
4418 // | |
4419 // v |
4420 // if |
4421 // / \ |
4422 // | -----------+
4423 // v
4424 //
4425 // into:
4426 //
4427 // loop<-------------------+
4428 // | |
4429 // v |
4430 // +->loop |
4431 // | | |
4432 // | stmt1 stmt2 .. stmtn |
4433 // | | | | |
4434 // | | \ / |
4435 // | | v v |
4436 // | | region1 |
4437 // | | | |
4438 // | shared_stmt shared_stmt |
4439 // | | | |
4440 // | v v |
4441 // | if if |
4442 // | /\ / \ |
4443 // +-- | | -------+
4444 // \ /
4445 // v v
4446 // region2
4447 //
4448 // (region2 is shown to merge mirrored projections of the loop exit
4449 // ifs to make the diagram clearer but they really merge the same
4450 // projection)
4451 //
4452 // Conditions for this transformation to trigger:
4453 // - the path through stmt1 is frequent enough
4454 // - the inner loop will be turned into a counted loop after transformation
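//
// Source-level sketch (illustrative only) of a loop shape this matches: the
// candidate iv is incremented on several paths that merge at "region", e.g.
//
//   for (int i = 0; i < limit;) {
//     if (p) {
//       i++;               // stmt1, the frequent path
//     } else {
//       i++;               // stmt2 .. stmtn
//     }
//     // region, shared_stmt and the exit test follow here
//   }
//
// Duplicating the backedge gives the frequent path its own copy of the shared
// tail, so the resulting inner loop has a single iv increment and exit test
// and can then be converted into a counted loop.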
4455 bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) {
4456 if (!DuplicateBackedge) {
4457 return false;
4458 }
4459 assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only");
4460 if (!loop->_head->is_Loop()) {
4461 return false;
4462 }
4463
4464 uint estimate = loop->est_loop_clone_sz(1);
4465 if (exceeding_node_budget(estimate)) {
4466 return false;
4467 }
4468
4469 LoopNode *head = loop->_head->as_Loop();
4470
4471 Node* region = nullptr;
4472 IfNode* exit_test = nullptr;
4473 uint inner;
4474 float f;
4475 #ifdef ASSERT
4476 if (StressDuplicateBackedge) {
4477 if (head->is_strip_mined()) {
4478 return false;
4479 }
4480 Node* c = head->in(LoopNode::LoopBackControl);
4481
4482 while (c != head) {
4483 if (c->is_Region()) {
4484 region = c;
4485 }
4486 c = idom(c);
4487 }
4488
4489 if (region == nullptr) {
4490 return false;
4491 }
4492
4493 inner = 1;
4494 } else
4495 #endif //ASSERT
4496 {
4497 // Is the shape of the loop that of a counted loop...
4498 Node* back_control = loop_exit_control(loop);
4499 if (back_control == nullptr) {
4500 return false;
4501 }
4502
4503 LoopExitTest loop_exit(back_control, loop, this);
4504 loop_exit.build();
4505 if (!loop_exit.is_valid_with_bt(T_INT)) {
4506 return false;
4507 }
4508
4509 const Node* loop_incr = loop_exit.incr();
4510
4511 // With an extra phi for the candidate iv?
4512 // Or the region node is the loop head
4513 if (!loop_incr->is_Phi() || loop_incr->in(0) == head) {
4514 return false;
4515 }
4516
4517 PathFrequency pf(head, this);
4518 region = loop_incr->in(0);
4519
4520 // Go over all paths for the extra phi's region and see if that
4521 // path is frequent enough and would match the expected iv shape
4522 // if the extra phi is removed
4523 inner = 0;
4524 for (uint i = 1; i < loop_incr->req(); ++i) {
4525 CountedLoopConverter::TruncatedIncrement increment(T_INT);
4526 increment.build(loop_incr->in(i));
4527 if (!increment.is_valid()) {
4528 continue;
4529 }
4530 assert(increment.incr()->Opcode() == Op_AddI, "wrong increment code");
4531
4532 LoopIVStride stride = LoopIVStride(T_INT);
4533 stride.build(increment.incr());
4534 if (!stride.is_valid()) {
4535 continue;
4536 }
4537
4538 PhiNode* phi = loop_iv_phi(stride.xphi(), nullptr, head);
4539 if (phi == nullptr ||
4540 (increment.outer_trunc() == nullptr && phi->in(LoopNode::LoopBackControl) != loop_exit.incr()) ||
4541 (increment.outer_trunc() != nullptr && phi->in(LoopNode::LoopBackControl) != increment.outer_trunc())) {
4542 return false;
4543 }
4544
4545 f = pf.to(region->in(i));
4546 if (f > 0.5) {
4547 inner = i;
4548 break;
4549 }
4550 }
4551
4552 if (inner == 0) {
4553 return false;
4554 }
4555
4556 exit_test = back_control->in(0)->as_If();
4557 }
4558
4559 if (idom(region)->is_Catch()) {
4560 return false;
4561 }
4562
4563 // Collect all control nodes that need to be cloned (shared_stmt in the diagram)
4564 Unique_Node_List wq;
4565 wq.push(head->in(LoopNode::LoopBackControl));
4566 for (uint i = 0; i < wq.size(); i++) {
4567 Node* c = wq.at(i);
4568 assert(get_loop(c) == loop, "not in the right loop?");
4569 if (c->is_Region()) {
4570 if (c != region) {
4571 for (uint j = 1; j < c->req(); ++j) {
4572 wq.push(c->in(j));
4573 }
4574 }
4575 } else {
4576 wq.push(c->in(0));
4577 }
4578 assert(!is_strict_dominator(c, region), "shouldn't go above region");
4579 }
4580
4581 Node* region_dom = idom(region);
4582
4583 // Can't do the transformation if this would cause a membar pair to
4584 // be split
4585 for (uint i = 0; i < wq.size(); i++) {
4586 Node* c = wq.at(i);
4587 if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) {
4588 assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair");
4589 if (!wq.member(c->as_MemBar()->leading_membar())) {
4590 return false;
4591 }
4592 }
4593 }
4594 C->print_method(PHASE_BEFORE_DUPLICATE_LOOP_BACKEDGE, 4, head);
4595
  // Collect data nodes that need to be cloned as well
4597 int dd = dom_depth(head);
4598
4599 for (uint i = 0; i < loop->_body.size(); ++i) {
4600 Node* n = loop->_body.at(i);
4601 if (has_ctrl(n)) {
4602 Node* c = get_ctrl(n);
4603 if (wq.member(c)) {
4604 wq.push(n);
4605 }
4606 } else {
4607 set_idom(n, idom(n), dd);
4608 }
4609 }
4610
4611 // clone shared_stmt
4612 clone_loop_body(wq, old_new, nullptr);
4613
4614 Node* region_clone = old_new[region->_idx];
4615 region_clone->set_req(inner, C->top());
4616 set_idom(region, region->in(inner), dd);
4617
4618 // Prepare the outer loop
4619 Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]);
4620 register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl));
4621 _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head);
4622 set_idom(head, outer_head, dd);
4623
4624 fix_body_edges(wq, loop, old_new, dd, loop->_parent, true);
4625
4626 // Make one of the shared_stmt copies only reachable from stmt1, the
4627 // other only from stmt2..stmtn.
4628 Node* dom = nullptr;
4629 for (uint i = 1; i < region->req(); ++i) {
4630 if (i != inner) {
4631 _igvn.replace_input_of(region, i, C->top());
4632 }
4633 Node* in = region_clone->in(i);
4634 if (in->is_top()) {
4635 continue;
4636 }
4637 if (dom == nullptr) {
4638 dom = in;
4639 } else {
4640 dom = dom_lca(dom, in);
4641 }
4642 }
4643
4644 set_idom(region_clone, dom, dd);
4645
4646 // Set up the outer loop
4647 for (uint i = 0; i < head->outcnt(); i++) {
4648 Node* u = head->raw_out(i);
4649 if (u->is_Phi()) {
4650 Node* outer_phi = u->clone();
4651 outer_phi->set_req(0, outer_head);
4652 Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx];
4653 if (backedge == nullptr) {
4654 backedge = u->in(LoopNode::LoopBackControl);
4655 }
4656 outer_phi->set_req(LoopNode::LoopBackControl, backedge);
4657 register_new_node(outer_phi, outer_head);
4658 _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi);
4659 }
4660 }
4661
4662 // create control and data nodes for out of loop uses (including region2)
4663 Node_List worklist;
4664 uint new_counter = C->unique();
4665 fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist);
4666
4667 Node_List *split_if_set = nullptr;
4668 Node_List *split_bool_set = nullptr;
4669 Node_List *split_cex_set = nullptr;
4670 fix_data_uses(wq, loop, ControlAroundStripMined, loop->skip_strip_mined(), new_counter, old_new, worklist,
4671 split_if_set, split_bool_set, split_cex_set);
4672
4673 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
4674
4675 if (exit_test != nullptr) {
4676 float cnt = exit_test->_fcnt;
4677 if (cnt != COUNT_UNKNOWN) {
4678 exit_test->_fcnt = cnt * f;
4679 old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f);
4680 }
4681 }
4682
4683 #ifdef ASSERT
4684 if (StressDuplicateBackedge && head->is_CountedLoop()) {
4685 // The Template Assertion Predicates from the old counted loop are now at the new outer loop - clone them to
4686 // the inner counted loop and kill the old ones. We only need to do this with debug builds because
  // StressDuplicateBackedge is a develop flag and false by default. Without StressDuplicateBackedge 'head' will be a
4688 // non-counted loop, and thus we have no Template Assertion Predicates above the old loop to move down.
4689 PredicateIterator predicate_iterator(outer_head->in(LoopNode::EntryControl));
4690 NodeInSingleLoopBody node_in_body(this, loop);
4691 MoveAssertionPredicatesVisitor move_assertion_predicates_visitor(head, node_in_body, this);
4692 predicate_iterator.for_each(move_assertion_predicates_visitor);
4693 }
4694 #endif // ASSERT
4695
4696 C->set_major_progress();
4697
4698 C->print_method(PHASE_AFTER_DUPLICATE_LOOP_BACKEDGE, 4, outer_head);
4699
4700 return true;
4701 }
4702
4703 // AutoVectorize the loop: replace scalar ops with vector ops.
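// As an illustrative sketch (hypothetical names), the kind of main loop this
// targets looks at the source level like:
//
//   for (int i = 0; i < n; i++) {
//     c[i] = a[i] + b[i];   // independent, unit-stride iterations
//   }
//
// which SuperWord rewrites into vector loads, a vector add and a vector store
// per chunk of the iteration space.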
4704 PhaseIdealLoop::AutoVectorizeStatus
4705 PhaseIdealLoop::auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared) {
4706 // Counted loop only
4707 if (!lpt->is_counted()) {
4708 return AutoVectorizeStatus::Impossible;
4709 }
4710
4711 // Main-loop only
  CountedLoopNode* cl = lpt->_head->as_CountedLoop();
  if (!cl->is_main_loop()) {
    return AutoVectorizeStatus::Impossible;
  }

  VLoop vloop(lpt, false);
  if (!vloop.check_preconditions()) {
    return AutoVectorizeStatus::TriedAndFailed;
  }

  // Ensure the shared data is cleared before each use
  vshared.clear();

  const VLoopAnalyzer vloop_analyzer(vloop, vshared);
  if (!vloop_analyzer.success()) {
    return AutoVectorizeStatus::TriedAndFailed;
  }

  SuperWord sw(vloop_analyzer);
  if (!sw.transform_loop()) {
    return AutoVectorizeStatus::TriedAndFailed;
  }

  return AutoVectorizeStatus::Success;
}

// Just before insert_pre_post_loops, we can multiversion the loop:
//
//            multiversion_if
//             |           |
//       fast_loop     slow_loop
//
// In the fast_loop we can make speculative assumptions, and put the
// conditions into the multiversion_if. If the conditions hold at runtime,
// we enter the fast_loop; if the conditions fail, we take the slow_loop
// instead, which does not make any of the speculative assumptions.
//
// Note: we only multiversion the loop if the loop does not have any
//       auto vectorization check Predicate. If we have that predicate,
//       then we can simply add the speculative assumption checks to
//       that Predicate. This means we do not need to duplicate the
//       loop - we have a smaller graph and save compile time. Should
//       the conditions ever fail, then we deopt / trap at the Predicate
//       and recompile without that Predicate. At that point we will
//       multiversion the loop, so that we can still have speculative
//       runtime checks.
//
// We perform the multiversioning when the loop is still in its single
// iteration form, even before we insert pre and post loops. This makes
// the cloning much simpler. However, this means that both the fast
// and the slow loop have to be optimized independently (adding pre
// and post loops, unrolling the main loop, auto-vectorizing, etc.).
// We may also end up not needing any speculative assumptions in the
// fast_loop, in which case we reject the slow_loop by constant folding
// the multiversion_if.
//
// Therefore, we "delay" the optimization of the slow_loop until we add
// at least one speculative assumption for the fast_loop. If we never
// add such a speculative runtime check, the OpaqueMultiversioningNode
// of the multiversion_if constant folds to true after loop opts, and the
// multiversion_if folds away the "delayed" slow_loop. If we add any
// speculative assumption, then we notify the OpaqueMultiversioningNode
// with "notify_slow_loop_that_it_can_resume_optimizations".
//
// Note: new runtime checks can be added to the multiversion_if with
//       PhaseIdealLoop::create_new_if_for_multiversion
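//
// For illustration only (not code from this file): a counted loop such as
//
//   for (int i = 0; i < a.length; i++) {
//     a[i] = b[i] + 1;
//   }
//
// could be multiversioned so that the fast_loop is vectorized under
// speculative assumptions (for example about alignment or aliasing of the
// memory accesses), guarded by runtime checks in the multiversion_if, while
// the slow_loop stays correct without any of these assumptions.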
void PhaseIdealLoop::maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new) {
  CountedLoopNode* cl = lpt->_head->as_CountedLoop();
  LoopNode* outer_loop = cl->skip_strip_mined();
  Node* entry = outer_loop->in(LoopNode::EntryControl);

  // Check that we have multiversioning enabled, and are not already multiversioned.
  if (!LoopMultiversioning || cl->is_multiversion()) { return; }

  // Check that we do not have an auto-vectorization check Parse Predicate:
  // if we have one, the runtime checks can be added to it during
  // auto-vectorization instead, and multiversioning is not needed.
  const Predicates predicates(entry);
  const PredicateBlock* predicate_block = predicates.auto_vectorization_check_block();
  if (predicate_block->has_parse_predicate()) { return; }

  // Check node budget.
  uint estimate = lpt->est_loop_clone_sz(2);
  if (!may_require_nodes(estimate)) { return; }

  do_multiversioning(lpt, old_new);
}
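// DataNodeGraph clones a set of data nodes: clone_data_nodes() copies each
// node and records an orig->new mapping, and rewire_clones_to_cloned_inputs()
// then redirects the clones' inputs to the cloned inputs, yielding a separate
// copy of the data graph.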
void DataNodeGraph::clone_data_nodes(Node* new_ctrl) {
  for (uint i = 0; i < _data_nodes.size(); i++) {
    clone(_data_nodes[i], new_ctrl);
  }
}

// Clone the given node, register it, and record the orig->new mapping. Set 'new_ctrl' as ctrl.
void DataNodeGraph::clone(Node* node, Node* new_ctrl) {
  Node* clone = node->clone();
  _phase->igvn().register_new_node_with_optimizer(clone);
  _orig_to_new.put(node, clone);
  _phase->set_ctrl(clone, new_ctrl);
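  // CastII nodes are control dependent: also rewire the clone's control
  // input (req 0) to the new ctrl.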
  if (node->is_CastII()) {
    clone->set_req(0, new_ctrl);
  }
}

// Rewire the data inputs of all (still unprocessed) cloned nodes: any input that still points to an original node
// which was also cloned is redirected to that node's clone, creating a separate cloned graph.
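// For example (hypothetical nodes): if B uses A and both were cloned to B' and A', then B' initially still has A as
// its input; after rewiring, B' uses A'.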
void DataNodeGraph::rewire_clones_to_cloned_inputs() {
  _orig_to_new.iterate_all([&](Node* node, Node* clone) {
    for (uint i = 1; i < node->req(); i++) {
      Node** cloned_input = _orig_to_new.get(node->in(i));
      if (cloned_input != nullptr) {
        // Input was also cloned -> rewire clone to the cloned input.
        _phase->igvn().replace_input_of(clone, i, *cloned_input);
      }
    }
  });
}

// Clone all non-OpaqueLoop* nodes and apply the provided transformation strategy for OpaqueLoop* nodes.
// Set 'new_ctrl' as ctrl for all cloned non-OpaqueLoop* nodes.
void DataNodeGraph::clone_data_nodes_and_transform_opaque_loop_nodes(
    const TransformStrategyForOpaqueLoopNodes& transform_strategy,
    Node* new_ctrl) {
  for (uint i = 0; i < _data_nodes.size(); i++) {
    Node* data_node = _data_nodes[i];
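    // OpaqueLoopInit and OpaqueLoopStride are the only Opaque1 subclasses
    // expected here, as asserted in transform_opaque_node().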
    if (data_node->is_Opaque1()) {
      transform_opaque_node(transform_strategy, data_node);
    } else {
      clone(data_node, new_ctrl);
    }
  }
}

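// Apply the transformation strategy to a single OpaqueLoopInit or OpaqueLoopStride node and record the result in the
// orig->new mapping.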
void DataNodeGraph::transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node) {
  Node* transformed_node;
  if (node->is_OpaqueLoopInit()) {
    transformed_node = transform_strategy.transform_opaque_init(node->as_OpaqueLoopInit());
  } else {
    assert(node->is_OpaqueLoopStride(), "must be OpaqueLoopStrideNode");
    transformed_node = transform_strategy.transform_opaque_stride(node->as_OpaqueLoopStride());
  }
  // Add an orig->new mapping to correctly update the inputs of the copied graph in rewire_clones_to_cloned_inputs().
  _orig_to_new.put(node, transformed_node);
}