1 /*
2 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/connode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/inlinetypenode.hpp"
35 #include "opto/loopnode.hpp"
36 #include "opto/matcher.hpp"
37 #include "opto/movenode.hpp"
38 #include "opto/mulnode.hpp"
39 #include "opto/opaquenode.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/subnode.hpp"
42 #include "opto/subtypenode.hpp"
43 #include "opto/superword.hpp"
44 #include "opto/vectornode.hpp"
45 #include "utilities/checkedCast.hpp"
46 #include "utilities/macros.hpp"
47
48 //=============================================================================
49 //------------------------------split_thru_phi---------------------------------
50 // Split Node 'n' through merge point if there is enough win.
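//
// For example (informal sketch): if a Region R merges two paths and we have
//   v = Phi(R, a, b);   n = AddI(v, c)
// then splitting 'n' through the Phi yields per-path clones merged by a new Phi:
//   n' = Phi(R, AddI(a, c), AddI(b, c))
// Each clone may then constant fold, simplify via Identity, or common up with an existing
// node on its own path; the split is only kept if enough inputs "win" (see the policy check).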
51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
52 if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
53 (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
54     // ConvI2L/ConvL2I may have type information on it which is unsafe to push up,
55     // so disable this for now
56 return nullptr;
57 }
58
59 // Splitting range check CastIIs through a loop induction Phi can
60 // cause new Phis to be created that are left unrelated to the loop
61 // induction Phi and prevent optimizations (vectorization)
62 if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
63 n->in(1) == region->as_CountedLoop()->phi()) {
64 return nullptr;
65 }
66
67   // Inline types should not be split through Phis because they cannot be merged
68   // through Phi nodes; instead, each value input needs to be merged individually.
69 if (n->is_InlineType()) {
70 return nullptr;
71 }
72
73 if (cannot_split_division(n, region)) {
74 return nullptr;
75 }
76
77 SplitThruPhiWins wins(region);
78 assert(!n->is_CFG(), "");
79 assert(region->is_Region(), "");
80
81 const Type* type = n->bottom_type();
82 const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
83 Node* phi;
84 if (t_oop != nullptr && t_oop->is_known_instance_field()) {
85 int iid = t_oop->instance_id();
86 int index = C->get_alias_index(t_oop);
87 int offset = t_oop->offset();
88 phi = new PhiNode(region, type, nullptr, iid, index, offset);
89 } else {
90 phi = PhiNode::make_blank(region, n);
91 }
92 uint old_unique = C->unique();
93 for (uint i = 1; i < region->req(); i++) {
94 Node* x;
95 Node* the_clone = nullptr;
96 if (region->in(i) == C->top()) {
97 x = C->top(); // Dead path? Use a dead data op
98 } else {
99 x = n->clone(); // Else clone up the data op
100 the_clone = x; // Remember for possible deletion.
101 // Alter data node to use pre-phi inputs
102 if (n->in(0) == region)
103 x->set_req( 0, region->in(i) );
104 for (uint j = 1; j < n->req(); j++) {
105 Node* in = n->in(j);
106 if (in->is_Phi() && in->in(0) == region)
107 x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
108 }
109 }
110 // Check for a 'win' on some paths
111 const Type* t = x->Value(&_igvn);
112
113 bool singleton = t->singleton();
114
115 // A TOP singleton indicates that there are no possible values incoming
116 // along a particular edge. In most cases, this is OK, and the Phi will
117 // be eliminated later in an Ideal call. However, we can't allow this to
118 // happen if the singleton occurs on loop entry, as the elimination of
119 // the PhiNode may cause the resulting node to migrate back to a previous
120 // loop iteration.
121 if (singleton && t == Type::TOP) {
122 // Is_Loop() == false does not confirm the absence of a loop (e.g., an
123 // irreducible loop may not be indicated by an affirmative is_Loop());
124 // therefore, the only top we can split thru a phi is on a backedge of
125 // a loop.
126 singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
127 }
128
129 if (singleton) {
130 wins.add_win(i);
131 x = makecon(t);
132 } else {
133 // We now call Identity to try to simplify the cloned node.
134 // Note that some Identity methods call phase->type(this).
135 // Make sure that the type array is big enough for
136 // our new node, even though we may throw the node away.
137 // (Note: This tweaking with igvn only works because x is a new node.)
138 _igvn.set_type(x, t);
139 // If x is a TypeNode, capture any more-precise type permanently into Node
140 // otherwise it will be not updated during igvn->transform since
141 // igvn->type(x) is set to x->Value() already.
142 x->raise_bottom_type(t);
143 Node* y = x->Identity(&_igvn);
144 if (y != x) {
145 wins.add_win(i);
146 x = y;
147 } else {
148 y = _igvn.hash_find(x);
149 if (y == nullptr) {
150 y = similar_subtype_check(x, region->in(i));
151 }
152 if (y) {
153 wins.add_win(i);
154 x = y;
155 } else {
156 // Else x is a new node we are keeping
157 // We do not need register_new_node_with_optimizer
158 // because set_type has already been called.
159 _igvn._worklist.push(x);
160 }
161 }
162 }
163
164 phi->set_req( i, x );
165
166 if (the_clone == nullptr) {
167 continue;
168 }
169
170 if (the_clone != x) {
171 _igvn.remove_dead_node(the_clone);
172 } else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
173 n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
174 // it is not a win if 'x' moved from an outer to an inner loop
175 // this edge case can only happen for Load nodes
176 wins.reset();
177 break;
178 }
179 }
180 // Too few wins?
181 if (!wins.profitable(policy)) {
182 _igvn.remove_dead_node(phi);
183 return nullptr;
184 }
185
186 // Record Phi
187 register_new_node( phi, region );
188
189 for (uint i2 = 1; i2 < phi->req(); i2++) {
190 Node *x = phi->in(i2);
191 // If we commoned up the cloned 'x' with another existing Node,
192 // the existing Node picks up a new use. We need to make the
193 // existing Node occur higher up so it dominates its uses.
194 Node *old_ctrl;
195 IdealLoopTree *old_loop;
196
197 if (x->is_Con()) {
198 assert(get_ctrl(x) == C->root(), "constant control is not root");
199 continue;
200 }
201 // The occasional new node
202 if (x->_idx >= old_unique) { // Found a new, unplaced node?
203 old_ctrl = nullptr;
204 old_loop = nullptr; // Not in any prior loop
205 } else {
206 old_ctrl = get_ctrl(x);
207 old_loop = get_loop(old_ctrl); // Get prior loop
208 }
209 // New late point must dominate new use
210 Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
211 if (new_ctrl == old_ctrl) // Nothing is changed
212 continue;
213
214 IdealLoopTree *new_loop = get_loop(new_ctrl);
215
216 // Don't move x into a loop if its uses are
217 // outside of loop. Otherwise x will be cloned
218 // for each use outside of this loop.
219 IdealLoopTree *use_loop = get_loop(region);
220 if (!new_loop->is_member(use_loop) &&
221 (old_loop == nullptr || !new_loop->is_member(old_loop))) {
222 // Take early control, later control will be recalculated
223 // during next iteration of loop optimizations.
224 new_ctrl = get_early_ctrl(x);
225 new_loop = get_loop(new_ctrl);
226 }
227 // Set new location
228 set_ctrl(x, new_ctrl);
229 // If changing loop bodies, see if we need to collect into new body
230 if (old_loop != new_loop) {
231 if (old_loop && !old_loop->_child)
232 old_loop->_body.yank(x);
233 if (!new_loop->_child)
234 new_loop->_body.push(x); // Collect body info
235 }
236 }
237
238 split_thru_phi_yank_old_nodes(n, region);
239 _igvn.replace_node(n, phi);
240
241 #ifndef PRODUCT
242 if (TraceLoopOpts) {
243 tty->print_cr("Split %d %s through %d Phi in %d %s",
244 n->_idx, n->Name(), phi->_idx, region->_idx, region->Name());
245 }
246 #endif // !PRODUCT
247
248 return phi;
249 }
250
251 // If the region is a Loop, we are removing the old n,
252 // and need to yank it from the _body. If any phi we
253 // just split through now has no use any more, it also
254 // has to be removed.
255 void PhaseIdealLoop::split_thru_phi_yank_old_nodes(Node* n, Node* region) {
256 IdealLoopTree* region_loop = get_loop(region);
257 if (region->is_Loop() && region_loop->is_innermost()) {
258 region_loop->_body.yank(n);
259 for (uint j = 1; j < n->req(); j++) {
260 PhiNode* phi = n->in(j)->isa_Phi();
261 // Check that phi belongs to the region and only has n as a use.
262 if (phi != nullptr &&
263 phi->in(0) == region &&
264 phi->unique_multiple_edges_out_or_null() == n) {
265 assert(get_ctrl(phi) == region, "sanity");
266 assert(get_ctrl(n) == region, "sanity");
267 region_loop->_body.yank(phi);
268 }
269 }
270 }
271 }
272
273 // Test whether node 'x' can move into an inner loop relative to node 'n'.
274 // Note: The test is not exact. It returns true if 'x' COULD end up in an inner loop,
275 // but it may also return true when 'x' actually stays in the outer loop.
276 bool PhaseIdealLoop::can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x) {
277 IdealLoopTree* n_loop_tree = get_loop(n_loop);
278 IdealLoopTree* x_loop_tree = get_loop(get_early_ctrl(x));
279 // x_loop_tree should be outer or same loop as n_loop_tree
280 return !x_loop_tree->is_member(n_loop_tree);
281 }
282
283 // Subtype checks that carry profile data don't common up, so look for a replacement by following edges
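// For example (sketch): two SubTypeCheck nodes with the same object and superklass inputs can
// coexist because each carries its own profile data. If one of them feeds an If that dominates
// the path we are cloning for, it is safe to reuse that check instead of keeping the clone.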
284 Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
285 if (x->is_SubTypeCheck()) {
286 Node* in1 = x->in(1);
287 for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
288 Node* u = in1->fast_out(i);
289 if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
290 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
291 Node* bol = u->fast_out(j);
292 for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
293 Node* iff = bol->fast_out(k);
294 // Only dominating subtype checks are interesting: otherwise we risk replacing a subtype check by another with
295 // unrelated profile
296 if (iff->is_If() && is_dominator(iff, r_in)) {
297 return u;
298 }
299 }
300 }
301 }
302 }
303 }
304 return nullptr;
305 }
306
307 // Return true if 'n' is a Div or Mod node (whose zero check If node was removed earlier) with a loop phi divisor
308 // of a trip-counted (integer or long) loop whose backedge input could be zero (i.e., its type range includes zero).
309 // In this case, we cannot split the division to the backedge because it could freely float above the loop exit check,
310 // resulting in a division by zero. This situation is possible because the type of the increment node of an iv phi
311 // (trip counter) can include zero while the iv phi itself does not (see PhiNode::Value() for trip-counted loops where
312 // we improve the types of iv phis). We also need to check other loop phis as they could have been created in the same
313 // split-if pass when applying PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
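//
// Rough illustration of the hazard:
//   for (int i = 10; i > 0; i--) { r += x / i; }
// The iv phi 'i' has type [1..10], but its backedge input (the decrement) has type [0..9] and
// thus includes zero. Splitting the division through the iv phi would place a clone of the
// division on the backedge where it consumes the decremented value; with its zero check gone,
// that clone could float above the loop exit test and divide by zero.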
314 bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
315 const Type* zero;
316 switch (n->Opcode()) {
317 case Op_DivI:
318 case Op_ModI:
319 case Op_UDivI:
320 case Op_UModI:
321 zero = TypeInt::ZERO;
322 break;
323 case Op_DivL:
324 case Op_ModL:
325 case Op_UDivL:
326 case Op_UModL:
327 zero = TypeLong::ZERO;
328 break;
329 default:
330 return false;
331 }
332
333 if (n->in(0) != nullptr) {
334 // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
335 return true;
336 }
337
338 Node* divisor = n->in(2);
339 return is_divisor_loop_phi(divisor, region) &&
340 loop_phi_backedge_type_contains_zero(divisor, zero);
341 }
342
343 bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) {
344 return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop;
345 }
346
347 bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
348 return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
349 }
350
351 //------------------------------dominated_by------------------------------------
352 // Replace the dominated test with an obvious true or false. Place it on the
353 // IGVN worklist for later cleanup. Move control-dependent data Nodes on the
354 // live path up to the dominating control.
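// For example (sketch): if a dominating "if (i < n)" was taken along its true projection
// (prevdom), an identical test further down is redundant: its condition is replaced by a
// constant and data nodes that depend only on the test are rewired to prevdom.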
355 void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool prevdom_not_imply_this) {
356 if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
357
358 // prevdom is the dominating projection of the dominating test.
359 assert(iff->Opcode() == Op_If ||
360 iff->Opcode() == Op_CountedLoopEnd ||
361 iff->Opcode() == Op_LongCountedLoopEnd ||
362 iff->Opcode() == Op_RangeCheck ||
363 iff->Opcode() == Op_ParsePredicate,
364 "Check this code when new subtype is added");
365
366 int pop = prevdom->Opcode();
367 assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
368 if (flip) {
369 if (pop == Op_IfTrue)
370 pop = Op_IfFalse;
371 else
372 pop = Op_IfTrue;
373 }
374 // 'con' is set to true or false to kill the dominated test.
375 Node* con = makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
376 // Hack the dominated test
377 _igvn.replace_input_of(iff, 1, con);
378
379 // If I don't have a reachable TRUE and FALSE path following the IfNode then
380 // I can assume this path reaches an infinite loop. In this case it's not
381 // important to optimize the data Nodes - either the whole compilation will
382 // be tossed or this path (and all data Nodes) will go dead.
383 if (iff->outcnt() != 2) {
384 return;
385 }
386
387 // Make control-dependent data Nodes on the live path (path that will remain
388 // once the dominated IF is removed) become control-dependent on the
389 // dominating projection.
390 Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);
391
392 if (dp == nullptr) {
393 return;
394 }
395
396 rewire_safe_outputs_to_dominator(dp, prevdom, prevdom_not_imply_this);
397 }
398
399 void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool dominator_not_imply_source) {
400 IdealLoopTree* old_loop = get_loop(source);
401
402 for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
403 Node* out = source->fast_out(i); // Control-dependent node
404 if (out->depends_only_on_test()) {
405 assert(out->in(0) == source, "must be control dependent on source");
406 _igvn.replace_input_of(out, 0, dominator);
407 if (dominator_not_imply_source) {
408 // Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
409 // check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
410 // removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
411 // in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
412 // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
413 // dominating check.
414 Node* clone = out->pin_node_under_control();
415 if (clone != nullptr) {
416 clone = _igvn.register_new_node_with_optimizer(clone, out);
417 _igvn.replace_node(out, clone);
418 out = clone;
419 }
420 }
421 set_early_ctrl(out, false);
422 IdealLoopTree* new_loop = get_loop(get_ctrl(out));
423 if (old_loop != new_loop) {
424 if (!old_loop->_child) {
425 old_loop->_body.yank(out);
426 }
427 if (!new_loop->_child) {
428 new_loop->_body.push(out);
429 }
430 }
431 --i;
432 --imax;
433 }
434 }
435 }
436
437 //------------------------------has_local_phi_input----------------------------
438 // Return n's control (n_ctrl) if 'n' has Phi inputs from its local block and no other
439 // block-local inputs (all non-local-phi inputs come from earlier blocks); return null otherwise.
440 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
441 Node *n_ctrl = get_ctrl(n);
442 // See if some inputs come from a Phi in this block, or from before
443 // this block.
444 uint i;
445 for( i = 1; i < n->req(); i++ ) {
446 Node *phi = n->in(i);
447 if( phi->is_Phi() && phi->in(0) == n_ctrl )
448 break;
449 }
450 if( i >= n->req() )
451 return nullptr; // No Phi inputs; nowhere to clone thru
452
453 // Check for inputs created between 'n' and the Phi input. These
454 // must split as well; they have already been given the chance
455 // (courtesy of a post-order visit) and since they did not we must
456 // recover the 'cost' of splitting them by being very profitable
457 // when splitting 'n'. Since this is unlikely we simply give up.
458 for( i = 1; i < n->req(); i++ ) {
459 Node *m = n->in(i);
460 if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
461 // We allow the special case of AddP's with no local inputs.
462 // This allows us to split-up address expressions.
463 if (m->is_AddP() &&
464 get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
465 get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
466 get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
467 // Move the AddP up to the dominating point. That's fine because control of m's inputs
468 // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
469 Node* c = find_non_split_ctrl(idom(n_ctrl));
470 if (c->is_OuterStripMinedLoop()) {
471 c->as_Loop()->verify_strip_mined(1);
472 c = c->in(LoopNode::EntryControl);
473 }
474 set_ctrl_and_loop(m, c);
475 continue;
476 }
477 return nullptr;
478 }
479 assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
480 }
481
482 return n_ctrl;
483 }
484
485 // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
486 Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
487 assert(bt == T_INT || bt == T_LONG, "only for integers");
488 int n_op = n->Opcode();
489
490 if (n_op == Op_LShift(bt)) {
491 // Scale is loop invariant
492 Node* scale = n->in(2);
493 Node* scale_ctrl = get_ctrl(scale);
494 IdealLoopTree* scale_loop = get_loop(scale_ctrl);
495 if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
496 return nullptr;
497 }
498 const TypeInt* scale_t = scale->bottom_type()->isa_int();
499 if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
500       return nullptr; // Don't bother with byte/short masking
501 }
502 // Add must vary with loop (else shift would be loop-invariant)
503 Node* add = n->in(1);
504 Node* add_ctrl = get_ctrl(add);
505 IdealLoopTree* add_loop = get_loop(add_ctrl);
506 if (n_loop != add_loop) {
507 return nullptr; // happens w/ evil ZKM loops
508 }
509
510     // Convert I-V into I + (0-V); same for V-I
511 if (add->Opcode() == Op_Sub(bt) &&
512 _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
513 assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
514 Node* zero = integercon(0, bt);
515 Node* neg = SubNode::make(zero, add->in(2), bt);
516 register_new_node_with_ctrl_of(neg, add->in(2));
517 add = AddNode::make(add->in(1), neg, bt);
518 register_new_node(add, add_ctrl);
519 }
520 if (add->Opcode() != Op_Add(bt)) return nullptr;
521 assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
522 // See if one add input is loop invariant
523 Node* add_var = add->in(1);
524 Node* add_var_ctrl = get_ctrl(add_var);
525 IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
526 Node* add_invar = add->in(2);
527 Node* add_invar_ctrl = get_ctrl(add_invar);
528 IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
529 if (add_invar_loop == n_loop) {
530 // Swap to find the invariant part
531 add_invar = add_var;
532 add_invar_ctrl = add_var_ctrl;
533 add_invar_loop = add_var_loop;
534 add_var = add->in(2);
535 } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
536 return nullptr;
537 }
538 if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
539 return nullptr; // No invariant part of the add?
540 }
541
542 // Yes! Reshape address expression!
543 Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
544 Node* inv_scale_ctrl =
545 dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
546 add_invar_ctrl : scale_ctrl;
547 register_new_node(inv_scale, inv_scale_ctrl);
548 Node* var_scale = LShiftNode::make(add_var, scale, bt);
549 register_new_node(var_scale, n_ctrl);
550 Node* var_add = AddNode::make(var_scale, inv_scale, bt);
551 register_new_node(var_add, n_ctrl);
552 _igvn.replace_node(n, var_add);
553 return var_add;
554 }
555 return nullptr;
556 }
557
558 //------------------------------remix_address_expressions----------------------
559 // Rework addressing expressions to get the most loop-invariant stuff
560 // moved out. We'd like to do all associative operators, but it's especially
561 // important (common) to do address expressions.
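// For example (sketch): with loop-invariant 'inv' and induction variable 'i',
//   (i + inv) << 3
// is reshaped into
//   (i << 3) + (inv << 3)
// so the invariant shift can be hoisted out of the loop and only the varying part of the
// address expression remains inside it.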
562 Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
563 if (!has_ctrl(n)) return nullptr;
564 Node* n_ctrl = get_ctrl(n);
565 IdealLoopTree* n_loop = get_loop(n_ctrl);
566
567 // See if 'n' mixes loop-varying and loop-invariant inputs and
568 // itself is loop-varying.
569
570 // Only interested in binary ops (and AddP)
571 if (n->req() < 3 || n->req() > 4) return nullptr;
572
573 Node* n1_ctrl = get_ctrl(n->in( 1));
574 Node* n2_ctrl = get_ctrl(n->in( 2));
575 Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
576 IdealLoopTree* n1_loop = get_loop(n1_ctrl);
577 IdealLoopTree* n2_loop = get_loop(n2_ctrl);
578 IdealLoopTree* n3_loop = get_loop(n3_ctrl);
579
580 // Does one of my inputs spin in a tighter loop than self?
581 if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
582 (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
583 (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
584 return nullptr; // Leave well enough alone
585 }
586
587 // Is at least one of my inputs loop-invariant?
588 if (n1_loop == n_loop &&
589 n2_loop == n_loop &&
590 n3_loop == n_loop) {
591 return nullptr; // No loop-invariant inputs
592 }
593
594 Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
595 if (res != nullptr) {
596 return res;
597 }
598 res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
599 if (res != nullptr) {
600 return res;
601 }
602
603 int n_op = n->Opcode();
604 // Replace (I+V) with (V+I)
605 if (n_op == Op_AddI ||
606 n_op == Op_AddL ||
607 n_op == Op_AddF ||
608 n_op == Op_AddD ||
609 n_op == Op_MulI ||
610 n_op == Op_MulL ||
611 n_op == Op_MulF ||
612 n_op == Op_MulD) {
613 if (n2_loop == n_loop) {
614 assert(n1_loop != n_loop, "");
615 n->swap_edges(1, 2);
616 }
617 }
618
619 // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
620 // but not if I2 is a constant. Skip for irreducible loops.
621 if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
622 if (n2_loop == n_loop && n3_loop != n_loop) {
623 if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
624 Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
625 Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
626 IdealLoopTree* n22loop = get_loop(n22_ctrl);
627 IdealLoopTree* n23_loop = get_loop(n23_ctrl);
628 if (n22loop != n_loop && n22loop->is_member(n_loop) &&
629 n23_loop == n_loop) {
630 Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3));
631 // Stuff new AddP in the loop preheader
632 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
633 Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3));
634 register_new_node(add2, n_ctrl);
635 _igvn.replace_node(n, add2);
636 return add2;
637 }
638 }
639 }
640
641 // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
642 if (n2_loop != n_loop && n3_loop == n_loop) {
643 if (n->in(3)->Opcode() == Op_AddX) {
644 Node* V = n->in(3)->in(1);
645 Node* I = n->in(3)->in(2);
646         if (!ctrl_is_member(n_loop, V)) {
647           // Swap so that V is the loop-varying operand and I the loop-invariant one
648           Node* tmp = V; V = I; I = tmp;
649         }
650 if (!ctrl_is_member(n_loop, I)) {
651 Node* add1 = new AddPNode(n->in(1), n->in(2), I);
652 // Stuff new AddP in the loop preheader
653 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
654 Node* add2 = new AddPNode(n->in(1), add1, V);
655 register_new_node(add2, n_ctrl);
656 _igvn.replace_node(n, add2);
657 return add2;
658 }
659 }
660 }
661 }
662
663 return nullptr;
664 }
665
666 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
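// This matches the innermost step of a dot product over short arrays, roughly:
//   for (int i = 0; i < n; i += 2) {
//     sum += in1[i] * in2[i] + in1[i + 1] * in2[i + 1];
//   }
// where the two multiplies of LoadS values can be fused into a MulAddS2I node (and later
// vectorized with MulAddVS2VI when the platform supports it).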
667 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
668 assert(n->Opcode() == Op_AddI, "sanity");
669   Node* nn = nullptr;
670   Node* in1 = n->in(1);
671   Node* in2 = n->in(2);
672 if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
673 IdealLoopTree* loop_n = get_loop(get_ctrl(n));
674 if (loop_n->is_counted() &&
675 loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
676 Matcher::match_rule_supported(Op_MulAddVS2VI) &&
677 Matcher::match_rule_supported(Op_MulAddS2I)) {
678 Node* mul_in1 = in1->in(1);
679 Node* mul_in2 = in1->in(2);
680 Node* mul_in3 = in2->in(1);
681 Node* mul_in4 = in2->in(2);
682 if (mul_in1->Opcode() == Op_LoadS &&
683 mul_in2->Opcode() == Op_LoadS &&
684 mul_in3->Opcode() == Op_LoadS &&
685 mul_in4->Opcode() == Op_LoadS) {
686 IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
687 IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
688 IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
689 IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
690 IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
691 IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
692 // All nodes should be in the same counted loop.
693 if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
694 loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
695 Node* adr1 = mul_in1->in(MemNode::Address);
696 Node* adr2 = mul_in2->in(MemNode::Address);
697 Node* adr3 = mul_in3->in(MemNode::Address);
698 Node* adr4 = mul_in4->in(MemNode::Address);
699 if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
700 if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
701 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
702 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
703 register_new_node_with_ctrl_of(nn, n);
704 _igvn.replace_node(n, nn);
705 return nn;
706 } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
707 (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
708 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
709 register_new_node_with_ctrl_of(nn, n);
710 _igvn.replace_node(n, nn);
711 return nn;
712 }
713 }
714 }
715 }
716 }
717 }
718 return nn;
719 }
720
721 //------------------------------conditional_move-------------------------------
722 // Attempt to replace a Phi with a conditional move. We have some pretty
723 // strict profitability requirements. All Phis at the merge point must
724 // be converted, so we can remove the control flow. We need to limit the
725 // number of c-moves to a small handful. All code that was in the side-arms
726 // of the CFG diamond is now speculatively executed. This code has to be
727 // "cheap enough". We are pretty much limited to CFG diamonds that merge
728 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
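//
// For example (sketch): the diamond produced by
//   x = (a < b) ? c : d;
// merges a single Phi(region, c, d), which can be replaced by a CMove selecting between
// 'c' and 'd' based on the Bool of the compare, after which the If and its projections fold away.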
729 Node *PhaseIdealLoop::conditional_move( Node *region ) {
730
731 assert(region->is_Region(), "sanity check");
732 if (region->req() != 3) return nullptr;
733
734 // Check for CFG diamond
735 Node *lp = region->in(1);
736 Node *rp = region->in(2);
737 if (!lp || !rp) return nullptr;
738 Node *lp_c = lp->in(0);
739 if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
740 IfNode *iff = lp_c->as_If();
741
742 // Check for ops pinned in an arm of the diamond.
743 // Can't remove the control flow in this case
744 if (lp->outcnt() > 1) return nullptr;
745 if (rp->outcnt() > 1) return nullptr;
746
747 IdealLoopTree* r_loop = get_loop(region);
748 assert(r_loop == get_loop(iff), "sanity");
749 // Always convert to CMOVE if all results are used only outside this loop.
750 bool used_inside_loop = (r_loop == _ltree_root);
751
752 // Check profitability
753 int cost = 0;
754 int phis = 0;
755 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
756 Node *out = region->fast_out(i);
757 if (!out->is_Phi()) continue; // Ignore other control edges, etc
758 phis++;
759 PhiNode* phi = out->as_Phi();
760 BasicType bt = phi->type()->basic_type();
761 switch (bt) {
762 case T_DOUBLE:
763 case T_FLOAT:
764 if (C->use_cmove()) {
765 continue; //TODO: maybe we want to add some cost
766 }
767 cost += Matcher::float_cmove_cost(); // Could be very expensive
768 break;
769 case T_LONG: {
770         cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
771 }
772 case T_INT: // These all CMOV fine
773 case T_ADDRESS: { // (RawPtr)
774 cost++;
775 break;
776 }
777 case T_NARROWOOP: // Fall through
778 case T_OBJECT: { // Base oops are OK, but not derived oops
779 const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
780 // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
781 // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
782 // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
783 // have a Phi for the base here that we convert to a CMOVE all is well
784 // and good. But if the base is dead, we'll not make a CMOVE. Later
785 // the allocator will have to produce a base by creating a CMOVE of the
786 // relevant bases. This puts the allocator in the business of
787 // manufacturing expensive instructions, generally a bad plan.
788 // Just Say No to Conditionally-Moved Derived Pointers.
789 if (tp && tp->offset() != 0)
790 return nullptr;
791 cost++;
792 break;
793 }
794 default:
795 return nullptr; // In particular, can't do memory or I/O
796 }
797 // Add in cost any speculative ops
798 for (uint j = 1; j < region->req(); j++) {
799 Node *proj = region->in(j);
800 Node *inp = phi->in(j);
801 if (inp->isa_InlineType()) {
802 // TODO 8302217 This prevents PhiNode::push_inline_types_through
803 return nullptr;
804 }
805 if (get_ctrl(inp) == proj) { // Found local op
806 cost++;
807 // Check for a chain of dependent ops; these will all become
808 // speculative in a CMOV.
809 for (uint k = 1; k < inp->req(); k++)
810 if (get_ctrl(inp->in(k)) == proj)
811 cost += ConditionalMoveLimit; // Too much speculative goo
812 }
813 }
814 // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
815 // This will likely Split-If, a higher-payoff operation.
816 for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
817 Node* use = phi->fast_out(k);
818 if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
819 cost += ConditionalMoveLimit;
820 // Is there a use inside the loop?
821 // Note: check only basic types since CMoveP is pinned.
822 if (!used_inside_loop && is_java_primitive(bt)) {
823 IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
824 if (r_loop == u_loop || r_loop->is_member(u_loop)) {
825 used_inside_loop = true;
826 }
827 }
828 }
829 }//for
830 Node* bol = iff->in(1);
831 assert(!bol->is_OpaqueInitializedAssertionPredicate(), "Initialized Assertion Predicates cannot form a diamond with Halt");
832 if (bol->is_OpaqueTemplateAssertionPredicate()) {
833 // Ignore Template Assertion Predicates with OpaqueTemplateAssertionPredicate nodes.
834 return nullptr;
835 }
836 if (bol->is_OpaqueMultiversioning()) {
837 assert(bol->as_OpaqueMultiversioning()->is_useless(), "Must be useless, i.e. fast main loop has already disappeared.");
838 // Ignore multiversion_if that just lost its loops. The OpaqueMultiversioning is marked useless,
839 // and will make the multiversion_if constant fold in the next IGVN round.
840 return nullptr;
841 }
842 if (!bol->is_Bool()) {
843 assert(false, "Expected Bool, but got %s", NodeClassNames[bol->Opcode()]);
844 return nullptr;
845 }
846 int cmp_op = bol->in(1)->Opcode();
847 if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
848 return nullptr;
849 }
850 // It is expensive to generate flags from a float compare.
851 // Avoid duplicated float compare.
852 if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;
853
854 float infrequent_prob = PROB_UNLIKELY_MAG(3);
855   // Ignore cost and block frequency if CMOVE can be moved outside the loop.
856 if (used_inside_loop) {
857 if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo
858
859 // BlockLayoutByFrequency optimization moves infrequent branch
860     // from hot path. No point in CMOV'ing in such a case (110 is used
861     // instead of 100 to account for the inexactness of the float value).
862 if (BlockLayoutByFrequency) {
863 infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
864 }
865 }
866 // Check for highly predictable branch. No point in CMOV'ing if
867 // we are going to predict accurately all the time.
868 if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
869 //keep going
870 } else if (iff->_prob < infrequent_prob ||
871 iff->_prob > (1.0f - infrequent_prob))
872 return nullptr;
873
874 // --------------
875 // Now replace all Phis with CMOV's
876 Node *cmov_ctrl = iff->in(0);
877 uint flip = (lp->Opcode() == Op_IfTrue);
878 Node_List wq;
879 while (1) {
880 PhiNode* phi = nullptr;
881 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
882 Node *out = region->fast_out(i);
883 if (out->is_Phi()) {
884 phi = out->as_Phi();
885 break;
886 }
887 }
888 if (phi == nullptr || _igvn.type(phi) == Type::TOP || !CMoveNode::supported(_igvn.type(phi))) {
889 break;
890 }
891 // Move speculative ops
892 wq.push(phi);
893 while (wq.size() > 0) {
894 Node *n = wq.pop();
895 for (uint j = 1; j < n->req(); j++) {
896 Node* m = n->in(j);
897 if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
898 set_ctrl(m, cmov_ctrl);
899 wq.push(m);
900 }
901 }
902 }
903 Node* cmov = CMoveNode::make(iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
904 register_new_node(cmov, cmov_ctrl);
905 _igvn.replace_node(phi, cmov);
906 #ifndef PRODUCT
907 if (TraceLoopOpts) {
908 tty->print("CMOV ");
909 r_loop->dump_head();
910 if (Verbose) {
911 bol->in(1)->dump(1);
912 cmov->dump(1);
913 }
914 }
915 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
916 #endif
917 }
918
919 // The useless CFG diamond will fold up later; see the optimization in
920 // RegionNode::Ideal.
921 _igvn._worklist.push(region);
922
923 return iff->in(1);
924 }
925
926 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
927 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
928 Node* u = m->fast_out(i);
929 if (u->is_CFG()) {
930 if (u->is_NeverBranch()) {
931 u = u->as_NeverBranch()->proj_out(0);
932 enqueue_cfg_uses(u, wq);
933 } else {
934 wq.push(u);
935 }
936 }
937 }
938 }
939
940 // Try moving a store out of a loop, right before the loop
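// For example (sketch): in
//   for (int i = 0; i < n; i++) { field = inv; ... }
// the store writes the same value to the same loop-invariant address on every iteration, so
// (under the conditions checked below) it can be performed once right before the loop.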
941 Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
942 // Store has to be first in the loop body
943 IdealLoopTree *n_loop = get_loop(n_ctrl);
944 if (n->is_Store() && n_loop != _ltree_root &&
945 n_loop->is_loop() && n_loop->_head->is_Loop() &&
946 n->in(0) != nullptr) {
947 Node* address = n->in(MemNode::Address);
948 Node* value = n->in(MemNode::ValueIn);
949 Node* mem = n->in(MemNode::Memory);
950
951 // - address and value must be loop invariant
952 // - memory must be a memory Phi for the loop
953 // - Store must be the only store on this memory slice in the
954 // loop: if there's another store following this one then value
955 // written at iteration i by the second store could be overwritten
956 // at iteration i+n by the first store: it's not safe to move the
957 // first store out of the loop
958     // - nothing must observe the memory Phi: it guarantees no read
959     //   before the store, and we are also guaranteed that the store post-
960     //   dominates the loop head (ignoring a possible early
961     //   exit). Otherwise there would be an extra Phi involved between the
962     //   loop's Phi and the store.
963 // - there must be no early exit from the loop before the Store
964 // (such an exit most of the time would be an extra use of the
965 // memory Phi but sometimes is a bottom memory Phi that takes the
966 // store as input).
967
968 if (!ctrl_is_member(n_loop, address) &&
969 !ctrl_is_member(n_loop, value) &&
970 mem->is_Phi() && mem->in(0) == n_loop->_head &&
971 mem->outcnt() == 1 &&
972 mem->in(LoopNode::LoopBackControl) == n) {
973
974 assert(n_loop->_tail != nullptr, "need a tail");
975 assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");
976
977 // Verify that there's no early exit of the loop before the store.
978 bool ctrl_ok = false;
979 {
980 // Follow control from loop head until n, we exit the loop or
981 // we reach the tail
982 ResourceMark rm;
983 Unique_Node_List wq;
984 wq.push(n_loop->_head);
985
986 for (uint next = 0; next < wq.size(); ++next) {
987 Node *m = wq.at(next);
988 if (m == n->in(0)) {
989 ctrl_ok = true;
990 continue;
991 }
992 assert(!has_ctrl(m), "should be CFG");
993 if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
994 ctrl_ok = false;
995 break;
996 }
997 enqueue_cfg_uses(m, wq);
998 if (wq.size() > 10) {
999 ctrl_ok = false;
1000 break;
1001 }
1002 }
1003 }
1004 if (ctrl_ok) {
1005 // move the Store
1006 _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
1007 _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
1008 _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
1009 // Disconnect the phi now. An empty phi can confuse other
1010 // optimizations in this pass of loop opts.
1011 _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
1012 n_loop->_body.yank(mem);
1013
1014 set_ctrl_and_loop(n, n->in(0));
1015
1016 return n;
1017 }
1018 }
1019 }
1020 return nullptr;
1021 }
1022
1023 // Try moving a store out of a loop, right after the loop
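// For example (sketch): in
//   for (int i = 0; i < n; i++) { field = i; }
// nothing in the loop observes the stored values, so (under the conditions checked below) only
// the last iteration's store is visible and it can be sunk to a single store after the loop.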
1024 void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
1025 if (n->is_Store() && n->in(0) != nullptr) {
1026 Node *n_ctrl = get_ctrl(n);
1027 IdealLoopTree *n_loop = get_loop(n_ctrl);
1028 // Store must be in a loop
1029 if (n_loop != _ltree_root && !n_loop->_irreducible) {
1030 Node* address = n->in(MemNode::Address);
1031 Node* value = n->in(MemNode::ValueIn);
1032 // address must be loop invariant
1033 if (!ctrl_is_member(n_loop, address)) {
1034 // Store must be last on this memory slice in the loop and
1035 // nothing in the loop must observe it
1036 Node* phi = nullptr;
1037 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1038 Node* u = n->fast_out(i);
1039 if (has_ctrl(u)) { // control use?
1040 if (!ctrl_is_member(n_loop, u)) {
1041 continue;
1042 }
1043 if (u->is_Phi() && u->in(0) == n_loop->_head) {
1044 assert(_igvn.type(u) == Type::MEMORY, "bad phi");
1045 // multiple phis on the same slice are possible
1046 if (phi != nullptr) {
1047 return;
1048 }
1049 phi = u;
1050 continue;
1051 }
1052 }
1053 return;
1054 }
1055 if (phi != nullptr) {
1056 // Nothing in the loop before the store (next iteration)
1057 // must observe the stored value
1058 bool mem_ok = true;
1059 {
1060 ResourceMark rm;
1061 Unique_Node_List wq;
1062 wq.push(phi);
1063 for (uint next = 0; next < wq.size() && mem_ok; ++next) {
1064 Node *m = wq.at(next);
1065 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
1066 Node* u = m->fast_out(i);
1067 if (u->is_Store() || u->is_Phi()) {
1068 if (u != n) {
1069 wq.push(u);
1070 mem_ok = (wq.size() <= 10);
1071 }
1072 } else {
1073 mem_ok = false;
1074 break;
1075 }
1076 }
1077 }
1078 }
1079 if (mem_ok) {
1080 // Move the store out of the loop if the LCA of all
1081 // users (except for the phi) is outside the loop.
1082 Node* hook = new Node(1);
1083 hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
1084 _igvn.rehash_node_delayed(phi);
1085 int count = phi->replace_edge(n, hook, &_igvn);
1086 assert(count > 0, "inconsistent phi");
1087
1088 // Compute latest point this store can go
1089 Node* lca = get_late_ctrl(n, get_ctrl(n));
1090 if (lca->is_OuterStripMinedLoop()) {
1091 lca = lca->in(LoopNode::EntryControl);
1092 }
1093 if (n_loop->is_member(get_loop(lca))) {
1094 // LCA is in the loop - bail out
1095 _igvn.replace_node(hook, n);
1096 return;
1097 }
1098 #ifdef ASSERT
1099 if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
1100 assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
1101 n_loop->_head->as_Loop()->verify_strip_mined(1);
1102 Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
1103 IdealLoopTree* outer_loop = get_loop(outer);
1104 assert(n_loop->_parent == outer_loop, "broken loop tree");
1105 assert(get_loop(lca) == outer_loop, "safepoint in outer loop consume all memory state");
1106 }
1107 #endif
1108 lca = place_outside_loop(lca, n_loop);
1109 assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
1110 assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1111
1112 // Move store out of the loop
1113 _igvn.replace_node(hook, n->in(MemNode::Memory));
1114 _igvn.replace_input_of(n, 0, lca);
1115 set_ctrl_and_loop(n, lca);
1116
1117 // Disconnect the phi now. An empty phi can confuse other
1118 // optimizations in this pass of loop opts..
1119 if (phi->in(LoopNode::LoopBackControl) == phi) {
1120 _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1121 n_loop->_body.yank(phi);
1122 }
1123 }
1124 }
1125 }
1126 }
1127 }
1128 }
1129
1130 // We can't use immutable memory for the flat array check because we are loading the mark word which is
1131 // mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
1132 // we need to use raw memory so as not to break anti-dependency analysis. The code below still attempts to move
1133 // flat array checks out of loops, mainly to enable loop unswitching.
1134 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1135 // Skip checks for more than one array
1136 if (n->req() > 3) {
1137 return;
1138 }
1139 Node* mem = n->in(FlatArrayCheckNode::Memory);
1140 Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1141 IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1142 IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1143
1144 // Check if array is loop invariant
1145 if (!check_loop->is_member(ary_loop)) {
1146 // Walk up memory graph from the check until we leave the loop
1147 VectorSet wq;
1148 wq.set(mem->_idx);
1149 while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1150 if (mem->is_Phi()) {
1151 mem = mem->in(1);
1152 } else if (mem->is_MergeMem()) {
1153 mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1154 } else if (mem->is_Proj()) {
1155 mem = mem->in(0);
1156 } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1157 mem = mem->in(TypeFunc::Memory);
1158 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1159 mem = mem->in(MemNode::Memory);
1160 } else {
1161 #ifdef ASSERT
1162 mem->dump();
1163 #endif
1164 ShouldNotReachHere();
1165 }
1166 if (wq.test_set(mem->_idx)) {
1167 return;
1168 }
1169 }
1170 // Replace memory input and re-compute ctrl to move the check out of the loop
1171 _igvn.replace_input_of(n, 1, mem);
1172 set_ctrl_and_loop(n, get_early_ctrl(n));
1173 Node* bol = n->unique_out();
1174 set_ctrl_and_loop(bol, get_early_ctrl(bol));
1175 }
1176 }
1177
1178 //------------------------------split_if_with_blocks_pre-----------------------
1179 // Do the real work in a non-recursive function. Data nodes want to be
1180 // cloned in the pre-order so they can feed each other nicely.
1181 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1182 // Cloning these guys is unlikely to win
1183 int n_op = n->Opcode();
1184 if (n_op == Op_MergeMem) {
1185 return n;
1186 }
1187 if (n->is_Proj()) {
1188 return n;
1189 }
1190
1191 if (n->isa_FlatArrayCheck()) {
1192 move_flat_array_check_out_of_loop(n);
1193 return n;
1194 }
1195
1196 // Do not clone-up CmpFXXX variations, as these are always
1197 // followed by a CmpI
1198 if (n->is_Cmp()) {
1199 return n;
1200 }
1201 // Attempt to use a conditional move instead of a phi/branch
1202 if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1203 Node *cmov = conditional_move( n );
1204 if (cmov) {
1205 return cmov;
1206 }
1207 }
1208 if (n->is_CFG() || n->is_LoadStore()) {
1209 return n;
1210 }
1211 if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1212 if (!C->major_progress()) { // If chance of no more loop opts...
1213 _igvn._worklist.push(n); // maybe we'll remove them
1214 }
1215 return n;
1216 }
1217
1218 if (n->is_Con()) {
1219 return n; // No cloning for Con nodes
1220 }
1221
1222 Node *n_ctrl = get_ctrl(n);
1223 if (!n_ctrl) {
1224 return n; // Dead node
1225 }
1226
1227 Node* res = try_move_store_before_loop(n, n_ctrl);
1228 if (res != nullptr) {
1229 return n;
1230 }
1231
1232 // Attempt to remix address expressions for loop invariants
1233 Node *m = remix_address_expressions( n );
1234 if( m ) return m;
1235
1236 if (n_op == Op_AddI) {
1237 Node *nn = convert_add_to_muladd( n );
1238 if ( nn ) return nn;
1239 }
1240
1241 if (n->is_ConstraintCast() && n->as_ConstraintCast()->dependency().narrows_type()) {
1242 Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
1243 // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
1244 // Node control inputs don't necessarily agree with loop control info (due to
1245 // transformations happened in between), thus additional dominance check is needed
1246 // to keep loop info valid.
1247 if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
1248 _igvn.replace_node(n, dom_cast);
1249 return dom_cast;
1250 }
1251 }
1252
1253 // Determine if the Node has inputs from some local Phi.
1254 // Returns the block to clone thru.
1255 Node *n_blk = has_local_phi_input( n );
1256 if( !n_blk ) return n;
1257
1258 // Do not clone the trip counter through on a CountedLoop
1259 // (messes up the canonical shape).
1260 if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
1261 (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
1262 return n;
1263 }
1264 // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
1265 if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
1266 n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
1267 return n;
1268 }
1269
1270 // Check for having no control input; not pinned. Allow
1271 // dominating control.
1272 if (n->in(0)) {
1273 Node *dom = idom(n_blk);
1274 if (dom_lca(n->in(0), dom) != n->in(0)) {
1275 return n;
1276 }
1277 }
1278 // Policy: when is it profitable. You must get more wins than
1279 // policy before it is considered profitable. Policy is usually 0,
1280 // so 1 win is considered profitable. Big merges will require big
1281 // cloning, so get a larger policy.
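  // For example: a 2-way merge (req() == 3) gives policy 0, so a single win suffices, while an
  // 8-way merge (req() == 9) gives policy 2 and needs at least 3 wins.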
1282 int policy = n_blk->req() >> 2;
1283
1284 // If the loop is a candidate for range check elimination,
1285   // delay splitting through its phi until a later loop optimization
1286 if (n_blk->is_BaseCountedLoop()) {
1287 IdealLoopTree *lp = get_loop(n_blk);
1288 if (lp && lp->_rce_candidate) {
1289 return n;
1290 }
1291 }
1292
1293 if (must_throttle_split_if()) return n;
1294
1295 // Split 'n' through the merge point if it is profitable, replacing it with a new phi.
1296 Node* phi = split_thru_phi(n, n_blk, policy);
1297 if (phi == nullptr) { return n; }
1298
1299 // Moved a load around the loop, 'en-registering' something.
1300 if (n_blk->is_Loop() && n->is_Load() &&
1301 !phi->in(LoopNode::LoopBackControl)->is_Load())
1302 C->set_major_progress();
1303
1304 return phi;
1305 }
1306
1307 static bool merge_point_too_heavy(Compile* C, Node* region) {
1308 // Bail out if the region and its phis have too many users.
1309 int weight = 0;
1310 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1311 weight += region->fast_out(i)->outcnt();
1312 }
1313 int nodes_left = C->max_node_limit() - C->live_nodes();
1314 if (weight * 8 > nodes_left) {
1315 if (PrintOpto) {
1316 tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
1317 }
1318 return true;
1319 } else {
1320 return false;
1321 }
1322 }
1323
1324 static bool merge_point_safe(Node* region) {
1325 // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1326 // having a PhiNode input. This sidesteps the dangerous case where the split
1327 // ConvI2LNode may become TOP if the input Value() does not
1328 // overlap the ConvI2L range, leaving a node which may not dominate its
1329 // uses.
1330 // A better fix for this problem can be found in the BugTraq entry, but
1331 // expediency for Mantis demands this hack.
1332 #ifdef _LP64
1333 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1334 Node* n = region->fast_out(i);
1335 if (n->is_Phi()) {
1336 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1337 Node* m = n->fast_out(j);
1338 if (m->Opcode() == Op_ConvI2L)
1339 return false;
1340 if (m->is_CastII()) {
1341 return false;
1342 }
1343 }
1344 }
1345 }
1346 #endif
1347 return true;
1348 }
1349
1350
1351 //------------------------------place_outside_loop---------------------------------
1352 // Place some computation outside of this loop on the path to the use passed as argument
1353 Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
1354 Node* head = loop->_head;
1355 assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
1356 if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
1357 loop = loop->_parent;
1358 assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
1359 }
1360
1361 // Pick control right outside the loop
1362 for (;;) {
1363 Node* dom = idom(useblock);
1364 if (loop->is_member(get_loop(dom))) {
1365 break;
1366 }
1367 useblock = dom;
1368 }
1369 assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
1370 return useblock;
1371 }
1372
1373
1374 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1375 if (!n->is_If() || n->is_BaseCountedLoopEnd()) {
1376 return false;
1377 }
1378 if (!n->in(0)->is_Region()) {
1379 return false;
1380 }
1381
1382 Node* region = n->in(0);
1383 Node* dom = idom(region);
1384 if (!dom->is_If() || !n->as_If()->same_condition(dom, &_igvn)) {
1385 return false;
1386 }
1387 IfNode* dom_if = dom->as_If();
1388 IfTrueNode* proj_true = dom_if->true_proj();
1389 IfFalseNode* proj_false = dom_if->false_proj();
1390
1391 for (uint i = 1; i < region->req(); i++) {
1392 if (is_dominator(proj_true, region->in(i))) {
1393 continue;
1394 }
1395 if (is_dominator(proj_false, region->in(i))) {
1396 continue;
1397 }
1398 return false;
1399 }
1400
1401 return true;
1402 }
1403
1404
1405 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1406 if (must_throttle_split_if()) {
1407 return false;
1408 }
1409
1410 // Do not do 'split-if' if irreducible loops are present.
1411 if (_has_irreducible_loops) {
1412 return false;
1413 }
1414
1415 if (merge_point_too_heavy(C, n_ctrl)) {
1416 return false;
1417 }
1418
1419 // Do not do 'split-if' if some paths are dead. First do dead code
1420   // elimination and then see if it's still profitable.
1421 for (uint i = 1; i < n_ctrl->req(); i++) {
1422 if (n_ctrl->in(i) == C->top()) {
1423 return false;
1424 }
1425 }
1426
1427 // If trying to do a 'Split-If' at the loop head, it is only
1428 // profitable if the cmp folds up on BOTH paths. Otherwise we
1429 // risk peeling a loop forever.
1430
1431 // CNC - Disabled for now. Requires careful handling of loop
1432 // body selection for the cloned code. Also, make sure we check
1433 // for any input path not being in the same loop as n_ctrl. For
1434 // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1435 // because the alternative loop entry points won't be converted
1436 // into LoopNodes.
1437 IdealLoopTree *n_loop = get_loop(n_ctrl);
1438 for (uint j = 1; j < n_ctrl->req(); j++) {
1439 if (get_loop(n_ctrl->in(j)) != n_loop) {
1440 return false;
1441 }
1442 }
1443
1444 // Check for safety of the merge point.
1445 if (!merge_point_safe(n_ctrl)) {
1446 return false;
1447 }
1448
1449 return true;
1450 }
1451
1452 // Detect if the node is the loop end of an inner strip-mined loop
1453 // Return: null if that's not the case, otherwise the exit of the outer strip-mined loop
1454 static Node* is_inner_of_stripmined_loop(const Node* out) {
1455 Node* out_le = nullptr;
1456
1457 if (out->is_CountedLoopEnd()) {
1458 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1459
1460 if (loop != nullptr && loop->is_strip_mined()) {
1461 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1462 }
1463 }
1464
1465 return out_le;
1466 }
1467
1468 bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
1469 // If the CmpP is a subtype check for a value that has just been
1470 // loaded from an array, the subtype check guarantees the value
1471 // can't be stored in a flat array and the load of the value
1472 // happens with a flat array check then: push the type check
1473 // through the phi of the flat array check. This needs special
1474 // logic because the subtype check's input is not a phi but a
1475 // LoadKlass that must first be cloned through the phi.
1476 if (n->Opcode() != Op_CmpP) {
1477 return false;
1478 }
1479
1480 Node* klassptr = n->in(1);
1481 Node* klasscon = n->in(2);
1482
1483 if (klassptr->is_DecodeNarrowPtr()) {
1484 klassptr = klassptr->in(1);
1485 }
1486
1487 if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1488 return false;
1489 }
1490
1491 if (!klasscon->is_Con()) {
1492 return false;
1493 }
1494
1495 Node* addr = klassptr->in(MemNode::Address);
1496
1497 if (!addr->is_AddP()) {
1498 return false;
1499 }
1500
1501 intptr_t offset;
1502 Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1503
1504 if (obj == nullptr) {
1505 return false;
1506 }
1507
1508 // TODO 8378077: The code below does not work anymore with off-heap accesses which set their bases to top with
1509 // JDK-8373343. Also: flat_array_element_type_check() was introduced with JDK-8228622 for a specific check to enable
1510 // split-if but JDK-8245729 changed what that check looks like. Is it still relevant? This should be revisited.
1511 if (addr->in(AddPNode::Base)->is_top()) {
1512 return false;
1513 }
1514
1515 if (obj->Opcode() == Op_CastPP) {
1516 obj = obj->in(1);
1517 }
1518
1519 if (!obj->is_Phi()) {
1520 return false;
1521 }
1522
1523 Node* region = obj->in(0);
1524
1525 Node* phi = PhiNode::make_blank(region, n->in(1));
1526 for (uint i = 1; i < region->req(); i++) {
1527 Node* in = obj->in(i);
1528 Node* ctrl = region->in(i);
1529 if (addr->in(AddPNode::Base) != obj) {
1530 Node* cast = addr->in(AddPNode::Base);
1531 assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
1532 Node* cast_clone = cast->clone();
1533 cast_clone->set_req(0, ctrl);
1534 cast_clone->set_req(1, in);
1535 register_new_node(cast_clone, ctrl);
1536 const Type* tcast = cast_clone->Value(&_igvn);
1537 _igvn.set_type(cast_clone, tcast);
1538 cast_clone->as_Type()->set_type(tcast);
1539 in = cast_clone;
1540 }
1541 Node* addr_clone = addr->clone();
1542 addr_clone->set_req(AddPNode::Base, in);
1543 addr_clone->set_req(AddPNode::Address, in);
1544 register_new_node(addr_clone, ctrl);
1545 _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1546 Node* klassptr_clone = klassptr->clone();
1547 klassptr_clone->set_req(2, addr_clone);
1548 register_new_node(klassptr_clone, ctrl);
1549 _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1550 if (klassptr != n->in(1)) {
1551 Node* decode = n->in(1);
1552 assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1553 Node* decode_clone = decode->clone();
1554 decode_clone->set_req(1, klassptr_clone);
1555 register_new_node(decode_clone, ctrl);
1556 _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1557 klassptr_clone = decode_clone;
1558 }
1559 phi->set_req(i, klassptr_clone);
1560 }
1561 register_new_node(phi, region);
1562 Node* orig = n->in(1);
1563 _igvn.replace_input_of(n, 1, phi);
1564 split_if_with_blocks_post(n);
1565 if (n->outcnt() != 0) {
1566 _igvn.replace_input_of(n, 1, orig);
1567 _igvn.remove_dead_node(phi);
1568 }
1569 return true;
1570 }
1571
1572 //------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function. CFG hackery wants to be
// done in post-order, so it can dirty the I-DOM info and not use the dirtied
1575 // info.
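//
// At the source level, the classic split-if shape handled below looks roughly
// like (illustrative):
//
//   int v = c ? a : b;    // Phi at the merge point
//   if (v == 0) { ... }   // Cmp/Bool/If on the Phi
//
// The Cmp, Bool and If are cloned into each predecessor of the merge point,
// where the test can often constant fold and kill one of the paths.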
1576 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1577
1578 if (flat_array_element_type_check(n)) {
1579 return;
1580 }
1581
1582 // Cloning Cmp through Phi's involves the split-if transform.
1583 // FastLock is not used by an If
1584 if (n->is_Cmp() && !n->is_FastLock()) {
1585 Node *n_ctrl = get_ctrl(n);
1586 // Determine if the Node has inputs from some local Phi.
1587 // Returns the block to clone thru.
1588 Node *n_blk = has_local_phi_input(n);
1589 if (n_blk != n_ctrl) {
1590 return;
1591 }
1592
1593 if (!can_split_if(n_ctrl)) {
1594 return;
1595 }
1596
1597 if (n->outcnt() != 1) {
return; // Multiple Bools from 1 compare?
1599 }
1600 Node *bol = n->unique_out();
1601 assert(bol->is_Bool(), "expect a bool here");
1602 if (bol->outcnt() != 1) {
return; // Multiple branches from 1 compare?
1604 }
1605 Node *iff = bol->unique_out();
1606
1607 // Check some safety conditions
1608 if (iff->is_If()) { // Classic split-if?
1609 if (iff->in(0) != n_ctrl) {
1610 return; // Compare must be in same blk as if
1611 }
1612 } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1613 // Can't split CMove with different control.
1614 if (get_ctrl(iff) != n_ctrl) {
1615 return;
1616 }
1617 if (get_ctrl(iff->in(2)) == n_ctrl ||
1618 get_ctrl(iff->in(3)) == n_ctrl) {
1619 return; // Inputs not yet split-up
1620 }
1621 if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1622 return; // Loop-invar test gates loop-varying CMOVE
1623 }
1624 } else {
1625 return; // some other kind of node, such as an Allocate
1626 }
1627
// When is split-if profitable? Every 'win' means some control flow
// goes dead, so it's almost always a win.
1630 int policy = 0;
1631 // Split compare 'n' through the merge point if it is profitable
1632 Node *phi = split_thru_phi( n, n_ctrl, policy);
1633 if (!phi) {
1634 return;
1635 }
1636
1637 // Now split the bool up thru the phi
1638 Node* bolphi = split_thru_phi(bol, n_ctrl, -1);
1639 guarantee(bolphi != nullptr, "null boolean phi node");
1640 assert(iff->in(1) == bolphi, "");
1641
1642 if (bolphi->Value(&_igvn)->singleton()) {
1643 return;
1644 }
1645
1646 // Conditional-move? Must split up now
1647 if (!iff->is_If()) {
1648 Node* cmovphi = split_thru_phi(iff, n_ctrl, -1);
1649 return;
1650 }
1651
1652 // Now split the IF
1653 C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff);
1654 if (TraceLoopOpts) {
1655 tty->print_cr("Split-If");
1656 }
1657 do_split_if(iff);
1658 C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff);
1659 return;
1660 }
1661
1662 // Two identical ifs back to back can be merged
1663 if (try_merge_identical_ifs(n)) {
1664 return;
1665 }
1666
1667 // Check for an IF ready to split; one that has its
1668 // condition codes input coming from a Phi at the block start.
1669 int n_op = n->Opcode();
1670
// Check for an IF being dominated by another IF with the same test
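// e.g. (illustrative): a test repeated further down a path it already guards:
//
//   if (i < limit) {
//     ...
//     if (i < limit) { ... }   // dominated by the identical test above
//   }
//
// The dominated test is replaced with a constant via dominated_by() and folds.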
1672 if (n_op == Op_If ||
1673 n_op == Op_RangeCheck) {
1674 Node *bol = n->in(1);
1675 uint max = bol->outcnt();
1676 // Check for same test used more than once?
1677 if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
1678 // Search up IDOMs to see if this IF is dominated.
1679 Node* cmp = bol->in(1);
1680 Node *cutoff = cmp->is_SubTypeCheck() ? dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);
1681
1682 // Now search up IDOMs till cutoff, looking for a dominating test
1683 Node *prevdom = n;
1684 Node *dom = idom(prevdom);
1685 while (dom != cutoff) {
1686 if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
1687 safe_for_if_replacement(dom)) {
// It's invalid to move control-dependent data nodes within the inner
// strip-mined loop, because that would:
// 1) break the validation done by LoopNode::verify_strip_mined()
// 2) move code with side effects into the strip-mined loop
// Move them to the exit of the outer strip-mined loop in that case.
1693 Node* out_le = is_inner_of_stripmined_loop(dom);
1694 if (out_le != nullptr) {
1695 prevdom = out_le;
1696 }
1697 // Replace the dominated test with an obvious true or false.
1698 // Place it on the IGVN worklist for later cleanup.
1699 C->set_major_progress();
1700 // Split if: pin array accesses that are control dependent on a range check and moved to a regular if,
1701 // to prevent an array load from floating above its range check. There are three cases:
1702 // 1. Move from RangeCheck "a" to RangeCheck "b": don't need to pin. If we ever remove b, then we pin
1703 // all its array accesses at that point.
1704 // 2. We move from RangeCheck "a" to regular if "b": need to pin. If we ever remove b, then its array
1705 // accesses would start to float, since we don't pin at that point.
1706 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1707 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1708 prevdom->in(0)->Opcode() != Op_RangeCheck;
1709 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1710 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1711 return;
1712 }
1713 prevdom = dom;
1714 dom = idom(prevdom);
1715 }
1716 }
1717 }
1718
1719 try_sink_out_of_loop(n);
1720 if (C->failing()) {
1721 return;
1722 }
1723
1724 try_move_store_after_loop(n);
1725
1726 // Remove multiple allocations of the same inline type
1727 if (n->is_InlineType()) {
1728 n->as_InlineType()->remove_redundant_allocations(this);
1729 }
1730 }
1731
1732 // Transform:
1733 //
1734 // if (some_condition) {
1735 // // body 1
1736 // } else {
1737 // // body 2
1738 // }
1739 // if (some_condition) {
1740 // // body 3
1741 // } else {
1742 // // body 4
1743 // }
1744 //
1745 // into:
1746 //
1747 //
1748 // if (some_condition) {
1749 // // body 1
1750 // // body 3
1751 // } else {
1752 // // body 2
1753 // // body 4
1754 // }
1755 bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
1756 if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1757 Node *n_ctrl = n->in(0);
1758 IfNode* dom_if = idom(n_ctrl)->as_If();
1759 if (n->in(1) != dom_if->in(1)) {
1760 assert(n->in(1)->in(1)->is_SubTypeCheck() &&
1761 (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
1762 dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
1763 _igvn.replace_input_of(n, 1, dom_if->in(1));
1764 }
1765 IfTrueNode* dom_proj_true = dom_if->true_proj();
1766 IfFalseNode* dom_proj_false = dom_if->false_proj();
1767
1768 // Now split the IF
1769 RegionNode* new_false_region;
1770 RegionNode* new_true_region;
1771 do_split_if(n, &new_false_region, &new_true_region);
1772 assert(new_false_region->req() == new_true_region->req(), "");
1773 #ifdef ASSERT
1774 for (uint i = 1; i < new_false_region->req(); ++i) {
1775 assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if");
1776 assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if");
1777 }
1778 #endif
1779 assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test");
1780
1781 // We now have:
1782 // if (some_condition) {
1783 // // body 1
1784 // if (some_condition) {
1785 // body3: // new_true_region
1786 // // body3
1787 // } else {
1788 // goto body4;
1789 // }
1790 // } else {
1791 // // body 2
1792 // if (some_condition) {
1793 // goto body3;
1794 // } else {
1795 // body4: // new_false_region
1796 // // body4;
1797 // }
1798 // }
1799 //
1800
1801 // clone pinned nodes thru the resulting regions
1802 push_pinned_nodes_thru_region(dom_if, new_true_region);
1803 push_pinned_nodes_thru_region(dom_if, new_false_region);
1804
1805 // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent
1806 // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an
1807 // unrelated control dependency.
1808 for (uint i = 1; i < new_false_region->req(); i++) {
1809 if (is_dominator(dom_proj_true, new_false_region->in(i))) {
1810 dominated_by(dom_proj_true, new_false_region->in(i)->in(0)->as_If());
1811 } else {
1812 assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if");
1813 dominated_by(dom_proj_false, new_false_region->in(i)->in(0)->as_If());
1814 }
1815 }
1816 return true;
1817 }
1818 return false;
1819 }
1820
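// Clone nodes that are pinned on 'region', only depend on the test, and whose
// other inputs all dominate 'dom_if' into each of the region's predecessors,
// and merge the clones with a Phi that replaces the original node.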
1821 void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) {
1822 for (DUIterator i = region->outs(); region->has_out(i); i++) {
1823 Node* u = region->out(i);
1824 if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test()) {
1825 continue;
1826 }
1827 assert(u->in(0) == region, "not a control dependent node?");
1828 uint j = 1;
1829 for (; j < u->req(); ++j) {
1830 Node* in = u->in(j);
1831 if (!is_dominator(ctrl_or_self(in), dom_if)) {
1832 break;
1833 }
1834 }
1835 if (j == u->req()) {
1836 Node *phi = PhiNode::make_blank(region, u);
1837 for (uint k = 1; k < region->req(); ++k) {
1838 Node* clone = u->clone();
1839 clone->set_req(0, region->in(k));
1840 register_new_node(clone, region->in(k));
1841 phi->init_req(k, clone);
1842 }
1843 register_new_node(phi, region);
1844 _igvn.replace_node(u, phi);
1845 --i;
1846 }
1847 }
1848 }
1849
1850 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const {
1851 if (!dom->is_CountedLoopEnd()) {
1852 return true;
1853 }
1854 CountedLoopEndNode* le = dom->as_CountedLoopEnd();
1855 CountedLoopNode* cl = le->loopnode();
1856 if (cl == nullptr) {
1857 return true;
1858 }
1859 if (!cl->is_main_loop()) {
1860 return true;
1861 }
1862 if (cl->is_canonical_loop_entry() == nullptr) {
1863 return true;
1864 }
1865 // Further unrolling is possible so loop exit condition might change
1866 return false;
1867 }
1868
1869 // See if a shared loop-varying computation has no loop-varying uses.
1870 // Happens if something is only used for JVM state in uncommon trap exits,
1871 // like various versions of induction variable+offset. Clone the
1872 // computation per usage to allow it to sink out of the loop.
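// e.g. (illustrative):
//
//   for (int i = 0; i < n; i++) {
//     int v = i + off;            // loop-varying, but...
//     if (rare_condition) {
//       deoptimize();             // ...only used by the trap's JVM state
//     }
//     ...
//   }
//
// 'v' has no use that must stay in the loop, so a clone of it can be computed
// at each out-of-loop use instead of on every iteration.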
1873 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
1874 bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
1875 n->in(1)->bottom_type()->isa_rawptr() &&
1876 !n->bottom_type()->isa_rawptr();
1877
1878 if (has_ctrl(n) &&
1879 !n->is_Phi() &&
1880 !n->is_Bool() &&
1881 !n->is_Proj() &&
1882 !n->is_MergeMem() &&
1883 !n->is_CMove() &&
1884 !n->is_OpaqueConstantBool() &&
1885 !n->is_OpaqueInitializedAssertionPredicate() &&
1886 !n->is_OpaqueTemplateAssertionPredicate() &&
1887 !is_raw_to_oop_cast && // don't extend live ranges of raw oops
1888 n->Opcode() != Op_CreateEx &&
1889 (KillPathsReachableByDeadTypeNode || !n->is_Type())
1890 ) {
1891 Node *n_ctrl = get_ctrl(n);
1892 IdealLoopTree *n_loop = get_loop(n_ctrl);
1893
1894 if (n->in(0) != nullptr) {
1895 IdealLoopTree* loop_ctrl = get_loop(n->in(0));
1896 if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) {
// n has a control input inside a loop but its get_ctrl() is in an outer loop. This could happen, for example,
// for Div nodes inside a loop (control input inside loop) without a use except for an UCT (outside the loop).
// Rewire the control of n to just outside of the loop, regardless of whether its input(s) are later sunk or not.
1900 Node* maybe_pinned_n = n;
1901 Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl);
1902 if (!would_sink_below_pre_loop_exit(loop_ctrl, outside_ctrl)) {
1903 if (n->depends_only_on_test()) {
1904 // If this node depends_only_on_test, it will be rewired to a control input that is not
1905 // the correct test. As a result, it must be pinned otherwise it can be incorrectly
1906 // rewired to a dominating test equivalent to the new control.
1907 Node* pinned_clone = n->pin_node_under_control();
1908 if (pinned_clone != nullptr) {
1909 register_new_node(pinned_clone, n_ctrl);
1910 maybe_pinned_n = pinned_clone;
1911 _igvn.replace_node(n, pinned_clone);
1912 }
1913 }
1914 _igvn.replace_input_of(maybe_pinned_n, 0, outside_ctrl);
1915 }
1916 }
1917 }
1918 if (n_loop != _ltree_root && n->outcnt() > 1) {
1919 // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of
1920 // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us.
1921 Node* early_ctrl = compute_early_ctrl(n, n_ctrl);
1922 if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now
1923 ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops!
1924 if (n->is_Store() || n->is_LoadStore()) {
1925 assert(false, "no node with a side effect");
1926 C->record_failure("no node with a side effect");
1927 return;
1928 }
1929 Node* outer_loop_clone = nullptr;
1930 for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) {
1931 Node* u = n->last_out(j); // Clone private computation per use
1932 _igvn.rehash_node_delayed(u);
1933 Node* x = nullptr;
1934 if (n->in(0) != nullptr && n->depends_only_on_test()) {
1935 // If this node depends_only_on_test, it will be rewired to a control input that is not
1936 // the correct test. As a result, it must be pinned otherwise it can be incorrectly
1937 // rewired to a dominating test equivalent to the new control.
1938 x = n->pin_node_under_control();
1939 }
1940 if (x == nullptr) {
1941 x = n->clone();
1942 }
1943 Node* x_ctrl = nullptr;
1944 if (u->is_Phi()) {
1945 // Replace all uses of normal nodes. Replace Phi uses
1946 // individually, so the separate Nodes can sink down
1947 // different paths.
1948 uint k = 1;
1949 while (u->in(k) != n) k++;
1950 u->set_req(k, x);
1951 // x goes next to Phi input path
1952 x_ctrl = u->in(0)->in(k);
1953 // Find control for 'x' next to use but not inside inner loops.
1954 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1955 --j;
1956 } else { // Normal use
1957 if (has_ctrl(u)) {
1958 x_ctrl = get_ctrl(u);
1959 } else {
1960 x_ctrl = u->in(0);
1961 }
1962 // Find control for 'x' next to use but not inside inner loops.
1963 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1964 // Replace all uses
1965 if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) {
1966 // If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary
1967 // anymore now that we're going to pin n as well
1968 _igvn.replace_node(u, x);
1969 --j;
1970 } else {
1971 int nb = u->replace_edge(n, x, &_igvn);
1972 j -= nb;
1973 }
1974 }
1975
1976 if (n->is_Load()) {
// For loads, add a control edge to a CFG node outside of the loop
// to keep them from being commoned and pulled back inside the loop
// during GVN optimization (4641526).
1980 assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked");
1981
1982 IdealLoopTree* x_loop = get_loop(x_ctrl);
1983 Node* x_head = x_loop->_head;
1984 if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) {
1985 // Do not add duplicate LoadNodes to the outer strip mined loop
1986 if (outer_loop_clone != nullptr) {
1987 _igvn.replace_node(x, outer_loop_clone);
1988 continue;
1989 }
1990 outer_loop_clone = x;
1991 }
1992 x->set_req(0, x_ctrl);
} else if (n->in(0) != nullptr) {
1994 x->set_req(0, x_ctrl);
1995 }
1996 assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
1997 assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop");
1998 register_new_node(x, x_ctrl);
1999
2000 // Chain of AddP nodes: (AddP base (AddP base (AddP base )))
2001 // All AddP nodes must keep the same base after sinking so:
2002 // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk,
2003 // their bases remain the same.
2004 // (see 2- below)
2005 assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() ||
2006 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) ||
2007 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape");
2008 if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() &&
2009 !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) {
2010 assert(!x->is_Load(), "load should be pinned");
2011 // Use a cast node to pin clone out of loop
2012 Node* cast = nullptr;
2013 for (uint k = 0; k < x->req(); k++) {
2014 Node* in = x->in(k);
2015 if (in != nullptr && ctrl_is_member(n_loop, in)) {
2016 const Type* in_t = _igvn.type(in);
2017 cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
2018 ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr);
2019 }
2020 if (cast != nullptr) {
2021 Node* prev = _igvn.hash_find_insert(cast);
2022 if (prev != nullptr && get_ctrl(prev) == x_ctrl) {
2023 cast->destruct(&_igvn);
2024 cast = prev;
2025 } else {
2026 register_new_node(cast, x_ctrl);
2027 }
2028 x->replace_edge(in, cast);
2029 // Chain of AddP nodes:
2030 // 2- A CastPP of the base is only added now that all AddP nodes are sunk
2031 if (x->is_AddP() && k == AddPNode::Base) {
2032 update_addp_chain_base(x, n->in(AddPNode::Base), cast);
2033 }
2034 break;
2035 }
2036 }
2037 assert(cast != nullptr, "must have added a cast to pin the node");
2038 }
2039 }
2040 _igvn.remove_dead_node(n);
2041 }
2042 _dom_lca_tags_round = 0;
2043 }
2044 }
2045 }
2046
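// Walk the AddP chain hanging off 'x' and rewire every AddP that still uses
// 'old_base' as its base to 'new_base', so the whole sunk chain shares the
// same (casted) base.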
2047 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) {
2048 ResourceMark rm;
2049 Node_List wq;
2050 wq.push(x);
2051 while (wq.size() != 0) {
2052 Node* n = wq.pop();
2053 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2054 Node* u = n->fast_out(i);
2055 if (u->is_AddP() && u->in(AddPNode::Base) == old_base) {
2056 _igvn.replace_input_of(u, AddPNode::Base, new_base);
2057 wq.push(u);
2058 }
2059 }
2060 }
2061 }
2062
2063 // Compute the early control of a node by following its inputs until we reach
2064 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes.
2065 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) {
2066 Node* early_ctrl = nullptr;
2067 ResourceMark rm;
2068 Unique_Node_List wq;
2069 wq.push(n);
2070 for (uint i = 0; i < wq.size(); i++) {
2071 Node* m = wq.at(i);
2072 Node* c = nullptr;
2073 if (m->is_CFG()) {
2074 c = m;
2075 } else if (m->pinned()) {
2076 c = m->in(0);
2077 } else {
2078 for (uint j = 0; j < m->req(); j++) {
2079 Node* in = m->in(j);
2080 if (in != nullptr) {
2081 wq.push(in);
2082 }
2083 }
2084 }
2085 if (c != nullptr) {
2086 assert(is_dominator(c, n_ctrl), "control input must dominate current control");
2087 if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) {
2088 early_ctrl = c;
2089 }
2090 }
2091 }
2092 assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control");
2093 return early_ctrl;
2094 }
2095
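// Check that the control of every use of 'n' is out of the loop 'n_loop'. Phi
// uses are checked against the control of the corresponding region input.
// Returns false if a use is an Opaque1 loop limit, is loop-varying, sits in an
// inner loop, or would sink below the pre loop exit.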
2096 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) {
2097 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2098 Node* u = n->fast_out(i);
2099 if (u->is_Opaque1()) {
2100 return false; // Found loop limit, bugfix for 4677003
2101 }
2102 if (u->is_Phi()) {
2103 for (uint j = 1; j < u->req(); ++j) {
2104 if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) {
2105 return false;
2106 }
2107 }
2108 } else {
2109 Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0);
2110 if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) {
2111 return false;
2112 }
2113 }
2114 }
2115 return true;
2116 }
2117
2118 // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input
2119 // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit
2120 // test of the pre loop above the point in the graph where it's pinned. This results in a broken graph. One way to avoid
2121 // it would be to not eliminate the check in the main loop. Instead, we prevent sinking of the node here so better code
2122 // is generated for the main loop.
2123 bool PhaseIdealLoop::would_sink_below_pre_loop_exit(IdealLoopTree* n_loop, Node* ctrl) {
2124 if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop()) {
2125 CountedLoopNode* pre_loop = n_loop->_head->as_CountedLoop();
2126 if (is_dominator(pre_loop->loopexit(), ctrl)) {
2127 return true;
2128 }
2129 }
2130 return false;
2131 }
2132
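// Check a single use's control 'ctrl' against 'n_loop'. For loads, first
// recompute the late control with anti-dependences taken into account.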
2133 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) {
2134 if (n->is_Load()) {
2135 // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure each call to
2136 // get_late_ctrl_with_anti_dep() uses its own tag
2137 _dom_lca_tags_round++;
2138 assert(_dom_lca_tags_round != 0, "shouldn't wrap around");
2139
2140 ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl);
2141 }
2142 IdealLoopTree *u_loop = get_loop(ctrl);
2143 if (u_loop == n_loop) {
2144 return false; // Found loop-varying use
2145 }
2146 if (n_loop->is_member(u_loop)) {
2147 return false; // Found use in inner loop
2148 }
2149 if (would_sink_below_pre_loop_exit(n_loop, ctrl)) {
2150 return false;
2151 }
2152 return true;
2153 }
2154
2155 //------------------------------split_if_with_blocks---------------------------
2156 // Check for aggressive application of 'split-if' optimization,
2157 // using basic block level info.
2158 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
2159 Node* root = C->root();
2160 visited.set(root->_idx); // first, mark root as visited
2161 // Do pre-visit work for root
2162 Node* n = split_if_with_blocks_pre(root);
2163 uint cnt = n->outcnt();
2164 uint i = 0;
2165
2166 while (true) {
2167 // Visit all children
2168 if (i < cnt) {
2169 Node* use = n->raw_out(i);
2170 ++i;
2171 if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
2172 // Now do pre-visit work for this use
2173 use = split_if_with_blocks_pre(use);
2174 nstack.push(n, i); // Save parent and next use's index.
2175 n = use; // Process all children of current use.
2176 cnt = use->outcnt();
2177 i = 0;
2178 }
2179 }
2180 else {
2181 // All of n's children have been processed, complete post-processing.
2182 if (cnt != 0 && !n->is_Con()) {
2183 assert(has_node(n), "no dead nodes");
2184 split_if_with_blocks_post(n);
2185 if (C->failing()) {
2186 return;
2187 }
2188 }
2189 if (must_throttle_split_if()) {
2190 nstack.clear();
2191 }
2192 if (nstack.is_empty()) {
2193 // Finished all nodes on stack.
2194 break;
2195 }
2196 // Get saved parent node and next use's index. Visit the rest of uses.
2197 n = nstack.node();
2198 cnt = n->outcnt();
2199 i = nstack.index();
2200 nstack.pop();
2201 }
2202 }
2203 }
2204
2205
2206 //=============================================================================
2207 //
2208 // C L O N E A L O O P B O D Y
2209 //
2210
2211 //------------------------------clone_iff--------------------------------------
2212 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2213 // "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone Bool/Cmp pairs
// through the Phi recursively, and return a Bool (or the Opaque node
// wrapping it).
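//
// Illustrative shape (simplified), for a two-way merge without Opaque nodes:
//
//   Before: Phi(Bool(Cmp(a1, b1)), Bool(Cmp(a2, b2)))
//   After:  Bool(Cmp(Phi(a1, a2), Phi(b1, b2)))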
2216 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
2217
2218 // Convert this Phi into a Phi merging Bools
2219 uint i;
2220 for (i = 1; i < phi->req(); i++) {
2221 Node* b = phi->in(i);
2222 if (b->is_Phi()) {
2223 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2224 } else {
2225 assert(b->is_Bool() || b->is_OpaqueConstantBool() || b->is_OpaqueInitializedAssertionPredicate(),
2226 "bool, non-null check with OpaqueConstantBool or Initialized Assertion Predicate with its Opaque node");
2227 }
2228 }
2229 Node* n = phi->in(1);
2230 Node* sample_opaque = nullptr;
2231 Node *sample_bool = nullptr;
2232 if (n->is_OpaqueConstantBool() || n->is_OpaqueInitializedAssertionPredicate()) {
2233 sample_opaque = n;
2234 sample_bool = n->in(1);
2235 assert(sample_bool->is_Bool(), "wrong type");
2236 } else {
2237 sample_bool = n;
2238 }
2239 Node* sample_cmp = sample_bool->in(1);
2240 const Type* t = Type::TOP;
2241 const TypePtr* at = nullptr;
2242 if (sample_cmp->is_FlatArrayCheck()) {
2243 // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
2244 assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2245 t = Type::MEMORY;
2246 at = TypeRawPtr::BOTTOM;
2247 }
2248
2249 // Make Phis to merge the Cmp's inputs.
2250 PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2251 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2252 for (i = 1; i < phi->req(); i++) {
2253 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2254 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2255 phi1->set_req(i, n1);
2256 phi2->set_req(i, n2);
2257 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2258 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2259 }
2260 // See if these Phis have been made before.
2261 // Register with optimizer
2262 Node *hit1 = _igvn.hash_find_insert(phi1);
2263 if (hit1) { // Hit, toss just made Phi
2264 _igvn.remove_dead_node(phi1); // Remove new phi
2265 assert(hit1->is_Phi(), "" );
2266 phi1 = (PhiNode*)hit1; // Use existing phi
2267 } else { // Miss
2268 _igvn.register_new_node_with_optimizer(phi1);
2269 }
2270 Node *hit2 = _igvn.hash_find_insert(phi2);
2271 if (hit2) { // Hit, toss just made Phi
2272 _igvn.remove_dead_node(phi2); // Remove new phi
2273 assert(hit2->is_Phi(), "" );
2274 phi2 = (PhiNode*)hit2; // Use existing phi
2275 } else { // Miss
2276 _igvn.register_new_node_with_optimizer(phi2);
2277 }
2278 // Register Phis with loop/block info
2279 set_ctrl(phi1, phi->in(0));
2280 set_ctrl(phi2, phi->in(0));
2281 // Make a new Cmp
2282 Node *cmp = sample_cmp->clone();
2283 cmp->set_req(1, phi1);
2284 cmp->set_req(2, phi2);
2285 _igvn.register_new_node_with_optimizer(cmp);
2286 set_ctrl(cmp, phi->in(0));
2287
2288 // Make a new Bool
2289 Node *b = sample_bool->clone();
2290 b->set_req(1,cmp);
2291 _igvn.register_new_node_with_optimizer(b);
2292 set_ctrl(b, phi->in(0));
2293
2294 if (sample_opaque != nullptr) {
2295 Node* opaque = sample_opaque->clone();
2296 opaque->set_req(1, b);
2297 _igvn.register_new_node_with_optimizer(opaque);
2298 set_ctrl(opaque, phi->in(0));
2299 return opaque;
2300 }
2301
2302 assert(b->is_Bool(), "");
2303 return b;
2304 }
2305
2306 //------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Cmps (or TOP).
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone the Cmps
// through the Phi recursively, and return a single Cmp.
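// Same idea as clone_iff() above, one level down: the Phi merges Cmps (or
// TOP), and a single Cmp over new Phis of the Cmp inputs is returned.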
CmpNode* PhaseIdealLoop::clone_bool(PhiNode* phi) {
2312 uint i;
2313 // Convert this Phi into a Phi merging Bools
2314 for( i = 1; i < phi->req(); i++ ) {
2315 Node *b = phi->in(i);
2316 if( b->is_Phi() ) {
2317 _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi()));
2318 } else {
2319 assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
2320 }
2321 }
2322
2323 Node *sample_cmp = phi->in(1);
2324
2325 // Make Phis to merge the Cmp's inputs.
2326 PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
2327 PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
2328 for( uint j = 1; j < phi->req(); j++ ) {
2329 Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
2330 Node *n1, *n2;
2331 if( cmp_top->is_Cmp() ) {
2332 n1 = cmp_top->in(1);
2333 n2 = cmp_top->in(2);
2334 } else {
2335 n1 = n2 = cmp_top;
2336 }
2337 phi1->set_req( j, n1 );
2338 phi2->set_req( j, n2 );
2339 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2340 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2341 }
2342
2343 // See if these Phis have been made before.
2344 // Register with optimizer
2345 Node *hit1 = _igvn.hash_find_insert(phi1);
2346 if( hit1 ) { // Hit, toss just made Phi
2347 _igvn.remove_dead_node(phi1); // Remove new phi
2348 assert( hit1->is_Phi(), "" );
2349 phi1 = (PhiNode*)hit1; // Use existing phi
2350 } else { // Miss
2351 _igvn.register_new_node_with_optimizer(phi1);
2352 }
2353 Node *hit2 = _igvn.hash_find_insert(phi2);
2354 if( hit2 ) { // Hit, toss just made Phi
2355 _igvn.remove_dead_node(phi2); // Remove new phi
2356 assert( hit2->is_Phi(), "" );
2357 phi2 = (PhiNode*)hit2; // Use existing phi
2358 } else { // Miss
2359 _igvn.register_new_node_with_optimizer(phi2);
2360 }
2361 // Register Phis with loop/block info
2362 set_ctrl(phi1, phi->in(0));
2363 set_ctrl(phi2, phi->in(0));
2364 // Make a new Cmp
2365 Node *cmp = sample_cmp->clone();
2366 cmp->set_req( 1, phi1 );
2367 cmp->set_req( 2, phi2 );
2368 _igvn.register_new_node_with_optimizer(cmp);
2369 set_ctrl(cmp, phi->in(0));
2370
2371 assert( cmp->is_Cmp(), "" );
2372 return (CmpNode*)cmp;
2373 }
2374
2375 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
2376 IdealLoopTree* loop, IdealLoopTree* outer_loop,
2377 Node_List*& split_if_set, Node_List*& split_bool_set,
2378 Node_List*& split_cex_set, Node_List& worklist,
2379 uint new_counter, CloneLoopMode mode) {
2380 Node* nnn = old_new[old->_idx];
2381 // Copy uses to a worklist, so I can munge the def-use info
2382 // with impunity.
2383 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2384 worklist.push(old->fast_out(j));
2385
2386 while( worklist.size() ) {
2387 Node *use = worklist.pop();
2388 if (!has_node(use)) continue; // Ignore dead nodes
2389 if (use->in(0) == C->top()) continue;
2390 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2391 // Check for data-use outside of loop - at least one of OLD or USE
2392 // must not be a CFG node.
2393 #ifdef ASSERT
2394 if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) {
2395 Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
2396 assert(mode != IgnoreStripMined, "incorrect cloning mode");
2397 assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
2398 }
2399 #endif
2400 if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
2401
2402 // If the Data use is an IF, that means we have an IF outside the
2403 // loop that is switching on a condition that is set inside the
2404 // loop. Happens if people set a loop-exit flag; then test the flag
// in the loop to break the loop, then test it again outside the
2406 // loop to determine which way the loop exited.
2407 //
// For several uses we need to make sure that there is no phi between
2409 // the use and the Bool/Cmp. We therefore clone the Bool/Cmp down here
2410 // to avoid such a phi in between.
2411 // For example, it is unexpected that there is a Phi between an
2412 // AllocateArray node and its ValidLengthTest input that could cause
2413 // split if to break.
2414 assert(!use->is_OpaqueTemplateAssertionPredicate(),
2415 "should not clone a Template Assertion Predicate which should be removed once it's useless");
2416 if (use->is_If() || use->is_CMove() || use->is_OpaqueConstantBool() || use->is_OpaqueInitializedAssertionPredicate() ||
2417 (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) {
2418 // Since this code is highly unlikely, we lazily build the worklist
2419 // of such Nodes to go split.
2420 if (!split_if_set) {
2421 split_if_set = new Node_List();
2422 }
2423 split_if_set->push(use);
2424 }
2425 if (use->is_Bool()) {
2426 if (!split_bool_set) {
2427 split_bool_set = new Node_List();
2428 }
2429 split_bool_set->push(use);
2430 }
2431 if (use->Opcode() == Op_CreateEx) {
2432 if (!split_cex_set) {
2433 split_cex_set = new Node_List();
2434 }
2435 split_cex_set->push(use);
2436 }
2437
2438
2439 // Get "block" use is in
2440 uint idx = 0;
2441 while( use->in(idx) != old ) idx++;
2442 Node *prev = use->is_CFG() ? use : get_ctrl(use);
2443 assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
2444 Node* cfg = (prev->_idx >= new_counter && prev->is_Region())
2445 ? prev->in(2)
2446 : idom(prev);
2447 if( use->is_Phi() ) // Phi use is in prior block
2448 cfg = prev->in(idx); // NOT in block of Phi itself
2449 if (cfg->is_top()) { // Use is dead?
2450 _igvn.replace_input_of(use, idx, C->top());
2451 continue;
2452 }
2453
2454 // If use is referenced through control edge... (idx == 0)
2455 if (mode == IgnoreStripMined && idx == 0) {
2456 LoopNode *head = loop->_head->as_Loop();
2457 if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
2458 // That node is outside the inner loop, leave it outside the
2459 // outer loop as well to not confuse verification code.
2460 assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
2461 _igvn.replace_input_of(use, 0, head->outer_loop_exit());
2462 continue;
2463 }
2464 }
2465
2466 while(!outer_loop->is_member(get_loop(cfg))) {
2467 prev = cfg;
2468 cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg);
2469 }
2470 // If the use occurs after merging several exits from the loop, then
2471 // old value must have dominated all those exits. Since the same old
2472 // value was used on all those exits we did not need a Phi at this
2473 // merge point. NOW we do need a Phi here. Each loop exit value
2474 // is now merged with the peeled body exit; each exit gets its own
2475 // private Phi and those Phis need to be merged here.
2476 Node *phi;
2477 if( prev->is_Region() ) {
2478 if( idx == 0 ) { // Updating control edge?
2479 phi = prev; // Just use existing control
2480 } else { // Else need a new Phi
2481 phi = PhiNode::make( prev, old );
2482 // Now recursively fix up the new uses of old!
2483 for( uint i = 1; i < prev->req(); i++ ) {
2484 worklist.push(phi); // Onto worklist once for each 'old' input
2485 }
2486 }
2487 } else {
2488 // Get new RegionNode merging old and new loop exits
2489 prev = old_new[prev->_idx];
2490 assert( prev, "just made this in step 7" );
2491 if( idx == 0) { // Updating control edge?
2492 phi = prev; // Just use existing control
2493 } else { // Else need a new Phi
2494 // Make a new Phi merging data values properly
2495 phi = PhiNode::make( prev, old );
2496 phi->set_req( 1, nnn );
2497 }
2498 }
2499 // If inserting a new Phi, check for prior hits
2500 if( idx != 0 ) {
2501 Node *hit = _igvn.hash_find_insert(phi);
2502 if( hit == nullptr ) {
2503 _igvn.register_new_node_with_optimizer(phi); // Register new phi
2504 } else { // or
2505 // Remove the new phi from the graph and use the hit
2506 _igvn.remove_dead_node(phi);
2507 phi = hit; // Use existing phi
2508 }
2509 set_ctrl(phi, prev);
2510 }
2511 // Make 'use' use the Phi instead of the old loop body exit value
2512 assert(use->in(idx) == old, "old is still input of use");
2513 // We notify all uses of old, including use, and the indirect uses,
2514 // that may now be optimized because we have replaced old with phi.
2515 _igvn.add_users_to_worklist(old);
2516 if (idx == 0 && use->depends_only_on_test()) {
2517 // If this node depends_only_on_test, it will be rewired to a control input that is not the
2518 // correct test. As a result, it must be pinned otherwise it can be incorrectly rewired to
2519 // a dominating test equivalent to the new control.
2520 Node* pinned_clone = use->pin_node_under_control();
2521 if (pinned_clone != nullptr) {
2522 pinned_clone->set_req(0, phi);
2523 register_new_node_with_ctrl_of(pinned_clone, use);
2524 _igvn.replace_node(use, pinned_clone);
2525 continue;
2526 }
2527 }
2528 _igvn.replace_input_of(use, idx, phi);
2529 if( use->_idx >= new_counter ) { // If updating new phis
2530 // Not needed for correctness, but prevents a weak assert
2531 // in AddPNode from tripping (when we end up with different
2532 // base & derived Phis that will become the same after
2533 // IGVN does CSE).
2534 Node *hit = _igvn.hash_find_insert(use);
2535 if( hit ) // Go ahead and re-hash for hits.
2536 _igvn.replace_node( use, hit );
2537 }
2538 }
2539 }
2540 }
2541
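// Collect (into 'wq') non-CFG uses of 'n' whose control (or pinning control
// input) is in the outer strip-mined loop but not in the inner loop. The
// caller then moves their control out of the outer loop, since the safepoint
// does not keep them there.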
2542 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
2543 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
2544 bool check_old_new) {
2545 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2546 Node* u = n->fast_out(j);
2547 assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned");
2548 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) {
2549 assert(!phase->ctrl_is_member(loop, u) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
2550 if (!phase->ctrl_is_member(loop, u)) {
2551 if (phase->ctrl_is_member(outer_loop, u)) {
2552 wq.push(u);
2553 } else {
2554 // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of
2555 // the outer loop too
2556 Node* u_c = u->in(0);
2557 if (u_c != nullptr) {
2558 IdealLoopTree* u_c_loop = phase->get_loop(u_c);
2559 if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) {
2560 wq.push(u);
2561 }
2562 }
2563 }
2564 }
2565 }
2566 }
2567 }
2568
2569 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
2570 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
2571 Node_List& extra_data_nodes) {
2572 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2573 CountedLoopNode* cl = head->as_CountedLoop();
2574 Node* l = cl->outer_loop();
2575 Node* tail = cl->outer_loop_tail();
2576 IfNode* le = cl->outer_loop_end();
2577 Node* sfpt = cl->outer_safepoint();
2578 CountedLoopEndNode* cle = cl->loopexit();
2579 CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
2580 CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
2581 IfFalseNode* cle_out = cle->false_proj();
2582
2583 Node* new_sfpt = nullptr;
2584 Node* new_cle_out = cle_out->clone();
2585 old_new.map(cle_out->_idx, new_cle_out);
2586 if (mode == CloneIncludesStripMined) {
2587 // clone outer loop body
2588 Node* new_l = l->clone();
2589 Node* new_tail = tail->clone();
2590 IfNode* new_le = le->clone()->as_If();
2591 new_sfpt = sfpt->clone();
2592
2593 set_loop(new_l, outer_loop->_parent);
2594 set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
2595 set_loop(new_cle_out, outer_loop->_parent);
2596 set_idom(new_cle_out, new_cle, dd);
2597 set_loop(new_sfpt, outer_loop->_parent);
2598 set_idom(new_sfpt, new_cle_out, dd);
2599 set_loop(new_le, outer_loop->_parent);
2600 set_idom(new_le, new_sfpt, dd);
2601 set_loop(new_tail, outer_loop->_parent);
2602 set_idom(new_tail, new_le, dd);
2603 set_idom(new_cl, new_l, dd);
2604
2605 old_new.map(l->_idx, new_l);
2606 old_new.map(tail->_idx, new_tail);
2607 old_new.map(le->_idx, new_le);
2608 old_new.map(sfpt->_idx, new_sfpt);
2609
2610 new_l->set_req(LoopNode::LoopBackControl, new_tail);
2611 new_l->set_req(0, new_l);
2612 new_tail->set_req(0, new_le);
2613 new_le->set_req(0, new_sfpt);
2614 new_sfpt->set_req(0, new_cle_out);
2615 new_cle_out->set_req(0, new_cle);
2616 new_cl->set_req(LoopNode::EntryControl, new_l);
2617
2618 _igvn.register_new_node_with_optimizer(new_l);
2619 _igvn.register_new_node_with_optimizer(new_tail);
2620 _igvn.register_new_node_with_optimizer(new_le);
2621 } else {
2622 Node *newhead = old_new[loop->_head->_idx];
2623 newhead->as_Loop()->clear_strip_mined();
2624 _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
2625 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2626 }
// Look at data nodes that were assigned a control in the outer
// loop: they are kept in the outer loop by the safepoint, so start
// from the safepoint node's inputs.
2630 IdealLoopTree* outer_loop = get_loop(l);
2631 Node_Stack stack(2);
2632 stack.push(sfpt, 1);
2633 uint new_counter = C->unique();
2634 while (stack.size() > 0) {
2635 Node* n = stack.node();
2636 uint i = stack.index();
2637 while (i < n->req() &&
2638 (n->in(i) == nullptr ||
2639 !has_ctrl(n->in(i)) ||
2640 get_loop(get_ctrl(n->in(i))) != outer_loop ||
2641 (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
2642 i++;
2643 }
2644 if (i < n->req()) {
2645 stack.set_index(i+1);
2646 stack.push(n->in(i), 0);
2647 } else {
2648 assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
2649 Node* m = n == sfpt ? new_sfpt : n->clone();
2650 if (m != nullptr) {
2651 for (uint i = 0; i < n->req(); i++) {
2652 if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) {
2653 m->set_req(i, old_new[m->in(i)->_idx]);
2654 }
2655 }
2656 } else {
2657 assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
2658 }
2659 if (n != sfpt) {
2660 extra_data_nodes.push(n);
2661 _igvn.register_new_node_with_optimizer(m);
2662 assert(get_ctrl(n) == cle_out, "what other control?");
2663 set_ctrl(m, new_cle_out);
2664 old_new.map(n->_idx, m);
2665 }
2666 stack.pop();
2667 }
2668 }
2669 if (mode == CloneIncludesStripMined) {
2670 _igvn.register_new_node_with_optimizer(new_sfpt);
2671 _igvn.register_new_node_with_optimizer(new_cle_out);
2672 }
2673 // Some other transformation may have pessimistically assigned some
2674 // data nodes to the outer loop. Set their control so they are out
2675 // of the outer loop.
2676 ResourceMark rm;
2677 Unique_Node_List wq;
2678 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2679 Node* old = extra_data_nodes.at(i);
2680 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2681 }
2682
2683 for (uint i = 0; i < loop->_body.size(); i++) {
2684 Node* old = loop->_body.at(i);
2685 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2686 }
2687
2688 Node* inner_out = sfpt->in(0);
2689 if (inner_out->outcnt() > 1) {
2690 collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true);
2691 }
2692
2693 Node* new_ctrl = cl->outer_loop_exit();
2694 assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
2695 for (uint i = 0; i < wq.size(); i++) {
2696 Node* n = wq.at(i);
2697 set_ctrl(n, new_ctrl);
2698 if (n->in(0) != nullptr) {
2699 _igvn.replace_input_of(n, 0, new_ctrl);
2700 }
2701 collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false);
2702 }
2703 } else {
2704 Node *newhead = old_new[loop->_head->_idx];
2705 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2706 }
2707 }
2708
2709 //------------------------------clone_loop-------------------------------------
2710 //
2711 // C L O N E A L O O P B O D Y
2712 //
2713 // This is the basic building block of the loop optimizations. It clones an
2714 // entire loop body. It makes an old_new loop body mapping; with this mapping
2715 // you can find the new-loop equivalent to an old-loop node. All new-loop
2716 // nodes are exactly equal to their old-loop counterparts, all edges are the
2717 // same. All exits from the old-loop now have a RegionNode that merges the
2718 // equivalent new-loop path. This is true even for the normal "loop-exit"
2719 // condition. All uses of loop-invariant old-loop values now come from (one
2720 // or more) Phis that merge their new-loop equivalents.
2721 //
2722 // This operation leaves the graph in an illegal state: there are two valid
2723 // control edges coming from the loop pre-header to both loop bodies. I'll
2724 // definitely have to hack the graph after running this transform.
2725 //
2726 // From this building block I will further edit edges to perform loop peeling
2727 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2728 //
// Parameter side_by_side_idom:
// When side_by_side_idom is null, the dominator tree is constructed for
2731 // the clone loop to dominate the original. Used in construction of
2732 // pre-main-post loop sequence.
2733 // When nonnull, the clone and original are side-by-side, both are
2734 // dominated by the side_by_side_idom node. Used in construction of
2735 // unswitched loops.
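// Typical use (illustrative): a caller allocates a Node_List old_new, calls
// clone_loop(loop, old_new, dd, mode), and then finds the clone of any
// old-loop node via old_new[old->_idx] while rewiring edges for peeling,
// unrolling, range-check elimination, etc.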
2736 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2737 CloneLoopMode mode, Node* side_by_side_idom) {
2738
2739 LoopNode* head = loop->_head->as_Loop();
2740 head->verify_strip_mined(1);
2741
2742 if (C->do_vector_loop() && PrintOpto) {
2743 const char* mname = C->method()->name()->as_quoted_ascii();
2744 if (mname != nullptr) {
2745 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2746 }
2747 }
2748
2749 CloneMap& cm = C->clone_map();
2750 if (C->do_vector_loop()) {
2751 cm.set_clone_idx(cm.max_gen()+1);
2752 #ifndef PRODUCT
2753 if (PrintOpto) {
2754 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2755 loop->dump_head();
2756 }
2757 #endif
2758 }
2759
2760 // Step 1: Clone the loop body. Make the old->new mapping.
2761 clone_loop_body(loop->_body, old_new, &cm);
2762
2763 IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2764
2765 // Step 2: Fix the edges in the new body. If the old input is outside the
2766 // loop use it. If the old input is INside the loop, use the corresponding
2767 // new node instead.
2768 fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false);
2769
2770 Node_List extra_data_nodes; // data nodes in the outer strip mined loop
2771 clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2772
2773 // Step 3: Now fix control uses. Loop varying control uses have already
2774 // been fixed up (as part of all input edges in Step 2). Loop invariant
2775 // control uses must be either an IfFalse or an IfTrue. Make a merge
2776 // point to merge the old and new IfFalse/IfTrue nodes; make the use
2777 // refer to this.
2778 Node_List worklist;
2779 uint new_counter = C->unique();
2780 fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist);
2781
2782 // Step 4: If loop-invariant use is not control, it must be dominated by a
2783 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region
2784 // there if needed. Make a Phi there merging old and new used values.
2785 Node_List *split_if_set = nullptr;
2786 Node_List *split_bool_set = nullptr;
2787 Node_List *split_cex_set = nullptr;
2788 fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);
2789
2790 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2791 Node* old = extra_data_nodes.at(i);
2792 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2793 split_bool_set, split_cex_set, worklist, new_counter,
2794 mode);
2795 }
2796
2797 // Check for IFs that need splitting/cloning. Happens if an IF outside of
2798 // the loop uses a condition set in the loop. The original IF probably
2799 // takes control from one or more OLD Regions (which in turn get from NEW
2800 // Regions). In any case, there will be a set of Phis for each merge point
// from the IF up to where the original BOOL def exits the loop.
2802 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
2803
2804 }
2805
2806 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) {
2807 if (split_if_set) {
2808 while (split_if_set->size()) {
2809 Node *iff = split_if_set->pop();
2810 uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1;
2811 if (iff->in(input)->is_Phi()) {
2812 Node *b = clone_iff(iff->in(input)->as_Phi());
2813 _igvn.replace_input_of(iff, input, b);
2814 }
2815 }
2816 }
2817 if (split_bool_set) {
2818 while (split_bool_set->size()) {
2819 Node *b = split_bool_set->pop();
2820 Node *phi = b->in(1);
2821 assert(phi->is_Phi(), "");
2822 CmpNode *cmp = clone_bool((PhiNode*) phi);
2823 _igvn.replace_input_of(b, 1, cmp);
2824 }
2825 }
2826 if (split_cex_set) {
2827 while (split_cex_set->size()) {
2828 Node *b = split_cex_set->pop();
2829 assert(b->in(0)->is_Region(), "");
2830 assert(b->in(1)->is_Phi(), "");
2831 assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2832 split_up(b, b->in(0), nullptr);
2833 }
2834 }
2835 }
2836
2837 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2838 uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set,
2839 Node_List*& split_bool_set, Node_List*& split_cex_set) {
2840 for(uint i = 0; i < body.size(); i++ ) {
2841 Node* old = body.at(i);
2842 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2843 split_bool_set, split_cex_set, worklist, new_counter,
2844 mode);
2845 }
2846 }
2847
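// Used from clone_loop() (Step 3): for each CFG node of the old body, clone
// the loop-exit projections that use it and insert a Region that merges the
// old and new exits, rewiring the exit's users to that Region.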
2848 void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
2849 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) {
2850 LoopNode* head = loop->_head->as_Loop();
2851 for(uint i = 0; i < body.size(); i++ ) {
2852 Node* old = body.at(i);
2853 if( !old->is_CFG() ) continue;
2854
2855 // Copy uses to a worklist, so I can munge the def-use info
2856 // with impunity.
2857 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) {
2858 worklist.push(old->fast_out(j));
2859 }
2860
2861 while (worklist.size()) { // Visit all uses
2862 Node *use = worklist.pop();
2863 if (!has_node(use)) continue; // Ignore dead nodes
2864 IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use );
2865 if (!loop->is_member(use_loop) && use->is_CFG()) {
2866 // Both OLD and USE are CFG nodes here.
2867 assert(use->is_Proj(), "" );
2868 Node* nnn = old_new[old->_idx];
2869
2870 Node* newuse = nullptr;
2871 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2872 CountedLoopNode* cl = head->as_CountedLoop();
2873 CountedLoopEndNode* cle = cl->loopexit();
2874 // is use the projection that exits the loop from the CountedLoopEndNode?
2875 if (use->in(0) == cle) {
2876 IfFalseNode* cle_out = use->as_IfFalse();
2877 IfNode* le = cl->outer_loop_end();
2878 use = le->false_proj();
2879 use_loop = get_loop(use);
2880 if (mode == CloneIncludesStripMined) {
2881 nnn = old_new[le->_idx];
2882 } else {
2883 newuse = old_new[cle_out->_idx];
2884 }
2885 }
2886 }
2887 if (newuse == nullptr) {
2888 newuse = use->clone();
2889 }
2890
2891 // Clone the loop exit control projection
2892 if (C->do_vector_loop() && cm != nullptr) {
2893 cm->verify_insert_and_clone(use, newuse, cm->clone_idx());
2894 }
2895 newuse->set_req(0,nnn);
2896 _igvn.register_new_node_with_optimizer(newuse);
2897 set_loop(newuse, use_loop);
2898 set_idom(newuse, nnn, dom_depth(nnn) + 1 );
2899
2900 // We need a Region to merge the exit from the peeled body and the
2901 // exit from the old loop body.
2902 RegionNode *r = new RegionNode(3);
2903 uint dd_r = MIN2(dom_depth(newuse), dom_depth(use));
2904 assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" );
2905
2906 // The original user of 'use' uses 'r' instead.
2907 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
2908 Node* useuse = use->last_out(l);
2909 _igvn.rehash_node_delayed(useuse);
2910 uint uses_found = 0;
2911 if (useuse->in(0) == use) {
2912 useuse->set_req(0, r);
2913 uses_found++;
2914 if (useuse->is_CFG()) {
// We can only assert dom_depth >= dd_r (not >) because when new
// control flow is constructed by a loop opt, a node and
// its dominator can end up at the same dom_depth
2918 assert(dom_depth(useuse) >= dd_r, "");
2919 set_idom(useuse, r, dom_depth(useuse));
2920 }
2921 }
2922 for (uint k = 1; k < useuse->req(); k++) {
2923 if( useuse->in(k) == use ) {
2924 useuse->set_req(k, r);
2925 uses_found++;
2926 if (useuse->is_Loop() && k == LoopNode::EntryControl) {
// We can only assert dom_depth >= dd_r (not >) because when new
// control flow is constructed by a loop opt, a node
// and its dominator can end up at the same dom_depth
2930 assert(dom_depth(useuse) >= dd_r , "");
2931 set_idom(useuse, r, dom_depth(useuse));
2932 }
2933 }
2934 }
2935 l -= uses_found; // we deleted 1 or more copies of this edge
2936 }
2937
2938 assert(use->is_Proj(), "loop exit should be projection");
2939 // replace_node_and_forward_ctrl() below moves all nodes that are:
2940 // - control dependent on the loop exit or
2941 // - have control set to the loop exit
2942 // below the post-loop merge point.
2943 // replace_node_and_forward_ctrl() takes a dead control as first input.
2944 // To make it possible to use it, the loop exit projection is cloned and becomes the
2945 // new exit projection. The initial one becomes dead and is "replaced" by the region.
2946 Node* use_clone = use->clone();
2947 register_control(use_clone, use_loop, idom(use), dom_depth(use));
2948 // Now finish up 'r'
2949 r->set_req(1, newuse);
2950 r->set_req(2, use_clone);
2951 _igvn.register_new_node_with_optimizer(r);
2952 set_loop(r, use_loop);
2953 set_idom(r, (side_by_side_idom == nullptr) ? newuse->in(0) : side_by_side_idom, dd_r);
2954 replace_node_and_forward_ctrl(use, r);
2955 // Map the (cloned) old use to the new merge point
2956 old_new.map(use_clone->_idx, r);
2957 } // End of if a loop-exit test
2958 }
2959 }
2960 }
2961
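// Used from clone_loop() (Step 2) and for partial cloning: set control/loop
// info for each cloned node and rewire its inputs to the cloned versions
// whenever the old input was inside the cloned loop.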
2962 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
2963 IdealLoopTree* parent, bool partial) {
2964 for(uint i = 0; i < body.size(); i++ ) {
2965 Node *old = body.at(i);
2966 Node *nnn = old_new[old->_idx];
2967 // Fix CFG/Loop controlling the new node
2968 if (has_ctrl(old)) {
2969 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2970 } else {
2971 set_loop(nnn, parent);
2972 if (old->outcnt() > 0) {
2973 Node* dom = idom(old);
2974 if (old_new[dom->_idx] != nullptr) {
2975 dom = old_new[dom->_idx];
2976 set_idom(nnn, dom, dd );
2977 }
2978 }
2979 }
2980 // Correct edges to the new node
2981 for (uint j = 0; j < nnn->req(); j++) {
2982 Node *n = nnn->in(j);
2983 if (n != nullptr) {
2984 IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n);
2985 if (loop->is_member(old_in_loop)) {
2986 if (old_new[n->_idx] != nullptr) {
2987 nnn->set_req(j, old_new[n->_idx]);
2988 } else {
2989 assert(!body.contains(n), "");
2990 assert(partial, "node not cloned");
2991 }
2992 }
2993 }
2994 }
2995 _igvn.hash_find_insert(nnn);
2996 }
2997 }
2998
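// Used from clone_loop() (Step 1): clone every node of the loop body and
// record the old->new mapping in 'old_new'.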
2999 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) {
3000 for (uint i = 0; i < body.size(); i++) {
3001 Node* old = body.at(i);
3002 Node* nnn = old->clone();
3003 old_new.map(old->_idx, nnn);
3004 if (C->do_vector_loop() && cm != nullptr) {
3005 cm->verify_insert_and_clone(old, nnn, cm->clone_idx());
3006 }
3007 _igvn.register_new_node_with_optimizer(nnn);
3008 }
3009 }
3010
3011
3012 //---------------------- stride_of_possible_iv -------------------------------------
3013 // Looks for an iff/bool/comp with one operand of the compare
3014 // being a cycle involving an add and a phi,
3015 // with an optional truncation (left-shift followed by a right-shift)
3016 // of the add. Returns zero if not an iv.
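// For example (informal sketch): a loop such as
//   for (int i = start; i < limit; i += 4) { ... }
// leaves a cycle phi -> AddI(phi, 4) -> phi with the CmpI taking the phi (or the
// incremented value) as one operand and the loop-invariant limit as the other; for
// that shape this method would return 4. For sub-int inductions the AddI may be
// wrapped in a left-shift/right-shift pair, the optional truncation mentioned above.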
3017 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
3018 Node* trunc1 = nullptr;
3019 Node* trunc2 = nullptr;
3020 const TypeInteger* ttype = nullptr;
3021 if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) {
3022 return 0;
3023 }
3024 BoolNode* bl = iff->in(1)->as_Bool();
3025 Node* cmp = bl->in(1);
3026 if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
3027 return 0;
3028 }
3029 // Must have an invariant operand
3030 if (ctrl_is_member(get_loop(iff), cmp->in(2))) {
3031 return 0;
3032 }
3033 Node* add2 = nullptr;
3034 Node* cmp1 = cmp->in(1);
3035 if (cmp1->is_Phi()) {
3036 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
3037 Node* phi = cmp1;
3038 for (uint i = 1; i < phi->req(); i++) {
3039 Node* in = phi->in(i);
3040 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
3041 &trunc1, &trunc2, &ttype, T_INT);
3042 if (add && add->in(1) == phi) {
3043 add2 = add->in(2);
3044 break;
3045 }
3046 }
3047 } else {
3048 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
3049 Node* addtrunc = cmp1;
3050 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
3051 &trunc1, &trunc2, &ttype, T_INT);
3052 if (add && add->in(1)->is_Phi()) {
3053 Node* phi = add->in(1);
3054 for (uint i = 1; i < phi->req(); i++) {
3055 if (phi->in(i) == addtrunc) {
3056 add2 = add->in(2);
3057 break;
3058 }
3059 }
3060 }
3061 }
3062 if (add2 != nullptr) {
3063 const TypeInt* add2t = _igvn.type(add2)->is_int();
3064 if (add2t->is_con()) {
3065 return add2t->get_con();
3066 }
3067 }
3068 return 0;
3069 }
3070
3071
3072 //---------------------- stay_in_loop -------------------------------------
// Return the (unique) control output node that's in the loop (if it exists).
Node* PhaseIdealLoop::stay_in_loop(Node* n, IdealLoopTree* loop) {
3075 Node* unique = nullptr;
3076 if (!n) return nullptr;
3077 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3078 Node* use = n->fast_out(i);
3079 if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
3080 if (unique != nullptr) {
3081 return nullptr;
3082 }
3083 unique = use;
3084 }
3085 }
3086 return unique;
3087 }
3088
3089 //------------------------------ register_node -------------------------------------
3090 // Utility to register node "n" with PhaseIdealLoop
3091 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) {
3092 _igvn.register_new_node_with_optimizer(n);
3093 loop->_body.push(n);
3094 if (n->is_CFG()) {
3095 set_loop(n, loop);
3096 set_idom(n, pred, ddepth);
3097 } else {
3098 set_ctrl(n, pred);
3099 }
3100 }
3101
3102 //------------------------------ proj_clone -------------------------------------
3103 // Utility to create an if-projection
3104 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
3105 ProjNode* c = p->clone()->as_Proj();
3106 c->set_req(0, iff);
3107 return c;
3108 }
3109
3110 //------------------------------ short_circuit_if -------------------------------------
3111 // Force the iff control output to be the live_proj
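// For example (informal): short_circuit_if(iff, true_proj) rewrites iff's condition to
// intcon(1), so the true projection stays live and IGVN can later fold away the other
// path. Passing a null iff (as insert_region_before_proj() does) only returns the
// constant, to be used as the condition of a newly created If.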
3112 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
3113 guarantee(live_proj != nullptr, "null projection");
3114 int proj_con = live_proj->_con;
3115 assert(proj_con == 0 || proj_con == 1, "false or true projection");
3116 Node* con = intcon(proj_con);
3117 if (iff) {
3118 iff->set_req(1, con);
3119 }
3120 return con;
3121 }
3122
3123 //------------------------------ insert_if_before_proj -------------------------------------
3124 // Insert a new if before an if projection (* - new node)
3125 //
3126 // before
3127 // if(test)
3128 // / \
3129 // v v
3130 // other-proj proj (arg)
3131 //
3132 // after
3133 // if(test)
3134 // / \
3135 // / v
3136 // | * proj-clone
3137 // v |
3138 // other-proj v
3139 // * new_if(relop(cmp[IU](left,right)))
3140 // / \
3141 // v v
3142 // * new-proj proj
3143 // (returned)
3144 //
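// For example (informal): insert_cmpi_loop_exit() below uses this, for a positive stride, as
//   insert_if_before_proj(i, Signed, BoolTest::lt, limit, stay_in_loop_proj)
// which inserts a new "if (i < limit)" in front of stay_in_loop_proj; its taken path leads
// to stay_in_loop_proj and the returned projection becomes an additional loop exit.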
3145 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
3146 IfNode* iff = proj->in(0)->as_If();
3147 IdealLoopTree *loop = get_loop(proj);
3148 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3149 uint ddepth = dom_depth(proj);
3150
3151 _igvn.rehash_node_delayed(iff);
3152 _igvn.rehash_node_delayed(proj);
3153
3154 proj->set_req(0, nullptr); // temporary disconnect
3155 ProjNode* proj2 = proj_clone(proj, iff);
3156 register_node(proj2, loop, iff, ddepth);
3157
3158 Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
3159 register_node(cmp, loop, proj2, ddepth);
3160
3161 BoolNode* bol = new BoolNode(cmp, relop);
3162 register_node(bol, loop, proj2, ddepth);
3163
3164 int opcode = iff->Opcode();
3165 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
3166 IfNode* new_if = IfNode::make_with_same_profile(iff, proj2, bol);
3167 register_node(new_if, loop, proj2, ddepth);
3168
3169 proj->set_req(0, new_if); // reattach
3170 set_idom(proj, new_if, ddepth);
3171
3172 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
3173 guarantee(new_exit != nullptr, "null exit node");
3174 register_node(new_exit, get_loop(other_proj), new_if, ddepth);
3175
3176 return new_exit;
3177 }
3178
3179 //------------------------------ insert_region_before_proj -------------------------------------
3180 // Insert a region before an if projection (* - new node)
3181 //
3182 // before
3183 // if(test)
3184 // / |
3185 // v |
3186 // proj v
3187 // other-proj
3188 //
3189 // after
3190 // if(test)
3191 // / |
3192 // v |
3193 // * proj-clone v
3194 // | other-proj
3195 // v
3196 // * new-region
3197 // |
3198 // v
3199 // * dum_if
3200 // / \
3201 // v \
3202 // * dum-proj v
3203 // proj
3204 //
3205 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
3206 IfNode* iff = proj->in(0)->as_If();
3207 IdealLoopTree *loop = get_loop(proj);
3208 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3209 uint ddepth = dom_depth(proj);
3210
3211 _igvn.rehash_node_delayed(iff);
3212 _igvn.rehash_node_delayed(proj);
3213
3214 proj->set_req(0, nullptr); // temporary disconnect
3215 ProjNode* proj2 = proj_clone(proj, iff);
3216 register_node(proj2, loop, iff, ddepth);
3217
3218 RegionNode* reg = new RegionNode(2);
3219 reg->set_req(1, proj2);
3220 register_node(reg, loop, iff, ddepth);
3221
3222 IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt);
3223 register_node(dum_if, loop, reg, ddepth);
3224
3225 proj->set_req(0, dum_if); // reattach
3226 set_idom(proj, dum_if, ddepth);
3227
3228 ProjNode* dum_proj = proj_clone(other_proj, dum_if);
3229 register_node(dum_proj, loop, dum_if, ddepth);
3230
3231 return reg;
3232 }
3233
3234 // Idea
3235 // ----
3236 // Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops
// require a signed loop exit test. When calling this method, we have only found a suitable unsigned test to partially
// peel with. Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as
// the new loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we had used the
// unsigned test alone instead:
3241 //
3242 // Before Partial Peeling:
3243 // Loop:
3244 // <peeled section>
3245 // Split off signed loop exit test
3246 // <-- CUT HERE -->
3247 // Unchanged unsigned loop exit test
3248 // <rest of unpeeled section>
3249 // goto Loop
3250 //
3251 // After Partial Peeling:
3252 // <cloned peeled section>
3253 // Cloned split off signed loop exit test
3254 // Loop:
3255 // Unchanged unsigned loop exit test
3256 // <rest of unpeeled section>
3257 // <peeled section>
3258 // Split off signed loop exit test
3259 // goto Loop
3260 //
3261 // Details
3262 // -------
3263 // Before:
3264 // if (i <u limit) Unsigned loop exit condition
3265 // / |
3266 // v v
3267 // exit-proj stay-in-loop-proj
3268 //
3269 // Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
3270 // before the CmpU on the stay-in-loop path and keep both tests:
3271 //
//    if (i < limit)     Signed loop exit test
3273 // / |
3274 // / if (i <u limit) Unsigned loop exit test
3275 // / / |
3276 // v v v
3277 // exit-region stay-in-loop-proj
3278 //
3279 // Implementation
3280 // --------------
3281 // We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
3282 // loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
3283 // exit tests is preserved, and their loop nesting is correct.
3284 //
// To achieve that, we clone the unsigned loop exit test completely (leaving it unchanged), insert the signed loop exit
// test above it, and kill the original unsigned loop exit test by setting its condition to a constant
// (i.e. stay-in-loop-const in the graph below) such that IGVN can fold it later:
3288 //
3289 // if (stay-in-loop-const) Killed original unsigned loop exit test
3290 // / |
3291 // / v
3292 // / if (i < limit) Split off signed loop exit test
3293 // / / |
3294 // / / v
3295 // / / if (i <u limit) Cloned unsigned loop exit test
3296 // / / / |
3297 // v v v |
3298 // exit-region |
3299 // | |
3300 // dummy-if |
3301 // / | |
3302 // dead | |
3303 // v v
3304 // exit-proj stay-in-loop-proj
3305 //
// Note: The dummy-if is inserted to create a region between the original (to-be-killed) unsigned loop exit test and
// its exit projection, merging the loop exits while keeping the exit projection (also see insert_region_before_proj()).
3308 //
3309 // Requirements
3310 // ------------
3311 // Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
3312 // the same as before with only a single unsigned test. This is only possible if certain requirements are met.
3313 // Otherwise, we need to bail out (see comments in the code below).
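// Example (informal, Java-level sketch): a not-yet-counted loop whose only suitable exit
// test is an unsigned in-range check, e.g.
//   while (true) {
//     if (Integer.compareUnsigned(i, limit) >= 0) break;   // i >=u limit, CmpU
//     ...
//     i += stride;
//   }
// can only be cut at the CmpU. If the type of 'limit' is known to be non-negative, the
// signed test below (i >= limit for a positive stride, i < 0 for a negative one) is split
// off and used as the new cut point while the unsigned test is kept unchanged.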
3314 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
3315 const bool Signed = true;
3316 const bool Unsigned = false;
3317
3318 BoolNode* bol = if_cmpu->in(1)->as_Bool();
3319 if (bol->_test._test != BoolTest::lt) {
3320 return nullptr;
3321 }
3322 CmpNode* cmpu = bol->in(1)->as_Cmp();
3323 assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");
3324
3325 int stride = stride_of_possible_iv(if_cmpu);
3326 if (stride == 0) {
3327 return nullptr;
3328 }
3329
3330 Node* lp_proj = stay_in_loop(if_cmpu, loop);
3331 guarantee(lp_proj != nullptr, "null loop node");
3332
3333 ProjNode* lp_continue = lp_proj->as_Proj();
3334 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
3335 if (!lp_exit->is_IfFalse()) {
// The loop is exited when (i <u limit) holds, i.e. when (i >= 0 && i < limit).
// We therefore cannot split off a single signed exit condition.
3338 return nullptr;
3339 }
3340 // The unsigned loop exit condition is
3341 // !(i <u limit)
3342 // = i >=u limit
3343 //
3344 // First, we note that for any x for which
3345 // 0 <= x <= INT_MAX
3346 // we can convert x to an unsigned int and still get the same guarantee:
3347 // 0 <= (uint) x <= INT_MAX = (uint) INT_MAX
3348 // 0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX (LEMMA)
3349 //
3350 // With that in mind, if
3351 // limit >= 0 (COND)
3352 // then the unsigned loop exit condition
3353 // i >=u limit (ULE)
3354 // is equivalent to
3355 // i < 0 || i >= limit (SLE-full)
// because either i is negative and therefore always greater than INT_MAX when converted to unsigned
//   (uint) i >=u INT_MAX >= limit >= 0
3358 // or otherwise
3359 // i >= limit >= 0
3360 // holds due to (LEMMA).
3361 //
3362 // For completeness, a counterexample with limit < 0:
3363 // Assume i = -3 and limit = -2:
3364 // i < 0
//   -3 < 0
3366 // is true and thus also "i < 0 || i >= limit". But
3367 // i >=u limit
3368 // -3 >=u -2
3369 // is false.
3370 Node* limit = cmpu->in(2);
3371 const TypeInt* type_limit = _igvn.type(limit)->is_int();
3372 if (type_limit->_lo < 0) {
3373 return nullptr;
3374 }
3375
3376 // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
3377 // stride < 0:
3378 // i < 0 (SLE = SLE-negative)
3379 // stride > 0:
3380 // i >= limit (SLE = SLE-positive)
3381 // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
3382 //
3383 // Loop:
3384 // <peeled section>
3385 // i >= limit (SLE-positive)
3386 // <-- CUT HERE -->
3387 // i >=u limit (ULE)
3388 // <rest of unpeeled section>
3389 // goto Loop
3390 //
3391 // We exit the loop if:
3392 // (SLE) is true OR (ULE) is true
3393 // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
3394 // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
3395 // (SLE) IMPLIES (ULE)
3396 // This indeed holds when (COND) is given:
3397 // - stride > 0:
3398 // i >= limit // (SLE = SLE-positive)
3399 // i >= limit >= 0 // (COND)
3400 // i >=u limit >= 0 // (LEMMA)
3401 // which is the unsigned loop exit condition (ULE).
3402 // - stride < 0:
3403 // i < 0 // (SLE = SLE-negative)
//   (uint) i >u INT_MAX          // (NEG) all negative values are greater than INT_MAX when converted to unsigned
//   INT_MAX >= limit >= 0        // (COND)
//   INT_MAX >=u limit >= 0       // (LEMMA)
3407 // and thus from (NEG) and (LEMMA):
3408 // i >=u limit
3409 // which is the unsigned loop exit condition (ULE).
3410 //
3411 //
3412 // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0):
3413 // <cloned peeled section>
3414 // i >= limit (SLE-positive)
3415 // Loop:
3416 // i >=u limit (ULE)
3417 // <rest of unpeeled section>
3418 // <peeled section>
3419 // i >= limit (SLE-positive)
3420 // goto Loop
3421 Node* rhs_cmpi;
3422 if (stride > 0) {
3423 rhs_cmpi = limit; // For i >= limit
3424 } else {
3425 rhs_cmpi = makecon(TypeInt::ZERO); // For i < 0
3426 }
3427 // Create a new region on the exit path
3428 RegionNode* reg = insert_region_before_proj(lp_exit);
3429 guarantee(reg != nullptr, "null region node");
3430
3431 // Clone the if-cmpu-true-false using a signed compare
3432 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
3433 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue);
3434 reg->add_req(cmpi_exit);
3435
3436 // Clone the if-cmpu-true-false
3437 BoolTest::mask rel_u = bol->_test._test;
3438 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
3439 reg->add_req(cmpu_exit);
3440
3441 // Force original if to stay in loop.
3442 short_circuit_if(if_cmpu, lp_continue);
3443
3444 return cmpi_exit->in(0)->as_If();
3445 }
3446
3447 //------------------------------ remove_cmpi_loop_exit -------------------------------------
3448 // Remove a previously inserted signed compare loop exit.
3449 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
3450 Node* lp_proj = stay_in_loop(if_cmp, loop);
3451 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
3452 stay_in_loop(lp_proj, loop)->is_If() &&
3453 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
3454 Node* con = makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
3455 if_cmp->set_req(1, con);
3456 }
3457
3458 //------------------------------ scheduled_nodelist -------------------------------------
3459 // Create a post order schedule of nodes that are in the
3460 // "member" set. The list is returned in "sched".
3461 // The first node in "sched" is the loop head, followed by
3462 // nodes which have no inputs in the "member" set, and then
3463 // followed by the nodes that have an immediate input dependence
3464 // on a node in "sched".
void PhaseIdealLoop::scheduled_nodelist(IdealLoopTree* loop, VectorSet& member, Node_List& sched) {
3466
3467 assert(member.test(loop->_head->_idx), "loop head must be in member set");
3468 VectorSet visited;
3469 Node_Stack nstack(loop->_body.size());
3470
3471 Node* n = loop->_head; // top of stack is cached in "n"
3472 uint idx = 0;
3473 visited.set(n->_idx);
3474
3475 // Initially push all with no inputs from within member set
for (uint i = 0; i < loop->_body.size(); i++) {
3477 Node *elt = loop->_body.at(i);
3478 if (member.test(elt->_idx)) {
3479 bool found = false;
3480 for (uint j = 0; j < elt->req(); j++) {
3481 Node* def = elt->in(j);
3482 if (def && member.test(def->_idx) && def != elt) {
3483 found = true;
3484 break;
3485 }
3486 }
3487 if (!found && elt != loop->_head) {
3488 nstack.push(n, idx);
3489 n = elt;
3490 assert(!visited.test(n->_idx), "not seen yet");
3491 visited.set(n->_idx);
3492 }
3493 }
3494 }
3495
3496 // traverse out's that are in the member set
3497 while (true) {
3498 if (idx < n->outcnt()) {
3499 Node* use = n->raw_out(idx);
3500 idx++;
3501 if (!visited.test_set(use->_idx)) {
3502 if (member.test(use->_idx)) {
3503 nstack.push(n, idx);
3504 n = use;
3505 idx = 0;
3506 }
3507 }
3508 } else {
3509 // All outputs processed
3510 sched.push(n);
3511 if (nstack.is_empty()) break;
3512 n = nstack.node();
3513 idx = nstack.index();
3514 nstack.pop();
3515 }
3516 }
3517 }
3518
3519
3520 //------------------------------ has_use_in_set -------------------------------------
3521 // Has a use in the vector set
bool PhaseIdealLoop::has_use_in_set(Node* n, VectorSet& vset) {
3523 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3524 Node* use = n->fast_out(j);
3525 if (vset.test(use->_idx)) {
3526 return true;
3527 }
3528 }
3529 return false;
3530 }
3531
3532
3533 //------------------------------ has_use_internal_to_set -------------------------------------
// Has a use internal to the vector set (i.e. not in a phi at the loop head)
bool PhaseIdealLoop::has_use_internal_to_set(Node* n, VectorSet& vset, IdealLoopTree* loop) {
3536 Node* head = loop->_head;
3537 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3538 Node* use = n->fast_out(j);
3539 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
3540 return true;
3541 }
3542 }
3543 return false;
3544 }
3545
3546
3547 //------------------------------ clone_for_use_outside_loop -------------------------------------
// Clone "n" for uses that are outside of the loop
int PhaseIdealLoop::clone_for_use_outside_loop(IdealLoopTree* loop, Node* n, Node_List& worklist) {
3550 int cloned = 0;
3551 assert(worklist.size() == 0, "should be empty");
3552 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3553 Node* use = n->fast_out(j);
if (!loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use))) {
3555 worklist.push(use);
3556 }
3557 }
3558
3559 if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor,
3560 "Too many clones required in clone_for_use_outside_loop in partial peeling")) {
3561 return -1;
3562 }
3563
while (worklist.size() > 0) {
Node* use = worklist.pop();
3566 if (!has_node(use) || use->in(0) == C->top()) continue;
3567 uint j;
3568 for (j = 0; j < use->req(); j++) {
3569 if (use->in(j) == n) break;
3570 }
3571 assert(j < use->req(), "must be there");
3572
3573 // clone "n" and insert it between the inputs of "n" and the use outside the loop
3574 Node* n_clone = n->clone();
3575 _igvn.replace_input_of(use, j, n_clone);
3576 cloned++;
3577 Node* use_c;
3578 if (!use->is_Phi()) {
3579 use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
3580 } else {
3581 // Use in a phi is considered a use in the associated predecessor block
3582 use_c = use->in(0)->in(j);
3583 }
3584 set_ctrl(n_clone, use_c);
3585 assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
3586 get_loop(use_c)->_body.push(n_clone);
3587 _igvn.register_new_node_with_optimizer(n_clone);
3588 #ifndef PRODUCT
3589 if (TracePartialPeeling) {
3590 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
3591 }
3592 #endif
3593 }
3594 return cloned;
3595 }
3596
3597
3598 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
3599 // clone "n" for special uses that are in the not_peeled region.
3600 // If these def-uses occur in separate blocks, the code generator
3601 // marks the method as not compilable. For example, if a "BoolNode"
3602 // is in a different basic block than the "IfNode" that uses it, then
3603 // the compilation is aborted in the code generator.
void PhaseIdealLoop::clone_for_special_use_inside_loop(IdealLoopTree* loop, Node* n,
                                                       VectorSet& not_peel, Node_List& sink_list, Node_List& worklist) {
3606 if (n->is_Phi() || n->is_Load()) {
3607 return;
3608 }
3609 assert(worklist.size() == 0, "should be empty");
3610 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3611 Node* use = n->fast_out(j);
if (not_peel.test(use->_idx) &&
3613 (use->is_If() || use->is_CMove() || use->is_Bool() || use->is_OpaqueInitializedAssertionPredicate()) &&
3614 use->in(1) == n) {
3615 worklist.push(use);
3616 }
3617 }
3618 if (worklist.size() > 0) {
3619 // clone "n" and insert it between inputs of "n" and the use
3620 Node* n_clone = n->clone();
3621 loop->_body.push(n_clone);
3622 _igvn.register_new_node_with_optimizer(n_clone);
3623 set_ctrl(n_clone, get_ctrl(n));
3624 sink_list.push(n_clone);
3625 not_peel.set(n_clone->_idx);
3626 #ifndef PRODUCT
3627 if (TracePartialPeeling) {
3628 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
3629 }
3630 #endif
while (worklist.size() > 0) {
Node* use = worklist.pop();
3633 _igvn.rehash_node_delayed(use);
3634 for (uint j = 1; j < use->req(); j++) {
3635 if (use->in(j) == n) {
3636 use->set_req(j, n_clone);
3637 }
3638 }
3639 }
3640 }
3641 }
3642
3643
3644 //------------------------------ insert_phi_for_loop -------------------------------------
3645 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
void PhaseIdealLoop::insert_phi_for_loop(Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp) {
Node* phi = PhiNode::make(lp, back_edge_val);
3648 phi->set_req(LoopNode::EntryControl, lp_entry_val);
3649 // Use existing phi if it already exists
3650 Node *hit = _igvn.hash_find_insert(phi);
if (hit == nullptr) {
3652 _igvn.register_new_node_with_optimizer(phi);
3653 set_ctrl(phi, lp);
3654 } else {
3655 // Remove the new phi from the graph and use the hit
3656 _igvn.remove_dead_node(phi);
3657 phi = hit;
3658 }
3659 _igvn.replace_input_of(use, idx, phi);
3660 }
3661
3662 #ifdef ASSERT
3663 //------------------------------ is_valid_loop_partition -------------------------------------
3664 // Validate the loop partition sets: peel and not_peel
bool PhaseIdealLoop::is_valid_loop_partition(IdealLoopTree* loop, VectorSet& peel, Node_List& peel_list,
                                             VectorSet& not_peel) {
3667 uint i;
3668 // Check that peel_list entries are in the peel set
3669 for (i = 0; i < peel_list.size(); i++) {
3670 if (!peel.test(peel_list.at(i)->_idx)) {
3671 return false;
3672 }
3673 }
// Check that all loop members are in either the peel set or the not_peel set
for (i = 0; i < loop->_body.size(); i++) {
3676 Node *def = loop->_body.at(i);
3677 uint di = def->_idx;
3678 // Check that peel set elements are in peel_list
3679 if (peel.test(di)) {
3680 if (not_peel.test(di)) {
3681 return false;
3682 }
3683 // Must be in peel_list also
3684 bool found = false;
3685 for (uint j = 0; j < peel_list.size(); j++) {
3686 if (peel_list.at(j)->_idx == di) {
3687 found = true;
3688 break;
3689 }
3690 }
3691 if (!found) {
3692 return false;
3693 }
3694 } else if (not_peel.test(di)) {
3695 if (peel.test(di)) {
3696 return false;
3697 }
3698 } else {
3699 return false;
3700 }
3701 }
3702 return true;
3703 }
3704
3705 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
3706 // Ensure a use outside of loop is of the right form
bool PhaseIdealLoop::is_valid_clone_loop_exit_use(IdealLoopTree* loop, Node* use, uint exit_idx) {
Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
3709 return (use->is_Phi() &&
3710 use_c->is_Region() && use_c->req() == 3 &&
3711 (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
3712 use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
3713 use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
loop->is_member(get_loop(use_c->in(exit_idx)->in(0))));
3715 }
3716
3717 //------------------------------ is_valid_clone_loop_form -------------------------------------
3718 // Ensure that all uses outside of loop are of the right form
bool PhaseIdealLoop::is_valid_clone_loop_form(IdealLoopTree* loop, Node_List& peel_list,
                                              uint orig_exit_idx, uint clone_exit_idx) {
3721 uint len = peel_list.size();
3722 for (uint i = 0; i < len; i++) {
3723 Node *def = peel_list.at(i);
3724
3725 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3726 Node *use = def->fast_out(j);
3727 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3728 if (!loop->is_member(get_loop(use_c))) {
3729 // use is not in the loop, check for correct structure
3730 if (use->in(0) == def) {
3731 // Okay
3732 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
3733 return false;
3734 }
3735 }
3736 }
3737 }
3738 return true;
3739 }
3740 #endif
3741
3742 //------------------------------ partial_peel -------------------------------------
3743 // Partially peel (aka loop rotation) the top portion of a loop (called
3744 // the peel section below) by cloning it and placing one copy just before
3745 // the new loop head and the other copy at the bottom of the new loop.
3746 //
3747 // before after where it came from
3748 //
3749 // stmt1 stmt1
3750 // loop: stmt2 clone
3751 // stmt2 if condA goto exitA clone
3752 // if condA goto exitA new_loop: new
3753 // stmt3 stmt3 clone
3754 // if !condB goto loop if condB goto exitB clone
3755 // exitB: stmt2 orig
3756 // stmt4 if !condA goto new_loop orig
3757 // exitA: goto exitA
3758 // exitB:
3759 // stmt4
3760 // exitA:
3761 //
3762 // Step 1: find the cut point: an exit test on probable
3763 // induction variable.
3764 // Step 2: schedule (with cloning) operations in the peel
3765 // section that can be executed after the cut into
3766 // the section that is not peeled. This may need
3767 // to clone operations into exit blocks. For
3768 // instance, a reference to A[i] in the not-peel
3769 // section and a reference to B[i] in an exit block
3770 // may cause a left-shift of i by 2 to be placed
3771 // in the peel block. This step will clone the left
3772 // shift into the exit block and sink the left shift
3773 // from the peel to the not-peel section.
3774 // Step 3: clone the loop, retarget the control, and insert
3775 // phis for values that are live across the new loop
3776 // head. This is very dependent on the graph structure
// from clone_loop. It creates region nodes for
// exit control and associated phi nodes for values
// that flow out of the loop through that exit. The
// region node is dominated by the clone's control
// projection. So the clone's peel section is placed
// before the new loop head, the clone's not-peel
// section forms the top part of the new loop, and the
// original peel section forms the tail of the new loop.
3785 // Step 4: update the dominator tree and recompute the
3786 // dominator depth.
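//
// Example (informal, Java-level sketch) matching the schematic above:
//    while (true) {
//      i++;                     // stmt2 (peel section)
//      if (i >= limit) break;   // condA: exit test on the probable iv (the cut)
//      sum += a[i];             // stmt3 (not-peel section)
//      if (done) break;         // condB: second exit
//    }
// After partial peeling, i++ and the first condA test execute once before the loop,
// and the rotated loop re-tests condA at its bottom, which is the shape needed to
// later convert it into a counted loop.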
3787 //
3788 // orig
3789 //
3790 // stmt1
3791 // |
3792 // v
3793 // predicates
3794 // |
3795 // v
3796 // loop<----+
3797 // | |
3798 // stmt2 |
3799 // | |
3800 // v |
3801 // ifA |
3802 // / | |
3803 // v v |
3804 // false true ^ <-- last_peel
3805 // / | |
3806 // / ===|==cut |
3807 // / stmt3 | <-- first_not_peel
3808 // / | |
3809 // | v |
3810 // v ifB |
3811 // exitA: / \ |
3812 // / \ |
3813 // v v |
3814 // false true |
3815 // / \ |
3816 // / ----+
3817 // |
3818 // v
3819 // exitB:
3820 // stmt4
3821 //
3822 //
3823 // after clone loop
3824 //
3825 // stmt1
3826 // |
3827 // v
3828 // predicates
3829 // / \
3830 // clone / \ orig
3831 // / \
3832 // / \
3833 // v v
3834 // +---->loop loop<----+
3835 // | | | |
3836 // | stmt2 stmt2 |
3837 // | | | |
3838 // | v v |
3839 // | ifA ifA |
3840 // | | \ / | |
3841 // | v v v v |
3842 // ^ true false false true ^ <-- last_peel
3843 // | | ^ \ / | |
3844 // | cut==|== \ \ / ===|==cut |
3845 // | stmt3 \ \ / stmt3 | <-- first_not_peel
3846 // | | dom | | | |
3847 // | v \ 1v v2 v |
3848 // | ifB regionA ifB |
3849 // | / \ | / \ |
3850 // | / \ v / \ |
3851 // | v v exitA: v v |
3852 // | true false false true |
3853 // | / ^ \ / \ |
3854 // +---- \ \ / ----+
3855 // dom \ /
3856 // \ 1v v2
3857 // regionB
3858 // |
3859 // v
3860 // exitB:
3861 // stmt4
3862 //
3863 //
3864 // after partial peel
3865 //
3866 // stmt1
3867 // |
3868 // v
3869 // predicates
3870 // /
3871 // clone / orig
3872 // / TOP
3873 // / \
3874 // v v
3875 // TOP->loop loop----+
3876 // | | |
3877 // stmt2 stmt2 |
3878 // | | |
3879 // v v |
3880 // ifA ifA |
3881 // | \ / | |
3882 // v v v v |
3883 // true false false true | <-- last_peel
3884 // | ^ \ / +------|---+
3885 // +->newloop \ \ / === ==cut | |
3886 // | stmt3 \ \ / TOP | |
3887 // | | dom | | stmt3 | | <-- first_not_peel
3888 // | v \ 1v v2 v | |
3889 // | ifB regionA ifB ^ v
3890 // | / \ | / \ | |
3891 // | / \ v / \ | |
3892 // | v v exitA: v v | |
3893 // | true false false true | |
3894 // | / ^ \ / \ | |
3895 // | | \ \ / v | |
3896 // | | dom \ / TOP | |
3897 // | | \ 1v v2 | |
3898 // ^ v regionB | |
3899 // | | | | |
3900 // | | v ^ v
3901 // | | exitB: | |
3902 // | | stmt4 | |
3903 // | +------------>-----------------+ |
3904 // | |
3905 // +-----------------<---------------------+
3906 //
3907 //
3908 // final graph
3909 //
3910 // stmt1
3911 // |
3912 // v
3913 // predicates
3914 // |
3915 // v
3916 // stmt2 clone
3917 // |
3918 // v
3919 // ........> ifA clone
3920 // : / |
3921 // dom / |
3922 // : v v
3923 // : false true
3924 // : | |
3925 // : | v
3926 // : | newloop<-----+
3927 // : | | |
3928 // : | stmt3 clone |
3929 // : | | |
3930 // : | v |
3931 // : | ifB |
3932 // : | / \ |
3933 // : | v v |
3934 // : | false true |
3935 // : | | | |
3936 // : | v stmt2 |
3937 // : | exitB: | |
3938 // : | stmt4 v |
3939 // : | ifA orig |
3940 // : | / \ |
3941 // : | / \ |
3942 // : | v v |
3943 // : | false true |
3944 // : | / \ |
3945 // : v v -----+
3946 // RegionA
3947 // |
3948 // v
3949 // exitA
3950 //
bool PhaseIdealLoop::partial_peel(IdealLoopTree* loop, Node_List& old_new) {
3952
3953 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3954 if (!loop->_head->is_Loop()) {
3955 return false;
3956 }
3957 LoopNode *head = loop->_head->as_Loop();
3958
3959 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3960 return false;
3961 }
3962
3963 // Check for complex exit control
3964 for (uint ii = 0; ii < loop->_body.size(); ii++) {
3965 Node *n = loop->_body.at(ii);
3966 int opc = n->Opcode();
3967 if (n->is_Call() ||
3968 opc == Op_Catch ||
3969 opc == Op_CatchProj ||
3970 opc == Op_Jump ||
3971 opc == Op_JumpProj) {
3972 #ifndef PRODUCT
3973 if (TracePartialPeeling) {
3974 tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3975 }
3976 #endif
3977 return false;
3978 }
3979 }
3980
3981 int dd = dom_depth(head);
3982
3983 // Step 1: find cut point
3984
3985 // Walk up dominators to loop head looking for first loop exit
3986 // which is executed on every path thru loop.
3987 IfNode *peel_if = nullptr;
3988 IfNode *peel_if_cmpu = nullptr;
3989
3990 Node *iff = loop->tail();
3991 while (iff != head) {
3992 if (iff->is_If()) {
3993 Node *ctrl = get_ctrl(iff->in(1));
3994 if (ctrl->is_top()) return false; // Dead test on live IF.
3995 // If loop-varying exit-test, check for induction variable
3996 if (loop->is_member(get_loop(ctrl)) &&
3997 loop->is_loop_exit(iff) &&
3998 is_possible_iv_test(iff)) {
3999 Node* cmp = iff->in(1)->in(1);
4000 if (cmp->Opcode() == Op_CmpI) {
4001 peel_if = iff->as_If();
4002 } else {
4003 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
4004 peel_if_cmpu = iff->as_If();
4005 }
4006 }
4007 }
4008 iff = idom(iff);
4009 }
4010
4011 // Prefer signed compare over unsigned compare.
4012 IfNode* new_peel_if = nullptr;
4013 if (peel_if == nullptr) {
4014 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) {
4015 return false; // No peel point found
4016 }
4017 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
4018 if (new_peel_if == nullptr) {
4019 return false; // No peel point found
4020 }
4021 peel_if = new_peel_if;
4022 }
4023 Node* last_peel = stay_in_loop(peel_if, loop);
4024 Node* first_not_peeled = stay_in_loop(last_peel, loop);
4025 if (first_not_peeled == nullptr || first_not_peeled == head) {
4026 return false;
4027 }
4028
4029 #ifndef PRODUCT
4030 if (TraceLoopOpts) {
4031 tty->print("PartialPeel ");
4032 loop->dump_head();
4033 }
4034
4035 if (TracePartialPeeling) {
4036 tty->print_cr("before partial peel one iteration");
4037 Node_List wl;
4038 Node* t = head->in(2);
4039 while (true) {
4040 wl.push(t);
4041 if (t == head) break;
4042 t = idom(t);
4043 }
4044 while (wl.size() > 0) {
4045 Node* tt = wl.pop();
4046 tt->dump();
4047 if (tt == last_peel) tty->print_cr("-- cut --");
4048 }
4049 }
4050 #endif
4051
4052 C->print_method(PHASE_BEFORE_PARTIAL_PEELING, 4, head);
4053
4054 VectorSet peel;
4055 VectorSet not_peel;
4056 Node_List peel_list;
4057 Node_List worklist;
4058 Node_List sink_list;
4059
4060 uint estimate = loop->est_loop_clone_sz(1);
4061 if (exceeding_node_budget(estimate)) {
4062 return false;
4063 }
4064
4065 // Set of cfg nodes to peel are those that are executable from
4066 // the head through last_peel.
4067 assert(worklist.size() == 0, "should be empty");
4068 worklist.push(head);
4069 peel.set(head->_idx);
4070 while (worklist.size() > 0) {
4071 Node *n = worklist.pop();
4072 if (n != last_peel) {
4073 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
4074 Node* use = n->fast_out(j);
4075 if (use->is_CFG() &&
4076 loop->is_member(get_loop(use)) &&
4077 !peel.test_set(use->_idx)) {
4078 worklist.push(use);
4079 }
4080 }
4081 }
4082 }
4083
4084 // Set of non-cfg nodes to peel are those that are control
4085 // dependent on the cfg nodes.
4086 for (uint i = 0; i < loop->_body.size(); i++) {
4087 Node *n = loop->_body.at(i);
4088 Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
4089 if (peel.test(n_c->_idx)) {
4090 peel.set(n->_idx);
4091 } else {
4092 not_peel.set(n->_idx);
4093 }
4094 }
4095
4096 // Step 2: move operations from the peeled section down into the
4097 // not-peeled section
4098
4099 // Get a post order schedule of nodes in the peel region
// The result is returned in the right-most argument (peel_list).
4101 scheduled_nodelist(loop, peel, peel_list);
4102
4103 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4104
4105 // For future check for too many new phis
4106 uint old_phi_cnt = 0;
4107 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
4108 Node* use = head->fast_out(j);
4109 if (use->is_Phi()) old_phi_cnt++;
4110 }
4111
4112 #ifndef PRODUCT
4113 if (TracePartialPeeling) {
4114 tty->print_cr("\npeeled list");
4115 }
4116 #endif
4117
4118 // Evacuate nodes in peel region into the not_peeled region if possible
4119 bool too_many_clones = false;
4120 uint new_phi_cnt = 0;
4121 uint cloned_for_outside_use = 0;
4122 for (uint i = 0; i < peel_list.size();) {
4123 Node* n = peel_list.at(i);
4124 #ifndef PRODUCT
4125 if (TracePartialPeeling) n->dump();
4126 #endif
4127 bool incr = true;
4128 if (!n->is_CFG()) {
4129 if (has_use_in_set(n, not_peel)) {
4130 // If not used internal to the peeled region,
4131 // move "n" from peeled to not_peeled region.
4132 if (!has_use_internal_to_set(n, peel, loop)) {
// If not pinned and not a load (which may be anti-dependent on a store)
4134 // and not a CMove (Matcher expects only bool->cmove).
4135 if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) {
4136 int new_clones = clone_for_use_outside_loop(loop, n, worklist);
4137 if (C->failing()) return false;
4138 if (new_clones == -1) {
4139 too_many_clones = true;
4140 break;
4141 }
4142 cloned_for_outside_use += new_clones;
4143 sink_list.push(n);
4144 peel.remove(n->_idx);
4145 not_peel.set(n->_idx);
4146 peel_list.remove(i);
4147 incr = false;
4148 #ifndef PRODUCT
4149 if (TracePartialPeeling) {
4150 tty->print_cr("sink to not_peeled region: %d newbb: %d",
4151 n->_idx, get_ctrl(n)->_idx);
4152 }
4153 #endif
4154 }
4155 } else {
4156 // Otherwise check for special def-use cases that span
4157 // the peel/not_peel boundary such as bool->if
4158 clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
4159 new_phi_cnt++;
4160 }
4161 }
4162 }
4163 if (incr) i++;
4164 }
4165
4166 estimate += cloned_for_outside_use + new_phi_cnt;
4167 bool exceed_node_budget = !may_require_nodes(estimate);
4168 bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;
4169
4170 if (too_many_clones || exceed_node_budget || exceed_phi_limit) {
4171 #ifndef PRODUCT
4172 if (TracePartialPeeling && exceed_phi_limit) {
4173 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
4174 new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F');
4175 }
4176 #endif
4177 if (new_peel_if != nullptr) {
4178 remove_cmpi_loop_exit(new_peel_if, loop);
4179 }
4180 // Inhibit more partial peeling on this loop
4181 assert(!head->is_partial_peel_loop(), "not partial peeled");
4182 head->mark_partial_peel_failed();
4183 if (cloned_for_outside_use > 0) {
4184 // Terminate this round of loop opts because
4185 // the graph outside this loop was changed.
4186 C->set_major_progress();
4187 return true;
4188 }
4189 return false;
4190 }
4191
4192 // Step 3: clone loop, retarget control, and insert new phis
4193
4194 // Create new loop head for new phis and to hang
// the nodes being moved (sunk) from the peel region.
4196 LoopNode* new_head = new LoopNode(last_peel, last_peel);
4197 new_head->set_unswitch_count(head->unswitch_count()); // Preserve
4198 _igvn.register_new_node_with_optimizer(new_head);
4199 assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
4200 _igvn.replace_input_of(first_not_peeled, 0, new_head);
4201 set_loop(new_head, loop);
4202 loop->_body.push(new_head);
4203 not_peel.set(new_head->_idx);
4204 set_idom(new_head, last_peel, dom_depth(first_not_peeled));
4205 set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
4206
4207 while (sink_list.size() > 0) {
4208 Node* n = sink_list.pop();
4209 set_ctrl(n, new_head);
4210 }
4211
4212 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4213
4214 clone_loop(loop, old_new, dd, IgnoreStripMined);
4215
4216 const uint clone_exit_idx = 1;
4217 const uint orig_exit_idx = 2;
4218 assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");
4219
4220 Node* head_clone = old_new[head->_idx];
4221 LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
4222 Node* orig_tail_clone = head_clone->in(2);
4223
4224 // Add phi if "def" node is in peel set and "use" is not
4225
4226 for (uint i = 0; i < peel_list.size(); i++) {
4227 Node *def = peel_list.at(i);
4228 if (!def->is_CFG()) {
4229 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
4230 Node *use = def->fast_out(j);
4231 if (has_node(use) && use->in(0) != C->top() &&
4232 (!peel.test(use->_idx) ||
4233 (use->is_Phi() && use->in(0) == head)) ) {
4234 worklist.push(use);
4235 }
4236 }
while (worklist.size() > 0) {
Node* use = worklist.pop();
4239 for (uint j = 1; j < use->req(); j++) {
4240 Node* n = use->in(j);
4241 if (n == def) {
4242
4243 // "def" is in peel set, "use" is not in peel set
4244 // or "use" is in the entry boundary (a phi) of the peel set
4245
4246 Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
4247
if (loop->is_member(get_loop(use_c))) {
4249 // use is in loop
4250 if (old_new[use->_idx] != nullptr) { // null for dead code
4251 Node* use_clone = old_new[use->_idx];
4252 _igvn.replace_input_of(use, j, C->top());
insert_phi_for_loop(use_clone, j, old_new[def->_idx], def, new_head_clone);
4254 }
4255 } else {
4256 assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
4257 // use is not in the loop, check if the live range includes the cut
4258 Node* lp_if = use_c->in(orig_exit_idx)->in(0);
4259 if (not_peel.test(lp_if->_idx)) {
4260 assert(j == orig_exit_idx, "use from original loop");
insert_phi_for_loop(use, clone_exit_idx, old_new[def->_idx], def, new_head_clone);
4262 }
4263 }
4264 }
4265 }
4266 }
4267 }
4268 }
4269
4270 // Step 3b: retarget control
4271
4272 // Redirect control to the new loop head if a cloned node in
4273 // the not_peeled region has control that points into the peeled region.
// This is necessary because the cloned peeled region will be outside
4275 // the loop.
4276 // from to
4277 // cloned-peeled <---+
4278 // new_head_clone: | <--+
4279 // cloned-not_peeled in(0) in(0)
4280 // orig-peeled
4281
4282 for (uint i = 0; i < loop->_body.size(); i++) {
4283 Node *n = loop->_body.at(i);
4284 if (!n->is_CFG() && n->in(0) != nullptr &&
4285 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
4286 Node* n_clone = old_new[n->_idx];
4287 if (n_clone->depends_only_on_test()) {
// If this node depends_only_on_test, it would be rewired to the loop head, which is not the
// correct test
4290 Node* pinned_clone = n_clone->pin_node_under_control();
4291 if (pinned_clone != nullptr) {
4292 register_new_node_with_ctrl_of(pinned_clone, n_clone);
4293 old_new.map(n->_idx, pinned_clone);
4294 _igvn.replace_node(n_clone, pinned_clone);
4295 n_clone = pinned_clone;
4296 }
4297 }
4298 _igvn.replace_input_of(n_clone, 0, new_head_clone);
4299 }
4300 }
4301
4302 // Backedge of the surviving new_head (the clone) is original last_peel
4303 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
4304
4305 // Cut first node in original not_peel set
4306 _igvn.rehash_node_delayed(new_head); // Multiple edge updates:
4307 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of
4308 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls
4309
4310 // Copy head_clone back-branch info to original head
4311 // and remove original head's loop entry and
4312 // clone head's back-branch
4313 _igvn.rehash_node_delayed(head); // Multiple edge updates
4314 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
4315 head->set_req(LoopNode::LoopBackControl, C->top());
4316 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
4317
4318 // Similarly modify the phis
4319 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
4320 Node* use = head->fast_out(k);
4321 if (use->is_Phi() && use->outcnt() > 0) {
4322 Node* use_clone = old_new[use->_idx];
4323 _igvn.rehash_node_delayed(use); // Multiple edge updates
4324 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
4325 use->set_req(LoopNode::LoopBackControl, C->top());
4326 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
4327 }
4328 }
4329
4330 // Step 4: update dominator tree and dominator depth
4331
4332 set_idom(head, orig_tail_clone, dd);
4333 recompute_dom_depth();
4334
4335 // Inhibit more partial peeling on this loop
4336 new_head_clone->set_partial_peel_loop();
4337 C->set_major_progress();
4338 loop->record_for_igvn();
4339
4340 #ifndef PRODUCT
4341 if (TracePartialPeeling) {
4342 tty->print_cr("\nafter partial peel one iteration");
4343 Node_List wl;
4344 Node* t = last_peel;
4345 while (true) {
4346 wl.push(t);
4347 if (t == head_clone) break;
4348 t = idom(t);
4349 }
4350 while (wl.size() > 0) {
4351 Node* tt = wl.pop();
4352 if (tt == head) tty->print_cr("orig head");
4353 else if (tt == new_head_clone) tty->print_cr("new head");
4354 else if (tt == head_clone) tty->print_cr("clone head");
4355 tt->dump();
4356 }
4357 }
4358 #endif
4359
4360 C->print_method(PHASE_AFTER_PARTIAL_PEELING, 4, new_head_clone);
4361
4362 return true;
4363 }
4364
4365 #ifdef ASSERT
4366
4367 // Moves Template Assertion Predicates to a target loop by cloning and killing the old ones. The target loop is the
// original, not-cloned loop. This is currently only used with StressDuplicateBackedge, which is a develop flag and
// therefore false in product builds. We can thus guard it with an ifdef. More details can be found at the use-site.
4370 class MoveAssertionPredicatesVisitor : public PredicateVisitor {
4371 ClonePredicateToTargetLoop _clone_predicate_to_loop;
4372 PhaseIdealLoop* const _phase;
4373
4374 public:
4375 MoveAssertionPredicatesVisitor(LoopNode* target_loop_head,
4376 const NodeInSingleLoopBody &node_in_loop_body,
4377 PhaseIdealLoop* phase)
4378 : _clone_predicate_to_loop(target_loop_head, node_in_loop_body, phase),
4379 _phase(phase) {
4380 }
4381 NONCOPYABLE(MoveAssertionPredicatesVisitor);
4382
4383 using PredicateVisitor::visit;
4384
4385 void visit(const TemplateAssertionPredicate& template_assertion_predicate) override {
4386 _clone_predicate_to_loop.clone_template_assertion_predicate(template_assertion_predicate);
4387 template_assertion_predicate.kill(_phase->igvn());
4388 }
4389 };
4390 #endif // ASSERT
4391
4392 // Transform:
4393 //
4394 // loop<-----------------+
4395 // | |
4396 // stmt1 stmt2 .. stmtn |
4397 // | | | |
4398 // \ | / |
4399 // v v v |
4400 // region |
4401 // | |
4402 // shared_stmt |
4403 // | |
4404 // v |
4405 // if |
4406 // / \ |
4407 // | -----------+
4408 // v
4409 //
4410 // into:
4411 //
4412 // loop<-------------------+
4413 // | |
4414 // v |
4415 // +->loop |
4416 // | | |
4417 // | stmt1 stmt2 .. stmtn |
4418 // | | | | |
4419 // | | \ / |
4420 // | | v v |
4421 // | | region1 |
4422 // | | | |
4423 // | shared_stmt shared_stmt |
4424 // | | | |
4425 // | v v |
4426 // | if if |
4427 // | /\ / \ |
4428 // +-- | | -------+
4429 // \ /
4430 // v v
4431 // region2
4432 //
4433 // (region2 is shown to merge mirrored projections of the loop exit
4434 // ifs to make the diagram clearer but they really merge the same
4435 // projection)
4436 //
4437 // Conditions for this transformation to trigger:
4438 // - the path through stmt1 is frequent enough
4439 // - the inner loop will be turned into a counted loop after transformation
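//
// Example (informal, Java-level sketch) of a shape this targets:
//   for (int i = 0; i < n;) {
//     if (c) { i += 1; }        // stmt1, the frequent path
//     else   { i += 2; }        // stmt2
//     ...                       // shared_stmt and the exit test
//   }
// The two increments merge through a region/phi before the shared exit test, so the loop
// is not counted. Duplicating the backedge gives the frequent path its own copy of the
// shared tail; the resulting inner loop, with a single increment feeding the iv phi, can
// then be converted into a counted loop.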
4440 bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) {
4441 if (!DuplicateBackedge) {
4442 return false;
4443 }
4444 assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only");
4445 if (!loop->_head->is_Loop()) {
4446 return false;
4447 }
4448
4449 uint estimate = loop->est_loop_clone_sz(1);
4450 if (exceeding_node_budget(estimate)) {
4451 return false;
4452 }
4453
4454 LoopNode *head = loop->_head->as_Loop();
4455
4456 Node* region = nullptr;
4457 IfNode* exit_test = nullptr;
4458 uint inner;
4459 float f;
4460 #ifdef ASSERT
4461 if (StressDuplicateBackedge) {
4462 if (head->is_strip_mined()) {
4463 return false;
4464 }
4465 Node* c = head->in(LoopNode::LoopBackControl);
4466
4467 while (c != head) {
4468 if (c->is_Region()) {
4469 region = c;
4470 }
4471 c = idom(c);
4472 }
4473
4474 if (region == nullptr) {
4475 return false;
4476 }
4477
4478 inner = 1;
4479 } else
4480 #endif //ASSERT
4481 {
4482 // Is the shape of the loop that of a counted loop...
4483 Node* back_control = loop_exit_control(head, loop);
4484 if (back_control == nullptr) {
4485 return false;
4486 }
4487
4488 BoolTest::mask bt = BoolTest::illegal;
4489 float cl_prob = 0;
4490 Node* incr = nullptr;
4491 Node* limit = nullptr;
4492 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
4493 if (cmp == nullptr || cmp->Opcode() != Op_CmpI) {
4494 return false;
4495 }
4496
4497 // With an extra phi for the candidate iv?
4498 // Or the region node is the loop head
4499 if (!incr->is_Phi() || incr->in(0) == head) {
4500 return false;
4501 }
4502
4503 PathFrequency pf(head, this);
4504 region = incr->in(0);
4505
4506 // Go over all paths for the extra phi's region and see if that
4507 // path is frequent enough and would match the expected iv shape
4508 // if the extra phi is removed
4509 inner = 0;
4510 for (uint i = 1; i < incr->req(); ++i) {
4511 Node* in = incr->in(i);
4512 Node* trunc1 = nullptr;
4513 Node* trunc2 = nullptr;
4514 const TypeInteger* iv_trunc_t = nullptr;
4515 Node* orig_in = in;
4516 if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) {
4517 continue;
4518 }
4519 assert(in->Opcode() == Op_AddI, "wrong increment code");
4520 Node* xphi = nullptr;
4521 Node* stride = loop_iv_stride(in, xphi);
4522
4523 if (stride == nullptr) {
4524 continue;
4525 }
4526
4527 PhiNode* phi = loop_iv_phi(xphi, nullptr, head);
4528 if (phi == nullptr ||
4529 (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
4530 (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
4531 return false;
4532 }
4533
4534 f = pf.to(region->in(i));
4535 if (f > 0.5) {
4536 inner = i;
4537 break;
4538 }
4539 }
4540
4541 if (inner == 0) {
4542 return false;
4543 }
4544
4545 exit_test = back_control->in(0)->as_If();
4546 }
4547
4548 if (idom(region)->is_Catch()) {
4549 return false;
4550 }
4551
4552 // Collect all control nodes that need to be cloned (shared_stmt in the diagram)
4553 Unique_Node_List wq;
4554 wq.push(head->in(LoopNode::LoopBackControl));
4555 for (uint i = 0; i < wq.size(); i++) {
4556 Node* c = wq.at(i);
4557 assert(get_loop(c) == loop, "not in the right loop?");
4558 if (c->is_Region()) {
4559 if (c != region) {
4560 for (uint j = 1; j < c->req(); ++j) {
4561 wq.push(c->in(j));
4562 }
4563 }
4564 } else {
4565 wq.push(c->in(0));
4566 }
4567 assert(!is_strict_dominator(c, region), "shouldn't go above region");
4568 }
4569
4570 Node* region_dom = idom(region);
4571
4572 // Can't do the transformation if this would cause a membar pair to
4573 // be split
4574 for (uint i = 0; i < wq.size(); i++) {
4575 Node* c = wq.at(i);
4576 if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) {
4577 assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair");
4578 if (!wq.member(c->as_MemBar()->leading_membar())) {
4579 return false;
4580 }
4581 }
4582 }
4583 C->print_method(PHASE_BEFORE_DUPLICATE_LOOP_BACKEDGE, 4, head);
4584
// Collect data nodes that need to be cloned as well
4586 int dd = dom_depth(head);
4587
4588 for (uint i = 0; i < loop->_body.size(); ++i) {
4589 Node* n = loop->_body.at(i);
4590 if (has_ctrl(n)) {
4591 Node* c = get_ctrl(n);
4592 if (wq.member(c)) {
4593 wq.push(n);
4594 }
4595 } else {
4596 set_idom(n, idom(n), dd);
4597 }
4598 }
4599
4600 // clone shared_stmt
4601 clone_loop_body(wq, old_new, nullptr);
4602
4603 Node* region_clone = old_new[region->_idx];
4604 region_clone->set_req(inner, C->top());
4605 set_idom(region, region->in(inner), dd);
4606
4607 // Prepare the outer loop
4608 Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]);
4609 register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl));
4610 _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head);
4611 set_idom(head, outer_head, dd);
4612
4613 fix_body_edges(wq, loop, old_new, dd, loop->_parent, true);
4614
4615 // Make one of the shared_stmt copies only reachable from stmt1, the
4616 // other only from stmt2..stmtn.
4617 Node* dom = nullptr;
4618 for (uint i = 1; i < region->req(); ++i) {
4619 if (i != inner) {
4620 _igvn.replace_input_of(region, i, C->top());
4621 }
4622 Node* in = region_clone->in(i);
4623 if (in->is_top()) {
4624 continue;
4625 }
4626 if (dom == nullptr) {
4627 dom = in;
4628 } else {
4629 dom = dom_lca(dom, in);
4630 }
4631 }
4632
4633 set_idom(region_clone, dom, dd);
4634
4635 // Set up the outer loop
4636 for (uint i = 0; i < head->outcnt(); i++) {
4637 Node* u = head->raw_out(i);
4638 if (u->is_Phi()) {
4639 Node* outer_phi = u->clone();
4640 outer_phi->set_req(0, outer_head);
4641 Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx];
4642 if (backedge == nullptr) {
4643 backedge = u->in(LoopNode::LoopBackControl);
4644 }
4645 outer_phi->set_req(LoopNode::LoopBackControl, backedge);
4646 register_new_node(outer_phi, outer_head);
4647 _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi);
4648 }
4649 }
4650
// Create control and data nodes for out-of-loop uses (including region2)
4652 Node_List worklist;
4653 uint new_counter = C->unique();
4654 fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist);
4655
4656 Node_List *split_if_set = nullptr;
4657 Node_List *split_bool_set = nullptr;
4658 Node_List *split_cex_set = nullptr;
4659 fix_data_uses(wq, loop, ControlAroundStripMined, loop->skip_strip_mined(), new_counter, old_new, worklist,
4660 split_if_set, split_bool_set, split_cex_set);
4661
4662 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
4663
4664 if (exit_test != nullptr) {
4665 float cnt = exit_test->_fcnt;
4666 if (cnt != COUNT_UNKNOWN) {
4667 exit_test->_fcnt = cnt * f;
4668 old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f);
4669 }
4670 }
4671
4672 #ifdef ASSERT
4673 if (StressDuplicateBackedge && head->is_CountedLoop()) {
4674 // The Template Assertion Predicates from the old counted loop are now at the new outer loop - clone them to
4675 // the inner counted loop and kill the old ones. We only need to do this with debug builds because
// StressDuplicateBackedge is a develop flag and false by default. Without StressDuplicateBackedge 'head' will be a
4677 // non-counted loop, and thus we have no Template Assertion Predicates above the old loop to move down.
4678 PredicateIterator predicate_iterator(outer_head->in(LoopNode::EntryControl));
4679 NodeInSingleLoopBody node_in_body(this, loop);
4680 MoveAssertionPredicatesVisitor move_assertion_predicates_visitor(head, node_in_body, this);
4681 predicate_iterator.for_each(move_assertion_predicates_visitor);
4682 }
4683 #endif // ASSERT
4684
4685 C->set_major_progress();
4686
4687 C->print_method(PHASE_AFTER_DUPLICATE_LOOP_BACKEDGE, 4, outer_head);
4688
4689 return true;
4690 }
4691
4692 // AutoVectorize the loop: replace scalar ops with vector ops.
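// For example (informal sketch), in a main loop computing
//   for (int i = 0; i < n; i++) { c[i] = a[i] + b[i]; }
// groups of adjacent LoadI/AddI/StoreI are packed into LoadVector/AddVI/StoreVector nodes,
// provided the precondition checks and the dependence analysis below succeed.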
4693 PhaseIdealLoop::AutoVectorizeStatus
4694 PhaseIdealLoop::auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared) {
4695 // Counted loop only
4696 if (!lpt->is_counted()) {
4697 return AutoVectorizeStatus::Impossible;
4698 }
4699
4700 // Main-loop only
4701 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4702 if (!cl->is_main_loop()) {
4703 return AutoVectorizeStatus::Impossible;
4704 }
4705
4706 VLoop vloop(lpt, false);
4707 if (!vloop.check_preconditions()) {
4708 return AutoVectorizeStatus::TriedAndFailed;
4709 }
4710
4711 // Ensure the shared data is cleared before each use
4712 vshared.clear();
4713
4714 const VLoopAnalyzer vloop_analyzer(vloop, vshared);
4715 if (!vloop_analyzer.success()) {
4716 return AutoVectorizeStatus::TriedAndFailed;
4717 }
4718
4719 SuperWord sw(vloop_analyzer);
4720 if (!sw.transform_loop()) {
4721 return AutoVectorizeStatus::TriedAndFailed;
4722 }
4723
4724 return AutoVectorizeStatus::Success;
4725 }
4726
4727 // Just before insert_pre_post_loops, we can multiversion the loop:
4728 //
4729 // multiversion_if
4730 // | |
4731 // fast_loop slow_loop
4732 //
4733 // In the fast_loop we can make speculative assumptions, and put the
4734 // conditions into the multiversion_if. If the conditions hold at runtime,
4735 // we enter the fast_loop, if the conditions fail, we take the slow_loop
4736 // instead which does not make any of the speculative assumptions.
4737 //
4738 // Note: we only multiversion the loop if the loop does not have any
4739 // auto vectorization check Predicate. If we have that predicate,
4740 // then we can simply add the speculative assumption checks to
4741 // that Predicate. This means we do not need to duplicate the
4742 // loop - we have a smaller graph and save compile time. Should
4743 // the conditions ever fail, then we deopt / trap at the Predicate
4744 // and recompile without that Predicate. At that point we will
4745 // multiversion the loop, so that we can still have speculative
4746 // runtime checks.
4747 //
4748 // We perform the multiversioning when the loop is still in its single
4749 // iteration form, even before we insert pre and post loops. This makes
4750 // the cloning much simpler. However, this means that both the fast
4751 // and the slow loop have to be optimized independently (adding pre
4752 // and post loops, unrolling the main loop, auto-vectorizing, etc.). We may
4753 // also end up not needing any speculative assumptions in the fast_loop, in
4754 // which case we reject the slow_loop by constant folding the multiversion_if.
4755 //
4756 // Therefore, we "delay" the optimization of the slow_loop until we add
4757 // at least one speculative assumption for the fast_loop. If we never
4758 // add such a speculative runtime check, the OpaqueMultiversioningNode
4759 // of the multiversion_if constant folds to true after loop opts, and the
4760 // multiversion_if folds away the "delayed" slow_loop. If we add any
4761 // speculative assumption, then we notify the OpaqueMultiversioningNode
4762 // with "notify_slow_loop_that_it_can_resume_optimizations".
4763 //
4764 // Note: new runtime checks can be added to the multiversion_if with
4765 // PhaseIdealLoop::create_new_if_for_multiversion
4766 void PhaseIdealLoop::maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new) {
4767 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4768 LoopNode* outer_loop = cl->skip_strip_mined();
4769 Node* entry = outer_loop->in(LoopNode::EntryControl);
4770
4771 // Check we have multiversioning enabled, and are not already multiversioned.
4772 if (!LoopMultiversioning || cl->is_multiversion()) { return; }
4773
4774 // Bail out if there still is a parse predicate: in that case we can add the runtime checks
4775 // directly to it during auto-vectorization and do not need to multiversion the loop.
4776 const Predicates predicates(entry);
4777 const PredicateBlock* predicate_block = predicates.auto_vectorization_check_block();
4778 if (predicate_block->has_parse_predicate()) { return; }
4779
4780 // Check node budget.
4781 uint estimate = lpt->est_loop_clone_sz(2);
4782 if (!may_require_nodes(estimate)) { return; }
4783
4784 do_multiversioning(lpt, old_new);
4785 }
4786
4787 void DataNodeGraph::clone_data_nodes(Node* new_ctrl) {
4788 for (uint i = 0; i < _data_nodes.size(); i++) {
4789 clone(_data_nodes[i], new_ctrl);
4790 }
4791 }
4792
4793 // Clone the given node and set it up properly. Set 'new_ctrl' as ctrl.
4794 void DataNodeGraph::clone(Node* node, Node* new_ctrl) {
4795 Node* clone = node->clone();
4796 _phase->igvn().register_new_node_with_optimizer(clone);
4797 _orig_to_new.put(node, clone);
4798 _phase->set_ctrl(clone, new_ctrl);
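// CastII nodes also carry a control input: pin the clone to the new control.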
4799 if (node->is_CastII()) {
4800 clone->set_req(0, new_ctrl);
4801 }
4802 }
4803
4804 // Rewire the data inputs of all cloned nodes: each clone still points at the same inputs as its original node.
4805 // Whenever such an input was cloned as well, point the clone at the cloned input instead, creating a separate cloned graph.
4806 void DataNodeGraph::rewire_clones_to_cloned_inputs() {
4807 _orig_to_new.iterate_all([&](Node* node, Node* clone) {
4808 for (uint i = 1; i < node->req(); i++) {
4809 Node** cloned_input = _orig_to_new.get(node->in(i));
4810 if (cloned_input != nullptr) {
4811 // Input was also cloned -> rewire clone to the cloned input.
4812 _phase->igvn().replace_input_of(clone, i, *cloned_input);
4813 }
4814 }
4815 });
4816 }
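// A minimal usage sketch of the two-phase copy above (hypothetical caller; the constructor
// arguments - the set of data nodes to copy and the current PhaseIdealLoop - are assumed):
//
//   DataNodeGraph data_node_graph(data_nodes, phase);
//   data_node_graph.clone_data_nodes(new_ctrl);        // 1) clone every collected data node
//   data_node_graph.rewire_clones_to_cloned_inputs();  // 2) rewire the clones to each other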
4817
4818 // Clone all non-OpaqueLoop* nodes and apply the provided transformation strategy for OpaqueLoop* nodes.
4819 // Set 'new_ctrl' as ctrl for all cloned non-OpaqueLoop* nodes.
4820 void DataNodeGraph::clone_data_nodes_and_transform_opaque_loop_nodes(
4821 const TransformStrategyForOpaqueLoopNodes& transform_strategy,
4822 Node* new_ctrl) {
4823 for (uint i = 0; i < _data_nodes.size(); i++) {
4824 Node* data_node = _data_nodes[i];
4825 if (data_node->is_Opaque1()) {
4826 transform_opaque_node(transform_strategy, data_node);
4827 } else {
4828 clone(data_node, new_ctrl);
4829 }
4830 }
4831 }
4832
4833 void DataNodeGraph::transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node) {
4834 Node* transformed_node;
4835 if (node->is_OpaqueLoopInit()) {
4836 transformed_node = transform_strategy.transform_opaque_init(node->as_OpaqueLoopInit());
4837 } else {
4838 assert(node->is_OpaqueLoopStride(), "must be OpaqueLoopStrideNode");
4839 transformed_node = transform_strategy.transform_opaque_stride(node->as_OpaqueLoopStride());
4840 }
4841 // Add an orig->new mapping to correctly update the inputs of the copied graph in rewire_clones_to_cloned_inputs().
4842 _orig_to_new.put(node, transformed_node);
4843 }