1 /*
2 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/connode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/loopnode.hpp"
35 #include "opto/matcher.hpp"
36 #include "opto/movenode.hpp"
37 #include "opto/mulnode.hpp"
38 #include "opto/opaquenode.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/subnode.hpp"
41 #include "opto/subtypenode.hpp"
42 #include "opto/superword.hpp"
43 #include "opto/vectornode.hpp"
44 #include "utilities/checkedCast.hpp"
45 #include "utilities/macros.hpp"
46
47 //=============================================================================
48 //------------------------------split_thru_phi---------------------------------
49 // Split Node 'n' through merge point if there is enough win.
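// For example (illustrative IR shape, not taken from a real graph): with a Region R
// merging two paths that feed a Phi,
//   n = AddI(Phi(R, a, b), c)   ==>   Phi(R, AddI(a, c), AddI(b, c))
// Each cloned AddI can then constant fold, simplify via Identity, or common up with an
// existing node along its own path; such simplifications are the 'wins' counted below.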
50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
51 if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
52 (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
53 // ConvI2L/ConvL2I may carry type information which is unsafe to push up,
54 // so disable this for now
55 return nullptr;
56 }
57
58 // Splitting range check CastIIs through a loop induction Phi can
59 // cause new Phis to be created that are left unrelated to the loop
60 // induction Phi and prevent optimizations (vectorization)
61 if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
62 n->in(1) == region->as_CountedLoop()->phi()) {
63 return nullptr;
64 }
65
66 if (cannot_split_division(n, region)) {
67 return nullptr;
68 }
69
70 SplitThruPhiWins wins(region);
71 assert(!n->is_CFG(), "");
72 assert(region->is_Region(), "");
73
74 const Type* type = n->bottom_type();
75 const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
76 Node* phi;
77 if (t_oop != nullptr && t_oop->is_known_instance_field()) {
78 int iid = t_oop->instance_id();
79 int index = C->get_alias_index(t_oop);
80 int offset = t_oop->offset();
81 phi = new PhiNode(region, type, nullptr, iid, index, offset);
82 } else {
83 phi = PhiNode::make_blank(region, n);
84 }
85 uint old_unique = C->unique();
86 for (uint i = 1; i < region->req(); i++) {
87 Node* x;
88 Node* the_clone = nullptr;
89 if (region->in(i) == C->top()) {
90 x = C->top(); // Dead path? Use a dead data op
91 } else {
92 x = n->clone(); // Else clone up the data op
93 the_clone = x; // Remember for possible deletion.
94 // Alter data node to use pre-phi inputs
95 if (n->in(0) == region)
96 x->set_req( 0, region->in(i) );
97 for (uint j = 1; j < n->req(); j++) {
98 Node* in = n->in(j);
99 if (in->is_Phi() && in->in(0) == region)
100 x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
101 }
102 }
103 // Check for a 'win' on some paths
104 const Type* t = x->Value(&_igvn);
105
106 bool singleton = t->singleton();
107
108 // A TOP singleton indicates that there are no possible values incoming
109 // along a particular edge. In most cases, this is OK, and the Phi will
110 // be eliminated later in an Ideal call. However, we can't allow this to
111 // happen if the singleton occurs on loop entry, as the elimination of
112 // the PhiNode may cause the resulting node to migrate back to a previous
113 // loop iteration.
114 if (singleton && t == Type::TOP) {
115 // Is_Loop() == false does not confirm the absence of a loop (e.g., an
116 // irreducible loop may not be indicated by an affirmative is_Loop());
117 // therefore, the only top we can split thru a phi is on a backedge of
118 // a loop.
119 singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
120 }
121
122 if (singleton) {
123 wins.add_win(i);
124 x = makecon(t);
125 } else {
126 // We now call Identity to try to simplify the cloned node.
127 // Note that some Identity methods call phase->type(this).
128 // Make sure that the type array is big enough for
129 // our new node, even though we may throw the node away.
130 // (Note: This tweaking with igvn only works because x is a new node.)
131 _igvn.set_type(x, t);
132 // If x is a TypeNode, capture any more-precise type permanently into Node
133 // otherwise it will be not updated during igvn->transform since
134 // igvn->type(x) is set to x->Value() already.
135 x->raise_bottom_type(t);
136 Node* y = x->Identity(&_igvn);
137 if (y != x) {
138 wins.add_win(i);
139 x = y;
140 } else {
141 y = _igvn.hash_find(x);
142 if (y == nullptr) {
143 y = similar_subtype_check(x, region->in(i));
144 }
145 if (y) {
146 wins.add_win(i);
147 x = y;
148 } else {
149 // Else x is a new node we are keeping
150 // We do not need register_new_node_with_optimizer
151 // because set_type has already been called.
152 _igvn._worklist.push(x);
153 }
154 }
155 }
156
157 phi->set_req( i, x );
158
159 if (the_clone == nullptr) {
160 continue;
161 }
162
163 if (the_clone != x) {
164 _igvn.remove_dead_node(the_clone);
165 } else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
166 n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
167 // it is not a win if 'x' moved from an outer to an inner loop
168 // this edge case can only happen for Load nodes
169 wins.reset();
170 break;
171 }
172 }
173 // Too few wins?
174 if (!wins.profitable(policy)) {
175 _igvn.remove_dead_node(phi);
176 return nullptr;
177 }
178
179 // Record Phi
180 register_new_node( phi, region );
181
182 for (uint i2 = 1; i2 < phi->req(); i2++) {
183 Node *x = phi->in(i2);
184 // If we commoned up the cloned 'x' with another existing Node,
185 // the existing Node picks up a new use. We need to make the
186 // existing Node occur higher up so it dominates its uses.
187 Node *old_ctrl;
188 IdealLoopTree *old_loop;
189
190 if (x->is_Con()) {
191 assert(get_ctrl(x) == C->root(), "constant control is not root");
192 continue;
193 }
194 // The occasional new node
195 if (x->_idx >= old_unique) { // Found a new, unplaced node?
196 old_ctrl = nullptr;
197 old_loop = nullptr; // Not in any prior loop
198 } else {
199 old_ctrl = get_ctrl(x);
200 old_loop = get_loop(old_ctrl); // Get prior loop
201 }
202 // New late point must dominate new use
203 Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
204 if (new_ctrl == old_ctrl) // Nothing is changed
205 continue;
206
207 IdealLoopTree *new_loop = get_loop(new_ctrl);
208
209 // Don't move x into a loop if its uses are
210 // outside of loop. Otherwise x will be cloned
211 // for each use outside of this loop.
212 IdealLoopTree *use_loop = get_loop(region);
213 if (!new_loop->is_member(use_loop) &&
214 (old_loop == nullptr || !new_loop->is_member(old_loop))) {
215 // Take early control, later control will be recalculated
216 // during next iteration of loop optimizations.
217 new_ctrl = get_early_ctrl(x);
218 new_loop = get_loop(new_ctrl);
219 }
220 // Set new location
221 set_ctrl(x, new_ctrl);
222 // If changing loop bodies, see if we need to collect into new body
223 if (old_loop != new_loop) {
224 if (old_loop && !old_loop->_child)
225 old_loop->_body.yank(x);
226 if (!new_loop->_child)
227 new_loop->_body.push(x); // Collect body info
228 }
229 }
230
231 split_thru_phi_yank_old_nodes(n, region);
232 _igvn.replace_node(n, phi);
233
234 #ifndef PRODUCT
235 if (TraceLoopOpts) {
236 tty->print_cr("Split %d %s through %d Phi in %d %s",
237 n->_idx, n->Name(), phi->_idx, region->_idx, region->Name());
238 }
239 #endif // !PRODUCT
240
241 return phi;
242 }
243
244 // If the region is a Loop, we are removing the old n,
245 // and need to yank it from the _body. If any phi we
246 // just split through now has no use any more, it also
247 // has to be removed.
248 void PhaseIdealLoop::split_thru_phi_yank_old_nodes(Node* n, Node* region) {
249 IdealLoopTree* region_loop = get_loop(region);
250 if (region->is_Loop() && region_loop->is_innermost()) {
251 region_loop->_body.yank(n);
252 for (uint j = 1; j < n->req(); j++) {
253 PhiNode* phi = n->in(j)->isa_Phi();
254 // Check that phi belongs to the region and only has n as a use.
255 if (phi != nullptr &&
256 phi->in(0) == region &&
257 phi->unique_multiple_edges_out_or_null() == n) {
258 assert(get_ctrl(phi) == region, "sanity");
259 assert(get_ctrl(n) == region, "sanity");
260 region_loop->_body.yank(phi);
261 }
262 }
263 }
264 }
265
266 // Test whether node 'x' can move into an inner loop relative to node 'n'.
267 // Note: The test is not exact. Returns true if 'x' COULD end up in an inner loop,
268 // but it may also return true even when 'x' actually stays in the outer loop.
269 bool PhaseIdealLoop::can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x) {
270 IdealLoopTree* n_loop_tree = get_loop(n_loop);
271 IdealLoopTree* x_loop_tree = get_loop(get_early_ctrl(x));
272 // x_loop_tree should be outer or same loop as n_loop_tree
273 return !x_loop_tree->is_member(n_loop_tree);
274 }
275
276 // Subtype checks that carry profile data don't common up, so look for a replacement by following edges
277 Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
278 if (x->is_SubTypeCheck()) {
279 Node* in1 = x->in(1);
280 for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
281 Node* u = in1->fast_out(i);
282 if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
283 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
284 Node* bol = u->fast_out(j);
285 for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
286 Node* iff = bol->fast_out(k);
287 // Only dominating subtype checks are interesting: otherwise we risk replacing a subtype check by another with
288 // unrelated profile
289 if (iff->is_If() && is_dominator(iff, r_in)) {
290 return u;
291 }
292 }
293 }
294 }
295 }
296 }
297 return nullptr;
298 }
299
300 // Return true if 'n' is a Div or Mod node (without zero check If node which was removed earlier) with a loop phi divisor
301 // of a trip-counted (integer or long) loop with a backedge input that could be zero (i.e., its type range includes zero). In
302 // this case, we cannot split the division to the backedge as it could freely float above the loop exit check resulting in
303 // a division by zero. This situation is possible because the type of an increment node of an iv phi (trip-counter) could
304 // include zero while the iv phi does not (see PhiNode::Value() for trip-counted loops where we improve types of iv phis).
305 // We also need to check other loop phis as they could have been created in the same split-if pass when applying
306 // PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
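// Illustrative example (hypothetical): in a counted loop such as
//   for (int i = 10; i > 0; i--) { res = x % i; }
// the iv phi has type [1..10], so the zero check of the Mod was removed, but the phi's
// backedge input (the decrement) has type [0..9], which includes zero. Splitting the Mod
// onto the backedge would let it float above the exit check and divide by zero.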
307 bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
308 const Type* zero;
309 switch (n->Opcode()) {
310 case Op_DivI:
311 case Op_ModI:
312 case Op_UDivI:
313 case Op_UModI:
314 zero = TypeInt::ZERO;
315 break;
316 case Op_DivL:
317 case Op_ModL:
318 case Op_UDivL:
319 case Op_UModL:
320 zero = TypeLong::ZERO;
321 break;
322 default:
323 return false;
324 }
325
326 if (n->in(0) != nullptr) {
327 // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
328 return true;
329 }
330
331 Node* divisor = n->in(2);
332 return is_divisor_loop_phi(divisor, region) &&
333 loop_phi_backedge_type_contains_zero(divisor, zero);
334 }
335
336 bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) {
337 return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop;
338 }
339
340 bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
341 return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
342 }
343
344 //------------------------------dominated_by------------------------------------
345 // Replace the dominated test with an obvious true or false. Place it on the
346 // IGVN worklist for later cleanup. Move control-dependent data Nodes on the
347 // live path up to the dominating control.
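// For example (illustrative): if this If is dominated by the true projection of an If
// with the same condition, its condition input is replaced by the constant 1 (or 0 when
// 'flip' is set), so the dominated test constant folds away in IGVN.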
348 void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool prevdom_not_imply_this) {
349 if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
350
351 // prevdom is the dominating projection of the dominating test.
352 assert(iff->Opcode() == Op_If ||
353 iff->Opcode() == Op_CountedLoopEnd ||
354 iff->Opcode() == Op_LongCountedLoopEnd ||
355 iff->Opcode() == Op_RangeCheck ||
356 iff->Opcode() == Op_ParsePredicate,
357 "Check this code when new subtype is added");
358
359 int pop = prevdom->Opcode();
360 assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
361 if (flip) {
362 if (pop == Op_IfTrue)
363 pop = Op_IfFalse;
364 else
365 pop = Op_IfTrue;
366 }
367 // 'con' is set to true or false to kill the dominated test.
368 Node* con = makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
369 // Hack the dominated test
370 _igvn.replace_input_of(iff, 1, con);
371
372 // If I don't have a reachable TRUE and FALSE path following the IfNode then
373 // I can assume this path reaches an infinite loop. In this case it's not
374 // important to optimize the data Nodes - either the whole compilation will
375 // be tossed or this path (and all data Nodes) will go dead.
376 if (iff->outcnt() != 2) {
377 return;
378 }
379
380 // Make control-dependent data Nodes on the live path (path that will remain
381 // once the dominated IF is removed) become control-dependent on the
382 // dominating projection.
383 Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);
384
385 if (dp == nullptr) {
386 return;
387 }
388
389 rewire_safe_outputs_to_dominator(dp, prevdom, prevdom_not_imply_this);
390 }
391
392 void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool dominator_not_imply_source) {
393 IdealLoopTree* old_loop = get_loop(source);
394
395 for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
396 Node* out = source->fast_out(i); // Control-dependent node
397 if (out->depends_only_on_test()) {
398 assert(out->in(0) == source, "must be control dependent on source");
399 _igvn.replace_input_of(out, 0, dominator);
400 if (dominator_not_imply_source) {
401 // Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
402 // check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
403 // removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
404 // in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
405 // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
406 // dominating check.
407 Node* clone = out->pin_array_access_node();
408 if (clone != nullptr) {
409 clone = _igvn.register_new_node_with_optimizer(clone, out);
410 _igvn.replace_node(out, clone);
411 out = clone;
412 }
413 }
414 set_early_ctrl(out, false);
415 IdealLoopTree* new_loop = get_loop(get_ctrl(out));
416 if (old_loop != new_loop) {
417 if (!old_loop->_child) {
418 old_loop->_body.yank(out);
419 }
420 if (!new_loop->_child) {
421 new_loop->_body.push(out);
422 }
423 }
424 --i;
425 --imax;
426 }
427 }
428 }
429
430 //------------------------------has_local_phi_input----------------------------
431 // Return TRUE if 'n' has Phi inputs from its local block and no other
432 // block-local inputs (all non-local-phi inputs come from earlier blocks)
433 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
434 Node *n_ctrl = get_ctrl(n);
435 // See if some inputs come from a Phi in this block, or from before
436 // this block.
437 uint i;
438 for( i = 1; i < n->req(); i++ ) {
439 Node *phi = n->in(i);
440 if( phi->is_Phi() && phi->in(0) == n_ctrl )
441 break;
442 }
443 if( i >= n->req() )
444 return nullptr; // No Phi inputs; nowhere to clone thru
445
446 // Check for inputs created between 'n' and the Phi input. These
447 // must split as well; they have already been given the chance
448 // (courtesy of a post-order visit) and since they did not we must
449 // recover the 'cost' of splitting them by being very profitable
450 // when splitting 'n'. Since this is unlikely we simply give up.
451 for( i = 1; i < n->req(); i++ ) {
452 Node *m = n->in(i);
453 if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
454 // We allow the special case of AddP's with no local inputs.
455 // This allows us to split-up address expressions.
456 if (m->is_AddP() &&
457 get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
458 get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
459 get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
460 // Move the AddP up to the dominating point. That's fine because control of m's inputs
461 // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
462 Node* c = find_non_split_ctrl(idom(n_ctrl));
463 if (c->is_OuterStripMinedLoop()) {
464 c->as_Loop()->verify_strip_mined(1);
465 c = c->in(LoopNode::EntryControl);
466 }
467 set_ctrl_and_loop(m, c);
468 continue;
469 }
470 return nullptr;
471 }
472 assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
473 }
474
475 return n_ctrl;
476 }
477
478 // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
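// Illustrative example: with V loop-varying and I loop-invariant,
//   (V + I) << 2   ==>   (V << 2) + (I << 2)
// so the invariant part (I << 2) can be hoisted out of the loop, leaving only the
// varying shift inside.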
479 Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
480 assert(bt == T_INT || bt == T_LONG, "only for integers");
481 int n_op = n->Opcode();
482
483 if (n_op == Op_LShift(bt)) {
484 // Scale is loop invariant
485 Node* scale = n->in(2);
486 Node* scale_ctrl = get_ctrl(scale);
487 IdealLoopTree* scale_loop = get_loop(scale_ctrl);
488 if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
489 return nullptr;
490 }
491 const TypeInt* scale_t = scale->bottom_type()->isa_int();
492 if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
493 return nullptr; // Don't bother with byte/short masking
494 }
495 // Add must vary with loop (else shift would be loop-invariant)
496 Node* add = n->in(1);
497 Node* add_ctrl = get_ctrl(add);
498 IdealLoopTree* add_loop = get_loop(add_ctrl);
499 if (n_loop != add_loop) {
500 return nullptr; // happens w/ evil ZKM loops
501 }
502
503 // Convert I-V into I+ (0-V); same for V-I
504 if (add->Opcode() == Op_Sub(bt) &&
505 _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
506 assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
507 Node* zero = integercon(0, bt);
508 Node* neg = SubNode::make(zero, add->in(2), bt);
509 register_new_node_with_ctrl_of(neg, add->in(2));
510 add = AddNode::make(add->in(1), neg, bt);
511 register_new_node(add, add_ctrl);
512 }
513 if (add->Opcode() != Op_Add(bt)) return nullptr;
514 assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
515 // See if one add input is loop invariant
516 Node* add_var = add->in(1);
517 Node* add_var_ctrl = get_ctrl(add_var);
518 IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
519 Node* add_invar = add->in(2);
520 Node* add_invar_ctrl = get_ctrl(add_invar);
521 IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
522 if (add_invar_loop == n_loop) {
523 // Swap to find the invariant part
524 add_invar = add_var;
525 add_invar_ctrl = add_var_ctrl;
526 add_invar_loop = add_var_loop;
527 add_var = add->in(2);
528 } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
529 return nullptr;
530 }
531 if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
532 return nullptr; // No invariant part of the add?
533 }
534
535 // Yes! Reshape address expression!
536 Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
537 Node* inv_scale_ctrl =
538 dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
539 add_invar_ctrl : scale_ctrl;
540 register_new_node(inv_scale, inv_scale_ctrl);
541 Node* var_scale = LShiftNode::make(add_var, scale, bt);
542 register_new_node(var_scale, n_ctrl);
543 Node* var_add = AddNode::make(var_scale, inv_scale, bt);
544 register_new_node(var_add, n_ctrl);
545 _igvn.replace_node(n, var_add);
546 return var_add;
547 }
548 return nullptr;
549 }
550
551 //------------------------------remix_address_expressions----------------------
552 // Rework addressing expressions to get the most loop-invariant stuff
553 // moved out. We'd like to do all associative operators, but it's especially
554 // important (common) to do address expressions.
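// Illustrative example (hypothetical Java source): in
//   for (int i = 0; i < n; i++) { sum += a[i + inv]; }
// the address of a[i + inv] mixes the loop-varying i with the loop-invariant inv;
// reshaping the expression lets the invariant part of the address be computed once
// outside the loop.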
555 Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
556 if (!has_ctrl(n)) return nullptr;
557 Node* n_ctrl = get_ctrl(n);
558 IdealLoopTree* n_loop = get_loop(n_ctrl);
559
560 // See if 'n' mixes loop-varying and loop-invariant inputs and
561 // itself is loop-varying.
562
563 // Only interested in binary ops (and AddP)
564 if (n->req() < 3 || n->req() > 4) return nullptr;
565
566 Node* n1_ctrl = get_ctrl(n->in( 1));
567 Node* n2_ctrl = get_ctrl(n->in( 2));
568 Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
569 IdealLoopTree* n1_loop = get_loop(n1_ctrl);
570 IdealLoopTree* n2_loop = get_loop(n2_ctrl);
571 IdealLoopTree* n3_loop = get_loop(n3_ctrl);
572
573 // Does one of my inputs spin in a tighter loop than self?
574 if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
575 (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
576 (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
577 return nullptr; // Leave well enough alone
578 }
579
580 // Is at least one of my inputs loop-invariant?
581 if (n1_loop == n_loop &&
582 n2_loop == n_loop &&
583 n3_loop == n_loop) {
584 return nullptr; // No loop-invariant inputs
585 }
586
587 Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
588 if (res != nullptr) {
589 return res;
590 }
591 res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
592 if (res != nullptr) {
593 return res;
594 }
595
596 int n_op = n->Opcode();
597 // Replace (I+V) with (V+I)
598 if (n_op == Op_AddI ||
599 n_op == Op_AddL ||
600 n_op == Op_AddF ||
601 n_op == Op_AddD ||
602 n_op == Op_MulI ||
603 n_op == Op_MulL ||
604 n_op == Op_MulF ||
605 n_op == Op_MulD) {
606 if (n2_loop == n_loop) {
607 assert(n1_loop != n_loop, "");
608 n->swap_edges(1, 2);
609 }
610 }
611
612 // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
613 // but not if I2 is a constant. Skip for irreducible loops.
614 if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
615 if (n2_loop == n_loop && n3_loop != n_loop) {
616 if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
617 Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
618 Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
619 IdealLoopTree* n22loop = get_loop(n22_ctrl);
620 IdealLoopTree* n23_loop = get_loop(n23_ctrl);
621 if (n22loop != n_loop && n22loop->is_member(n_loop) &&
622 n23_loop == n_loop) {
623 Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3));
624 // Stuff new AddP in the loop preheader
625 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
626 Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3));
627 register_new_node(add2, n_ctrl);
628 _igvn.replace_node(n, add2);
629 return add2;
630 }
631 }
632 }
633
634 // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
635 if (n2_loop != n_loop && n3_loop == n_loop) {
636 if (n->in(3)->Opcode() == Op_AddX) {
637 Node* V = n->in(3)->in(1);
638 Node* I = n->in(3)->in(2);
639 if (!ctrl_is_member(n_loop, V)) {
640 // Swap so that I becomes the candidate loop-invariant input (checked just below)
641 Node* tmp = V; V = I; I = tmp;
642 }
643 if (!ctrl_is_member(n_loop, I)) {
644 Node* add1 = new AddPNode(n->in(1), n->in(2), I);
645 // Stuff new AddP in the loop preheader
646 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
647 Node* add2 = new AddPNode(n->in(1), add1, V);
648 register_new_node(add2, n_ctrl);
649 _igvn.replace_node(n, add2);
650 return add2;
651 }
652 }
653 }
654 }
655
656 return nullptr;
657 }
658
659 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
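// Illustrative example (hypothetical Java source), for short arrays s1 and s2:
//   for (int i = 0; i < n/2; i++) { sum += s1[2*i] * s2[2*i] + s1[2*i+1] * s2[2*i+1]; }
// The AddI matching this shape is replaced by a MulAddS2I, which can later be vectorized
// with MulAddVS2VI.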
660 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
661 assert(n->Opcode() == Op_AddI, "sanity");
662 Node * nn = nullptr;
663 Node * in1 = n->in(1);
664 Node * in2 = n->in(2);
665 if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
666 IdealLoopTree* loop_n = get_loop(get_ctrl(n));
667 if (loop_n->is_counted() &&
668 loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
669 Matcher::match_rule_supported(Op_MulAddVS2VI) &&
670 Matcher::match_rule_supported(Op_MulAddS2I)) {
671 Node* mul_in1 = in1->in(1);
672 Node* mul_in2 = in1->in(2);
673 Node* mul_in3 = in2->in(1);
674 Node* mul_in4 = in2->in(2);
675 if (mul_in1->Opcode() == Op_LoadS &&
676 mul_in2->Opcode() == Op_LoadS &&
677 mul_in3->Opcode() == Op_LoadS &&
678 mul_in4->Opcode() == Op_LoadS) {
679 IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
680 IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
681 IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
682 IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
683 IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
684 IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
685 // All nodes should be in the same counted loop.
686 if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
687 loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
688 Node* adr1 = mul_in1->in(MemNode::Address);
689 Node* adr2 = mul_in2->in(MemNode::Address);
690 Node* adr3 = mul_in3->in(MemNode::Address);
691 Node* adr4 = mul_in4->in(MemNode::Address);
692 if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
693 if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
694 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
695 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
696 register_new_node_with_ctrl_of(nn, n);
697 _igvn.replace_node(n, nn);
698 return nn;
699 } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
700 (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
701 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
702 register_new_node_with_ctrl_of(nn, n);
703 _igvn.replace_node(n, nn);
704 return nn;
705 }
706 }
707 }
708 }
709 }
710 }
711 return nn;
712 }
713
714 //------------------------------conditional_move-------------------------------
715 // Attempt to replace a Phi with a conditional move. We have some pretty
716 // strict profitability requirements. All Phis at the merge point must
717 // be converted, so we can remove the control flow. We need to limit the
718 // number of c-moves to a small handful. All code that was in the side-arms
719 // of the CFG diamond is now speculatively executed. This code has to be
720 // "cheap enough". We are pretty much limited to CFG diamonds that merge
721 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
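// Illustrative example (hypothetical Java source):
//   int x = (a < b) ? p : q;
// The diamond's Phi for x is replaced by a CMoveI driven by the Bool of the compare, so
// the branch and the merge point can be removed.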
722 Node *PhaseIdealLoop::conditional_move( Node *region ) {
723
724 assert(region->is_Region(), "sanity check");
725 if (region->req() != 3) return nullptr;
726
727 // Check for CFG diamond
728 Node *lp = region->in(1);
729 Node *rp = region->in(2);
730 if (!lp || !rp) return nullptr;
731 Node *lp_c = lp->in(0);
732 if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
733 IfNode *iff = lp_c->as_If();
734
735 // Check for ops pinned in an arm of the diamond.
736 // Can't remove the control flow in this case
737 if (lp->outcnt() > 1) return nullptr;
738 if (rp->outcnt() > 1) return nullptr;
739
740 IdealLoopTree* r_loop = get_loop(region);
741 assert(r_loop == get_loop(iff), "sanity");
742 // Always convert to CMOVE if all results are used only outside this loop.
743 bool used_inside_loop = (r_loop == _ltree_root);
744
745 // Check profitability
746 int cost = 0;
747 int phis = 0;
748 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
749 Node *out = region->fast_out(i);
750 if (!out->is_Phi()) continue; // Ignore other control edges, etc
751 phis++;
752 PhiNode* phi = out->as_Phi();
753 BasicType bt = phi->type()->basic_type();
754 switch (bt) {
755 case T_DOUBLE:
756 case T_FLOAT:
757 if (C->use_cmove()) {
758 continue; //TODO: maybe we want to add some cost
759 }
760 cost += Matcher::float_cmove_cost(); // Could be very expensive
761 break;
762 case T_LONG: {
763 cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
764 }
765 case T_INT: // These all CMOV fine
766 case T_ADDRESS: { // (RawPtr)
767 cost++;
768 break;
769 }
770 case T_NARROWOOP: // Fall through
771 case T_OBJECT: { // Base oops are OK, but not derived oops
772 const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
773 // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
774 // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
775 // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
776 // have a Phi for the base here that we convert to a CMOVE all is well
777 // and good. But if the base is dead, we'll not make a CMOVE. Later
778 // the allocator will have to produce a base by creating a CMOVE of the
779 // relevant bases. This puts the allocator in the business of
780 // manufacturing expensive instructions, generally a bad plan.
781 // Just Say No to Conditionally-Moved Derived Pointers.
782 if (tp && tp->offset() != 0)
783 return nullptr;
784 cost++;
785 break;
786 }
787 default:
788 return nullptr; // In particular, can't do memory or I/O
789 }
790 // Add in cost any speculative ops
791 for (uint j = 1; j < region->req(); j++) {
792 Node *proj = region->in(j);
793 Node *inp = phi->in(j);
794 if (get_ctrl(inp) == proj) { // Found local op
795 cost++;
796 // Check for a chain of dependent ops; these will all become
797 // speculative in a CMOV.
798 for (uint k = 1; k < inp->req(); k++)
799 if (get_ctrl(inp->in(k)) == proj)
800 cost += ConditionalMoveLimit; // Too much speculative goo
801 }
802 }
803 // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
804 // This will likely Split-If, a higher-payoff operation.
805 for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
806 Node* use = phi->fast_out(k);
807 if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
808 cost += ConditionalMoveLimit;
809 // Is there a use inside the loop?
810 // Note: check only basic types since CMoveP is pinned.
811 if (!used_inside_loop && is_java_primitive(bt)) {
812 IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
813 if (r_loop == u_loop || r_loop->is_member(u_loop)) {
814 used_inside_loop = true;
815 }
816 }
817 }
818 }//for
819 Node* bol = iff->in(1);
820 assert(!bol->is_OpaqueInitializedAssertionPredicate(), "Initialized Assertion Predicates cannot form a diamond with Halt");
821 if (bol->is_OpaqueTemplateAssertionPredicate()) {
822 // Ignore Template Assertion Predicates with OpaqueTemplateAssertionPredicate nodes.
823 return nullptr;
824 }
825 if (bol->is_OpaqueMultiversioning()) {
826 assert(bol->as_OpaqueMultiversioning()->is_useless(), "Must be useless, i.e. fast main loop has already disappeared.");
827 // Ignore multiversion_if that just lost its loops. The OpaqueMultiversioning is marked useless,
828 // and will make the multiversion_if constant fold in the next IGVN round.
829 return nullptr;
830 }
831 if (!bol->is_Bool()) {
832 assert(false, "Expected Bool, but got %s", NodeClassNames[bol->Opcode()]);
833 return nullptr;
834 }
835 int cmp_op = bol->in(1)->Opcode();
836 if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
837 return nullptr;
838 }
839 // It is expensive to generate flags from a float compare.
840 // Avoid duplicated float compare.
841 if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;
842
843 float infrequent_prob = PROB_UNLIKELY_MAG(3);
844 // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
845 if (used_inside_loop) {
846 if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo
847
848 // BlockLayoutByFrequency optimization moves infrequent branch
849 // from hot path. No point in CMOV'ing in such case (110 is used
850 // instead of 100 to account for the inexactness of the float value).
851 if (BlockLayoutByFrequency) {
852 infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
853 }
854 }
855 // Check for highly predictable branch. No point in CMOV'ing if
856 // we are going to predict accurately all the time.
857 if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
858 //keep going
859 } else if (iff->_prob < infrequent_prob ||
860 iff->_prob > (1.0f - infrequent_prob))
861 return nullptr;
862
863 // --------------
864 // Now replace all Phis with CMOV's
865 Node *cmov_ctrl = iff->in(0);
866 uint flip = (lp->Opcode() == Op_IfTrue);
867 Node_List wq;
868 while (1) {
869 PhiNode* phi = nullptr;
870 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
871 Node *out = region->fast_out(i);
872 if (out->is_Phi()) {
873 phi = out->as_Phi();
874 break;
875 }
876 }
877 if (phi == nullptr || _igvn.type(phi) == Type::TOP || !CMoveNode::supported(_igvn.type(phi))) {
878 break;
879 }
880 // Move speculative ops
881 wq.push(phi);
882 while (wq.size() > 0) {
883 Node *n = wq.pop();
884 for (uint j = 1; j < n->req(); j++) {
885 Node* m = n->in(j);
886 if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
887 set_ctrl(m, cmov_ctrl);
888 wq.push(m);
889 }
890 }
891 }
892 Node* cmov = CMoveNode::make(iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
893 register_new_node(cmov, cmov_ctrl);
894 _igvn.replace_node(phi, cmov);
895 #ifndef PRODUCT
896 if (TraceLoopOpts) {
897 tty->print("CMOV ");
898 r_loop->dump_head();
899 if (Verbose) {
900 bol->in(1)->dump(1);
901 cmov->dump(1);
902 }
903 }
904 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
905 #endif
906 }
907
908 // The useless CFG diamond will fold up later; see the optimization in
909 // RegionNode::Ideal.
910 _igvn._worklist.push(region);
911
912 return iff->in(1);
913 }
914
915 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
916 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
917 Node* u = m->fast_out(i);
918 if (u->is_CFG()) {
919 if (u->is_NeverBranch()) {
920 u = u->as_NeverBranch()->proj_out(0);
921 enqueue_cfg_uses(u, wq);
922 } else {
923 wq.push(u);
924 }
925 }
926 }
927 }
928
929 // Try moving a store out of a loop, right before the loop
930 Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
931 // Store has to be first in the loop body
932 IdealLoopTree *n_loop = get_loop(n_ctrl);
933 if (n->is_Store() && n_loop != _ltree_root &&
934 n_loop->is_loop() && n_loop->_head->is_Loop() &&
935 n->in(0) != nullptr) {
936 Node* address = n->in(MemNode::Address);
937 Node* value = n->in(MemNode::ValueIn);
938 Node* mem = n->in(MemNode::Memory);
939
940 // - address and value must be loop invariant
941 // - memory must be a memory Phi for the loop
942 // - Store must be the only store on this memory slice in the
943 // loop: if there's another store following this one, then the value
944 // written at iteration i by the second store could be overwritten
945 // at iteration i+n by the first store: it's not safe to move the
946 // first store out of the loop
947 // - nothing must observe the memory Phi: it guarantees no read
948 // before the store, we are also guaranteed the store post
949 // dominates the loop head (ignoring a possible early
950 // exit). Otherwise there would be extra Phi involved between the
951 // loop's Phi and the store.
952 // - there must be no early exit from the loop before the Store
953 // (such an exit most of the time would be an extra use of the
954 // memory Phi but sometimes is a bottom memory Phi that takes the
955 // store as input).
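// Illustrative example (hypothetical Java source): a store of a loop-invariant value to a
// loop-invariant address, such as
//   for (int i = 0; i < n; i++) { obj.field = k; ... }   // nothing reads obj.field in the loop
// can be performed once before the loop when the conditions above hold.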
956
957 if (!ctrl_is_member(n_loop, address) &&
958 !ctrl_is_member(n_loop, value) &&
959 mem->is_Phi() && mem->in(0) == n_loop->_head &&
960 mem->outcnt() == 1 &&
961 mem->in(LoopNode::LoopBackControl) == n) {
962
963 assert(n_loop->_tail != nullptr, "need a tail");
964 assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");
965
966 // Verify that there's no early exit of the loop before the store.
967 bool ctrl_ok = false;
968 {
969 // Follow control from loop head until n, we exit the loop or
970 // we reach the tail
971 ResourceMark rm;
972 Unique_Node_List wq;
973 wq.push(n_loop->_head);
974
975 for (uint next = 0; next < wq.size(); ++next) {
976 Node *m = wq.at(next);
977 if (m == n->in(0)) {
978 ctrl_ok = true;
979 continue;
980 }
981 assert(!has_ctrl(m), "should be CFG");
982 if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
983 ctrl_ok = false;
984 break;
985 }
986 enqueue_cfg_uses(m, wq);
987 if (wq.size() > 10) {
988 ctrl_ok = false;
989 break;
990 }
991 }
992 }
993 if (ctrl_ok) {
994 // move the Store
995 _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
996 _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
997 _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
998 // Disconnect the phi now. An empty phi can confuse other
999 // optimizations in this pass of loop opts.
1000 _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
1001 n_loop->_body.yank(mem);
1002
1003 set_ctrl_and_loop(n, n->in(0));
1004
1005 return n;
1006 }
1007 }
1008 }
1009 return nullptr;
1010 }
1011
1012 // Try moving a store out of a loop, right after the loop
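// Illustrative example (hypothetical Java source):
//   for (int i = 0; i < n; i++) { obj.field = i; }   // nothing in the loop reads obj.field
// The store can be sunk to after the loop so that only the final value is written.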
1013 void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
1014 if (n->is_Store() && n->in(0) != nullptr) {
1015 Node *n_ctrl = get_ctrl(n);
1016 IdealLoopTree *n_loop = get_loop(n_ctrl);
1017 // Store must be in a loop
1018 if (n_loop != _ltree_root && !n_loop->_irreducible) {
1019 Node* address = n->in(MemNode::Address);
1020 Node* value = n->in(MemNode::ValueIn);
1021 // address must be loop invariant
1022 if (!ctrl_is_member(n_loop, address)) {
1023 // Store must be last on this memory slice in the loop and
1024 // nothing in the loop must observe it
1025 Node* phi = nullptr;
1026 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1027 Node* u = n->fast_out(i);
1028 if (has_ctrl(u)) { // control use?
1029 if (!ctrl_is_member(n_loop, u)) {
1030 continue;
1031 }
1032 if (u->is_Phi() && u->in(0) == n_loop->_head) {
1033 assert(_igvn.type(u) == Type::MEMORY, "bad phi");
1034 // multiple phis on the same slice are possible
1035 if (phi != nullptr) {
1036 return;
1037 }
1038 phi = u;
1039 continue;
1040 }
1041 }
1042 return;
1043 }
1044 if (phi != nullptr) {
1045 // Nothing in the loop before the store (next iteration)
1046 // must observe the stored value
1047 bool mem_ok = true;
1048 {
1049 ResourceMark rm;
1050 Unique_Node_List wq;
1051 wq.push(phi);
1052 for (uint next = 0; next < wq.size() && mem_ok; ++next) {
1053 Node *m = wq.at(next);
1054 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
1055 Node* u = m->fast_out(i);
1056 if (u->is_Store() || u->is_Phi()) {
1057 if (u != n) {
1058 wq.push(u);
1059 mem_ok = (wq.size() <= 10);
1060 }
1061 } else {
1062 mem_ok = false;
1063 break;
1064 }
1065 }
1066 }
1067 }
1068 if (mem_ok) {
1069 // Move the store out of the loop if the LCA of all
1070 // users (except for the phi) is outside the loop.
1071 Node* hook = new Node(1);
1072 hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
1073 _igvn.rehash_node_delayed(phi);
1074 int count = phi->replace_edge(n, hook, &_igvn);
1075 assert(count > 0, "inconsistent phi");
1076
1077 // Compute latest point this store can go
1078 Node* lca = get_late_ctrl(n, get_ctrl(n));
1079 if (lca->is_OuterStripMinedLoop()) {
1080 lca = lca->in(LoopNode::EntryControl);
1081 }
1082 if (n_loop->is_member(get_loop(lca))) {
1083 // LCA is in the loop - bail out
1084 _igvn.replace_node(hook, n);
1085 return;
1086 }
1087 #ifdef ASSERT
1088 if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
1089 assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
1090 n_loop->_head->as_Loop()->verify_strip_mined(1);
1091 Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
1092 IdealLoopTree* outer_loop = get_loop(outer);
1093 assert(n_loop->_parent == outer_loop, "broken loop tree");
1094 assert(get_loop(lca) == outer_loop, "safepoint in outer loop consumes all memory state");
1095 }
1096 #endif
1097 lca = place_outside_loop(lca, n_loop);
1098 assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
1099 assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1100
1101 // Move store out of the loop
1102 _igvn.replace_node(hook, n->in(MemNode::Memory));
1103 _igvn.replace_input_of(n, 0, lca);
1104 set_ctrl_and_loop(n, lca);
1105
1106 // Disconnect the phi now. An empty phi can confuse other
1107 // optimizations in this pass of loop opts.
1108 if (phi->in(LoopNode::LoopBackControl) == phi) {
1109 _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1110 n_loop->_body.yank(phi);
1111 }
1112 }
1113 }
1114 }
1115 }
1116 }
1117 }
1118
1119 //------------------------------split_if_with_blocks_pre-----------------------
1120 // Do the real work in a non-recursive function. Data nodes want to be
1121 // cloned in the pre-order so they can feed each other nicely.
1122 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1123 // Cloning these guys is unlikely to win
1124 int n_op = n->Opcode();
1125 if (n_op == Op_MergeMem) {
1126 return n;
1127 }
1128 if (n->is_Proj()) {
1129 return n;
1130 }
1131 // Do not clone-up CmpFXXX variations, as these are always
1132 // followed by a CmpI
1133 if (n->is_Cmp()) {
1134 return n;
1135 }
1136 // Attempt to use a conditional move instead of a phi/branch
1137 if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1138 Node *cmov = conditional_move( n );
1139 if (cmov) {
1140 return cmov;
1141 }
1142 }
1143 if (n->is_CFG() || n->is_LoadStore()) {
1144 return n;
1145 }
1146 if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1147 if (!C->major_progress()) { // If chance of no more loop opts...
1148 _igvn._worklist.push(n); // maybe we'll remove them
1149 }
1150 return n;
1151 }
1152
1153 if (n->is_Con()) {
1154 return n; // No cloning for Con nodes
1155 }
1156
1157 Node *n_ctrl = get_ctrl(n);
1158 if (!n_ctrl) {
1159 return n; // Dead node
1160 }
1161
1162 Node* res = try_move_store_before_loop(n, n_ctrl);
1163 if (res != nullptr) {
1164 return n;
1165 }
1166
1167 // Attempt to remix address expressions for loop invariants
1168 Node *m = remix_address_expressions( n );
1169 if( m ) return m;
1170
1171 if (n_op == Op_AddI) {
1172 Node *nn = convert_add_to_muladd( n );
1173 if ( nn ) return nn;
1174 }
1175
1176 if (n->is_ConstraintCast() && n->as_ConstraintCast()->dependency().narrows_type()) {
1177 Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
1178 // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
1179 // Node control inputs don't necessarily agree with loop control info (due to
1180 // transformations that happened in between), thus an additional dominance check is needed
1181 // to keep loop info valid.
1182 if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
1183 _igvn.replace_node(n, dom_cast);
1184 return dom_cast;
1185 }
1186 }
1187
1188 // Determine if the Node has inputs from some local Phi.
1189 // Returns the block to clone thru.
1190 Node *n_blk = has_local_phi_input( n );
1191 if( !n_blk ) return n;
1192
1193 // Do not clone the trip counter through on a CountedLoop
1194 // (messes up the canonical shape).
1195 if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
1196 (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
1197 return n;
1198 }
1199 // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
1200 if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
1201 n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
1202 return n;
1203 }
1204
1205 // Check for having no control input; not pinned. Allow
1206 // dominating control.
1207 if (n->in(0)) {
1208 Node *dom = idom(n_blk);
1209 if (dom_lca(n->in(0), dom) != n->in(0)) {
1210 return n;
1211 }
1212 }
1213 // Policy: when is it profitable? You must get more wins than
1214 // policy before it is considered profitable. Policy is usually 0,
1215 // so 1 win is considered profitable. Big merges will require big
1216 // cloning, so get a larger policy.
1217 int policy = n_blk->req() >> 2;
1218
1219 // If the loop is a candidate for range check elimination,
1220 // delay splitting through its phi until a later loop optimization
1221 if (n_blk->is_BaseCountedLoop()) {
1222 IdealLoopTree *lp = get_loop(n_blk);
1223 if (lp && lp->_rce_candidate) {
1224 return n;
1225 }
1226 }
1227
1228 if (must_throttle_split_if()) return n;
1229
1230 // Split 'n' through the merge point if it is profitable, replacing it with a new phi.
1231 Node* phi = split_thru_phi(n, n_blk, policy);
1232 if (phi == nullptr) { return n; }
1233
1234 // Moved a load around the loop, 'en-registering' something.
1235 if (n_blk->is_Loop() && n->is_Load() &&
1236 !phi->in(LoopNode::LoopBackControl)->is_Load())
1237 C->set_major_progress();
1238
1239 return phi;
1240 }
1241
1242 static bool merge_point_too_heavy(Compile* C, Node* region) {
1243 // Bail out if the region and its phis have too many users.
1244 int weight = 0;
1245 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1246 weight += region->fast_out(i)->outcnt();
1247 }
1248 int nodes_left = C->max_node_limit() - C->live_nodes();
1249 if (weight * 8 > nodes_left) {
1250 if (PrintOpto) {
1251 tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
1252 }
1253 return true;
1254 } else {
1255 return false;
1256 }
1257 }
1258
1259 static bool merge_point_safe(Node* region) {
1260 // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1261 // having a PhiNode input. This sidesteps the dangerous case where the split
1262 // ConvI2LNode may become TOP if the input Value() does not
1263 // overlap the ConvI2L range, leaving a node which may not dominate its
1264 // uses.
1265 // A better fix for this problem can be found in the BugTraq entry, but
1266 // expediency for Mantis demands this hack.
1267 #ifdef _LP64
1268 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1269 Node* n = region->fast_out(i);
1270 if (n->is_Phi()) {
1271 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1272 Node* m = n->fast_out(j);
1273 if (m->Opcode() == Op_ConvI2L)
1274 return false;
1275 if (m->is_CastII()) {
1276 return false;
1277 }
1278 }
1279 }
1280 }
1281 #endif
1282 return true;
1283 }
1284
1285
1286 //------------------------------place_outside_loop---------------------------------
1287 // Place some computation outside of this loop on the path to the use passed as argument
1288 Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
1289 Node* head = loop->_head;
1290 assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
1291 if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
1292 loop = loop->_parent;
1293 assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
1294 }
1295
1296 // Pick control right outside the loop
1297 for (;;) {
1298 Node* dom = idom(useblock);
1299 if (loop->is_member(get_loop(dom))) {
1300 break;
1301 }
1302 useblock = dom;
1303 }
1304 assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
1305 return useblock;
1306 }
1307
1308
1309 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1310 if (!n->is_If() || n->is_BaseCountedLoopEnd()) {
1311 return false;
1312 }
1313 if (!n->in(0)->is_Region()) {
1314 return false;
1315 }
1316
1317 Node* region = n->in(0);
1318 Node* dom = idom(region);
1319 if (!dom->is_If() || !n->as_If()->same_condition(dom, &_igvn)) {
1320 return false;
1321 }
1322 IfNode* dom_if = dom->as_If();
1323 IfTrueNode* proj_true = dom_if->true_proj();
1324 IfFalseNode* proj_false = dom_if->false_proj();
1325
1326 for (uint i = 1; i < region->req(); i++) {
1327 if (is_dominator(proj_true, region->in(i))) {
1328 continue;
1329 }
1330 if (is_dominator(proj_false, region->in(i))) {
1331 continue;
1332 }
1333 return false;
1334 }
1335
1336 return true;
1337 }
1338
1339
1340 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1341 if (must_throttle_split_if()) {
1342 return false;
1343 }
1344
1345 // Do not do 'split-if' if irreducible loops are present.
1346 if (_has_irreducible_loops) {
1347 return false;
1348 }
1349
1350 if (merge_point_too_heavy(C, n_ctrl)) {
1351 return false;
1352 }
1353
1354 // Do not do 'split-if' if some paths are dead. First do dead code
1355 // elimination and then see if it's still profitable.
1356 for (uint i = 1; i < n_ctrl->req(); i++) {
1357 if (n_ctrl->in(i) == C->top()) {
1358 return false;
1359 }
1360 }
1361
1362 // If trying to do a 'Split-If' at the loop head, it is only
1363 // profitable if the cmp folds up on BOTH paths. Otherwise we
1364 // risk peeling a loop forever.
1365
1366 // CNC - Disabled for now. Requires careful handling of loop
1367 // body selection for the cloned code. Also, make sure we check
1368 // for any input path not being in the same loop as n_ctrl. For
1369 // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1370 // because the alternative loop entry points won't be converted
1371 // into LoopNodes.
1372 IdealLoopTree *n_loop = get_loop(n_ctrl);
1373 for (uint j = 1; j < n_ctrl->req(); j++) {
1374 if (get_loop(n_ctrl->in(j)) != n_loop) {
1375 return false;
1376 }
1377 }
1378
1379 // Check for safety of the merge point.
1380 if (!merge_point_safe(n_ctrl)) {
1381 return false;
1382 }
1383
1384 return true;
1385 }
1386
1387 // Detect if the node is the inner strip-mined loop
1388 // Return: null if it's not the case, or the exit of outer strip-mined loop
1389 static Node* is_inner_of_stripmined_loop(const Node* out) {
1390 Node* out_le = nullptr;
1391
1392 if (out->is_CountedLoopEnd()) {
1393 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1394
1395 if (loop != nullptr && loop->is_strip_mined()) {
1396 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1397 }
1398 }
1399
1400 return out_le;
1401 }
1402
1403 //------------------------------split_if_with_blocks_post----------------------
1404 // Do the real work in a non-recursive function. CFG hackery wants to be
1405 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1406 // info.
1407 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1408
1409 // Cloning Cmp through Phi's involves the split-if transform.
1410 // FastLock is not used by an If
1411 if (n->is_Cmp() && !n->is_FastLock()) {
1412 Node *n_ctrl = get_ctrl(n);
1413 // Determine if the Node has inputs from some local Phi.
1414 // Returns the block to clone thru.
1415 Node *n_blk = has_local_phi_input(n);
1416 if (n_blk != n_ctrl) {
1417 return;
1418 }
1419
1420 if (!can_split_if(n_ctrl)) {
1421 return;
1422 }
1423
1424 if (n->outcnt() != 1) {
1425 return; // Multiple bool's from 1 compare?
1426 }
1427 Node *bol = n->unique_out();
1428 assert(bol->is_Bool(), "expect a bool here");
1429 if (bol->outcnt() != 1) {
1430 return;// Multiple branches from 1 compare?
1431 }
1432 Node *iff = bol->unique_out();
1433
1434 // Check some safety conditions
1435 if (iff->is_If()) { // Classic split-if?
1436 if (iff->in(0) != n_ctrl) {
1437 return; // Compare must be in same blk as if
1438 }
1439 } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1440 // Can't split CMove with different control.
1441 if (get_ctrl(iff) != n_ctrl) {
1442 return;
1443 }
1444 if (get_ctrl(iff->in(2)) == n_ctrl ||
1445 get_ctrl(iff->in(3)) == n_ctrl) {
1446 return; // Inputs not yet split-up
1447 }
1448 if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1449 return; // Loop-invar test gates loop-varying CMOVE
1450 }
1451 } else {
1452 return; // some other kind of node, such as an Allocate
1453 }
1454
1455 // When is split-if profitable? Every 'win' means some control flow
1456 // goes dead, so it's almost always a win.
1457 int policy = 0;
1458 // Split compare 'n' through the merge point if it is profitable
1459 Node *phi = split_thru_phi( n, n_ctrl, policy);
1460 if (!phi) {
1461 return;
1462 }
1463
1464 // Now split the bool up thru the phi
1465 Node* bolphi = split_thru_phi(bol, n_ctrl, -1);
1466 guarantee(bolphi != nullptr, "null boolean phi node");
1467 assert(iff->in(1) == bolphi, "");
1468
1469 if (bolphi->Value(&_igvn)->singleton()) {
1470 return;
1471 }
1472
1473 // Conditional-move? Must split up now
1474 if (!iff->is_If()) {
1475 Node* cmovphi = split_thru_phi(iff, n_ctrl, -1);
1476 return;
1477 }
1478
1479 // Now split the IF
1480 C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff);
1481 if (TraceLoopOpts) {
1482 tty->print_cr("Split-If");
1483 }
1484 do_split_if(iff);
1485 C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff);
1486 return;
1487 }
1488
1489 // Two identical ifs back to back can be merged
1490 if (try_merge_identical_ifs(n)) {
1491 return;
1492 }
1493
1494 // Check for an IF ready to split; one that has its
1495 // condition codes input coming from a Phi at the block start.
1496 int n_op = n->Opcode();
1497
1498 // Check for an IF being dominated by another IF same test
1499 if (n_op == Op_If ||
1500 n_op == Op_RangeCheck) {
1501 Node *bol = n->in(1);
1502 uint max = bol->outcnt();
1503 // Check for same test used more than once?
1504 if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
1505 // Search up IDOMs to see if this IF is dominated.
1506 Node* cmp = bol->in(1);
1507 Node *cutoff = cmp->is_SubTypeCheck() ? dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);
1508
1509 // Now search up IDOMs till cutoff, looking for a dominating test
1510 Node *prevdom = n;
1511 Node *dom = idom(prevdom);
1512 while (dom != cutoff) {
1513 if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
1514 safe_for_if_replacement(dom)) {
1515 // It's invalid to move control dependent data nodes in the inner
1516 // strip-mined loop, because it would:
1517 // 1) break the validation of LoopNode::verify_strip_mined()
1518 // 2) move code with side effects within the strip-mined loop
1519 // Move to the exit of the outer strip-mined loop in that case.
1520 Node* out_le = is_inner_of_stripmined_loop(dom);
1521 if (out_le != nullptr) {
1522 prevdom = out_le;
1523 }
1524 // Replace the dominated test with an obvious true or false.
1525 // Place it on the IGVN worklist for later cleanup.
1526 C->set_major_progress();
1527 // Split if: pin array accesses that are control dependent on a range check and moved to a regular if,
1528 // to prevent an array load from floating above its range check. There are three cases:
1529 // 1. Move from RangeCheck "a" to RangeCheck "b": don't need to pin. If we ever remove b, then we pin
1530 // all its array accesses at that point.
1531 // 2. We move from RangeCheck "a" to regular if "b": need to pin. If we ever remove b, then its array
1532 // accesses would start to float, since we don't pin at that point.
1533 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1534 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1535 prevdom->in(0)->Opcode() != Op_RangeCheck;
1536 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1537 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1538 return;
1539 }
1540 prevdom = dom;
1541 dom = idom(prevdom);
1542 }
1543 }
1544 }
1545
1546 try_sink_out_of_loop(n);
1547 if (C->failing()) {
1548 return;
1549 }
1550
1551 try_move_store_after_loop(n);
1552 }
1553
1554 // Transform:
1555 //
1556 // if (some_condition) {
1557 // // body 1
1558 // } else {
1559 // // body 2
1560 // }
1561 // if (some_condition) {
1562 // // body 3
1563 // } else {
1564 // // body 4
1565 // }
1566 //
1567 // into:
1568 //
1569 //
1570 // if (some_condition) {
1571 // // body 1
1572 // // body 3
1573 // } else {
1574 // // body 2
1575 // // body 4
1576 // }
1577 bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
1578 if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1579 Node *n_ctrl = n->in(0);
1580 IfNode* dom_if = idom(n_ctrl)->as_If();
1581 if (n->in(1) != dom_if->in(1)) {
1582 assert(n->in(1)->in(1)->is_SubTypeCheck() &&
1583 (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
1584 dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
1585 _igvn.replace_input_of(n, 1, dom_if->in(1));
1586 }
1587 IfTrueNode* dom_proj_true = dom_if->true_proj();
1588 IfFalseNode* dom_proj_false = dom_if->false_proj();
1589
1590 // Now split the IF
1591 RegionNode* new_false_region;
1592 RegionNode* new_true_region;
1593 do_split_if(n, &new_false_region, &new_true_region);
1594 assert(new_false_region->req() == new_true_region->req(), "");
1595 #ifdef ASSERT
1596 for (uint i = 1; i < new_false_region->req(); ++i) {
1597 assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if");
1598 assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if");
1599 }
1600 #endif
1601 assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test");
1602
1603 // We now have:
1604 // if (some_condition) {
1605 // // body 1
1606 // if (some_condition) {
1607 // body3: // new_true_region
1608 // // body3
1609 // } else {
1610 // goto body4;
1611 // }
1612 // } else {
1613 // // body 2
1614 // if (some_condition) {
1615 // goto body3;
1616 // } else {
1617 // body4: // new_false_region
1618 // // body4;
1619 // }
1620 // }
1621 //
1622
1623 // clone pinned nodes thru the resulting regions
1624 push_pinned_nodes_thru_region(dom_if, new_true_region);
1625 push_pinned_nodes_thru_region(dom_if, new_false_region);
1626
1627 // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent
1628 // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an
1629 // unrelated control dependency.
1630 for (uint i = 1; i < new_false_region->req(); i++) {
1631 if (is_dominator(dom_proj_true, new_false_region->in(i))) {
1632 dominated_by(dom_proj_true, new_false_region->in(i)->in(0)->as_If());
1633 } else {
1634 assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if");
1635 dominated_by(dom_proj_false, new_false_region->in(i)->in(0)->as_If());
1636 }
1637 }
1638 return true;
1639 }
1640 return false;
1641 }
1642
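// For nodes pinned at 'region' that depend only on a test and whose data inputs all have
// their control above dom_if: clone them per region path and merge the clones with a Phi,
// so that the cloned ifs feeding the region can then be optimized out.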
1643 void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) {
1644 for (DUIterator i = region->outs(); region->has_out(i); i++) {
1645 Node* u = region->out(i);
1646 if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test()) {
1647 continue;
1648 }
1649 assert(u->in(0) == region, "not a control dependent node?");
1650 uint j = 1;
1651 for (; j < u->req(); ++j) {
1652 Node* in = u->in(j);
1653 if (!is_dominator(ctrl_or_self(in), dom_if)) {
1654 break;
1655 }
1656 }
1657 if (j == u->req()) {
1658 Node *phi = PhiNode::make_blank(region, u);
1659 for (uint k = 1; k < region->req(); ++k) {
1660 Node* clone = u->clone();
1661 clone->set_req(0, region->in(k));
1662 register_new_node(clone, region->in(k));
1663 phi->init_req(k, clone);
1664 }
1665 register_new_node(phi, region);
1666 _igvn.replace_node(u, phi);
1667 --i;
1668 }
1669 }
1670 }
1671
1672 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const {
1673 if (!dom->is_CountedLoopEnd()) {
1674 return true;
1675 }
1676 CountedLoopEndNode* le = dom->as_CountedLoopEnd();
1677 CountedLoopNode* cl = le->loopnode();
1678 if (cl == nullptr) {
1679 return true;
1680 }
1681 if (!cl->is_main_loop()) {
1682 return true;
1683 }
1684 if (cl->is_canonical_loop_entry() == nullptr) {
1685 return true;
1686 }
1687 // Further unrolling is possible so loop exit condition might change
1688 return false;
1689 }
1690
1691 // See if a shared loop-varying computation has no loop-varying uses.
1692 // Happens if something is only used for JVM state in uncommon trap exits,
1693 // like various versions of induction variable+offset. Clone the
1694 // computation per usage to allow it to sink out of the loop.
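// For example (an illustrative sketch only, names made up):
//
//   for (int i = 0; i < n; i++) {
//     int idx = i + off;                 // shared loop-varying computation
//     if (guard) { uncommon_trap(); }    // 'idx' only kept alive in the trap's JVM state
//     a[i] = 0;
//   }
//
// 'idx' has no loop-varying uses besides the JVM state of trap exits, so a private
// clone of the AddI can be made per use and placed outside the loop.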
1695 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
1696 bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
1697 n->in(1)->bottom_type()->isa_rawptr() &&
1698 !n->bottom_type()->isa_rawptr();
1699
1700 if (has_ctrl(n) &&
1701 !n->is_Phi() &&
1702 !n->is_Bool() &&
1703 !n->is_Proj() &&
1704 !n->is_MergeMem() &&
1705 !n->is_CMove() &&
1706 !n->is_OpaqueConstantBool() &&
1707 !n->is_OpaqueInitializedAssertionPredicate() &&
1708 !n->is_OpaqueTemplateAssertionPredicate() &&
1709 !is_raw_to_oop_cast && // don't extend live ranges of raw oops
1710 n->Opcode() != Op_CreateEx &&
1711 (KillPathsReachableByDeadTypeNode || !n->is_Type())
1712 ) {
1713 Node *n_ctrl = get_ctrl(n);
1714 IdealLoopTree *n_loop = get_loop(n_ctrl);
1715
1716 if (n->in(0) != nullptr) {
1717 IdealLoopTree* loop_ctrl = get_loop(n->in(0));
1718 if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) {
1719 // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example,
1720 // for Div nodes inside a loop (control input inside loop) without a use except for an UCT (outside the loop).
// Rewire control of n to right outside of the loop, regardless of whether its input(s) are later sunk or not.
1722 Node* maybe_pinned_n = n;
1723 Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl);
1724 if (!would_sink_below_pre_loop_exit(loop_ctrl, outside_ctrl)) {
1725 if (n->depends_only_on_test()) {
1726 // If this node depends_only_on_test, it will be rewired to a control input that is not
1727 // the correct test. As a result, it must be pinned otherwise it can be incorrectly
1728 // rewired to a dominating test equivalent to the new control.
1729 Node* pinned_clone = n->pin_node_under_control();
1730 if (pinned_clone != nullptr) {
1731 register_new_node(pinned_clone, n_ctrl);
1732 maybe_pinned_n = pinned_clone;
1733 _igvn.replace_node(n, pinned_clone);
1734 }
1735 }
1736 _igvn.replace_input_of(maybe_pinned_n, 0, outside_ctrl);
1737 }
1738 }
1739 }
1740 if (n_loop != _ltree_root && n->outcnt() > 1) {
1741 // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of
1742 // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us.
1743 Node* early_ctrl = compute_early_ctrl(n, n_ctrl);
1744 if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now
1745 ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops!
1746 if (n->is_Store() || n->is_LoadStore()) {
1747 assert(false, "no node with a side effect");
1748 C->record_failure("no node with a side effect");
1749 return;
1750 }
1751 Node* outer_loop_clone = nullptr;
1752 for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) {
1753 Node* u = n->last_out(j); // Clone private computation per use
1754 _igvn.rehash_node_delayed(u);
1755 Node* x = nullptr;
1756 if (n->in(0) != nullptr && n->depends_only_on_test()) {
1757 // If this node depends_only_on_test, it will be rewired to a control input that is not
1758 // the correct test. As a result, it must be pinned otherwise it can be incorrectly
1759 // rewired to a dominating test equivalent to the new control.
1760 x = n->pin_node_under_control();
1761 }
1762 if (x == nullptr) {
1763 x = n->clone();
1764 }
1765 Node* x_ctrl = nullptr;
1766 if (u->is_Phi()) {
1767 // Replace all uses of normal nodes. Replace Phi uses
1768 // individually, so the separate Nodes can sink down
1769 // different paths.
1770 uint k = 1;
1771 while (u->in(k) != n) k++;
1772 u->set_req(k, x);
1773 // x goes next to Phi input path
1774 x_ctrl = u->in(0)->in(k);
1775 // Find control for 'x' next to use but not inside inner loops.
1776 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1777 --j;
1778 } else { // Normal use
1779 if (has_ctrl(u)) {
1780 x_ctrl = get_ctrl(u);
1781 } else {
1782 x_ctrl = u->in(0);
1783 }
1784 // Find control for 'x' next to use but not inside inner loops.
1785 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1786 // Replace all uses
1787 if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) {
// If we're sinking a chain of data nodes, we might have inserted a cast to pin the use, which is no longer
// necessary now that we're going to pin n as well
1790 _igvn.replace_node(u, x);
1791 --j;
1792 } else {
1793 int nb = u->replace_edge(n, x, &_igvn);
1794 j -= nb;
1795 }
1796 }
1797
1798 if (n->is_Load()) {
1799 // For loads, add a control edge to a CFG node outside of the loop
1800 // to force them to not combine and return back inside the loop
1801 // during GVN optimization (4641526).
1802 assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked");
1803
1804 IdealLoopTree* x_loop = get_loop(x_ctrl);
1805 Node* x_head = x_loop->_head;
1806 if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) {
1807 // Do not add duplicate LoadNodes to the outer strip mined loop
1808 if (outer_loop_clone != nullptr) {
1809 _igvn.replace_node(x, outer_loop_clone);
1810 continue;
1811 }
1812 outer_loop_clone = x;
1813 }
1814 x->set_req(0, x_ctrl);
1815 } else if (n->in(0) != nullptr){
1816 x->set_req(0, x_ctrl);
1817 }
1818 assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
1819 assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop");
1820 register_new_node(x, x_ctrl);
1821
1822 // Chain of AddP nodes: (AddP base (AddP base (AddP base )))
1823 // All AddP nodes must keep the same base after sinking so:
1824 // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk,
1825 // their bases remain the same.
1826 // (see 2- below)
1827 assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() ||
1828 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) ||
1829 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape");
1830 if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() &&
1831 !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) {
1832 assert(!x->is_Load(), "load should be pinned");
1833 // Use a cast node to pin clone out of loop
1834 Node* cast = nullptr;
1835 for (uint k = 0; k < x->req(); k++) {
1836 Node* in = x->in(k);
1837 if (in != nullptr && ctrl_is_member(n_loop, in)) {
1838 const Type* in_t = _igvn.type(in);
1839 cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
1840 ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr);
1841 }
1842 if (cast != nullptr) {
1843 Node* prev = _igvn.hash_find_insert(cast);
1844 if (prev != nullptr && get_ctrl(prev) == x_ctrl) {
1845 cast->destruct(&_igvn);
1846 cast = prev;
1847 } else {
1848 register_new_node(cast, x_ctrl);
1849 }
1850 x->replace_edge(in, cast);
1851 // Chain of AddP nodes:
1852 // 2- A CastPP of the base is only added now that all AddP nodes are sunk
1853 if (x->is_AddP() && k == AddPNode::Base) {
1854 update_addp_chain_base(x, n->in(AddPNode::Base), cast);
1855 }
1856 break;
1857 }
1858 }
1859 assert(cast != nullptr, "must have added a cast to pin the node");
1860 }
1861 }
1862 _igvn.remove_dead_node(n);
1863 }
1864 _dom_lca_tags_round = 0;
1865 }
1866 }
1867 }
1868
1869 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) {
1870 ResourceMark rm;
1871 Node_List wq;
1872 wq.push(x);
1873 while (wq.size() != 0) {
1874 Node* n = wq.pop();
1875 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1876 Node* u = n->fast_out(i);
1877 if (u->is_AddP() && u->in(AddPNode::Base) == old_base) {
1878 _igvn.replace_input_of(u, AddPNode::Base, new_base);
1879 wq.push(u);
1880 }
1881 }
1882 }
1883 }
1884
1885 // Compute the early control of a node by following its inputs until we reach
1886 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes.
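// For instance (sketch): for n = AddI(LoadI(ctrl1, adr), iv_phi) where the load is pinned
// at ctrl1 and the phi sits at the loop head ctrl2, the early control is whichever of
// ctrl1/ctrl2 is deeper in the dominator tree.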
1887 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) {
1888 Node* early_ctrl = nullptr;
1889 ResourceMark rm;
1890 Unique_Node_List wq;
1891 wq.push(n);
1892 for (uint i = 0; i < wq.size(); i++) {
1893 Node* m = wq.at(i);
1894 Node* c = nullptr;
1895 if (m->is_CFG()) {
1896 c = m;
1897 } else if (m->pinned()) {
1898 c = m->in(0);
1899 } else {
1900 for (uint j = 0; j < m->req(); j++) {
1901 Node* in = m->in(j);
1902 if (in != nullptr) {
1903 wq.push(in);
1904 }
1905 }
1906 }
1907 if (c != nullptr) {
1908 assert(is_dominator(c, n_ctrl), "control input must dominate current control");
1909 if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) {
1910 early_ctrl = c;
1911 }
1912 }
1913 }
1914 assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control");
1915 return early_ctrl;
1916 }
1917
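// Check that every use of 'n' has its control outside of n's loop (taking anti-dependences
// into account when 'n' is a load), i.e. that 'n' itself could be sunk out of the loop.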
1918 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) {
1919 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1920 Node* u = n->fast_out(i);
1921 if (u->is_Opaque1()) {
1922 return false; // Found loop limit, bugfix for 4677003
1923 }
1924 if (u->is_Phi()) {
1925 for (uint j = 1; j < u->req(); ++j) {
1926 if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) {
1927 return false;
1928 }
1929 }
1930 } else {
1931 Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0);
1932 if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) {
1933 return false;
1934 }
1935 }
1936 }
1937 return true;
1938 }
1939
1940 // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input
1941 // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit
1942 // test of the pre loop above the point in the graph where it's pinned. This results in a broken graph. One way to avoid
1943 // it would be to not eliminate the check in the main loop. Instead, we prevent sinking of the node here so better code
1944 // is generated for the main loop.
1945 bool PhaseIdealLoop::would_sink_below_pre_loop_exit(IdealLoopTree* n_loop, Node* ctrl) {
1946 if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop()) {
1947 CountedLoopNode* pre_loop = n_loop->_head->as_CountedLoop();
1948 if (is_dominator(pre_loop->loopexit(), ctrl)) {
1949 return true;
1950 }
1951 }
1952 return false;
1953 }
1954
1955 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) {
1956 if (n->is_Load()) {
1957 // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure each call to
1958 // get_late_ctrl_with_anti_dep() uses its own tag
1959 _dom_lca_tags_round++;
1960 assert(_dom_lca_tags_round != 0, "shouldn't wrap around");
1961
1962 ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl);
1963 }
1964 IdealLoopTree *u_loop = get_loop(ctrl);
1965 if (u_loop == n_loop) {
1966 return false; // Found loop-varying use
1967 }
1968 if (n_loop->is_member(u_loop)) {
1969 return false; // Found use in inner loop
1970 }
1971 if (would_sink_below_pre_loop_exit(n_loop, ctrl)) {
1972 return false;
1973 }
1974 return true;
1975 }
1976
1977 //------------------------------split_if_with_blocks---------------------------
1978 // Check for aggressive application of 'split-if' optimization,
1979 // using basic block level info.
1980 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
1981 Node* root = C->root();
1982 visited.set(root->_idx); // first, mark root as visited
1983 // Do pre-visit work for root
1984 Node* n = split_if_with_blocks_pre(root);
1985 uint cnt = n->outcnt();
1986 uint i = 0;
1987
1988 while (true) {
1989 // Visit all children
1990 if (i < cnt) {
1991 Node* use = n->raw_out(i);
1992 ++i;
1993 if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
1994 // Now do pre-visit work for this use
1995 use = split_if_with_blocks_pre(use);
1996 nstack.push(n, i); // Save parent and next use's index.
1997 n = use; // Process all children of current use.
1998 cnt = use->outcnt();
1999 i = 0;
2000 }
2001 }
2002 else {
2003 // All of n's children have been processed, complete post-processing.
2004 if (cnt != 0 && !n->is_Con()) {
2005 assert(has_node(n), "no dead nodes");
2006 split_if_with_blocks_post(n);
2007 if (C->failing()) {
2008 return;
2009 }
2010 }
2011 if (must_throttle_split_if()) {
2012 nstack.clear();
2013 }
2014 if (nstack.is_empty()) {
2015 // Finished all nodes on stack.
2016 break;
2017 }
2018 // Get saved parent node and next use's index. Visit the rest of uses.
2019 n = nstack.node();
2020 cnt = n->outcnt();
2021 i = nstack.index();
2022 nstack.pop();
2023 }
2024 }
2025 }
2026
2027
2028 //=============================================================================
2029 //
2030 // C L O N E A L O O P B O D Y
2031 //
2032
2033 //------------------------------clone_iff--------------------------------------
2034 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2035 // "Nearly" because all Nodes have been cloned from the original in the loop,
2036 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
2037 // through the Phi recursively, and return a Bool.
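// In other words, the shape (illustrative)
//   Phi(Bool(Cmp(a1, b1)), Bool(Cmp(a2, b2)))
// is rewritten into
//   Bool(Cmp(Phi(a1, a2), Phi(b1, b2)))
// with an extra Opaque layer preserved on top when the merged Bools are wrapped in one.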
2038 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
2039
2040 // Convert this Phi into a Phi merging Bools
2041 uint i;
2042 for (i = 1; i < phi->req(); i++) {
2043 Node* b = phi->in(i);
2044 if (b->is_Phi()) {
2045 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2046 } else {
2047 assert(b->is_Bool() || b->is_OpaqueConstantBool() || b->is_OpaqueInitializedAssertionPredicate(),
2048 "bool, non-null check with OpaqueConstantBool or Initialized Assertion Predicate with its Opaque node");
2049 }
2050 }
2051 Node* n = phi->in(1);
2052 Node* sample_opaque = nullptr;
Node* sample_bool = nullptr;
2054 if (n->is_OpaqueConstantBool() || n->is_OpaqueInitializedAssertionPredicate()) {
2055 sample_opaque = n;
2056 sample_bool = n->in(1);
2057 assert(sample_bool->is_Bool(), "wrong type");
2058 } else {
2059 sample_bool = n;
2060 }
2061 Node *sample_cmp = sample_bool->in(1);
2062
2063 // Make Phis to merge the Cmp's inputs.
2064 PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
2065 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2066 for (i = 1; i < phi->req(); i++) {
2067 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2068 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2069 phi1->set_req(i, n1);
2070 phi2->set_req(i, n2);
2071 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2072 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2073 }
2074 // See if these Phis have been made before.
2075 // Register with optimizer
2076 Node *hit1 = _igvn.hash_find_insert(phi1);
2077 if (hit1) { // Hit, toss just made Phi
2078 _igvn.remove_dead_node(phi1); // Remove new phi
2079 assert(hit1->is_Phi(), "" );
2080 phi1 = (PhiNode*)hit1; // Use existing phi
2081 } else { // Miss
2082 _igvn.register_new_node_with_optimizer(phi1);
2083 }
2084 Node *hit2 = _igvn.hash_find_insert(phi2);
2085 if (hit2) { // Hit, toss just made Phi
2086 _igvn.remove_dead_node(phi2); // Remove new phi
2087 assert(hit2->is_Phi(), "" );
2088 phi2 = (PhiNode*)hit2; // Use existing phi
2089 } else { // Miss
2090 _igvn.register_new_node_with_optimizer(phi2);
2091 }
2092 // Register Phis with loop/block info
2093 set_ctrl(phi1, phi->in(0));
2094 set_ctrl(phi2, phi->in(0));
2095 // Make a new Cmp
2096 Node *cmp = sample_cmp->clone();
2097 cmp->set_req(1, phi1);
2098 cmp->set_req(2, phi2);
2099 _igvn.register_new_node_with_optimizer(cmp);
2100 set_ctrl(cmp, phi->in(0));
2101
2102 // Make a new Bool
2103 Node *b = sample_bool->clone();
2104 b->set_req(1,cmp);
2105 _igvn.register_new_node_with_optimizer(b);
2106 set_ctrl(b, phi->in(0));
2107
2108 if (sample_opaque != nullptr) {
2109 Node* opaque = sample_opaque->clone();
2110 opaque->set_req(1, b);
2111 _igvn.register_new_node_with_optimizer(opaque);
2112 set_ctrl(opaque, phi->in(0));
2113 return opaque;
2114 }
2115
2116 assert(b->is_Bool(), "");
2117 return b;
2118 }
2119
2120 //------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different. Clone Cmps
// through the Phi recursively, and return a Cmp.
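// In other words, the shape (illustrative)
//   Phi(Cmp(a1, b1), Cmp(a2, b2))
// is rewritten into
//   Cmp(Phi(a1, a2), Phi(b1, b2))
// where TOP inputs of the original Phi feed both new Phis on the corresponding paths.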
CmpNode* PhaseIdealLoop::clone_bool(PhiNode* phi) {
uint i;
// Convert this Phi into a Phi merging Cmps
2128 for( i = 1; i < phi->req(); i++ ) {
2129 Node *b = phi->in(i);
2130 if( b->is_Phi() ) {
2131 _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi()));
2132 } else {
2133 assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
2134 }
2135 }
2136
2137 Node *sample_cmp = phi->in(1);
2138
2139 // Make Phis to merge the Cmp's inputs.
2140 PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
2141 PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
2142 for( uint j = 1; j < phi->req(); j++ ) {
2143 Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
2144 Node *n1, *n2;
2145 if( cmp_top->is_Cmp() ) {
2146 n1 = cmp_top->in(1);
2147 n2 = cmp_top->in(2);
2148 } else {
2149 n1 = n2 = cmp_top;
2150 }
2151 phi1->set_req( j, n1 );
2152 phi2->set_req( j, n2 );
2153 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2154 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2155 }
2156
2157 // See if these Phis have been made before.
2158 // Register with optimizer
2159 Node *hit1 = _igvn.hash_find_insert(phi1);
2160 if( hit1 ) { // Hit, toss just made Phi
2161 _igvn.remove_dead_node(phi1); // Remove new phi
2162 assert( hit1->is_Phi(), "" );
2163 phi1 = (PhiNode*)hit1; // Use existing phi
2164 } else { // Miss
2165 _igvn.register_new_node_with_optimizer(phi1);
2166 }
2167 Node *hit2 = _igvn.hash_find_insert(phi2);
2168 if( hit2 ) { // Hit, toss just made Phi
2169 _igvn.remove_dead_node(phi2); // Remove new phi
2170 assert( hit2->is_Phi(), "" );
2171 phi2 = (PhiNode*)hit2; // Use existing phi
2172 } else { // Miss
2173 _igvn.register_new_node_with_optimizer(phi2);
2174 }
2175 // Register Phis with loop/block info
2176 set_ctrl(phi1, phi->in(0));
2177 set_ctrl(phi2, phi->in(0));
2178 // Make a new Cmp
2179 Node *cmp = sample_cmp->clone();
2180 cmp->set_req( 1, phi1 );
2181 cmp->set_req( 2, phi2 );
2182 _igvn.register_new_node_with_optimizer(cmp);
2183 set_ctrl(cmp, phi->in(0));
2184
2185 assert( cmp->is_Cmp(), "" );
2186 return (CmpNode*)cmp;
2187 }
2188
2189 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
2190 IdealLoopTree* loop, IdealLoopTree* outer_loop,
2191 Node_List*& split_if_set, Node_List*& split_bool_set,
2192 Node_List*& split_cex_set, Node_List& worklist,
2193 uint new_counter, CloneLoopMode mode) {
2194 Node* nnn = old_new[old->_idx];
2195 // Copy uses to a worklist, so I can munge the def-use info
2196 // with impunity.
2197 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2198 worklist.push(old->fast_out(j));
2199
2200 while( worklist.size() ) {
2201 Node *use = worklist.pop();
2202 if (!has_node(use)) continue; // Ignore dead nodes
2203 if (use->in(0) == C->top()) continue;
2204 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2205 // Check for data-use outside of loop - at least one of OLD or USE
2206 // must not be a CFG node.
2207 #ifdef ASSERT
2208 if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) {
2209 Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
2210 assert(mode != IgnoreStripMined, "incorrect cloning mode");
2211 assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
2212 }
2213 #endif
2214 if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
2215
2216 // If the Data use is an IF, that means we have an IF outside the
2217 // loop that is switching on a condition that is set inside the
2218 // loop. Happens if people set a loop-exit flag; then test the flag
// in the loop to break the loop, then test it again outside the
2220 // loop to determine which way the loop exited.
2221 //
// For several uses we need to make sure that there is no phi between
2223 // the use and the Bool/Cmp. We therefore clone the Bool/Cmp down here
2224 // to avoid such a phi in between.
2225 // For example, it is unexpected that there is a Phi between an
2226 // AllocateArray node and its ValidLengthTest input that could cause
2227 // split if to break.
2228 assert(!use->is_OpaqueTemplateAssertionPredicate(),
2229 "should not clone a Template Assertion Predicate which should be removed once it's useless");
2230 if (use->is_If() || use->is_CMove() || use->is_OpaqueConstantBool() || use->is_OpaqueInitializedAssertionPredicate() ||
2231 (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) {
2232 // Since this code is highly unlikely, we lazily build the worklist
2233 // of such Nodes to go split.
2234 if (!split_if_set) {
2235 split_if_set = new Node_List();
2236 }
2237 split_if_set->push(use);
2238 }
2239 if (use->is_Bool()) {
2240 if (!split_bool_set) {
2241 split_bool_set = new Node_List();
2242 }
2243 split_bool_set->push(use);
2244 }
2245 if (use->Opcode() == Op_CreateEx) {
2246 if (!split_cex_set) {
2247 split_cex_set = new Node_List();
2248 }
2249 split_cex_set->push(use);
2250 }
2251
2252
2253 // Get "block" use is in
2254 uint idx = 0;
2255 while( use->in(idx) != old ) idx++;
2256 Node *prev = use->is_CFG() ? use : get_ctrl(use);
2257 assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
2258 Node* cfg = (prev->_idx >= new_counter && prev->is_Region())
2259 ? prev->in(2)
2260 : idom(prev);
2261 if( use->is_Phi() ) // Phi use is in prior block
2262 cfg = prev->in(idx); // NOT in block of Phi itself
2263 if (cfg->is_top()) { // Use is dead?
2264 _igvn.replace_input_of(use, idx, C->top());
2265 continue;
2266 }
2267
2268 // If use is referenced through control edge... (idx == 0)
2269 if (mode == IgnoreStripMined && idx == 0) {
2270 LoopNode *head = loop->_head->as_Loop();
2271 if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
2272 // That node is outside the inner loop, leave it outside the
2273 // outer loop as well to not confuse verification code.
2274 assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
2275 _igvn.replace_input_of(use, 0, head->outer_loop_exit());
2276 continue;
2277 }
2278 }
2279
2280 while(!outer_loop->is_member(get_loop(cfg))) {
2281 prev = cfg;
2282 cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg);
2283 }
2284 // If the use occurs after merging several exits from the loop, then
2285 // old value must have dominated all those exits. Since the same old
2286 // value was used on all those exits we did not need a Phi at this
2287 // merge point. NOW we do need a Phi here. Each loop exit value
2288 // is now merged with the peeled body exit; each exit gets its own
2289 // private Phi and those Phis need to be merged here.
2290 Node *phi;
2291 if( prev->is_Region() ) {
2292 if( idx == 0 ) { // Updating control edge?
2293 phi = prev; // Just use existing control
2294 } else { // Else need a new Phi
2295 phi = PhiNode::make( prev, old );
2296 // Now recursively fix up the new uses of old!
2297 for( uint i = 1; i < prev->req(); i++ ) {
2298 worklist.push(phi); // Onto worklist once for each 'old' input
2299 }
2300 }
2301 } else {
2302 // Get new RegionNode merging old and new loop exits
2303 prev = old_new[prev->_idx];
2304 assert( prev, "just made this in step 7" );
2305 if( idx == 0) { // Updating control edge?
2306 phi = prev; // Just use existing control
2307 } else { // Else need a new Phi
2308 // Make a new Phi merging data values properly
2309 phi = PhiNode::make( prev, old );
2310 phi->set_req( 1, nnn );
2311 }
2312 }
2313 // If inserting a new Phi, check for prior hits
2314 if( idx != 0 ) {
2315 Node *hit = _igvn.hash_find_insert(phi);
2316 if( hit == nullptr ) {
2317 _igvn.register_new_node_with_optimizer(phi); // Register new phi
2318 } else { // or
2319 // Remove the new phi from the graph and use the hit
2320 _igvn.remove_dead_node(phi);
2321 phi = hit; // Use existing phi
2322 }
2323 set_ctrl(phi, prev);
2324 }
2325 // Make 'use' use the Phi instead of the old loop body exit value
2326 assert(use->in(idx) == old, "old is still input of use");
2327 // We notify all uses of old, including use, and the indirect uses,
2328 // that may now be optimized because we have replaced old with phi.
2329 _igvn.add_users_to_worklist(old);
2330 if (idx == 0 && use->depends_only_on_test()) {
2331 // If this node depends_only_on_test, it will be rewired to a control input that is not the
2332 // correct test. As a result, it must be pinned otherwise it can be incorrectly rewired to
2333 // a dominating test equivalent to the new control.
2334 Node* pinned_clone = use->pin_node_under_control();
2335 if (pinned_clone != nullptr) {
2336 pinned_clone->set_req(0, phi);
2337 register_new_node_with_ctrl_of(pinned_clone, use);
2338 _igvn.replace_node(use, pinned_clone);
2339 continue;
2340 }
2341 }
2342 _igvn.replace_input_of(use, idx, phi);
2343 if( use->_idx >= new_counter ) { // If updating new phis
2344 // Not needed for correctness, but prevents a weak assert
2345 // in AddPNode from tripping (when we end up with different
2346 // base & derived Phis that will become the same after
2347 // IGVN does CSE).
2348 Node *hit = _igvn.hash_find_insert(use);
2349 if( hit ) // Go ahead and re-hash for hits.
2350 _igvn.replace_node( use, hit );
2351 }
2352 }
2353 }
2354 }
2355
2356 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
2357 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
2358 bool check_old_new) {
2359 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2360 Node* u = n->fast_out(j);
2361 assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned");
2362 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) {
2363 assert(!phase->ctrl_is_member(loop, u) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
2364 if (!phase->ctrl_is_member(loop, u)) {
2365 if (phase->ctrl_is_member(outer_loop, u)) {
2366 wq.push(u);
2367 } else {
2368 // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of
2369 // the outer loop too
2370 Node* u_c = u->in(0);
2371 if (u_c != nullptr) {
2372 IdealLoopTree* u_c_loop = phase->get_loop(u_c);
2373 if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) {
2374 wq.push(u);
2375 }
2376 }
2377 }
2378 }
2379 }
2380 }
2381 }
2382
2383 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
2384 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
2385 Node_List& extra_data_nodes) {
2386 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2387 CountedLoopNode* cl = head->as_CountedLoop();
2388 Node* l = cl->outer_loop();
2389 Node* tail = cl->outer_loop_tail();
2390 IfNode* le = cl->outer_loop_end();
2391 Node* sfpt = cl->outer_safepoint();
2392 CountedLoopEndNode* cle = cl->loopexit();
2393 CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
2394 CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
2395 IfFalseNode* cle_out = cle->false_proj();
2396
2397 Node* new_sfpt = nullptr;
2398 Node* new_cle_out = cle_out->clone();
2399 old_new.map(cle_out->_idx, new_cle_out);
2400 if (mode == CloneIncludesStripMined) {
2401 // clone outer loop body
2402 Node* new_l = l->clone();
2403 Node* new_tail = tail->clone();
2404 IfNode* new_le = le->clone()->as_If();
2405 new_sfpt = sfpt->clone();
2406
2407 set_loop(new_l, outer_loop->_parent);
2408 set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
2409 set_loop(new_cle_out, outer_loop->_parent);
2410 set_idom(new_cle_out, new_cle, dd);
2411 set_loop(new_sfpt, outer_loop->_parent);
2412 set_idom(new_sfpt, new_cle_out, dd);
2413 set_loop(new_le, outer_loop->_parent);
2414 set_idom(new_le, new_sfpt, dd);
2415 set_loop(new_tail, outer_loop->_parent);
2416 set_idom(new_tail, new_le, dd);
2417 set_idom(new_cl, new_l, dd);
2418
2419 old_new.map(l->_idx, new_l);
2420 old_new.map(tail->_idx, new_tail);
2421 old_new.map(le->_idx, new_le);
2422 old_new.map(sfpt->_idx, new_sfpt);
2423
2424 new_l->set_req(LoopNode::LoopBackControl, new_tail);
2425 new_l->set_req(0, new_l);
2426 new_tail->set_req(0, new_le);
2427 new_le->set_req(0, new_sfpt);
2428 new_sfpt->set_req(0, new_cle_out);
2429 new_cle_out->set_req(0, new_cle);
2430 new_cl->set_req(LoopNode::EntryControl, new_l);
2431
2432 _igvn.register_new_node_with_optimizer(new_l);
2433 _igvn.register_new_node_with_optimizer(new_tail);
2434 _igvn.register_new_node_with_optimizer(new_le);
2435 } else {
2436 Node *newhead = old_new[loop->_head->_idx];
2437 newhead->as_Loop()->clear_strip_mined();
2438 _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
2439 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2440 }
// Look at data nodes that were assigned a control in the outer
2442 // loop: they are kept in the outer loop by the safepoint so start
2443 // from the safepoint node's inputs.
2444 IdealLoopTree* outer_loop = get_loop(l);
2445 Node_Stack stack(2);
2446 stack.push(sfpt, 1);
2447 uint new_counter = C->unique();
2448 while (stack.size() > 0) {
2449 Node* n = stack.node();
2450 uint i = stack.index();
2451 while (i < n->req() &&
2452 (n->in(i) == nullptr ||
2453 !has_ctrl(n->in(i)) ||
2454 get_loop(get_ctrl(n->in(i))) != outer_loop ||
2455 (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
2456 i++;
2457 }
2458 if (i < n->req()) {
2459 stack.set_index(i+1);
2460 stack.push(n->in(i), 0);
2461 } else {
2462 assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
2463 Node* m = n == sfpt ? new_sfpt : n->clone();
2464 if (m != nullptr) {
2465 for (uint i = 0; i < n->req(); i++) {
2466 if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) {
2467 m->set_req(i, old_new[m->in(i)->_idx]);
2468 }
2469 }
2470 } else {
2471 assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
2472 }
2473 if (n != sfpt) {
2474 extra_data_nodes.push(n);
2475 _igvn.register_new_node_with_optimizer(m);
2476 assert(get_ctrl(n) == cle_out, "what other control?");
2477 set_ctrl(m, new_cle_out);
2478 old_new.map(n->_idx, m);
2479 }
2480 stack.pop();
2481 }
2482 }
2483 if (mode == CloneIncludesStripMined) {
2484 _igvn.register_new_node_with_optimizer(new_sfpt);
2485 _igvn.register_new_node_with_optimizer(new_cle_out);
2486 }
2487 // Some other transformation may have pessimistically assigned some
2488 // data nodes to the outer loop. Set their control so they are out
2489 // of the outer loop.
2490 ResourceMark rm;
2491 Unique_Node_List wq;
2492 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2493 Node* old = extra_data_nodes.at(i);
2494 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2495 }
2496
2497 for (uint i = 0; i < loop->_body.size(); i++) {
2498 Node* old = loop->_body.at(i);
2499 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2500 }
2501
2502 Node* inner_out = sfpt->in(0);
2503 if (inner_out->outcnt() > 1) {
2504 collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true);
2505 }
2506
2507 Node* new_ctrl = cl->outer_loop_exit();
2508 assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
2509 for (uint i = 0; i < wq.size(); i++) {
2510 Node* n = wq.at(i);
2511 set_ctrl(n, new_ctrl);
2512 if (n->in(0) != nullptr) {
2513 _igvn.replace_input_of(n, 0, new_ctrl);
2514 }
2515 collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false);
2516 }
2517 } else {
2518 Node *newhead = old_new[loop->_head->_idx];
2519 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2520 }
2521 }
2522
2523 //------------------------------clone_loop-------------------------------------
2524 //
2525 // C L O N E A L O O P B O D Y
2526 //
2527 // This is the basic building block of the loop optimizations. It clones an
2528 // entire loop body. It makes an old_new loop body mapping; with this mapping
2529 // you can find the new-loop equivalent to an old-loop node. All new-loop
2530 // nodes are exactly equal to their old-loop counterparts, all edges are the
2531 // same. All exits from the old-loop now have a RegionNode that merges the
2532 // equivalent new-loop path. This is true even for the normal "loop-exit"
2533 // condition. All uses of loop-invariant old-loop values now come from (one
2534 // or more) Phis that merge their new-loop equivalents.
2535 //
2536 // This operation leaves the graph in an illegal state: there are two valid
2537 // control edges coming from the loop pre-header to both loop bodies. I'll
2538 // definitely have to hack the graph after running this transform.
2539 //
2540 // From this building block I will further edit edges to perform loop peeling
2541 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2542 //
// Parameter side_by_side_idom:
// When side_by_side_idom is null, the dominator tree is constructed for
2545 // the clone loop to dominate the original. Used in construction of
2546 // pre-main-post loop sequence.
2547 // When nonnull, the clone and original are side-by-side, both are
2548 // dominated by the side_by_side_idom node. Used in construction of
2549 // unswitched loops.
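// A typical use from a loop transform looks roughly like this (sketch only; the exact
// arguments depend on the transform):
//
//   Node_List old_new;
//   clone_loop(loop, old_new, dom_depth(loop->_head), ControlAroundStripMined);
//   Node* new_head = old_new[loop->_head->_idx];   // new-loop copy of the old loop head
//
// after which the caller rewires entry and back edges of the two bodies to implement
// peeling, unswitching, pre/main/post construction, etc.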
2550 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2551 CloneLoopMode mode, Node* side_by_side_idom) {
2552
2553 LoopNode* head = loop->_head->as_Loop();
2554 head->verify_strip_mined(1);
2555
2556 if (C->do_vector_loop() && PrintOpto) {
2557 const char* mname = C->method()->name()->as_quoted_ascii();
2558 if (mname != nullptr) {
2559 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2560 }
2561 }
2562
2563 CloneMap& cm = C->clone_map();
2564 if (C->do_vector_loop()) {
2565 cm.set_clone_idx(cm.max_gen()+1);
2566 #ifndef PRODUCT
2567 if (PrintOpto) {
2568 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2569 loop->dump_head();
2570 }
2571 #endif
2572 }
2573
2574 // Step 1: Clone the loop body. Make the old->new mapping.
2575 clone_loop_body(loop->_body, old_new, &cm);
2576
2577 IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2578
2579 // Step 2: Fix the edges in the new body. If the old input is outside the
2580 // loop use it. If the old input is INside the loop, use the corresponding
2581 // new node instead.
2582 fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false);
2583
2584 Node_List extra_data_nodes; // data nodes in the outer strip mined loop
2585 clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2586
2587 // Step 3: Now fix control uses. Loop varying control uses have already
2588 // been fixed up (as part of all input edges in Step 2). Loop invariant
2589 // control uses must be either an IfFalse or an IfTrue. Make a merge
2590 // point to merge the old and new IfFalse/IfTrue nodes; make the use
2591 // refer to this.
2592 Node_List worklist;
2593 uint new_counter = C->unique();
2594 fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist);
2595
2596 // Step 4: If loop-invariant use is not control, it must be dominated by a
2597 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region
2598 // there if needed. Make a Phi there merging old and new used values.
2599 Node_List *split_if_set = nullptr;
2600 Node_List *split_bool_set = nullptr;
2601 Node_List *split_cex_set = nullptr;
2602 fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);
2603
2604 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2605 Node* old = extra_data_nodes.at(i);
2606 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2607 split_bool_set, split_cex_set, worklist, new_counter,
2608 mode);
2609 }
2610
2611 // Check for IFs that need splitting/cloning. Happens if an IF outside of
2612 // the loop uses a condition set in the loop. The original IF probably
// takes control from one or more OLD Regions (which in turn get control from NEW
// Regions). In any case, there will be a set of Phis for each merge point
// from the IF up to where the original BOOL def exits the loop.
2616 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
2617
2618 }
2619
2620 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) {
2621 if (split_if_set) {
2622 while (split_if_set->size()) {
2623 Node *iff = split_if_set->pop();
2624 uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1;
2625 if (iff->in(input)->is_Phi()) {
2626 Node *b = clone_iff(iff->in(input)->as_Phi());
2627 _igvn.replace_input_of(iff, input, b);
2628 }
2629 }
2630 }
2631 if (split_bool_set) {
2632 while (split_bool_set->size()) {
2633 Node *b = split_bool_set->pop();
2634 Node *phi = b->in(1);
2635 assert(phi->is_Phi(), "");
2636 CmpNode *cmp = clone_bool((PhiNode*) phi);
2637 _igvn.replace_input_of(b, 1, cmp);
2638 }
2639 }
2640 if (split_cex_set) {
2641 while (split_cex_set->size()) {
2642 Node *b = split_cex_set->pop();
2643 assert(b->in(0)->is_Region(), "");
2644 assert(b->in(1)->is_Phi(), "");
2645 assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2646 split_up(b, b->in(0), nullptr);
2647 }
2648 }
2649 }
2650
2651 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2652 uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set,
2653 Node_List*& split_bool_set, Node_List*& split_cex_set) {
2654 for(uint i = 0; i < body.size(); i++ ) {
2655 Node* old = body.at(i);
2656 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2657 split_bool_set, split_cex_set, worklist, new_counter,
2658 mode);
2659 }
2660 }
2661
2662 void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
2663 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) {
2664 LoopNode* head = loop->_head->as_Loop();
2665 for(uint i = 0; i < body.size(); i++ ) {
2666 Node* old = body.at(i);
2667 if( !old->is_CFG() ) continue;
2668
2669 // Copy uses to a worklist, so I can munge the def-use info
2670 // with impunity.
2671 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) {
2672 worklist.push(old->fast_out(j));
2673 }
2674
2675 while (worklist.size()) { // Visit all uses
2676 Node *use = worklist.pop();
2677 if (!has_node(use)) continue; // Ignore dead nodes
2678 IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use );
2679 if (!loop->is_member(use_loop) && use->is_CFG()) {
2680 // Both OLD and USE are CFG nodes here.
2681 assert(use->is_Proj(), "" );
2682 Node* nnn = old_new[old->_idx];
2683
2684 Node* newuse = nullptr;
2685 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2686 CountedLoopNode* cl = head->as_CountedLoop();
2687 CountedLoopEndNode* cle = cl->loopexit();
2688 // is use the projection that exits the loop from the CountedLoopEndNode?
2689 if (use->in(0) == cle) {
2690 IfFalseNode* cle_out = use->as_IfFalse();
2691 IfNode* le = cl->outer_loop_end();
2692 use = le->false_proj();
2693 use_loop = get_loop(use);
2694 if (mode == CloneIncludesStripMined) {
2695 nnn = old_new[le->_idx];
2696 } else {
2697 newuse = old_new[cle_out->_idx];
2698 }
2699 }
2700 }
2701 if (newuse == nullptr) {
2702 newuse = use->clone();
2703 }
2704
2705 // Clone the loop exit control projection
2706 if (C->do_vector_loop() && cm != nullptr) {
2707 cm->verify_insert_and_clone(use, newuse, cm->clone_idx());
2708 }
2709 newuse->set_req(0,nnn);
2710 _igvn.register_new_node_with_optimizer(newuse);
2711 set_loop(newuse, use_loop);
2712 set_idom(newuse, nnn, dom_depth(nnn) + 1 );
2713
2714 // We need a Region to merge the exit from the peeled body and the
2715 // exit from the old loop body.
2716 RegionNode *r = new RegionNode(3);
2717 uint dd_r = MIN2(dom_depth(newuse), dom_depth(use));
2718 assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" );
2719
2720 // The original user of 'use' uses 'r' instead.
2721 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
2722 Node* useuse = use->last_out(l);
2723 _igvn.rehash_node_delayed(useuse);
2724 uint uses_found = 0;
2725 if (useuse->in(0) == use) {
2726 useuse->set_req(0, r);
2727 uses_found++;
2728 if (useuse->is_CFG()) {
2729 // This is not a dom_depth > dd_r because when new
2730 // control flow is constructed by a loop opt, a node and
2731 // its dominator can end up at the same dom_depth
2732 assert(dom_depth(useuse) >= dd_r, "");
2733 set_idom(useuse, r, dom_depth(useuse));
2734 }
2735 }
2736 for (uint k = 1; k < useuse->req(); k++) {
2737 if( useuse->in(k) == use ) {
2738 useuse->set_req(k, r);
2739 uses_found++;
2740 if (useuse->is_Loop() && k == LoopNode::EntryControl) {
2741 // This is not a dom_depth > dd_r because when new
2742 // control flow is constructed by a loop opt, a node
2743 // and its dominator can end up at the same dom_depth
2744 assert(dom_depth(useuse) >= dd_r , "");
2745 set_idom(useuse, r, dom_depth(useuse));
2746 }
2747 }
2748 }
2749 l -= uses_found; // we deleted 1 or more copies of this edge
2750 }
2751
2752 assert(use->is_Proj(), "loop exit should be projection");
2753 // replace_node_and_forward_ctrl() below moves all nodes that are:
2754 // - control dependent on the loop exit or
2755 // - have control set to the loop exit
2756 // below the post-loop merge point.
2757 // replace_node_and_forward_ctrl() takes a dead control as first input.
2758 // To make it possible to use it, the loop exit projection is cloned and becomes the
2759 // new exit projection. The initial one becomes dead and is "replaced" by the region.
2760 Node* use_clone = use->clone();
2761 register_control(use_clone, use_loop, idom(use), dom_depth(use));
2762 // Now finish up 'r'
2763 r->set_req(1, newuse);
2764 r->set_req(2, use_clone);
2765 _igvn.register_new_node_with_optimizer(r);
2766 set_loop(r, use_loop);
2767 set_idom(r, (side_by_side_idom == nullptr) ? newuse->in(0) : side_by_side_idom, dd_r);
2768 replace_node_and_forward_ctrl(use, r);
2769 // Map the (cloned) old use to the new merge point
2770 old_new.map(use_clone->_idx, r);
2771 } // End of if a loop-exit test
2772 }
2773 }
2774 }
2775
2776 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
2777 IdealLoopTree* parent, bool partial) {
2778 for(uint i = 0; i < body.size(); i++ ) {
2779 Node *old = body.at(i);
2780 Node *nnn = old_new[old->_idx];
2781 // Fix CFG/Loop controlling the new node
2782 if (has_ctrl(old)) {
2783 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2784 } else {
2785 set_loop(nnn, parent);
2786 if (old->outcnt() > 0) {
2787 Node* dom = idom(old);
2788 if (old_new[dom->_idx] != nullptr) {
2789 dom = old_new[dom->_idx];
2790 set_idom(nnn, dom, dd );
2791 }
2792 }
2793 }
2794 // Correct edges to the new node
2795 for (uint j = 0; j < nnn->req(); j++) {
2796 Node *n = nnn->in(j);
2797 if (n != nullptr) {
2798 IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n);
2799 if (loop->is_member(old_in_loop)) {
2800 if (old_new[n->_idx] != nullptr) {
2801 nnn->set_req(j, old_new[n->_idx]);
2802 } else {
2803 assert(!body.contains(n), "");
2804 assert(partial, "node not cloned");
2805 }
2806 }
2807 }
2808 }
2809 _igvn.hash_find_insert(nnn);
2810 }
2811 }
2812
2813 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) {
2814 for (uint i = 0; i < body.size(); i++) {
2815 Node* old = body.at(i);
2816 Node* nnn = old->clone();
2817 old_new.map(old->_idx, nnn);
2818 if (C->do_vector_loop() && cm != nullptr) {
2819 cm->verify_insert_and_clone(old, nnn, cm->clone_idx());
2820 }
2821 _igvn.register_new_node_with_optimizer(nnn);
2822 }
2823 }
2824
2825
2826 //---------------------- stride_of_possible_iv -------------------------------------
2827 // Looks for an iff/bool/comp with one operand of the compare
2828 // being a cycle involving an add and a phi,
2829 // with an optional truncation (left-shift followed by a right-shift)
2830 // of the add. Returns zero if not an iv.
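// For example (sketch): a short-typed iv updated with "s += 2" typically appears as
//   phi -> AddI(phi, 2) -> LShiftI(16) -> RShiftI(16) -> back into the phi
// and for an iff whose compare has that cycle on one side and a loop-invariant
// operand on the other, this returns 2.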
2831 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
2832 Node* trunc1 = nullptr;
2833 Node* trunc2 = nullptr;
2834 const TypeInteger* ttype = nullptr;
2835 if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) {
2836 return 0;
2837 }
2838 BoolNode* bl = iff->in(1)->as_Bool();
2839 Node* cmp = bl->in(1);
2840 if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
2841 return 0;
2842 }
2843 // Must have an invariant operand
2844 if (ctrl_is_member(get_loop(iff), cmp->in(2))) {
2845 return 0;
2846 }
2847 Node* add2 = nullptr;
2848 Node* cmp1 = cmp->in(1);
2849 if (cmp1->is_Phi()) {
2850 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
2851 Node* phi = cmp1;
2852 for (uint i = 1; i < phi->req(); i++) {
2853 Node* in = phi->in(i);
2854 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
2855 &trunc1, &trunc2, &ttype, T_INT);
2856 if (add && add->in(1) == phi) {
2857 add2 = add->in(2);
2858 break;
2859 }
2860 }
2861 } else {
2862 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
2863 Node* addtrunc = cmp1;
2864 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
2865 &trunc1, &trunc2, &ttype, T_INT);
2866 if (add && add->in(1)->is_Phi()) {
2867 Node* phi = add->in(1);
2868 for (uint i = 1; i < phi->req(); i++) {
2869 if (phi->in(i) == addtrunc) {
2870 add2 = add->in(2);
2871 break;
2872 }
2873 }
2874 }
2875 }
2876 if (add2 != nullptr) {
2877 const TypeInt* add2t = _igvn.type(add2)->is_int();
2878 if (add2t->is_con()) {
2879 return add2t->get_con();
2880 }
2881 }
2882 return 0;
2883 }
2884
2885
2886 //---------------------- stay_in_loop -------------------------------------
// Return the (unique) control output node that's in the loop (if it exists).
2888 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
2889 Node* unique = nullptr;
2890 if (!n) return nullptr;
2891 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2892 Node* use = n->fast_out(i);
2893 if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
2894 if (unique != nullptr) {
2895 return nullptr;
2896 }
2897 unique = use;
2898 }
2899 }
2900 return unique;
2901 }
2902
2903 //------------------------------ register_node -------------------------------------
2904 // Utility to register node "n" with PhaseIdealLoop
2905 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) {
2906 _igvn.register_new_node_with_optimizer(n);
2907 loop->_body.push(n);
2908 if (n->is_CFG()) {
2909 set_loop(n, loop);
2910 set_idom(n, pred, ddepth);
2911 } else {
2912 set_ctrl(n, pred);
2913 }
2914 }
2915
2916 //------------------------------ proj_clone -------------------------------------
2917 // Utility to create an if-projection
2918 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
2919 ProjNode* c = p->clone()->as_Proj();
2920 c->set_req(0, iff);
2921 return c;
2922 }
2923
2924 //------------------------------ short_circuit_if -------------------------------------
2925 // Force the iff control output to be the live_proj
2926 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
2927 guarantee(live_proj != nullptr, "null projection");
2928 int proj_con = live_proj->_con;
2929 assert(proj_con == 0 || proj_con == 1, "false or true projection");
2930 Node* con = intcon(proj_con);
2931 if (iff) {
2932 iff->set_req(1, con);
2933 }
2934 return con;
2935 }
2936
2937 //------------------------------ insert_if_before_proj -------------------------------------
2938 // Insert a new if before an if projection (* - new node)
2939 //
2940 // before
2941 // if(test)
2942 // / \
2943 // v v
2944 // other-proj proj (arg)
2945 //
2946 // after
2947 // if(test)
2948 // / \
2949 // / v
2950 // | * proj-clone
2951 // v |
2952 // other-proj v
2953 // * new_if(relop(cmp[IU](left,right)))
2954 // / \
2955 // v v
2956 // * new-proj proj
2957 // (returned)
2958 //
2959 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
2960 IfNode* iff = proj->in(0)->as_If();
2961 IdealLoopTree *loop = get_loop(proj);
2962 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
2963 uint ddepth = dom_depth(proj);
2964
2965 _igvn.rehash_node_delayed(iff);
2966 _igvn.rehash_node_delayed(proj);
2967
2968 proj->set_req(0, nullptr); // temporary disconnect
2969 ProjNode* proj2 = proj_clone(proj, iff);
2970 register_node(proj2, loop, iff, ddepth);
2971
2972 Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
2973 register_node(cmp, loop, proj2, ddepth);
2974
2975 BoolNode* bol = new BoolNode(cmp, relop);
2976 register_node(bol, loop, proj2, ddepth);
2977
2978 int opcode = iff->Opcode();
2979 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
2980 IfNode* new_if = IfNode::make_with_same_profile(iff, proj2, bol);
2981 register_node(new_if, loop, proj2, ddepth);
2982
2983 proj->set_req(0, new_if); // reattach
2984 set_idom(proj, new_if, ddepth);
2985
2986 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
2987 guarantee(new_exit != nullptr, "null exit node");
2988 register_node(new_exit, get_loop(other_proj), new_if, ddepth);
2989
2990 return new_exit;
2991 }
2992
2993 //------------------------------ insert_region_before_proj -------------------------------------
2994 // Insert a region before an if projection (* - new node)
2995 //
2996 // before
2997 // if(test)
2998 // / |
2999 // v |
3000 // proj v
3001 // other-proj
3002 //
3003 // after
3004 // if(test)
3005 // / |
3006 // v |
3007 // * proj-clone v
3008 // | other-proj
3009 // v
3010 // * new-region
3011 // |
3012 // v
3013 // * dum_if
3014 // / \
3015 // v \
3016 // * dum-proj v
3017 // proj
3018 //
3019 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
3020 IfNode* iff = proj->in(0)->as_If();
3021 IdealLoopTree *loop = get_loop(proj);
3022 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3023 uint ddepth = dom_depth(proj);
3024
3025 _igvn.rehash_node_delayed(iff);
3026 _igvn.rehash_node_delayed(proj);
3027
3028 proj->set_req(0, nullptr); // temporary disconnect
3029 ProjNode* proj2 = proj_clone(proj, iff);
3030 register_node(proj2, loop, iff, ddepth);
3031
3032 RegionNode* reg = new RegionNode(2);
3033 reg->set_req(1, proj2);
3034 register_node(reg, loop, iff, ddepth);
3035
3036 IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt);
3037 register_node(dum_if, loop, reg, ddepth);
3038
3039 proj->set_req(0, dum_if); // reattach
3040 set_idom(proj, dum_if, ddepth);
3041
3042 ProjNode* dum_proj = proj_clone(other_proj, dum_if);
3043 register_node(dum_proj, loop, dum_if, ddepth);
3044
3045 return reg;
3046 }
3047
3048 // Idea
3049 // ----
3050 // Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops
3051 // require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel
3052 // with. Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as the
3053 // new loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we had used the
3054 // unsigned test alone:
3055 //
3056 // Before Partial Peeling:
3057 // Loop:
3058 // <peeled section>
3059 // Split off signed loop exit test
3060 // <-- CUT HERE -->
3061 // Unchanged unsigned loop exit test
3062 // <rest of unpeeled section>
3063 // goto Loop
3064 //
3065 // After Partial Peeling:
3066 // <cloned peeled section>
3067 // Cloned split off signed loop exit test
3068 // Loop:
3069 // Unchanged unsigned loop exit test
3070 // <rest of unpeeled section>
3071 // <peeled section>
3072 // Split off signed loop exit test
3073 // goto Loop
3074 //
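// As an illustrative sketch with hypothetical values (not part of the transformation itself): assume i is the probable
// induction variable, stride > 0 and limit is known to be >= 0. Then the single unsigned exit test
//
//   if (i >=u limit) goto exit;
//
// is split into
//
//   if (i >=  limit) goto exit;   // split off signed exit test, usable as a counted loop exit
//   if (i >=u limit) goto exit;   // original unsigned exit test, kept unchanged
//
// which exits under exactly the same conditions, because i >= limit >= 0 implies i >=u limit (see the Requirements
// section and the proof in insert_cmpi_loop_exit() below).
//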
3075 // Details
3076 // -------
3077 // Before:
3078 // if (i <u limit) Unsigned loop exit condition
3079 // / |
3080 // v v
3081 // exit-proj stay-in-loop-proj
3082 //
3083 // Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
3084 // before the CmpU on the stay-in-loop path and keep both tests:
3085 //
3086 // if (i < limit) Signed loop exit test
3087 // / |
3088 // / if (i <u limit) Unsigned loop exit test
3089 // / / |
3090 // v v v
3091 // exit-region stay-in-loop-proj
3092 //
3093 // Implementation
3094 // --------------
3095 // We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
3096 // loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
3097 // exit tests is preserved, and their loop nesting is correct.
3098 //
3099 // To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit
3100 // test above it and kill the original unsigned loop exit test by setting its condition to a constant
3101 // (i.e. stay-in-loop-const in graph below) such that IGVN can fold it later:
3102 //
3103 // if (stay-in-loop-const) Killed original unsigned loop exit test
3104 // / |
3105 // / v
3106 // / if (i < limit) Split off signed loop exit test
3107 // / / |
3108 // / / v
3109 // / / if (i <u limit) Cloned unsigned loop exit test
3110 // / / / |
3111 // v v v |
3112 // exit-region |
3113 // | |
3114 // dummy-if |
3115 // / | |
3116 // dead | |
3117 // v v
3118 // exit-proj stay-in-loop-proj
3119 //
3120 // Note: The dummy-if is inserted to create a region to merge the loop exits between the original, to-be-killed unsigned
3121 // loop exit test and its exit projection while keeping the exit projection (also see insert_region_before_proj()).
3122 //
3123 // Requirements
3124 // ------------
3125 // Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
3126 // the same as before with only a single unsigned test. This is only possible if certain requirements are met.
3127 // Otherwise, we need to bail out (see comments in the code below).
3128 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
3129 const bool Signed = true;
3130 const bool Unsigned = false;
3131
3132 BoolNode* bol = if_cmpu->in(1)->as_Bool();
3133 if (bol->_test._test != BoolTest::lt) {
3134 return nullptr;
3135 }
3136 CmpNode* cmpu = bol->in(1)->as_Cmp();
3137 assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");
3138
3139 int stride = stride_of_possible_iv(if_cmpu);
3140 if (stride == 0) {
3141 return nullptr;
3142 }
3143
3144 Node* lp_proj = stay_in_loop(if_cmpu, loop);
3145 guarantee(lp_proj != nullptr, "null loop node");
3146
3147 ProjNode* lp_continue = lp_proj->as_Proj();
3148 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
3149 if (!lp_exit->is_IfFalse()) {
3150 // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit).
3151 // We therefore can't add a single exit condition.
3152 return nullptr;
3153 }
3154 // The unsigned loop exit condition is
3155 // !(i <u limit)
3156 // = i >=u limit
3157 //
3158 // First, we note that for any x for which
3159 // 0 <= x <= INT_MAX
3160 // we can convert x to an unsigned int and still get the same guarantee:
3161 // 0 <= (uint) x <= INT_MAX = (uint) INT_MAX
3162 // 0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX (LEMMA)
3163 //
3164 // With that in mind, if
3165 // limit >= 0 (COND)
3166 // then the unsigned loop exit condition
3167 // i >=u limit (ULE)
3168 // is equivalent to
3169 // i < 0 || i >= limit (SLE-full)
3170 // because either i is negative and therefore always greater than INT_MAX when converting to unsigned
3171 // (uint) i >=u INT_MAX >= limit >= 0
3172 // or otherwise
3173 // i >= limit >= 0
3174 // holds due to (LEMMA).
3175 //
3176 // For completeness, a counterexample with limit < 0:
3177 // Assume i = -3 and limit = -2:
3178 // i < 0
3179 // -3 < 0
3180 // is true and thus also "i < 0 || i >= limit". But
3181 // i >=u limit
3182 // -3 >=u -2
3183 // is false.
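  // As an additional illustrative sanity check with hypothetical values (limit = 10):
  //   i = 12: 12 >=u 10 is true,  and (12 < 0 || 12 >= 10) is true
  //   i =  3:  3 >=u 10 is false, and ( 3 < 0 ||  3 >= 10) is false
  //   i = -1: (uint)-1 = 0xFFFFFFFF >=u 10 is true, and (-1 < 0) is true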
3184 Node* limit = cmpu->in(2);
3185 const TypeInt* type_limit = _igvn.type(limit)->is_int();
3186 if (type_limit->_lo < 0) {
3187 return nullptr;
3188 }
3189
3190 // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
3191 // stride < 0:
3192 // i < 0 (SLE = SLE-negative)
3193 // stride > 0:
3194 // i >= limit (SLE = SLE-positive)
3195 // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
3196 //
3197 // Loop:
3198 // <peeled section>
3199 // i >= limit (SLE-positive)
3200 // <-- CUT HERE -->
3201 // i >=u limit (ULE)
3202 // <rest of unpeeled section>
3203 // goto Loop
3204 //
3205 // We exit the loop if:
3206 // (SLE) is true OR (ULE) is true
3207 // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
3208 // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
3209 // (SLE) IMPLIES (ULE)
3210 // This indeed holds when (COND) is given:
3211 // - stride > 0:
3212 // i >= limit // (SLE = SLE-positive)
3213 // i >= limit >= 0 // (COND)
3214 // i >=u limit >= 0 // (LEMMA)
3215 // which is the unsigned loop exit condition (ULE).
3216 // - stride < 0:
3217 // i < 0 // (SLE = SLE-negative)
3218 // (uint) i >u INT_MAX // (NEG) all negative values are greater than INT_MAX when converted to unsigned
3219 // INT_MAX >= limit >= 0 // (COND)
3220 // INT_MAX >=u limit >= 0 // (LEMMA)
3221 // and thus from (NEG) and (LEMMA):
3222 // i >=u limit
3223 // which is the unsigned loop exit condition (ULE).
3224 //
3225 //
3226 // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0):
3227 // <cloned peeled section>
3228 // i >= limit (SLE-positive)
3229 // Loop:
3230 // i >=u limit (ULE)
3231 // <rest of unpeeled section>
3232 // <peeled section>
3233 // i >= limit (SLE-positive)
3234 // goto Loop
3235 Node* rhs_cmpi;
3236 if (stride > 0) {
3237 rhs_cmpi = limit; // For i >= limit
3238 } else {
3239 rhs_cmpi = makecon(TypeInt::ZERO); // For i < 0
3240 }
3241 // Create a new region on the exit path
3242 RegionNode* reg = insert_region_before_proj(lp_exit);
3243 guarantee(reg != nullptr, "null region node");
3244
3245 // Clone the if-cmpu-true-false using a signed compare
3246 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
3247 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue);
3248 reg->add_req(cmpi_exit);
3249
3250 // Clone the if-cmpu-true-false
3251 BoolTest::mask rel_u = bol->_test._test;
3252 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
3253 reg->add_req(cmpu_exit);
3254
3255 // Force original if to stay in loop.
3256 short_circuit_if(if_cmpu, lp_continue);
3257
3258 return cmpi_exit->in(0)->as_If();
3259 }
3260
3261 //------------------------------ remove_cmpi_loop_exit -------------------------------------
3262 // Remove a previously inserted signed compare loop exit.
3263 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
3264 Node* lp_proj = stay_in_loop(if_cmp, loop);
3265 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
3266 stay_in_loop(lp_proj, loop)->is_If() &&
3267 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
3268 Node* con = makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
3269 if_cmp->set_req(1, con);
3270 }
3271
3272 //------------------------------ scheduled_nodelist -------------------------------------
3273 // Create a post order schedule of nodes that are in the
3274 // "member" set. The list is returned in "sched".
3275 // The first node in "sched" is the loop head, followed by
3276 // nodes which have no inputs in the "member" set, and then
3277 // followed by the nodes that have an immediate input dependence
3278 // on a node in "sched".
3279 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {
3280
3281 assert(member.test(loop->_head->_idx), "loop head must be in member set");
3282 VectorSet visited;
3283 Node_Stack nstack(loop->_body.size());
3284
3285 Node* n = loop->_head; // top of stack is cached in "n"
3286 uint idx = 0;
3287 visited.set(n->_idx);
3288
3289 // Initially push all with no inputs from within member set
3290 for(uint i = 0; i < loop->_body.size(); i++ ) {
3291 Node *elt = loop->_body.at(i);
3292 if (member.test(elt->_idx)) {
3293 bool found = false;
3294 for (uint j = 0; j < elt->req(); j++) {
3295 Node* def = elt->in(j);
3296 if (def && member.test(def->_idx) && def != elt) {
3297 found = true;
3298 break;
3299 }
3300 }
3301 if (!found && elt != loop->_head) {
3302 nstack.push(n, idx);
3303 n = elt;
3304 assert(!visited.test(n->_idx), "not seen yet");
3305 visited.set(n->_idx);
3306 }
3307 }
3308 }
3309
3310 // traverse outs that are in the member set
3311 while (true) {
3312 if (idx < n->outcnt()) {
3313 Node* use = n->raw_out(idx);
3314 idx++;
3315 if (!visited.test_set(use->_idx)) {
3316 if (member.test(use->_idx)) {
3317 nstack.push(n, idx);
3318 n = use;
3319 idx = 0;
3320 }
3321 }
3322 } else {
3323 // All outputs processed
3324 sched.push(n);
3325 if (nstack.is_empty()) break;
3326 n = nstack.node();
3327 idx = nstack.index();
3328 nstack.pop();
3329 }
3330 }
3331 }
3332
3333
3334 //------------------------------ has_use_in_set -------------------------------------
3335 // Has a use in the vector set
3336 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
3337 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3338 Node* use = n->fast_out(j);
3339 if (vset.test(use->_idx)) {
3340 return true;
3341 }
3342 }
3343 return false;
3344 }
3345
3346
3347 //------------------------------ has_use_internal_to_set -------------------------------------
3348 // Has a use internal to the vector set (i.e. not in a phi at the loop head)
3349 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
3350 Node* head = loop->_head;
3351 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3352 Node* use = n->fast_out(j);
3353 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
3354 return true;
3355 }
3356 }
3357 return false;
3358 }
3359
3360
3361 //------------------------------ clone_for_use_outside_loop -------------------------------------
3362 // clone "n" for uses that are outside of loop
3363 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
3364 int cloned = 0;
3365 assert(worklist.size() == 0, "should be empty");
3366 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3367 Node* use = n->fast_out(j);
3368 if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
3369 worklist.push(use);
3370 }
3371 }
3372
3373 if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor,
3374 "Too many clones required in clone_for_use_outside_loop in partial peeling")) {
3375 return -1;
3376 }
3377
3378 while( worklist.size() ) {
3379 Node *use = worklist.pop();
3380 if (!has_node(use) || use->in(0) == C->top()) continue;
3381 uint j;
3382 for (j = 0; j < use->req(); j++) {
3383 if (use->in(j) == n) break;
3384 }
3385 assert(j < use->req(), "must be there");
3386
3387 // clone "n" and insert it between the inputs of "n" and the use outside the loop
3388 Node* n_clone = n->clone();
3389 _igvn.replace_input_of(use, j, n_clone);
3390 cloned++;
3391 Node* use_c;
3392 if (!use->is_Phi()) {
3393 use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
3394 } else {
3395 // Use in a phi is considered a use in the associated predecessor block
3396 use_c = use->in(0)->in(j);
3397 }
3398 set_ctrl(n_clone, use_c);
3399 assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
3400 get_loop(use_c)->_body.push(n_clone);
3401 _igvn.register_new_node_with_optimizer(n_clone);
3402 #ifndef PRODUCT
3403 if (TracePartialPeeling) {
3404 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
3405 }
3406 #endif
3407 }
3408 return cloned;
3409 }
3410
3411
3412 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
3413 // clone "n" for special uses that are in the not_peeled region.
3414 // If these def-uses occur in separate blocks, the code generator
3415 // marks the method as not compilable. For example, if a "BoolNode"
3416 // is in a different basic block than the "IfNode" that uses it, then
3417 // the compilation is aborted in the code generator.
3418 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
3419 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
3420 if (n->is_Phi() || n->is_Load()) {
3421 return;
3422 }
3423 assert(worklist.size() == 0, "should be empty");
3424 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3425 Node* use = n->fast_out(j);
3426 if ( not_peel.test(use->_idx) &&
3427 (use->is_If() || use->is_CMove() || use->is_Bool() || use->is_OpaqueInitializedAssertionPredicate()) &&
3428 use->in(1) == n) {
3429 worklist.push(use);
3430 }
3431 }
3432 if (worklist.size() > 0) {
3433 // clone "n" and insert it between inputs of "n" and the use
3434 Node* n_clone = n->clone();
3435 loop->_body.push(n_clone);
3436 _igvn.register_new_node_with_optimizer(n_clone);
3437 set_ctrl(n_clone, get_ctrl(n));
3438 sink_list.push(n_clone);
3439 not_peel.set(n_clone->_idx);
3440 #ifndef PRODUCT
3441 if (TracePartialPeeling) {
3442 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
3443 }
3444 #endif
3445 while( worklist.size() ) {
3446 Node *use = worklist.pop();
3447 _igvn.rehash_node_delayed(use);
3448 for (uint j = 1; j < use->req(); j++) {
3449 if (use->in(j) == n) {
3450 use->set_req(j, n_clone);
3451 }
3452 }
3453 }
3454 }
3455 }
3456
3457
3458 //------------------------------ insert_phi_for_loop -------------------------------------
3459 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
3460 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
3461 Node *phi = PhiNode::make(lp, back_edge_val);
3462 phi->set_req(LoopNode::EntryControl, lp_entry_val);
3463 // Use existing phi if it already exists
3464 Node *hit = _igvn.hash_find_insert(phi);
3465 if( hit == nullptr ) {
3466 _igvn.register_new_node_with_optimizer(phi);
3467 set_ctrl(phi, lp);
3468 } else {
3469 // Remove the new phi from the graph and use the hit
3470 _igvn.remove_dead_node(phi);
3471 phi = hit;
3472 }
3473 _igvn.replace_input_of(use, idx, phi);
3474 }
3475
3476 #ifdef ASSERT
3477 //------------------------------ is_valid_loop_partition -------------------------------------
3478 // Validate the loop partition sets: peel and not_peel
3479 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
3480 VectorSet& not_peel ) {
3481 uint i;
3482 // Check that peel_list entries are in the peel set
3483 for (i = 0; i < peel_list.size(); i++) {
3484 if (!peel.test(peel_list.at(i)->_idx)) {
3485 return false;
3486 }
3487 }
3488 // Check that all loop members are in either the peel set or the not_peel set
3489 for (i = 0; i < loop->_body.size(); i++ ) {
3490 Node *def = loop->_body.at(i);
3491 uint di = def->_idx;
3492 // Check that peel set elements are in peel_list
3493 if (peel.test(di)) {
3494 if (not_peel.test(di)) {
3495 return false;
3496 }
3497 // Must be in peel_list also
3498 bool found = false;
3499 for (uint j = 0; j < peel_list.size(); j++) {
3500 if (peel_list.at(j)->_idx == di) {
3501 found = true;
3502 break;
3503 }
3504 }
3505 if (!found) {
3506 return false;
3507 }
3508 } else if (not_peel.test(di)) {
3509 if (peel.test(di)) {
3510 return false;
3511 }
3512 } else {
3513 return false;
3514 }
3515 }
3516 return true;
3517 }
3518
3519 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
3520 // Ensure a use outside of loop is of the right form
3521 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
3522 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3523 return (use->is_Phi() &&
3524 use_c->is_Region() && use_c->req() == 3 &&
3525 (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
3526 use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
3527 use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
3528 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
3529 }
3530
3531 //------------------------------ is_valid_clone_loop_form -------------------------------------
3532 // Ensure that all uses outside of loop are of the right form
3533 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
3534 uint orig_exit_idx, uint clone_exit_idx) {
3535 uint len = peel_list.size();
3536 for (uint i = 0; i < len; i++) {
3537 Node *def = peel_list.at(i);
3538
3539 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3540 Node *use = def->fast_out(j);
3541 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3542 if (!loop->is_member(get_loop(use_c))) {
3543 // use is not in the loop, check for correct structure
3544 if (use->in(0) == def) {
3545 // Okay
3546 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
3547 return false;
3548 }
3549 }
3550 }
3551 }
3552 return true;
3553 }
3554 #endif
3555
3556 //------------------------------ partial_peel -------------------------------------
3557 // Partially peel (aka loop rotation) the top portion of a loop (called
3558 // the peel section below) by cloning it and placing one copy just before
3559 // the new loop head and the other copy at the bottom of the new loop.
3560 //
3561 // before after where it came from
3562 //
3563 // stmt1 stmt1
3564 // loop: stmt2 clone
3565 // stmt2 if condA goto exitA clone
3566 // if condA goto exitA new_loop: new
3567 // stmt3 stmt3 clone
3568 // if !condB goto loop if condB goto exitB clone
3569 // exitB: stmt2 orig
3570 // stmt4 if !condA goto new_loop orig
3571 // exitA: goto exitA
3572 // exitB:
3573 // stmt4
3574 // exitA:
3575 //
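// As a rough source-level illustration (hypothetical, reusing the names from the sketch above), the "before" shape
// corresponds to
//
//   for (;;) {
//     stmt2;
//     if (condA) break;   // to exitA
//     stmt3;
//     if (condB) break;   // to exitB, then stmt4
//   }
//
// and partial peeling clones "stmt2; if (condA) ..." in front of the loop, so that the rotated loop starts at stmt3
// and ends with the condA test, i.e. the loop becomes bottom-tested as shown in the "after" column.
//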
3576 // Step 1: find the cut point: an exit test on a probable
3577 // induction variable.
3578 // Step 2: schedule (with cloning) operations in the peel
3579 // section that can be executed after the cut into
3580 // the section that is not peeled. This may need
3581 // to clone operations into exit blocks. For
3582 // instance, a reference to A[i] in the not-peel
3583 // section and a reference to B[i] in an exit block
3584 // may cause a left-shift of i by 2 to be placed
3585 // in the peel block. This step will clone the left
3586 // shift into the exit block and sink the left shift
3587 // from the peel to the not-peel section.
3588 // Step 3: clone the loop, retarget the control, and insert
3589 // phis for values that are live across the new loop
3590 // head. This is very dependent on the graph structure
3591 // from clone_loop. It creates region nodes for
3592 // exit control and associated phi nodes for values
3593 // that flow out of the loop through that exit. The region
3594 // node is dominated by the clone's control projection.
3595 // So the clone's peel section is placed before the
3596 // new loop head, and the clone's not-peel section
3597 // forms the top part of the new loop. The original
3598 // peel section forms the tail of the new loop.
3599 // Step 4: update the dominator tree and recompute the
3600 // dominator depth.
3601 //
3602 // orig
3603 //
3604 // stmt1
3605 // |
3606 // v
3607 // predicates
3608 // |
3609 // v
3610 // loop<----+
3611 // | |
3612 // stmt2 |
3613 // | |
3614 // v |
3615 // ifA |
3616 // / | |
3617 // v v |
3618 // false true ^ <-- last_peel
3619 // / | |
3620 // / ===|==cut |
3621 // / stmt3 | <-- first_not_peel
3622 // / | |
3623 // | v |
3624 // v ifB |
3625 // exitA: / \ |
3626 // / \ |
3627 // v v |
3628 // false true |
3629 // / \ |
3630 // / ----+
3631 // |
3632 // v
3633 // exitB:
3634 // stmt4
3635 //
3636 //
3637 // after clone loop
3638 //
3639 // stmt1
3640 // |
3641 // v
3642 // predicates
3643 // / \
3644 // clone / \ orig
3645 // / \
3646 // / \
3647 // v v
3648 // +---->loop loop<----+
3649 // | | | |
3650 // | stmt2 stmt2 |
3651 // | | | |
3652 // | v v |
3653 // | ifA ifA |
3654 // | | \ / | |
3655 // | v v v v |
3656 // ^ true false false true ^ <-- last_peel
3657 // | | ^ \ / | |
3658 // | cut==|== \ \ / ===|==cut |
3659 // | stmt3 \ \ / stmt3 | <-- first_not_peel
3660 // | | dom | | | |
3661 // | v \ 1v v2 v |
3662 // | ifB regionA ifB |
3663 // | / \ | / \ |
3664 // | / \ v / \ |
3665 // | v v exitA: v v |
3666 // | true false false true |
3667 // | / ^ \ / \ |
3668 // +---- \ \ / ----+
3669 // dom \ /
3670 // \ 1v v2
3671 // regionB
3672 // |
3673 // v
3674 // exitB:
3675 // stmt4
3676 //
3677 //
3678 // after partial peel
3679 //
3680 // stmt1
3681 // |
3682 // v
3683 // predicates
3684 // /
3685 // clone / orig
3686 // / TOP
3687 // / \
3688 // v v
3689 // TOP->loop loop----+
3690 // | | |
3691 // stmt2 stmt2 |
3692 // | | |
3693 // v v |
3694 // ifA ifA |
3695 // | \ / | |
3696 // v v v v |
3697 // true false false true | <-- last_peel
3698 // | ^ \ / +------|---+
3699 // +->newloop \ \ / === ==cut | |
3700 // | stmt3 \ \ / TOP | |
3701 // | | dom | | stmt3 | | <-- first_not_peel
3702 // | v \ 1v v2 v | |
3703 // | ifB regionA ifB ^ v
3704 // | / \ | / \ | |
3705 // | / \ v / \ | |
3706 // | v v exitA: v v | |
3707 // | true false false true | |
3708 // | / ^ \ / \ | |
3709 // | | \ \ / v | |
3710 // | | dom \ / TOP | |
3711 // | | \ 1v v2 | |
3712 // ^ v regionB | |
3713 // | | | | |
3714 // | | v ^ v
3715 // | | exitB: | |
3716 // | | stmt4 | |
3717 // | +------------>-----------------+ |
3718 // | |
3719 // +-----------------<---------------------+
3720 //
3721 //
3722 // final graph
3723 //
3724 // stmt1
3725 // |
3726 // v
3727 // predicates
3728 // |
3729 // v
3730 // stmt2 clone
3731 // |
3732 // v
3733 // ........> ifA clone
3734 // : / |
3735 // dom / |
3736 // : v v
3737 // : false true
3738 // : | |
3739 // : | v
3740 // : | newloop<-----+
3741 // : | | |
3742 // : | stmt3 clone |
3743 // : | | |
3744 // : | v |
3745 // : | ifB |
3746 // : | / \ |
3747 // : | v v |
3748 // : | false true |
3749 // : | | | |
3750 // : | v stmt2 |
3751 // : | exitB: | |
3752 // : | stmt4 v |
3753 // : | ifA orig |
3754 // : | / \ |
3755 // : | / \ |
3756 // : | v v |
3757 // : | false true |
3758 // : | / \ |
3759 // : v v -----+
3760 // RegionA
3761 // |
3762 // v
3763 // exitA
3764 //
3765 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
3766
3767 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3768 if (!loop->_head->is_Loop()) {
3769 return false;
3770 }
3771 LoopNode *head = loop->_head->as_Loop();
3772
3773 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3774 return false;
3775 }
3776
3777 // Check for complex exit control
3778 for (uint ii = 0; ii < loop->_body.size(); ii++) {
3779 Node *n = loop->_body.at(ii);
3780 int opc = n->Opcode();
3781 if (n->is_Call() ||
3782 opc == Op_Catch ||
3783 opc == Op_CatchProj ||
3784 opc == Op_Jump ||
3785 opc == Op_JumpProj) {
3786 #ifndef PRODUCT
3787 if (TracePartialPeeling) {
3788 tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3789 }
3790 #endif
3791 return false;
3792 }
3793 }
3794
3795 int dd = dom_depth(head);
3796
3797 // Step 1: find cut point
3798
3799 // Walk up dominators to loop head looking for first loop exit
3800 // which is executed on every path thru loop.
3801 IfNode *peel_if = nullptr;
3802 IfNode *peel_if_cmpu = nullptr;
3803
3804 Node *iff = loop->tail();
3805 while (iff != head) {
3806 if (iff->is_If()) {
3807 Node *ctrl = get_ctrl(iff->in(1));
3808 if (ctrl->is_top()) return false; // Dead test on live IF.
3809 // If loop-varying exit-test, check for induction variable
3810 if (loop->is_member(get_loop(ctrl)) &&
3811 loop->is_loop_exit(iff) &&
3812 is_possible_iv_test(iff)) {
3813 Node* cmp = iff->in(1)->in(1);
3814 if (cmp->Opcode() == Op_CmpI) {
3815 peel_if = iff->as_If();
3816 } else {
3817 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
3818 peel_if_cmpu = iff->as_If();
3819 }
3820 }
3821 }
3822 iff = idom(iff);
3823 }
3824
3825 // Prefer signed compare over unsigned compare.
3826 IfNode* new_peel_if = nullptr;
3827 if (peel_if == nullptr) {
3828 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) {
3829 return false; // No peel point found
3830 }
3831 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
3832 if (new_peel_if == nullptr) {
3833 return false; // No peel point found
3834 }
3835 peel_if = new_peel_if;
3836 }
3837 Node* last_peel = stay_in_loop(peel_if, loop);
3838 Node* first_not_peeled = stay_in_loop(last_peel, loop);
3839 if (first_not_peeled == nullptr || first_not_peeled == head) {
3840 return false;
3841 }
3842
3843 #ifndef PRODUCT
3844 if (TraceLoopOpts) {
3845 tty->print("PartialPeel ");
3846 loop->dump_head();
3847 }
3848
3849 if (TracePartialPeeling) {
3850 tty->print_cr("before partial peel one iteration");
3851 Node_List wl;
3852 Node* t = head->in(2);
3853 while (true) {
3854 wl.push(t);
3855 if (t == head) break;
3856 t = idom(t);
3857 }
3858 while (wl.size() > 0) {
3859 Node* tt = wl.pop();
3860 tt->dump();
3861 if (tt == last_peel) tty->print_cr("-- cut --");
3862 }
3863 }
3864 #endif
3865
3866 C->print_method(PHASE_BEFORE_PARTIAL_PEELING, 4, head);
3867
3868 VectorSet peel;
3869 VectorSet not_peel;
3870 Node_List peel_list;
3871 Node_List worklist;
3872 Node_List sink_list;
3873
3874 uint estimate = loop->est_loop_clone_sz(1);
3875 if (exceeding_node_budget(estimate)) {
3876 return false;
3877 }
3878
3879 // The cfg nodes to peel are those that are executable from
3880 // the head through last_peel.
3881 assert(worklist.size() == 0, "should be empty");
3882 worklist.push(head);
3883 peel.set(head->_idx);
3884 while (worklist.size() > 0) {
3885 Node *n = worklist.pop();
3886 if (n != last_peel) {
3887 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3888 Node* use = n->fast_out(j);
3889 if (use->is_CFG() &&
3890 loop->is_member(get_loop(use)) &&
3891 !peel.test_set(use->_idx)) {
3892 worklist.push(use);
3893 }
3894 }
3895 }
3896 }
3897
3898 // The non-cfg nodes to peel are those that are control
3899 // dependent on the cfg nodes.
3900 for (uint i = 0; i < loop->_body.size(); i++) {
3901 Node *n = loop->_body.at(i);
3902 Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
3903 if (peel.test(n_c->_idx)) {
3904 peel.set(n->_idx);
3905 } else {
3906 not_peel.set(n->_idx);
3907 }
3908 }
3909
3910 // Step 2: move operations from the peeled section down into the
3911 // not-peeled section
3912
3913 // Get a post order schedule of nodes in the peel region
3914 // The result is returned in the right-most operand (peel_list).
3915 scheduled_nodelist(loop, peel, peel_list);
3916
3917 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3918
3919 // For a later check for too many new phis
3920 uint old_phi_cnt = 0;
3921 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
3922 Node* use = head->fast_out(j);
3923 if (use->is_Phi()) old_phi_cnt++;
3924 }
3925
3926 #ifndef PRODUCT
3927 if (TracePartialPeeling) {
3928 tty->print_cr("\npeeled list");
3929 }
3930 #endif
3931
3932 // Evacuate nodes in peel region into the not_peeled region if possible
3933 bool too_many_clones = false;
3934 uint new_phi_cnt = 0;
3935 uint cloned_for_outside_use = 0;
3936 for (uint i = 0; i < peel_list.size();) {
3937 Node* n = peel_list.at(i);
3938 #ifndef PRODUCT
3939 if (TracePartialPeeling) n->dump();
3940 #endif
3941 bool incr = true;
3942 if (!n->is_CFG()) {
3943 if (has_use_in_set(n, not_peel)) {
3944 // If not used internal to the peeled region,
3945 // move "n" from peeled to not_peeled region.
3946 if (!has_use_internal_to_set(n, peel, loop)) {
3947 // if not pinned and not a load (which may be anti-dependent on a store)
3948 // and not a CMove (Matcher expects only bool->cmove).
3949 if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) {
3950 int new_clones = clone_for_use_outside_loop(loop, n, worklist);
3951 if (C->failing()) return false;
3952 if (new_clones == -1) {
3953 too_many_clones = true;
3954 break;
3955 }
3956 cloned_for_outside_use += new_clones;
3957 sink_list.push(n);
3958 peel.remove(n->_idx);
3959 not_peel.set(n->_idx);
3960 peel_list.remove(i);
3961 incr = false;
3962 #ifndef PRODUCT
3963 if (TracePartialPeeling) {
3964 tty->print_cr("sink to not_peeled region: %d newbb: %d",
3965 n->_idx, get_ctrl(n)->_idx);
3966 }
3967 #endif
3968 }
3969 } else {
3970 // Otherwise check for special def-use cases that span
3971 // the peel/not_peel boundary such as bool->if
3972 clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
3973 new_phi_cnt++;
3974 }
3975 }
3976 }
3977 if (incr) i++;
3978 }
3979
3980 estimate += cloned_for_outside_use + new_phi_cnt;
3981 bool exceed_node_budget = !may_require_nodes(estimate);
3982 bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;
3983
3984 if (too_many_clones || exceed_node_budget || exceed_phi_limit) {
3985 #ifndef PRODUCT
3986 if (TracePartialPeeling && exceed_phi_limit) {
3987 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
3988 new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F');
3989 }
3990 #endif
3991 if (new_peel_if != nullptr) {
3992 remove_cmpi_loop_exit(new_peel_if, loop);
3993 }
3994 // Inhibit more partial peeling on this loop
3995 assert(!head->is_partial_peel_loop(), "not partial peeled");
3996 head->mark_partial_peel_failed();
3997 if (cloned_for_outside_use > 0) {
3998 // Terminate this round of loop opts because
3999 // the graph outside this loop was changed.
4000 C->set_major_progress();
4001 return true;
4002 }
4003 return false;
4004 }
4005
4006 // Step 3: clone loop, retarget control, and insert new phis
4007
4008 // Create new loop head for new phis and to hang
4009 // the nodes being moved (sunk) from the peel region.
4010 LoopNode* new_head = new LoopNode(last_peel, last_peel);
4011 new_head->set_unswitch_count(head->unswitch_count()); // Preserve
4012 _igvn.register_new_node_with_optimizer(new_head);
4013 assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
4014 _igvn.replace_input_of(first_not_peeled, 0, new_head);
4015 set_loop(new_head, loop);
4016 loop->_body.push(new_head);
4017 not_peel.set(new_head->_idx);
4018 set_idom(new_head, last_peel, dom_depth(first_not_peeled));
4019 set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
4020
4021 while (sink_list.size() > 0) {
4022 Node* n = sink_list.pop();
4023 set_ctrl(n, new_head);
4024 }
4025
4026 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4027
4028 clone_loop(loop, old_new, dd, IgnoreStripMined);
4029
4030 const uint clone_exit_idx = 1;
4031 const uint orig_exit_idx = 2;
4032 assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");
4033
4034 Node* head_clone = old_new[head->_idx];
4035 LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
4036 Node* orig_tail_clone = head_clone->in(2);
4037
4038 // Add phi if "def" node is in peel set and "use" is not
4039
4040 for (uint i = 0; i < peel_list.size(); i++) {
4041 Node *def = peel_list.at(i);
4042 if (!def->is_CFG()) {
4043 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
4044 Node *use = def->fast_out(j);
4045 if (has_node(use) && use->in(0) != C->top() &&
4046 (!peel.test(use->_idx) ||
4047 (use->is_Phi() && use->in(0) == head)) ) {
4048 worklist.push(use);
4049 }
4050 }
4051 while( worklist.size() ) {
4052 Node *use = worklist.pop();
4053 for (uint j = 1; j < use->req(); j++) {
4054 Node* n = use->in(j);
4055 if (n == def) {
4056
4057 // "def" is in peel set, "use" is not in peel set
4058 // or "use" is in the entry boundary (a phi) of the peel set
4059
4060 Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
4061
4062 if ( loop->is_member(get_loop( use_c )) ) {
4063 // use is in loop
4064 if (old_new[use->_idx] != nullptr) { // null for dead code
4065 Node* use_clone = old_new[use->_idx];
4066 _igvn.replace_input_of(use, j, C->top());
4067 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
4068 }
4069 } else {
4070 assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
4071 // use is not in the loop, check if the live range includes the cut
4072 Node* lp_if = use_c->in(orig_exit_idx)->in(0);
4073 if (not_peel.test(lp_if->_idx)) {
4074 assert(j == orig_exit_idx, "use from original loop");
4075 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
4076 }
4077 }
4078 }
4079 }
4080 }
4081 }
4082 }
4083
4084 // Step 3b: retarget control
4085
4086 // Redirect control to the new loop head if a cloned node in
4087 // the not_peeled region has control that points into the peeled region.
4088 // This is necessary because the cloned peeled region will be outside
4089 // the loop.
4090 // from to
4091 // cloned-peeled <---+
4092 // new_head_clone: | <--+
4093 // cloned-not_peeled in(0) in(0)
4094 // orig-peeled
4095
4096 for (uint i = 0; i < loop->_body.size(); i++) {
4097 Node *n = loop->_body.at(i);
4098 if (!n->is_CFG() && n->in(0) != nullptr &&
4099 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
4100 Node* n_clone = old_new[n->_idx];
4101 if (n_clone->depends_only_on_test()) {
4102 // If this node depends_only_on_test, it would be rewired to the loop head, which is not the
4103 // correct test.
4104 Node* pinned_clone = n_clone->pin_node_under_control();
4105 if (pinned_clone != nullptr) {
4106 register_new_node_with_ctrl_of(pinned_clone, n_clone);
4107 old_new.map(n->_idx, pinned_clone);
4108 _igvn.replace_node(n_clone, pinned_clone);
4109 n_clone = pinned_clone;
4110 }
4111 }
4112 _igvn.replace_input_of(n_clone, 0, new_head_clone);
4113 }
4114 }
4115
4116 // Backedge of the surviving new_head (the clone) is original last_peel
4117 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
4118
4119 // Cut first node in original not_peel set
4120 _igvn.rehash_node_delayed(new_head); // Multiple edge updates:
4121 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of
4122 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls
4123
4124 // Copy head_clone back-branch info to original head
4125 // and remove original head's loop entry and
4126 // clone head's back-branch
4127 _igvn.rehash_node_delayed(head); // Multiple edge updates
4128 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
4129 head->set_req(LoopNode::LoopBackControl, C->top());
4130 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
4131
4132 // Similarly modify the phis
4133 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
4134 Node* use = head->fast_out(k);
4135 if (use->is_Phi() && use->outcnt() > 0) {
4136 Node* use_clone = old_new[use->_idx];
4137 _igvn.rehash_node_delayed(use); // Multiple edge updates
4138 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
4139 use->set_req(LoopNode::LoopBackControl, C->top());
4140 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
4141 }
4142 }
4143
4144 // Step 4: update dominator tree and dominator depth
4145
4146 set_idom(head, orig_tail_clone, dd);
4147 recompute_dom_depth();
4148
4149 // Inhibit more partial peeling on this loop
4150 new_head_clone->set_partial_peel_loop();
4151 C->set_major_progress();
4152 loop->record_for_igvn();
4153
4154 #ifndef PRODUCT
4155 if (TracePartialPeeling) {
4156 tty->print_cr("\nafter partial peel one iteration");
4157 Node_List wl;
4158 Node* t = last_peel;
4159 while (true) {
4160 wl.push(t);
4161 if (t == head_clone) break;
4162 t = idom(t);
4163 }
4164 while (wl.size() > 0) {
4165 Node* tt = wl.pop();
4166 if (tt == head) tty->print_cr("orig head");
4167 else if (tt == new_head_clone) tty->print_cr("new head");
4168 else if (tt == head_clone) tty->print_cr("clone head");
4169 tt->dump();
4170 }
4171 }
4172 #endif
4173
4174 C->print_method(PHASE_AFTER_PARTIAL_PEELING, 4, new_head_clone);
4175
4176 return true;
4177 }
4178
4179 #ifdef ASSERT
4180
4181 // Moves Template Assertion Predicates to a target loop by cloning and killing the old ones. The target loop is the
4182 // original, not-cloned loop. This is currently only used with StressDuplicateBackedge which is a develop flag only and
4183 // false in product builds. We can therefore guard it with an ifdef. More details can be found at the use-site.
4184 class MoveAssertionPredicatesVisitor : public PredicateVisitor {
4185 ClonePredicateToTargetLoop _clone_predicate_to_loop;
4186 PhaseIdealLoop* const _phase;
4187
4188 public:
4189 MoveAssertionPredicatesVisitor(LoopNode* target_loop_head,
4190 const NodeInSingleLoopBody &node_in_loop_body,
4191 PhaseIdealLoop* phase)
4192 : _clone_predicate_to_loop(target_loop_head, node_in_loop_body, phase),
4193 _phase(phase) {
4194 }
4195 NONCOPYABLE(MoveAssertionPredicatesVisitor);
4196
4197 using PredicateVisitor::visit;
4198
4199 void visit(const TemplateAssertionPredicate& template_assertion_predicate) override {
4200 _clone_predicate_to_loop.clone_template_assertion_predicate(template_assertion_predicate);
4201 template_assertion_predicate.kill(_phase->igvn());
4202 }
4203 };
4204 #endif // ASSERT
4205
4206 // Transform:
4207 //
4208 // loop<-----------------+
4209 // | |
4210 // stmt1 stmt2 .. stmtn |
4211 // | | | |
4212 // \ | / |
4213 // v v v |
4214 // region |
4215 // | |
4216 // shared_stmt |
4217 // | |
4218 // v |
4219 // if |
4220 // / \ |
4221 // | -----------+
4222 // v
4223 //
4224 // into:
4225 //
4226 // loop<-------------------+
4227 // | |
4228 // v |
4229 // +->loop |
4230 // | | |
4231 // | stmt1 stmt2 .. stmtn |
4232 // | | | | |
4233 // | | \ / |
4234 // | | v v |
4235 // | | region1 |
4236 // | | | |
4237 // | shared_stmt shared_stmt |
4238 // | | | |
4239 // | v v |
4240 // | if if |
4241 // | /\ / \ |
4242 // +-- | | -------+
4243 // \ /
4244 // v v
4245 // region2
4246 //
4247 // (region2 is shown to merge mirrored projections of the loop exit
4248 // ifs to make the diagram clearer but they really merge the same
4249 // projection)
4250 //
4251 // Conditions for this transformation to trigger:
4252 // - the path through stmt1 is frequent enough
4253 // - the inner loop will be turned into a counted loop after transformation
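// As a rough source-level illustration (hypothetical, not taken from an actual test): an iv update that is merged
// from several paths, e.g.
//
//   for (;;) {
//     if (rare) { i += 2; }    // stmt1 .. stmtn, merged at "region"
//     else      { i += 1; }
//     if (i >= limit) break;   // "shared_stmt" and the exit "if"
//   }
//
// is not recognized as a counted loop because the value tested and fed back over the backedge is a Phi at "region"
// rather than a single increment. If the "i += 1" path is frequent enough, duplicating the shared tail gives that
// path its own exit test and backedge, so the inner loop ends up with a plain "i += 1" increment and can then be
// converted into a counted loop.
//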
4254 bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) {
4255 if (!DuplicateBackedge) {
4256 return false;
4257 }
4258 assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only");
4259 if (!loop->_head->is_Loop()) {
4260 return false;
4261 }
4262
4263 uint estimate = loop->est_loop_clone_sz(1);
4264 if (exceeding_node_budget(estimate)) {
4265 return false;
4266 }
4267
4268 LoopNode *head = loop->_head->as_Loop();
4269
4270 Node* region = nullptr;
4271 IfNode* exit_test = nullptr;
4272 uint inner;
4273 float f;
4274 #ifdef ASSERT
4275 if (StressDuplicateBackedge) {
4276 if (head->is_strip_mined()) {
4277 return false;
4278 }
4279 Node* c = head->in(LoopNode::LoopBackControl);
4280
4281 while (c != head) {
4282 if (c->is_Region()) {
4283 region = c;
4284 }
4285 c = idom(c);
4286 }
4287
4288 if (region == nullptr) {
4289 return false;
4290 }
4291
4292 inner = 1;
4293 } else
4294 #endif //ASSERT
4295 {
4296 // Is the shape of the loop that of a counted loop...
4297 Node* back_control = loop_exit_control(head, loop);
4298 if (back_control == nullptr) {
4299 return false;
4300 }
4301
4302 BoolTest::mask bt = BoolTest::illegal;
4303 float cl_prob = 0;
4304 Node* incr = nullptr;
4305 Node* limit = nullptr;
4306 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
4307 if (cmp == nullptr || cmp->Opcode() != Op_CmpI) {
4308 return false;
4309 }
4310
4311 // With an extra phi for the candidate iv?
4312 // Or the region node is the loop head
4313 if (!incr->is_Phi() || incr->in(0) == head) {
4314 return false;
4315 }
4316
4317 PathFrequency pf(head, this);
4318 region = incr->in(0);
4319
4320 // Go over all paths for the extra phi's region and see if that
4321 // path is frequent enough and would match the expected iv shape
4322 // if the extra phi is removed
4323 inner = 0;
4324 for (uint i = 1; i < incr->req(); ++i) {
4325 Node* in = incr->in(i);
4326 Node* trunc1 = nullptr;
4327 Node* trunc2 = nullptr;
4328 const TypeInteger* iv_trunc_t = nullptr;
4329 Node* orig_in = in;
4330 if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) {
4331 continue;
4332 }
4333 assert(in->Opcode() == Op_AddI, "wrong increment code");
4334 Node* xphi = nullptr;
4335 Node* stride = loop_iv_stride(in, xphi);
4336
4337 if (stride == nullptr) {
4338 continue;
4339 }
4340
4341 PhiNode* phi = loop_iv_phi(xphi, nullptr, head);
4342 if (phi == nullptr ||
4343 (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
4344 (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
4345 return false;
4346 }
4347
4348 f = pf.to(region->in(i));
4349 if (f > 0.5) {
4350 inner = i;
4351 break;
4352 }
4353 }
4354
4355 if (inner == 0) {
4356 return false;
4357 }
4358
4359 exit_test = back_control->in(0)->as_If();
4360 }
4361
4362 if (idom(region)->is_Catch()) {
4363 return false;
4364 }
4365
4366 // Collect all control nodes that need to be cloned (shared_stmt in the diagram)
4367 Unique_Node_List wq;
4368 wq.push(head->in(LoopNode::LoopBackControl));
4369 for (uint i = 0; i < wq.size(); i++) {
4370 Node* c = wq.at(i);
4371 assert(get_loop(c) == loop, "not in the right loop?");
4372 if (c->is_Region()) {
4373 if (c != region) {
4374 for (uint j = 1; j < c->req(); ++j) {
4375 wq.push(c->in(j));
4376 }
4377 }
4378 } else {
4379 wq.push(c->in(0));
4380 }
4381 assert(!is_strict_dominator(c, region), "shouldn't go above region");
4382 }
4383
4384 Node* region_dom = idom(region);
4385
4386 // Can't do the transformation if this would cause a membar pair to
4387 // be split
4388 for (uint i = 0; i < wq.size(); i++) {
4389 Node* c = wq.at(i);
4390 if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) {
4391 assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair");
4392 if (!wq.member(c->as_MemBar()->leading_membar())) {
4393 return false;
4394 }
4395 }
4396 }
4397 C->print_method(PHASE_BEFORE_DUPLICATE_LOOP_BACKEDGE, 4, head);
4398
4399 // Collect data nodes that need to be cloned as well
4400 int dd = dom_depth(head);
4401
4402 for (uint i = 0; i < loop->_body.size(); ++i) {
4403 Node* n = loop->_body.at(i);
4404 if (has_ctrl(n)) {
4405 Node* c = get_ctrl(n);
4406 if (wq.member(c)) {
4407 wq.push(n);
4408 }
4409 } else {
4410 set_idom(n, idom(n), dd);
4411 }
4412 }
4413
4414 // clone shared_stmt
4415 clone_loop_body(wq, old_new, nullptr);
4416
4417 Node* region_clone = old_new[region->_idx];
4418 region_clone->set_req(inner, C->top());
4419 set_idom(region, region->in(inner), dd);
4420
4421 // Prepare the outer loop
4422 Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]);
4423 register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl));
4424 _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head);
4425 set_idom(head, outer_head, dd);
4426
4427 fix_body_edges(wq, loop, old_new, dd, loop->_parent, true);
4428
4429 // Make one of the shared_stmt copies only reachable from stmt1, the
4430 // other only from stmt2..stmtn.
4431 Node* dom = nullptr;
4432 for (uint i = 1; i < region->req(); ++i) {
4433 if (i != inner) {
4434 _igvn.replace_input_of(region, i, C->top());
4435 }
4436 Node* in = region_clone->in(i);
4437 if (in->is_top()) {
4438 continue;
4439 }
4440 if (dom == nullptr) {
4441 dom = in;
4442 } else {
4443 dom = dom_lca(dom, in);
4444 }
4445 }
4446
4447 set_idom(region_clone, dom, dd);
4448
4449 // Set up the outer loop
4450 for (uint i = 0; i < head->outcnt(); i++) {
4451 Node* u = head->raw_out(i);
4452 if (u->is_Phi()) {
4453 Node* outer_phi = u->clone();
4454 outer_phi->set_req(0, outer_head);
4455 Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx];
4456 if (backedge == nullptr) {
4457 backedge = u->in(LoopNode::LoopBackControl);
4458 }
4459 outer_phi->set_req(LoopNode::LoopBackControl, backedge);
4460 register_new_node(outer_phi, outer_head);
4461 _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi);
4462 }
4463 }
4464
4465 // create control and data nodes for out of loop uses (including region2)
4466 Node_List worklist;
4467 uint new_counter = C->unique();
4468 fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist);
4469
4470 Node_List *split_if_set = nullptr;
4471 Node_List *split_bool_set = nullptr;
4472 Node_List *split_cex_set = nullptr;
4473 fix_data_uses(wq, loop, ControlAroundStripMined, loop->skip_strip_mined(), new_counter, old_new, worklist,
4474 split_if_set, split_bool_set, split_cex_set);
4475
4476 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
4477
4478 if (exit_test != nullptr) {
4479 float cnt = exit_test->_fcnt;
4480 if (cnt != COUNT_UNKNOWN) {
4481 exit_test->_fcnt = cnt * f;
4482 old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f);
4483 }
4484 }
4485
4486 #ifdef ASSERT
4487 if (StressDuplicateBackedge && head->is_CountedLoop()) {
4488 // The Template Assertion Predicates from the old counted loop are now at the new outer loop - clone them to
4489 // the inner counted loop and kill the old ones. We only need to do this with debug builds because
4490 // StressDuplicateBackedge is a develop flag and false by default. Without StressDuplicateBackedge 'head' will be a
4491 // non-counted loop, and thus we have no Template Assertion Predicates above the old loop to move down.
4492 PredicateIterator predicate_iterator(outer_head->in(LoopNode::EntryControl));
4493 NodeInSingleLoopBody node_in_body(this, loop);
4494 MoveAssertionPredicatesVisitor move_assertion_predicates_visitor(head, node_in_body, this);
4495 predicate_iterator.for_each(move_assertion_predicates_visitor);
4496 }
4497 #endif // ASSERT
4498
4499 C->set_major_progress();
4500
4501 C->print_method(PHASE_AFTER_DUPLICATE_LOOP_BACKEDGE, 4, outer_head);
4502
4503 return true;
4504 }
4505
4506 // AutoVectorize the loop: replace scalar ops with vector ops.
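// For illustration (a hypothetical Java-level loop of the kind SuperWord can typically vectorize):
//
//   for (int i = 0; i < a.length; i++) {
//     c[i] = a[i] + b[i];
//   }
//
// After pre/main/post splitting and unrolling, the scalar loads, adds and stores of adjacent iterations in the main
// loop are packed into vector loads, adds and stores.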
4507 PhaseIdealLoop::AutoVectorizeStatus
4508 PhaseIdealLoop::auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared) {
4509 // Counted loop only
4510 if (!lpt->is_counted()) {
4511 return AutoVectorizeStatus::Impossible;
4512 }
4513
4514 // Main-loop only
4515 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4516 if (!cl->is_main_loop()) {
4517 return AutoVectorizeStatus::Impossible;
4518 }
4519
4520 VLoop vloop(lpt, false);
4521 if (!vloop.check_preconditions()) {
4522 return AutoVectorizeStatus::TriedAndFailed;
4523 }
4524
4525 // Ensure the shared data is cleared before each use
4526 vshared.clear();
4527
4528 const VLoopAnalyzer vloop_analyzer(vloop, vshared);
4529 if (!vloop_analyzer.success()) {
4530 return AutoVectorizeStatus::TriedAndFailed;
4531 }
4532
4533 SuperWord sw(vloop_analyzer);
4534 if (!sw.transform_loop()) {
4535 return AutoVectorizeStatus::TriedAndFailed;
4536 }
4537
4538 return AutoVectorizeStatus::Success;
4539 }
4540
4541 // Just before insert_pre_post_loops, we can multiversion the loop:
4542 //
4543 // multiversion_if
4544 // | |
4545 // fast_loop slow_loop
4546 //
4547 // In the fast_loop we can make speculative assumptions, and put the
4548 // conditions into the multiversion_if. If the conditions hold at runtime,
4549 // we enter the fast_loop; if the conditions fail, we take the slow_loop
4550 // instead which does not make any of the speculative assumptions.
4551 //
4552 // Note: we only multiversion the loop if the loop does not have any
4553 // auto vectorization check Predicate. If we have that predicate,
4554 // then we can simply add the speculative assumption checks to
4555 // that Predicate. This means we do not need to duplicate the
4556 // loop - we have a smaller graph and save compile time. Should
4557 // the conditions ever fail, then we deopt / trap at the Predicate
4558 // and recompile without that Predicate. At that point we will
4559 // multiversion the loop, so that we can still have speculative
4560 // runtime checks.
4561 //
4562 // We perform the multiversioning when the loop is still in its single
4563 // iteration form, even before we insert pre and post loops. This makes
4564 // the cloning much simpler. However, this means that both the fast
4565 // and the slow loop have to be optimized independently (adding pre
4566 // and post loops, unrolling the main loop, auto-vectorize etc.). And
4567 // we may end up not needing any speculative assumptions in the fast_loop
4568 // and then rejecting the slow_loop by constant folding the multiversion_if.
4569 //
4570 // Therefore, we "delay" the optimization of the slow_loop until we add
4571 // at least one speculative assumption for the fast_loop. If we never
4572 // add such a speculative runtime check, the OpaqueMultiversioningNode
4573 // of the multiversion_if constant folds to true after loop opts, and the
4574 // multiversion_if folds away the "delayed" slow_loop. If we add any
4575 // speculative assumption, then we notify the OpaqueMultiversioningNode
4576 // with "notify_slow_loop_that_it_can_resume_optimizations".
4577 //
4578 // Note: new runtime checks can be added to the multiversion_if with
4579 // PhaseIdealLoop::create_new_if_for_multiversion
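//
// Sketch of the resulting control flow (simplified, names as used in the comment above):
//
//   if (OpaqueMultiversioning)   // multiversion_if, new checks can be appended here later
//     fast_loop                  // may receive speculative assumption checks during auto-vectorization
//   else
//     slow_loop                  // "delayed" until a speculative check is actually added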
4580 void PhaseIdealLoop::maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new) {
4581 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4582 LoopNode* outer_loop = cl->skip_strip_mined();
4583 Node* entry = outer_loop->in(LoopNode::EntryControl);
4584
4585 // Check we have multiversioning enabled, and are not already multiversioned.
4586 if (!LoopMultiversioning || cl->is_multiversion()) { return; }
4587
4588 // Check that we do not have a parse-predicate where we can add the runtime checks
4589 // during auto-vectorization.
4590 const Predicates predicates(entry);
4591 const PredicateBlock* predicate_block = predicates.auto_vectorization_check_block();
4592 if (predicate_block->has_parse_predicate()) { return; }
4593
4594 // Check node budget.
4595 uint estimate = lpt->est_loop_clone_sz(2);
4596 if (!may_require_nodes(estimate)) { return; }
4597
4598 do_multiversioning(lpt, old_new);
4599 }
4600
4601 void DataNodeGraph::clone_data_nodes(Node* new_ctrl) {
4602 for (uint i = 0; i < _data_nodes.size(); i++) {
4603 clone(_data_nodes[i], new_ctrl);
4604 }
4605 }
4606
4607 // Clone the given node and set it up properly. Set 'new_ctrl' as ctrl.
4608 void DataNodeGraph::clone(Node* node, Node* new_ctrl) {
4609 Node* clone = node->clone();
4610 _phase->igvn().register_new_node_with_optimizer(clone);
4611 _orig_to_new.put(node, clone);
4612 _phase->set_ctrl(clone, new_ctrl);
4613 if (node->is_CastII()) {
4614 clone->set_req(0, new_ctrl);
4615 }
4616 }
4617
4618 // Rewire the data inputs of all (unprocessed) cloned nodes, whose inputs are still pointing to the same inputs as their
4619 // corresponding orig nodes, to the newly cloned inputs to create a separate cloned graph.
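// For example (hypothetical nodes): if both an AddI and a ConvI2L that consumes it were cloned, the cloned ConvI2L
// still points at the original AddI after cloning; this pass redirects it to the cloned AddI. Inputs without an
// orig->new mapping (i.e. nodes that were not cloned) are left untouched.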
4620 void DataNodeGraph::rewire_clones_to_cloned_inputs() {
4621 _orig_to_new.iterate_all([&](Node* node, Node* clone) {
4622 for (uint i = 1; i < node->req(); i++) {
4623 Node** cloned_input = _orig_to_new.get(node->in(i));
4624 if (cloned_input != nullptr) {
4625 // Input was also cloned -> rewire clone to the cloned input.
4626 _phase->igvn().replace_input_of(clone, i, *cloned_input);
4627 }
4628 }
4629 });
4630 }
4631
4632 // Clone all non-OpaqueLoop* nodes and apply the provided transformation strategy for OpaqueLoop* nodes.
4633 // Set 'new_ctrl' as ctrl for all cloned non-OpaqueLoop* nodes.
4634 void DataNodeGraph::clone_data_nodes_and_transform_opaque_loop_nodes(
4635 const TransformStrategyForOpaqueLoopNodes& transform_strategy,
4636 Node* new_ctrl) {
4637 for (uint i = 0; i < _data_nodes.size(); i++) {
4638 Node* data_node = _data_nodes[i];
4639 if (data_node->is_Opaque1()) {
4640 transform_opaque_node(transform_strategy, data_node);
4641 } else {
4642 clone(data_node, new_ctrl);
4643 }
4644 }
4645 }
4646
4647 void DataNodeGraph::transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node) {
4648 Node* transformed_node;
4649 if (node->is_OpaqueLoopInit()) {
4650 transformed_node = transform_strategy.transform_opaque_init(node->as_OpaqueLoopInit());
4651 } else {
4652 assert(node->is_OpaqueLoopStride(), "must be OpaqueLoopStrideNode");
4653 transformed_node = transform_strategy.transform_opaque_stride(node->as_OpaqueLoopStride());
4654 }
4655 // Add an orig->new mapping to correctly update the inputs of the copied graph in rewire_clones_to_cloned_inputs().
4656 _orig_to_new.put(node, transformed_node);
4657 }