1 /*
2 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/connode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/loopnode.hpp"
35 #include "opto/matcher.hpp"
36 #include "opto/movenode.hpp"
37 #include "opto/mulnode.hpp"
38 #include "opto/opaquenode.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/subnode.hpp"
41 #include "opto/subtypenode.hpp"
42 #include "opto/superword.hpp"
43 #include "opto/vectornode.hpp"
44 #include "utilities/checkedCast.hpp"
45 #include "utilities/macros.hpp"
46
47 //=============================================================================
48 //------------------------------split_thru_phi---------------------------------
49 // Split Node 'n' through merge point if there is enough win.
50 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
51 if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
52 (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
53 // ConvI2L/ConvL2I may have type information on it which is unsafe to push up
54 // so disable this for now
55 return nullptr;
56 }
57
58 // Splitting range check CastIIs through a loop induction Phi can
59 // cause new Phis to be created that are left unrelated to the loop
60 // induction Phi and prevent optimizations (vectorization)
61 if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
62 n->in(1) == region->as_CountedLoop()->phi()) {
63 return nullptr;
64 }
65
66 if (cannot_split_division(n, region)) {
67 return nullptr;
68 }
69
70 SplitThruPhiWins wins(region);
71 assert(!n->is_CFG(), "");
72 assert(region->is_Region(), "");
73
74 const Type* type = n->bottom_type();
75 const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
76 Node* phi;
77 if (t_oop != nullptr && t_oop->is_known_instance_field()) {
78 int iid = t_oop->instance_id();
79 int index = C->get_alias_index(t_oop);
80 int offset = t_oop->offset();
81 phi = new PhiNode(region, type, nullptr, iid, index, offset);
82 } else {
83 phi = PhiNode::make_blank(region, n);
84 }
85 uint old_unique = C->unique();
86 for (uint i = 1; i < region->req(); i++) {
87 Node* x;
88 Node* the_clone = nullptr;
89 if (region->in(i) == C->top()) {
90 x = C->top(); // Dead path? Use a dead data op
91 } else {
92 x = n->clone(); // Else clone up the data op
93 the_clone = x; // Remember for possible deletion.
94 // Alter data node to use pre-phi inputs
95 if (n->in(0) == region)
96 x->set_req( 0, region->in(i) );
97 for (uint j = 1; j < n->req(); j++) {
98 Node* in = n->in(j);
99 if (in->is_Phi() && in->in(0) == region)
100 x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
101 }
102 }
103 // Check for a 'win' on some paths
104 const Type* t = x->Value(&_igvn);
105
106 bool singleton = t->singleton();
107
108 // A TOP singleton indicates that there are no possible values incoming
109 // along a particular edge. In most cases, this is OK, and the Phi will
110 // be eliminated later in an Ideal call. However, we can't allow this to
111 // happen if the singleton occurs on loop entry, as the elimination of
112 // the PhiNode may cause the resulting node to migrate back to a previous
113 // loop iteration.
114 if (singleton && t == Type::TOP) {
115 // Is_Loop() == false does not confirm the absence of a loop (e.g., an
116 // irreducible loop may not be indicated by an affirmative is_Loop());
117 // therefore, the only top we can split thru a phi is on a backedge of
118 // a loop.
119 singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
120 }
121
122 if (singleton) {
123 wins.add_win(i);
124 x = makecon(t);
125 } else {
126 // We now call Identity to try to simplify the cloned node.
127 // Note that some Identity methods call phase->type(this).
128 // Make sure that the type array is big enough for
129 // our new node, even though we may throw the node away.
130 // (Note: This tweaking with igvn only works because x is a new node.)
131 _igvn.set_type(x, t);
132 // If x is a TypeNode, capture any more-precise type permanently into Node
133 // otherwise it will be not updated during igvn->transform since
134 // igvn->type(x) is set to x->Value() already.
135 x->raise_bottom_type(t);
136 Node* y = x->Identity(&_igvn);
137 if (y != x) {
138 wins.add_win(i);
139 x = y;
140 } else {
141 y = _igvn.hash_find(x);
142 if (y == nullptr) {
143 y = similar_subtype_check(x, region->in(i));
144 }
145 if (y) {
146 wins.add_win(i);
147 x = y;
148 } else {
149 // Else x is a new node we are keeping
150 // We do not need register_new_node_with_optimizer
151 // because set_type has already been called.
152 _igvn._worklist.push(x);
153 }
154 }
155 }
156
157 phi->set_req( i, x );
158
159 if (the_clone == nullptr) {
160 continue;
161 }
162
163 if (the_clone != x) {
164 _igvn.remove_dead_node(the_clone);
165 } else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
166 n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
167 // it is not a win if 'x' moved from an outer to an inner loop
168 // this edge case can only happen for Load nodes
169 wins.reset();
170 break;
171 }
172 }
173 // Too few wins?
174 if (!wins.profitable(policy)) {
175 _igvn.remove_dead_node(phi);
176 return nullptr;
177 }
178
179 // Record Phi
180 register_new_node( phi, region );
181
182 for (uint i2 = 1; i2 < phi->req(); i2++) {
183 Node *x = phi->in(i2);
184 // If we commoned up the cloned 'x' with another existing Node,
185 // the existing Node picks up a new use. We need to make the
186 // existing Node occur higher up so it dominates its uses.
187 Node *old_ctrl;
188 IdealLoopTree *old_loop;
189
190 if (x->is_Con()) {
191 assert(get_ctrl(x) == C->root(), "constant control is not root");
192 continue;
193 }
194 // The occasional new node
195 if (x->_idx >= old_unique) { // Found a new, unplaced node?
196 old_ctrl = nullptr;
197 old_loop = nullptr; // Not in any prior loop
198 } else {
199 old_ctrl = get_ctrl(x);
200 old_loop = get_loop(old_ctrl); // Get prior loop
201 }
202 // New late point must dominate new use
203 Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
204 if (new_ctrl == old_ctrl) // Nothing is changed
205 continue;
206
207 IdealLoopTree *new_loop = get_loop(new_ctrl);
208
209 // Don't move x into a loop if its uses are
210 // outside of loop. Otherwise x will be cloned
211 // for each use outside of this loop.
212 IdealLoopTree *use_loop = get_loop(region);
213 if (!new_loop->is_member(use_loop) &&
214 (old_loop == nullptr || !new_loop->is_member(old_loop))) {
215 // Take early control, later control will be recalculated
216 // during next iteration of loop optimizations.
217 new_ctrl = get_early_ctrl(x);
218 new_loop = get_loop(new_ctrl);
219 }
220 // Set new location
221 set_ctrl(x, new_ctrl);
222 // If changing loop bodies, see if we need to collect into new body
223 if (old_loop != new_loop) {
224 if (old_loop && !old_loop->_child)
225 old_loop->_body.yank(x);
226 if (!new_loop->_child)
227 new_loop->_body.push(x); // Collect body info
228 }
229 }
230
231 split_thru_phi_yank_old_nodes(n, region);
232 _igvn.replace_node(n, phi);
233
234 #ifndef PRODUCT
235 if (TraceLoopOpts) {
236 tty->print_cr("Split %d %s through %d Phi in %d %s",
237 n->_idx, n->Name(), phi->_idx, region->_idx, region->Name());
238 }
239 #endif // !PRODUCT
240
241 return phi;
242 }
243
244 // If the region is a Loop, we are removing the old n,
245 // and need to yank it from the _body. If any phi we
246 // just split through now has no use any more, it also
247 // has to be removed.
248 void PhaseIdealLoop::split_thru_phi_yank_old_nodes(Node* n, Node* region) {
249 IdealLoopTree* region_loop = get_loop(region);
250 if (region->is_Loop() && region_loop->is_innermost()) {
251 region_loop->_body.yank(n);
252 for (uint j = 1; j < n->req(); j++) {
253 PhiNode* phi = n->in(j)->isa_Phi();
254 // Check that phi belongs to the region and only has n as a use.
255 if (phi != nullptr &&
256 phi->in(0) == region &&
257 phi->unique_multiple_edges_out_or_null() == n) {
258 assert(get_ctrl(phi) == region, "sanity");
259 assert(get_ctrl(n) == region, "sanity");
260 region_loop->_body.yank(phi);
261 }
262 }
263 }
264 }
265
266 // Test whether node 'x' can move into an inner loop relative to node 'n'.
267 // Note: The test is not exact. Returns true if 'x' COULD end up in an inner loop,
268 // BUT it can also return true and 'x' is in the outer loop
269 bool PhaseIdealLoop::can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x) {
270 IdealLoopTree* n_loop_tree = get_loop(n_loop);
271 IdealLoopTree* x_loop_tree = get_loop(get_early_ctrl(x));
272 // x_loop_tree should be outer or same loop as n_loop_tree
273 return !x_loop_tree->is_member(n_loop_tree);
274 }
275
276 // Subtype checks that carry profile data don't common so look for a replacement by following edges
277 Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
278 if (x->is_SubTypeCheck()) {
279 Node* in1 = x->in(1);
280 for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
281 Node* u = in1->fast_out(i);
282 if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
283 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
284 Node* bol = u->fast_out(j);
285 for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
286 Node* iff = bol->fast_out(k);
287 // Only dominating subtype checks are interesting: otherwise we risk replacing a subtype check by another with
288 // unrelated profile
289 if (iff->is_If() && is_dominator(iff, r_in)) {
290 return u;
291 }
292 }
293 }
294 }
295 }
296 }
297 return nullptr;
298 }
299
300 // Return true if 'n' is a Div or Mod node (without zero check If node which was removed earlier) with a loop phi divisor
301 // of a trip-counted (integer or long) loop with a backedge input that could be zero (include zero in its type range). In
302 // this case, we cannot split the division to the backedge as it could freely float above the loop exit check resulting in
303 // a division by zero. This situation is possible because the type of an increment node of an iv phi (trip-counter) could
304 // include zero while the iv phi does not (see PhiNode::Value() for trip-counted loops where we improve types of iv phis).
305 // We also need to check other loop phis as they could have been created in the same split-if pass when applying
306 // PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
307 bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
308 const Type* zero;
309 switch (n->Opcode()) {
310 case Op_DivI:
311 case Op_ModI:
312 case Op_UDivI:
313 case Op_UModI:
314 zero = TypeInt::ZERO;
315 break;
316 case Op_DivL:
317 case Op_ModL:
318 case Op_UDivL:
319 case Op_UModL:
320 zero = TypeLong::ZERO;
321 break;
322 default:
323 return false;
324 }
325
326 if (n->in(0) != nullptr) {
327 // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
328 return true;
329 }
330
331 Node* divisor = n->in(2);
332 return is_divisor_loop_phi(divisor, region) &&
333 loop_phi_backedge_type_contains_zero(divisor, zero);
334 }
335
336 bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) {
337 return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop;
338 }
339
340 bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
341 return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
342 }
343
344 //------------------------------dominated_by------------------------------------
345 // Replace the dominated test with an obvious true or false. Place it on the
346 // IGVN worklist for later cleanup. Move control-dependent data Nodes on the
347 // live path up to the dominating control.
348 void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool pin_array_access_nodes) {
349 if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
350
351 // prevdom is the dominating projection of the dominating test.
352 assert(iff->Opcode() == Op_If ||
353 iff->Opcode() == Op_CountedLoopEnd ||
354 iff->Opcode() == Op_LongCountedLoopEnd ||
355 iff->Opcode() == Op_RangeCheck ||
356 iff->Opcode() == Op_ParsePredicate,
357 "Check this code when new subtype is added");
358
359 int pop = prevdom->Opcode();
360 assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
361 if (flip) {
362 if (pop == Op_IfTrue)
363 pop = Op_IfFalse;
364 else
365 pop = Op_IfTrue;
366 }
367 // 'con' is set to true or false to kill the dominated test.
368 Node* con = makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
369 // Hack the dominated test
370 _igvn.replace_input_of(iff, 1, con);
371
372 // If I don't have a reachable TRUE and FALSE path following the IfNode then
373 // I can assume this path reaches an infinite loop. In this case it's not
374 // important to optimize the data Nodes - either the whole compilation will
375 // be tossed or this path (and all data Nodes) will go dead.
376 if (iff->outcnt() != 2) {
377 return;
378 }
379
380 // Make control-dependent data Nodes on the live path (path that will remain
381 // once the dominated IF is removed) become control-dependent on the
382 // dominating projection.
383 Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);
384
385 if (dp == nullptr) {
386 return;
387 }
388
389 rewire_safe_outputs_to_dominator(dp, prevdom, pin_array_access_nodes);
390 }
391
392 void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool pin_array_access_nodes) {
393 IdealLoopTree* old_loop = get_loop(source);
394
395 for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
396 Node* out = source->fast_out(i); // Control-dependent node
397 // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
398 if (out->depends_only_on_test() && _igvn.no_dependent_zero_check(out)) {
399 assert(out->in(0) == source, "must be control dependent on source");
400 _igvn.replace_input_of(out, 0, dominator);
401 if (pin_array_access_nodes) {
402 // Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
403 // check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
404 // removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
405 // in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
406 // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
407 // dominating check.
408 Node* clone = out->pin_array_access_node();
409 if (clone != nullptr) {
410 clone = _igvn.register_new_node_with_optimizer(clone, out);
411 _igvn.replace_node(out, clone);
412 out = clone;
413 }
414 }
415 set_early_ctrl(out, false);
416 IdealLoopTree* new_loop = get_loop(get_ctrl(out));
417 if (old_loop != new_loop) {
418 if (!old_loop->_child) {
419 old_loop->_body.yank(out);
420 }
421 if (!new_loop->_child) {
422 new_loop->_body.push(out);
423 }
424 }
425 --i;
426 --imax;
427 }
428 }
429 }
430
431 //------------------------------has_local_phi_input----------------------------
432 // Return TRUE if 'n' has Phi inputs from its local block and no other
433 // block-local inputs (all non-local-phi inputs come from earlier blocks)
434 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
435 Node *n_ctrl = get_ctrl(n);
436 // See if some inputs come from a Phi in this block, or from before
437 // this block.
438 uint i;
439 for( i = 1; i < n->req(); i++ ) {
440 Node *phi = n->in(i);
441 if( phi->is_Phi() && phi->in(0) == n_ctrl )
442 break;
443 }
444 if( i >= n->req() )
445 return nullptr; // No Phi inputs; nowhere to clone thru
446
447 // Check for inputs created between 'n' and the Phi input. These
448 // must split as well; they have already been given the chance
449 // (courtesy of a post-order visit) and since they did not we must
450 // recover the 'cost' of splitting them by being very profitable
451 // when splitting 'n'. Since this is unlikely we simply give up.
452 for( i = 1; i < n->req(); i++ ) {
453 Node *m = n->in(i);
454 if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
455 // We allow the special case of AddP's with no local inputs.
456 // This allows us to split-up address expressions.
457 if (m->is_AddP() &&
458 get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
459 get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
460 get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
461 // Move the AddP up to the dominating point. That's fine because control of m's inputs
462 // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
463 Node* c = find_non_split_ctrl(idom(n_ctrl));
464 if (c->is_OuterStripMinedLoop()) {
465 c->as_Loop()->verify_strip_mined(1);
466 c = c->in(LoopNode::EntryControl);
467 }
468 set_ctrl_and_loop(m, c);
469 continue;
470 }
471 return nullptr;
472 }
473 assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
474 }
475
476 return n_ctrl;
477 }
478
479 // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
480 Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
481 assert(bt == T_INT || bt == T_LONG, "only for integers");
482 int n_op = n->Opcode();
483
484 if (n_op == Op_LShift(bt)) {
485 // Scale is loop invariant
486 Node* scale = n->in(2);
487 Node* scale_ctrl = get_ctrl(scale);
488 IdealLoopTree* scale_loop = get_loop(scale_ctrl);
489 if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
490 return nullptr;
491 }
492 const TypeInt* scale_t = scale->bottom_type()->isa_int();
493 if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
494 return nullptr; // Dont bother with byte/short masking
495 }
496 // Add must vary with loop (else shift would be loop-invariant)
497 Node* add = n->in(1);
498 Node* add_ctrl = get_ctrl(add);
499 IdealLoopTree* add_loop = get_loop(add_ctrl);
500 if (n_loop != add_loop) {
501 return nullptr; // happens w/ evil ZKM loops
502 }
503
504 // Convert I-V into I+ (0-V); same for V-I
505 if (add->Opcode() == Op_Sub(bt) &&
506 _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
507 assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
508 Node* zero = integercon(0, bt);
509 Node* neg = SubNode::make(zero, add->in(2), bt);
510 register_new_node_with_ctrl_of(neg, add->in(2));
511 add = AddNode::make(add->in(1), neg, bt);
512 register_new_node(add, add_ctrl);
513 }
514 if (add->Opcode() != Op_Add(bt)) return nullptr;
515 assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
516 // See if one add input is loop invariant
517 Node* add_var = add->in(1);
518 Node* add_var_ctrl = get_ctrl(add_var);
519 IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
520 Node* add_invar = add->in(2);
521 Node* add_invar_ctrl = get_ctrl(add_invar);
522 IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
523 if (add_invar_loop == n_loop) {
524 // Swap to find the invariant part
525 add_invar = add_var;
526 add_invar_ctrl = add_var_ctrl;
527 add_invar_loop = add_var_loop;
528 add_var = add->in(2);
529 } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
530 return nullptr;
531 }
532 if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
533 return nullptr; // No invariant part of the add?
534 }
535
536 // Yes! Reshape address expression!
537 Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
538 Node* inv_scale_ctrl =
539 dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
540 add_invar_ctrl : scale_ctrl;
541 register_new_node(inv_scale, inv_scale_ctrl);
542 Node* var_scale = LShiftNode::make(add_var, scale, bt);
543 register_new_node(var_scale, n_ctrl);
544 Node* var_add = AddNode::make(var_scale, inv_scale, bt);
545 register_new_node(var_add, n_ctrl);
546 _igvn.replace_node(n, var_add);
547 return var_add;
548 }
549 return nullptr;
550 }
551
552 //------------------------------remix_address_expressions----------------------
553 // Rework addressing expressions to get the most loop-invariant stuff
554 // moved out. We'd like to do all associative operators, but it's especially
555 // important (common) to do address expressions.
556 Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
557 if (!has_ctrl(n)) return nullptr;
558 Node* n_ctrl = get_ctrl(n);
559 IdealLoopTree* n_loop = get_loop(n_ctrl);
560
561 // See if 'n' mixes loop-varying and loop-invariant inputs and
562 // itself is loop-varying.
563
564 // Only interested in binary ops (and AddP)
565 if (n->req() < 3 || n->req() > 4) return nullptr;
566
567 Node* n1_ctrl = get_ctrl(n->in( 1));
568 Node* n2_ctrl = get_ctrl(n->in( 2));
569 Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
570 IdealLoopTree* n1_loop = get_loop(n1_ctrl);
571 IdealLoopTree* n2_loop = get_loop(n2_ctrl);
572 IdealLoopTree* n3_loop = get_loop(n3_ctrl);
573
574 // Does one of my inputs spin in a tighter loop than self?
575 if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
576 (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
577 (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
578 return nullptr; // Leave well enough alone
579 }
580
581 // Is at least one of my inputs loop-invariant?
582 if (n1_loop == n_loop &&
583 n2_loop == n_loop &&
584 n3_loop == n_loop) {
585 return nullptr; // No loop-invariant inputs
586 }
587
588 Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
589 if (res != nullptr) {
590 return res;
591 }
592 res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
593 if (res != nullptr) {
594 return res;
595 }
596
597 int n_op = n->Opcode();
598 // Replace (I+V) with (V+I)
599 if (n_op == Op_AddI ||
600 n_op == Op_AddL ||
601 n_op == Op_AddF ||
602 n_op == Op_AddD ||
603 n_op == Op_MulI ||
604 n_op == Op_MulL ||
605 n_op == Op_MulF ||
606 n_op == Op_MulD) {
607 if (n2_loop == n_loop) {
608 assert(n1_loop != n_loop, "");
609 n->swap_edges(1, 2);
610 }
611 }
612
613 // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
614 // but not if I2 is a constant. Skip for irreducible loops.
615 if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
616 if (n2_loop == n_loop && n3_loop != n_loop) {
617 if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
618 Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
619 Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
620 IdealLoopTree* n22loop = get_loop(n22_ctrl);
621 IdealLoopTree* n23_loop = get_loop(n23_ctrl);
622 if (n22loop != n_loop && n22loop->is_member(n_loop) &&
623 n23_loop == n_loop) {
624 Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3));
625 // Stuff new AddP in the loop preheader
626 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
627 Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3));
628 register_new_node(add2, n_ctrl);
629 _igvn.replace_node(n, add2);
630 return add2;
631 }
632 }
633 }
634
635 // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
636 if (n2_loop != n_loop && n3_loop == n_loop) {
637 if (n->in(3)->Opcode() == Op_AddX) {
638 Node* V = n->in(3)->in(1);
639 Node* I = n->in(3)->in(2);
640 if (ctrl_is_member(n_loop, V)) {
641 } else {
642 Node *tmp = V; V = I; I = tmp;
643 }
644 if (!ctrl_is_member(n_loop, I)) {
645 Node* add1 = new AddPNode(n->in(1), n->in(2), I);
646 // Stuff new AddP in the loop preheader
647 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
648 Node* add2 = new AddPNode(n->in(1), add1, V);
649 register_new_node(add2, n_ctrl);
650 _igvn.replace_node(n, add2);
651 return add2;
652 }
653 }
654 }
655 }
656
657 return nullptr;
658 }
659
660 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
661 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
662 assert(n->Opcode() == Op_AddI, "sanity");
663 Node * nn = nullptr;
664 Node * in1 = n->in(1);
665 Node * in2 = n->in(2);
666 if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
667 IdealLoopTree* loop_n = get_loop(get_ctrl(n));
668 if (loop_n->is_counted() &&
669 loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
670 Matcher::match_rule_supported(Op_MulAddVS2VI) &&
671 Matcher::match_rule_supported(Op_MulAddS2I)) {
672 Node* mul_in1 = in1->in(1);
673 Node* mul_in2 = in1->in(2);
674 Node* mul_in3 = in2->in(1);
675 Node* mul_in4 = in2->in(2);
676 if (mul_in1->Opcode() == Op_LoadS &&
677 mul_in2->Opcode() == Op_LoadS &&
678 mul_in3->Opcode() == Op_LoadS &&
679 mul_in4->Opcode() == Op_LoadS) {
680 IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
681 IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
682 IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
683 IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
684 IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
685 IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
686 // All nodes should be in the same counted loop.
687 if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
688 loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
689 Node* adr1 = mul_in1->in(MemNode::Address);
690 Node* adr2 = mul_in2->in(MemNode::Address);
691 Node* adr3 = mul_in3->in(MemNode::Address);
692 Node* adr4 = mul_in4->in(MemNode::Address);
693 if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
694 if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
695 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
696 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
697 register_new_node_with_ctrl_of(nn, n);
698 _igvn.replace_node(n, nn);
699 return nn;
700 } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
701 (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
702 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
703 register_new_node_with_ctrl_of(nn, n);
704 _igvn.replace_node(n, nn);
705 return nn;
706 }
707 }
708 }
709 }
710 }
711 }
712 return nn;
713 }
714
715 //------------------------------conditional_move-------------------------------
716 // Attempt to replace a Phi with a conditional move. We have some pretty
717 // strict profitability requirements. All Phis at the merge point must
718 // be converted, so we can remove the control flow. We need to limit the
719 // number of c-moves to a small handful. All code that was in the side-arms
720 // of the CFG diamond is now speculatively executed. This code has to be
721 // "cheap enough". We are pretty much limited to CFG diamonds that merge
722 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
723 Node *PhaseIdealLoop::conditional_move( Node *region ) {
724
725 assert(region->is_Region(), "sanity check");
726 if (region->req() != 3) return nullptr;
727
728 // Check for CFG diamond
729 Node *lp = region->in(1);
730 Node *rp = region->in(2);
731 if (!lp || !rp) return nullptr;
732 Node *lp_c = lp->in(0);
733 if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
734 IfNode *iff = lp_c->as_If();
735
736 // Check for ops pinned in an arm of the diamond.
737 // Can't remove the control flow in this case
738 if (lp->outcnt() > 1) return nullptr;
739 if (rp->outcnt() > 1) return nullptr;
740
741 IdealLoopTree* r_loop = get_loop(region);
742 assert(r_loop == get_loop(iff), "sanity");
743 // Always convert to CMOVE if all results are used only outside this loop.
744 bool used_inside_loop = (r_loop == _ltree_root);
745
746 // Check profitability
747 int cost = 0;
748 int phis = 0;
749 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
750 Node *out = region->fast_out(i);
751 if (!out->is_Phi()) continue; // Ignore other control edges, etc
752 phis++;
753 PhiNode* phi = out->as_Phi();
754 BasicType bt = phi->type()->basic_type();
755 switch (bt) {
756 case T_DOUBLE:
757 case T_FLOAT:
758 if (C->use_cmove()) {
759 continue; //TODO: maybe we want to add some cost
760 }
761 cost += Matcher::float_cmove_cost(); // Could be very expensive
762 break;
763 case T_LONG: {
764 cost += Matcher::long_cmove_cost(); // May encodes as 2 CMOV's
765 }
766 case T_INT: // These all CMOV fine
767 case T_ADDRESS: { // (RawPtr)
768 cost++;
769 break;
770 }
771 case T_NARROWOOP: // Fall through
772 case T_OBJECT: { // Base oops are OK, but not derived oops
773 const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
774 // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
775 // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
776 // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
777 // have a Phi for the base here that we convert to a CMOVE all is well
778 // and good. But if the base is dead, we'll not make a CMOVE. Later
779 // the allocator will have to produce a base by creating a CMOVE of the
780 // relevant bases. This puts the allocator in the business of
781 // manufacturing expensive instructions, generally a bad plan.
782 // Just Say No to Conditionally-Moved Derived Pointers.
783 if (tp && tp->offset() != 0)
784 return nullptr;
785 cost++;
786 break;
787 }
788 default:
789 return nullptr; // In particular, can't do memory or I/O
790 }
791 // Add in cost any speculative ops
792 for (uint j = 1; j < region->req(); j++) {
793 Node *proj = region->in(j);
794 Node *inp = phi->in(j);
795 if (get_ctrl(inp) == proj) { // Found local op
796 cost++;
797 // Check for a chain of dependent ops; these will all become
798 // speculative in a CMOV.
799 for (uint k = 1; k < inp->req(); k++)
800 if (get_ctrl(inp->in(k)) == proj)
801 cost += ConditionalMoveLimit; // Too much speculative goo
802 }
803 }
804 // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
805 // This will likely Split-If, a higher-payoff operation.
806 for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
807 Node* use = phi->fast_out(k);
808 if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
809 cost += ConditionalMoveLimit;
810 // Is there a use inside the loop?
811 // Note: check only basic types since CMoveP is pinned.
812 if (!used_inside_loop && is_java_primitive(bt)) {
813 IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
814 if (r_loop == u_loop || r_loop->is_member(u_loop)) {
815 used_inside_loop = true;
816 }
817 }
818 }
819 }//for
820 Node* bol = iff->in(1);
821 assert(!bol->is_OpaqueInitializedAssertionPredicate(), "Initialized Assertion Predicates cannot form a diamond with Halt");
822 if (bol->is_OpaqueTemplateAssertionPredicate()) {
823 // Ignore Template Assertion Predicates with OpaqueTemplateAssertionPredicate nodes.
824 return nullptr;
825 }
826 if (bol->is_OpaqueMultiversioning()) {
827 assert(bol->as_OpaqueMultiversioning()->is_useless(), "Must be useless, i.e. fast main loop has already disappeared.");
828 // Ignore multiversion_if that just lost its loops. The OpaqueMultiversioning is marked useless,
829 // and will make the multiversion_if constant fold in the next IGVN round.
830 return nullptr;
831 }
832 if (!bol->is_Bool()) {
833 assert(false, "Expected Bool, but got %s", NodeClassNames[bol->Opcode()]);
834 return nullptr;
835 }
836 int cmp_op = bol->in(1)->Opcode();
837 if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
838 return nullptr;
839 }
840 // It is expensive to generate flags from a float compare.
841 // Avoid duplicated float compare.
842 if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;
843
844 float infrequent_prob = PROB_UNLIKELY_MAG(3);
845 // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
846 if (used_inside_loop) {
847 if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo
848
849 // BlockLayoutByFrequency optimization moves infrequent branch
850 // from hot path. No point in CMOV'ing in such case (110 is used
851 // instead of 100 to take into account not exactness of float value).
852 if (BlockLayoutByFrequency) {
853 infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
854 }
855 }
856 // Check for highly predictable branch. No point in CMOV'ing if
857 // we are going to predict accurately all the time.
858 if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
859 //keep going
860 } else if (iff->_prob < infrequent_prob ||
861 iff->_prob > (1.0f - infrequent_prob))
862 return nullptr;
863
864 // --------------
865 // Now replace all Phis with CMOV's
866 Node *cmov_ctrl = iff->in(0);
867 uint flip = (lp->Opcode() == Op_IfTrue);
868 Node_List wq;
869 while (1) {
870 PhiNode* phi = nullptr;
871 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
872 Node *out = region->fast_out(i);
873 if (out->is_Phi()) {
874 phi = out->as_Phi();
875 break;
876 }
877 }
878 if (phi == nullptr || _igvn.type(phi) == Type::TOP || !CMoveNode::supported(_igvn.type(phi))) {
879 break;
880 }
881 // Move speculative ops
882 wq.push(phi);
883 while (wq.size() > 0) {
884 Node *n = wq.pop();
885 for (uint j = 1; j < n->req(); j++) {
886 Node* m = n->in(j);
887 if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
888 set_ctrl(m, cmov_ctrl);
889 wq.push(m);
890 }
891 }
892 }
893 Node* cmov = CMoveNode::make(iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
894 register_new_node(cmov, cmov_ctrl);
895 _igvn.replace_node(phi, cmov);
896 #ifndef PRODUCT
897 if (TraceLoopOpts) {
898 tty->print("CMOV ");
899 r_loop->dump_head();
900 if (Verbose) {
901 bol->in(1)->dump(1);
902 cmov->dump(1);
903 }
904 }
905 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
906 #endif
907 }
908
909 // The useless CFG diamond will fold up later; see the optimization in
910 // RegionNode::Ideal.
911 _igvn._worklist.push(region);
912
913 return iff->in(1);
914 }
915
916 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
917 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
918 Node* u = m->fast_out(i);
919 if (u->is_CFG()) {
920 if (u->is_NeverBranch()) {
921 u = u->as_NeverBranch()->proj_out(0);
922 enqueue_cfg_uses(u, wq);
923 } else {
924 wq.push(u);
925 }
926 }
927 }
928 }
929
930 // Try moving a store out of a loop, right before the loop
931 Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
932 // Store has to be first in the loop body
933 IdealLoopTree *n_loop = get_loop(n_ctrl);
934 if (n->is_Store() && n_loop != _ltree_root &&
935 n_loop->is_loop() && n_loop->_head->is_Loop() &&
936 n->in(0) != nullptr) {
937 Node* address = n->in(MemNode::Address);
938 Node* value = n->in(MemNode::ValueIn);
939 Node* mem = n->in(MemNode::Memory);
940
941 // - address and value must be loop invariant
942 // - memory must be a memory Phi for the loop
943 // - Store must be the only store on this memory slice in the
944 // loop: if there's another store following this one then value
945 // written at iteration i by the second store could be overwritten
946 // at iteration i+n by the first store: it's not safe to move the
947 // first store out of the loop
948 // - nothing must observe the memory Phi: it guarantees no read
949 // before the store, we are also guaranteed the store post
950 // dominates the loop head (ignoring a possible early
951 // exit). Otherwise there would be extra Phi involved between the
952 // loop's Phi and the store.
953 // - there must be no early exit from the loop before the Store
954 // (such an exit most of the time would be an extra use of the
955 // memory Phi but sometimes is a bottom memory Phi that takes the
956 // store as input).
957
958 if (!ctrl_is_member(n_loop, address) &&
959 !ctrl_is_member(n_loop, value) &&
960 mem->is_Phi() && mem->in(0) == n_loop->_head &&
961 mem->outcnt() == 1 &&
962 mem->in(LoopNode::LoopBackControl) == n) {
963
964 assert(n_loop->_tail != nullptr, "need a tail");
965 assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");
966
967 // Verify that there's no early exit of the loop before the store.
968 bool ctrl_ok = false;
969 {
970 // Follow control from loop head until n, we exit the loop or
971 // we reach the tail
972 ResourceMark rm;
973 Unique_Node_List wq;
974 wq.push(n_loop->_head);
975
976 for (uint next = 0; next < wq.size(); ++next) {
977 Node *m = wq.at(next);
978 if (m == n->in(0)) {
979 ctrl_ok = true;
980 continue;
981 }
982 assert(!has_ctrl(m), "should be CFG");
983 if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
984 ctrl_ok = false;
985 break;
986 }
987 enqueue_cfg_uses(m, wq);
988 if (wq.size() > 10) {
989 ctrl_ok = false;
990 break;
991 }
992 }
993 }
994 if (ctrl_ok) {
995 // move the Store
996 _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
997 _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
998 _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
999 // Disconnect the phi now. An empty phi can confuse other
1000 // optimizations in this pass of loop opts.
1001 _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
1002 n_loop->_body.yank(mem);
1003
1004 set_ctrl_and_loop(n, n->in(0));
1005
1006 return n;
1007 }
1008 }
1009 }
1010 return nullptr;
1011 }
1012
1013 // Try moving a store out of a loop, right after the loop
1014 void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
1015 if (n->is_Store() && n->in(0) != nullptr) {
1016 Node *n_ctrl = get_ctrl(n);
1017 IdealLoopTree *n_loop = get_loop(n_ctrl);
1018 // Store must be in a loop
1019 if (n_loop != _ltree_root && !n_loop->_irreducible) {
1020 Node* address = n->in(MemNode::Address);
1021 Node* value = n->in(MemNode::ValueIn);
1022 // address must be loop invariant
1023 if (!ctrl_is_member(n_loop, address)) {
1024 // Store must be last on this memory slice in the loop and
1025 // nothing in the loop must observe it
1026 Node* phi = nullptr;
1027 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1028 Node* u = n->fast_out(i);
1029 if (has_ctrl(u)) { // control use?
1030 if (!ctrl_is_member(n_loop, u)) {
1031 continue;
1032 }
1033 if (u->is_Phi() && u->in(0) == n_loop->_head) {
1034 assert(_igvn.type(u) == Type::MEMORY, "bad phi");
1035 // multiple phis on the same slice are possible
1036 if (phi != nullptr) {
1037 return;
1038 }
1039 phi = u;
1040 continue;
1041 }
1042 }
1043 return;
1044 }
1045 if (phi != nullptr) {
1046 // Nothing in the loop before the store (next iteration)
1047 // must observe the stored value
1048 bool mem_ok = true;
1049 {
1050 ResourceMark rm;
1051 Unique_Node_List wq;
1052 wq.push(phi);
1053 for (uint next = 0; next < wq.size() && mem_ok; ++next) {
1054 Node *m = wq.at(next);
1055 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
1056 Node* u = m->fast_out(i);
1057 if (u->is_Store() || u->is_Phi()) {
1058 if (u != n) {
1059 wq.push(u);
1060 mem_ok = (wq.size() <= 10);
1061 }
1062 } else {
1063 mem_ok = false;
1064 break;
1065 }
1066 }
1067 }
1068 }
1069 if (mem_ok) {
1070 // Move the store out of the loop if the LCA of all
1071 // users (except for the phi) is outside the loop.
1072 Node* hook = new Node(1);
1073 hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
1074 _igvn.rehash_node_delayed(phi);
1075 int count = phi->replace_edge(n, hook, &_igvn);
1076 assert(count > 0, "inconsistent phi");
1077
1078 // Compute latest point this store can go
1079 Node* lca = get_late_ctrl(n, get_ctrl(n));
1080 if (lca->is_OuterStripMinedLoop()) {
1081 lca = lca->in(LoopNode::EntryControl);
1082 }
1083 if (n_loop->is_member(get_loop(lca))) {
1084 // LCA is in the loop - bail out
1085 _igvn.replace_node(hook, n);
1086 return;
1087 }
1088 #ifdef ASSERT
1089 if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
1090 assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
1091 n_loop->_head->as_Loop()->verify_strip_mined(1);
1092 Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
1093 IdealLoopTree* outer_loop = get_loop(outer);
1094 assert(n_loop->_parent == outer_loop, "broken loop tree");
1095 assert(get_loop(lca) == outer_loop, "safepoint in outer loop consume all memory state");
1096 }
1097 #endif
1098 lca = place_outside_loop(lca, n_loop);
1099 assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
1100 assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1101
1102 // Move store out of the loop
1103 _igvn.replace_node(hook, n->in(MemNode::Memory));
1104 _igvn.replace_input_of(n, 0, lca);
1105 set_ctrl_and_loop(n, lca);
1106
1107 // Disconnect the phi now. An empty phi can confuse other
1108 // optimizations in this pass of loop opts..
1109 if (phi->in(LoopNode::LoopBackControl) == phi) {
1110 _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1111 n_loop->_body.yank(phi);
1112 }
1113 }
1114 }
1115 }
1116 }
1117 }
1118 }
1119
1120 //------------------------------split_if_with_blocks_pre-----------------------
1121 // Do the real work in a non-recursive function. Data nodes want to be
1122 // cloned in the pre-order so they can feed each other nicely.
1123 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1124 // Cloning these guys is unlikely to win
1125 int n_op = n->Opcode();
1126 if (n_op == Op_MergeMem) {
1127 return n;
1128 }
1129 if (n->is_Proj()) {
1130 return n;
1131 }
1132 // Do not clone-up CmpFXXX variations, as these are always
1133 // followed by a CmpI
1134 if (n->is_Cmp()) {
1135 return n;
1136 }
1137 // Attempt to use a conditional move instead of a phi/branch
1138 if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1139 Node *cmov = conditional_move( n );
1140 if (cmov) {
1141 return cmov;
1142 }
1143 }
1144 if (n->is_CFG() || n->is_LoadStore()) {
1145 return n;
1146 }
1147 if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1148 if (!C->major_progress()) { // If chance of no more loop opts...
1149 _igvn._worklist.push(n); // maybe we'll remove them
1150 }
1151 return n;
1152 }
1153
1154 if (n->is_Con()) {
1155 return n; // No cloning for Con nodes
1156 }
1157
1158 Node *n_ctrl = get_ctrl(n);
1159 if (!n_ctrl) {
1160 return n; // Dead node
1161 }
1162
1163 Node* res = try_move_store_before_loop(n, n_ctrl);
1164 if (res != nullptr) {
1165 return n;
1166 }
1167
1168 // Attempt to remix address expressions for loop invariants
1169 Node *m = remix_address_expressions( n );
1170 if( m ) return m;
1171
1172 if (n_op == Op_AddI) {
1173 Node *nn = convert_add_to_muladd( n );
1174 if ( nn ) return nn;
1175 }
1176
1177 if (n->is_ConstraintCast() && n->as_ConstraintCast()->dependency().narrows_type()) {
1178 Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
1179 // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
1180 // Node control inputs don't necessarily agree with loop control info (due to
1181 // transformations happened in between), thus additional dominance check is needed
1182 // to keep loop info valid.
1183 if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
1184 _igvn.replace_node(n, dom_cast);
1185 return dom_cast;
1186 }
1187 }
1188
1189 // Determine if the Node has inputs from some local Phi.
1190 // Returns the block to clone thru.
1191 Node *n_blk = has_local_phi_input( n );
1192 if( !n_blk ) return n;
1193
1194 // Do not clone the trip counter through on a CountedLoop
1195 // (messes up the canonical shape).
1196 if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
1197 (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
1198 return n;
1199 }
1200 // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
1201 if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
1202 n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
1203 return n;
1204 }
1205
1206 // Check for having no control input; not pinned. Allow
1207 // dominating control.
1208 if (n->in(0)) {
1209 Node *dom = idom(n_blk);
1210 if (dom_lca(n->in(0), dom) != n->in(0)) {
1211 return n;
1212 }
1213 }
1214 // Policy: when is it profitable. You must get more wins than
1215 // policy before it is considered profitable. Policy is usually 0,
1216 // so 1 win is considered profitable. Big merges will require big
1217 // cloning, so get a larger policy.
1218 int policy = n_blk->req() >> 2;
1219
1220 // If the loop is a candidate for range check elimination,
1221 // delay splitting through it's phi until a later loop optimization
1222 if (n_blk->is_BaseCountedLoop()) {
1223 IdealLoopTree *lp = get_loop(n_blk);
1224 if (lp && lp->_rce_candidate) {
1225 return n;
1226 }
1227 }
1228
1229 if (must_throttle_split_if()) return n;
1230
1231 // Split 'n' through the merge point if it is profitable, replacing it with a new phi.
1232 Node* phi = split_thru_phi(n, n_blk, policy);
1233 if (phi == nullptr) { return n; }
1234
1235 // Moved a load around the loop, 'en-registering' something.
1236 if (n_blk->is_Loop() && n->is_Load() &&
1237 !phi->in(LoopNode::LoopBackControl)->is_Load())
1238 C->set_major_progress();
1239
1240 return phi;
1241 }
1242
1243 static bool merge_point_too_heavy(Compile* C, Node* region) {
1244 // Bail out if the region and its phis have too many users.
1245 int weight = 0;
1246 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1247 weight += region->fast_out(i)->outcnt();
1248 }
1249 int nodes_left = C->max_node_limit() - C->live_nodes();
1250 if (weight * 8 > nodes_left) {
1251 if (PrintOpto) {
1252 tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
1253 }
1254 return true;
1255 } else {
1256 return false;
1257 }
1258 }
1259
1260 static bool merge_point_safe(Node* region) {
1261 // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1262 // having a PhiNode input. This sidesteps the dangerous case where the split
1263 // ConvI2LNode may become TOP if the input Value() does not
1264 // overlap the ConvI2L range, leaving a node which may not dominate its
1265 // uses.
1266 // A better fix for this problem can be found in the BugTraq entry, but
1267 // expediency for Mantis demands this hack.
1268 #ifdef _LP64
1269 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1270 Node* n = region->fast_out(i);
1271 if (n->is_Phi()) {
1272 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1273 Node* m = n->fast_out(j);
1274 if (m->Opcode() == Op_ConvI2L)
1275 return false;
1276 if (m->is_CastII()) {
1277 return false;
1278 }
1279 }
1280 }
1281 }
1282 #endif
1283 return true;
1284 }
1285
1286
1287 //------------------------------place_outside_loop---------------------------------
1288 // Place some computation outside of this loop on the path to the use passed as argument
1289 Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
1290 Node* head = loop->_head;
1291 assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
1292 if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
1293 loop = loop->_parent;
1294 assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
1295 }
1296
1297 // Pick control right outside the loop
1298 for (;;) {
1299 Node* dom = idom(useblock);
1300 if (loop->is_member(get_loop(dom))) {
1301 break;
1302 }
1303 useblock = dom;
1304 }
1305 assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
1306 return useblock;
1307 }
1308
1309
1310 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1311 if (!n->is_If() || n->is_BaseCountedLoopEnd()) {
1312 return false;
1313 }
1314 if (!n->in(0)->is_Region()) {
1315 return false;
1316 }
1317
1318 Node* region = n->in(0);
1319 Node* dom = idom(region);
1320 if (!dom->is_If() || !n->as_If()->same_condition(dom, &_igvn)) {
1321 return false;
1322 }
1323 IfNode* dom_if = dom->as_If();
1324 IfTrueNode* proj_true = dom_if->true_proj();
1325 IfFalseNode* proj_false = dom_if->false_proj();
1326
1327 for (uint i = 1; i < region->req(); i++) {
1328 if (is_dominator(proj_true, region->in(i))) {
1329 continue;
1330 }
1331 if (is_dominator(proj_false, region->in(i))) {
1332 continue;
1333 }
1334 return false;
1335 }
1336
1337 return true;
1338 }
1339
1340
1341 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1342 if (must_throttle_split_if()) {
1343 return false;
1344 }
1345
1346 // Do not do 'split-if' if irreducible loops are present.
1347 if (_has_irreducible_loops) {
1348 return false;
1349 }
1350
1351 if (merge_point_too_heavy(C, n_ctrl)) {
1352 return false;
1353 }
1354
1355 // Do not do 'split-if' if some paths are dead. First do dead code
1356 // elimination and then see if its still profitable.
1357 for (uint i = 1; i < n_ctrl->req(); i++) {
1358 if (n_ctrl->in(i) == C->top()) {
1359 return false;
1360 }
1361 }
1362
1363 // If trying to do a 'Split-If' at the loop head, it is only
1364 // profitable if the cmp folds up on BOTH paths. Otherwise we
1365 // risk peeling a loop forever.
1366
1367 // CNC - Disabled for now. Requires careful handling of loop
1368 // body selection for the cloned code. Also, make sure we check
1369 // for any input path not being in the same loop as n_ctrl. For
1370 // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1371 // because the alternative loop entry points won't be converted
1372 // into LoopNodes.
1373 IdealLoopTree *n_loop = get_loop(n_ctrl);
1374 for (uint j = 1; j < n_ctrl->req(); j++) {
1375 if (get_loop(n_ctrl->in(j)) != n_loop) {
1376 return false;
1377 }
1378 }
1379
1380 // Check for safety of the merge point.
1381 if (!merge_point_safe(n_ctrl)) {
1382 return false;
1383 }
1384
1385 return true;
1386 }
1387
1388 // Detect if the node is the inner strip-mined loop
1389 // Return: null if it's not the case, or the exit of outer strip-mined loop
1390 static Node* is_inner_of_stripmined_loop(const Node* out) {
1391 Node* out_le = nullptr;
1392
1393 if (out->is_CountedLoopEnd()) {
1394 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1395
1396 if (loop != nullptr && loop->is_strip_mined()) {
1397 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1398 }
1399 }
1400
1401 return out_le;
1402 }
1403
1404 //------------------------------split_if_with_blocks_post----------------------
1405 // Do the real work in a non-recursive function. CFG hackery wants to be
1406 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1407 // info.
1408 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1409
1410 // Cloning Cmp through Phi's involves the split-if transform.
1411 // FastLock is not used by an If
1412 if (n->is_Cmp() && !n->is_FastLock()) {
1413 Node *n_ctrl = get_ctrl(n);
1414 // Determine if the Node has inputs from some local Phi.
1415 // Returns the block to clone thru.
1416 Node *n_blk = has_local_phi_input(n);
1417 if (n_blk != n_ctrl) {
1418 return;
1419 }
1420
1421 if (!can_split_if(n_ctrl)) {
1422 return;
1423 }
1424
1425 if (n->outcnt() != 1) {
1426 return; // Multiple bool's from 1 compare?
1427 }
1428 Node *bol = n->unique_out();
1429 assert(bol->is_Bool(), "expect a bool here");
1430 if (bol->outcnt() != 1) {
1431 return;// Multiple branches from 1 compare?
1432 }
1433 Node *iff = bol->unique_out();
1434
1435 // Check some safety conditions
1436 if (iff->is_If()) { // Classic split-if?
1437 if (iff->in(0) != n_ctrl) {
1438 return; // Compare must be in same blk as if
1439 }
1440 } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1441 // Can't split CMove with different control.
1442 if (get_ctrl(iff) != n_ctrl) {
1443 return;
1444 }
1445 if (get_ctrl(iff->in(2)) == n_ctrl ||
1446 get_ctrl(iff->in(3)) == n_ctrl) {
1447 return; // Inputs not yet split-up
1448 }
1449 if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1450 return; // Loop-invar test gates loop-varying CMOVE
1451 }
1452 } else {
1453 return; // some other kind of node, such as an Allocate
1454 }
1455
1456 // When is split-if profitable? Every 'win' on means some control flow
1457 // goes dead, so it's almost always a win.
1458 int policy = 0;
1459 // Split compare 'n' through the merge point if it is profitable
1460 Node *phi = split_thru_phi( n, n_ctrl, policy);
1461 if (!phi) {
1462 return;
1463 }
1464
1465 // Now split the bool up thru the phi
1466 Node* bolphi = split_thru_phi(bol, n_ctrl, -1);
1467 guarantee(bolphi != nullptr, "null boolean phi node");
1468 assert(iff->in(1) == bolphi, "");
1469
1470 if (bolphi->Value(&_igvn)->singleton()) {
1471 return;
1472 }
1473
1474 // Conditional-move? Must split up now
1475 if (!iff->is_If()) {
1476 Node* cmovphi = split_thru_phi(iff, n_ctrl, -1);
1477 return;
1478 }
1479
1480 // Now split the IF
1481 C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff);
1482 if (TraceLoopOpts) {
1483 tty->print_cr("Split-If");
1484 }
1485 do_split_if(iff);
1486 C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff);
1487 return;
1488 }
1489
1490 // Two identical ifs back to back can be merged
1491 if (try_merge_identical_ifs(n)) {
1492 return;
1493 }
1494
1495 // Check for an IF ready to split; one that has its
1496 // condition codes input coming from a Phi at the block start.
1497 int n_op = n->Opcode();
1498
1499 // Check for an IF being dominated by another IF same test
1500 if (n_op == Op_If ||
1501 n_op == Op_RangeCheck) {
1502 Node *bol = n->in(1);
1503 uint max = bol->outcnt();
1504 // Check for same test used more than once?
1505 if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
1506 // Search up IDOMs to see if this IF is dominated.
1507 Node* cmp = bol->in(1);
1508 Node *cutoff = cmp->is_SubTypeCheck() ? dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);
1509
1510 // Now search up IDOMs till cutoff, looking for a dominating test
1511 Node *prevdom = n;
1512 Node *dom = idom(prevdom);
1513 while (dom != cutoff) {
1514 if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
1515 safe_for_if_replacement(dom)) {
1516 // It's invalid to move control dependent data nodes in the inner
1517 // strip-mined loop, because:
1518 // 1) break validation of LoopNode::verify_strip_mined()
1519 // 2) move code with side-effect in strip-mined loop
1520 // Move to the exit of outer strip-mined loop in that case.
1521 Node* out_le = is_inner_of_stripmined_loop(dom);
1522 if (out_le != nullptr) {
1523 prevdom = out_le;
1524 }
1525 // Replace the dominated test with an obvious true or false.
1526 // Place it on the IGVN worklist for later cleanup.
1527 C->set_major_progress();
1528 // Split if: pin array accesses that are control dependent on a range check and moved to a regular if,
1529 // to prevent an array load from floating above its range check. There are three cases:
1530 // 1. Move from RangeCheck "a" to RangeCheck "b": don't need to pin. If we ever remove b, then we pin
1531 // all its array accesses at that point.
1532 // 2. We move from RangeCheck "a" to regular if "b": need to pin. If we ever remove b, then its array
1533 // accesses would start to float, since we don't pin at that point.
1534 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1535 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1536 prevdom->in(0)->Opcode() != Op_RangeCheck;
1537 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1538 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1539 return;
1540 }
1541 prevdom = dom;
1542 dom = idom(prevdom);
1543 }
1544 }
1545 }
1546
1547 try_sink_out_of_loop(n);
1548 if (C->failing()) {
1549 return;
1550 }
1551
1552 try_move_store_after_loop(n);
1553 }
1554
1555 // Transform:
1556 //
1557 // if (some_condition) {
1558 // // body 1
1559 // } else {
1560 // // body 2
1561 // }
1562 // if (some_condition) {
1563 // // body 3
1564 // } else {
1565 // // body 4
1566 // }
1567 //
1568 // into:
1569 //
1570 //
1571 // if (some_condition) {
1572 // // body 1
1573 // // body 3
1574 // } else {
1575 // // body 2
1576 // // body 4
1577 // }
1578 bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
1579 if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1580 Node *n_ctrl = n->in(0);
1581 IfNode* dom_if = idom(n_ctrl)->as_If();
1582 if (n->in(1) != dom_if->in(1)) {
1583 assert(n->in(1)->in(1)->is_SubTypeCheck() &&
1584 (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
1585 dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
1586 _igvn.replace_input_of(n, 1, dom_if->in(1));
1587 }
1588 IfTrueNode* dom_proj_true = dom_if->true_proj();
1589 IfFalseNode* dom_proj_false = dom_if->false_proj();
1590
1591 // Now split the IF
1592 RegionNode* new_false_region;
1593 RegionNode* new_true_region;
1594 do_split_if(n, &new_false_region, &new_true_region);
1595 assert(new_false_region->req() == new_true_region->req(), "");
1596 #ifdef ASSERT
1597 for (uint i = 1; i < new_false_region->req(); ++i) {
1598 assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if");
1599 assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if");
1600 }
1601 #endif
1602 assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test");
1603
1604 // We now have:
1605 // if (some_condition) {
1606 // // body 1
1607 // if (some_condition) {
1608 // body3: // new_true_region
1609 // // body3
1610 // } else {
1611 // goto body4;
1612 // }
1613 // } else {
1614 // // body 2
1615 // if (some_condition) {
1616 // goto body3;
1617 // } else {
1618 // body4: // new_false_region
1619 // // body4;
1620 // }
1621 // }
1622 //
1623
1624 // clone pinned nodes thru the resulting regions
1625 push_pinned_nodes_thru_region(dom_if, new_true_region);
1626 push_pinned_nodes_thru_region(dom_if, new_false_region);
1627
1628 // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent
1629 // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an
1630 // unrelated control dependency.
1631 for (uint i = 1; i < new_false_region->req(); i++) {
1632 if (is_dominator(dom_proj_true, new_false_region->in(i))) {
1633 dominated_by(dom_proj_true, new_false_region->in(i)->in(0)->as_If());
1634 } else {
1635 assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if");
1636 dominated_by(dom_proj_false, new_false_region->in(i)->in(0)->as_If());
1637 }
1638 }
1639 return true;
1640 }
1641 return false;
1642 }
1643
1644 void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) {
1645 for (DUIterator i = region->outs(); region->has_out(i); i++) {
1646 Node* u = region->out(i);
1647 if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test() || !_igvn.no_dependent_zero_check(u)) {
1648 continue;
1649 }
1650 assert(u->in(0) == region, "not a control dependent node?");
1651 uint j = 1;
1652 for (; j < u->req(); ++j) {
1653 Node* in = u->in(j);
1654 if (!is_dominator(ctrl_or_self(in), dom_if)) {
1655 break;
1656 }
1657 }
1658 if (j == u->req()) {
1659 Node *phi = PhiNode::make_blank(region, u);
1660 for (uint k = 1; k < region->req(); ++k) {
1661 Node* clone = u->clone();
1662 clone->set_req(0, region->in(k));
1663 register_new_node(clone, region->in(k));
1664 phi->init_req(k, clone);
1665 }
1666 register_new_node(phi, region);
1667 _igvn.replace_node(u, phi);
1668 --i;
1669 }
1670 }
1671 }
1672
1673 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const {
1674 if (!dom->is_CountedLoopEnd()) {
1675 return true;
1676 }
1677 CountedLoopEndNode* le = dom->as_CountedLoopEnd();
1678 CountedLoopNode* cl = le->loopnode();
1679 if (cl == nullptr) {
1680 return true;
1681 }
1682 if (!cl->is_main_loop()) {
1683 return true;
1684 }
1685 if (cl->is_canonical_loop_entry() == nullptr) {
1686 return true;
1687 }
1688 // Further unrolling is possible so loop exit condition might change
1689 return false;
1690 }
1691
1692 // See if a shared loop-varying computation has no loop-varying uses.
1693 // Happens if something is only used for JVM state in uncommon trap exits,
1694 // like various versions of induction variable+offset. Clone the
1695 // computation per usage to allow it to sink out of the loop.
1696 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
1697 bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
1698 n->in(1)->bottom_type()->isa_rawptr() &&
1699 !n->bottom_type()->isa_rawptr();
1700
1701 if (has_ctrl(n) &&
1702 !n->is_Phi() &&
1703 !n->is_Bool() &&
1704 !n->is_Proj() &&
1705 !n->is_MergeMem() &&
1706 !n->is_CMove() &&
1707 !n->is_OpaqueNotNull() &&
1708 !n->is_OpaqueInitializedAssertionPredicate() &&
1709 !n->is_OpaqueTemplateAssertionPredicate() &&
1710 !is_raw_to_oop_cast && // don't extend live ranges of raw oops
1711 (KillPathsReachableByDeadTypeNode || !n->is_Type())
1712 ) {
1713 Node *n_ctrl = get_ctrl(n);
1714 IdealLoopTree *n_loop = get_loop(n_ctrl);
1715
1716 if (n->in(0) != nullptr) {
1717 IdealLoopTree* loop_ctrl = get_loop(n->in(0));
1718 if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) {
1719 // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example,
1720 // for Div nodes inside a loop (control input inside loop) without a use except for an UCT (outside the loop).
1721 // Rewire control of n to right outside of the loop, regardless if its input(s) are later sunk or not.
1722 Node* maybe_pinned_n = n;
1723 Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl);
1724 if (!would_sink_below_pre_loop_exit(loop_ctrl, outside_ctrl)) {
1725 if (n->depends_only_on_test()) {
1726 Node* pinned_clone = n->pin_array_access_node();
1727 if (pinned_clone != nullptr) {
1728 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a
1729 // range check for that access. If that condition is replaced by an identical dominating one, then an
1730 // unpinned load would risk floating above its range check.
1731 register_new_node(pinned_clone, n_ctrl);
1732 maybe_pinned_n = pinned_clone;
1733 _igvn.replace_node(n, pinned_clone);
1734 }
1735 }
1736 _igvn.replace_input_of(maybe_pinned_n, 0, outside_ctrl);
1737 }
1738 }
1739 }
1740 if (n_loop != _ltree_root && n->outcnt() > 1) {
1741 // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of
1742 // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us.
1743 Node* early_ctrl = compute_early_ctrl(n, n_ctrl);
1744 if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now
1745 ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops!
1746 if (n->is_Store() || n->is_LoadStore()) {
1747 assert(false, "no node with a side effect");
1748 C->record_failure("no node with a side effect");
1749 return;
1750 }
1751 Node* outer_loop_clone = nullptr;
1752 for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) {
1753 Node* u = n->last_out(j); // Clone private computation per use
1754 _igvn.rehash_node_delayed(u);
1755 Node* x = nullptr;
1756 if (n->depends_only_on_test()) {
1757 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a
1758 // range check for that access. If that condition is replaced by an identical dominating one, then an
1759 // unpinned load would risk floating above its range check.
1760 x = n->pin_array_access_node();
1761 }
1762 if (x == nullptr) {
1763 x = n->clone();
1764 }
1765 Node* x_ctrl = nullptr;
1766 if (u->is_Phi()) {
1767 // Replace all uses of normal nodes. Replace Phi uses
1768 // individually, so the separate Nodes can sink down
1769 // different paths.
1770 uint k = 1;
1771 while (u->in(k) != n) k++;
1772 u->set_req(k, x);
1773 // x goes next to Phi input path
1774 x_ctrl = u->in(0)->in(k);
1775 // Find control for 'x' next to use but not inside inner loops.
1776 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1777 --j;
1778 } else { // Normal use
1779 if (has_ctrl(u)) {
1780 x_ctrl = get_ctrl(u);
1781 } else {
1782 x_ctrl = u->in(0);
1783 }
1784 // Find control for 'x' next to use but not inside inner loops.
1785 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1786 // Replace all uses
1787 if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) {
1788 // If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary
1789 // anymore now that we're going to pin n as well
1790 _igvn.replace_node(u, x);
1791 --j;
1792 } else {
1793 int nb = u->replace_edge(n, x, &_igvn);
1794 j -= nb;
1795 }
1796 }
1797
1798 if (n->is_Load()) {
1799 // For loads, add a control edge to a CFG node outside of the loop
1800 // to force them to not combine and return back inside the loop
1801 // during GVN optimization (4641526).
1802 assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked");
1803
1804 IdealLoopTree* x_loop = get_loop(x_ctrl);
1805 Node* x_head = x_loop->_head;
1806 if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) {
1807 // Do not add duplicate LoadNodes to the outer strip mined loop
1808 if (outer_loop_clone != nullptr) {
1809 _igvn.replace_node(x, outer_loop_clone);
1810 continue;
1811 }
1812 outer_loop_clone = x;
1813 }
1814 x->set_req(0, x_ctrl);
1815 } else if (n->in(0) != nullptr){
1816 x->set_req(0, x_ctrl);
1817 }
1818 assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
1819 assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop");
1820 register_new_node(x, x_ctrl);
1821
1822 // Chain of AddP nodes: (AddP base (AddP base (AddP base )))
1823 // All AddP nodes must keep the same base after sinking so:
1824 // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk,
1825 // their bases remain the same.
1826 // (see 2- below)
1827 assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() ||
1828 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) ||
1829 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape");
1830 if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() &&
1831 !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) {
1832 assert(!x->is_Load(), "load should be pinned");
1833 // Use a cast node to pin clone out of loop
1834 Node* cast = nullptr;
1835 for (uint k = 0; k < x->req(); k++) {
1836 Node* in = x->in(k);
1837 if (in != nullptr && ctrl_is_member(n_loop, in)) {
1838 const Type* in_t = _igvn.type(in);
1839 cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
1840 ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr);
1841 }
1842 if (cast != nullptr) {
1843 Node* prev = _igvn.hash_find_insert(cast);
1844 if (prev != nullptr && get_ctrl(prev) == x_ctrl) {
1845 cast->destruct(&_igvn);
1846 cast = prev;
1847 } else {
1848 register_new_node(cast, x_ctrl);
1849 }
1850 x->replace_edge(in, cast);
1851 // Chain of AddP nodes:
1852 // 2- A CastPP of the base is only added now that all AddP nodes are sunk
1853 if (x->is_AddP() && k == AddPNode::Base) {
1854 update_addp_chain_base(x, n->in(AddPNode::Base), cast);
1855 }
1856 break;
1857 }
1858 }
1859 assert(cast != nullptr, "must have added a cast to pin the node");
1860 }
1861 }
1862 _igvn.remove_dead_node(n);
1863 }
1864 _dom_lca_tags_round = 0;
1865 }
1866 }
1867 }
1868
1869 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) {
1870 ResourceMark rm;
1871 Node_List wq;
1872 wq.push(x);
1873 while (wq.size() != 0) {
1874 Node* n = wq.pop();
1875 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1876 Node* u = n->fast_out(i);
1877 if (u->is_AddP() && u->in(AddPNode::Base) == old_base) {
1878 _igvn.replace_input_of(u, AddPNode::Base, new_base);
1879 wq.push(u);
1880 }
1881 }
1882 }
1883 }
1884
1885 // Compute the early control of a node by following its inputs until we reach
1886 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes.
1887 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) {
1888 Node* early_ctrl = nullptr;
1889 ResourceMark rm;
1890 Unique_Node_List wq;
1891 wq.push(n);
1892 for (uint i = 0; i < wq.size(); i++) {
1893 Node* m = wq.at(i);
1894 Node* c = nullptr;
1895 if (m->is_CFG()) {
1896 c = m;
1897 } else if (m->pinned()) {
1898 c = m->in(0);
1899 } else {
1900 for (uint j = 0; j < m->req(); j++) {
1901 Node* in = m->in(j);
1902 if (in != nullptr) {
1903 wq.push(in);
1904 }
1905 }
1906 }
1907 if (c != nullptr) {
1908 assert(is_dominator(c, n_ctrl), "control input must dominate current control");
1909 if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) {
1910 early_ctrl = c;
1911 }
1912 }
1913 }
1914 assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control");
1915 return early_ctrl;
1916 }
1917
1918 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) {
1919 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1920 Node* u = n->fast_out(i);
1921 if (u->is_Opaque1()) {
1922 return false; // Found loop limit, bugfix for 4677003
1923 }
1924 // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure calls to
1925 // get_late_ctrl_with_anti_dep() use their own tag
1926 _dom_lca_tags_round++;
1927 assert(_dom_lca_tags_round != 0, "shouldn't wrap around");
1928
1929 if (u->is_Phi()) {
1930 for (uint j = 1; j < u->req(); ++j) {
1931 if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) {
1932 return false;
1933 }
1934 }
1935 } else {
1936 Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0);
1937 if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) {
1938 return false;
1939 }
1940 }
1941 }
1942 return true;
1943 }
1944
1945 // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input
1946 // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit
1947 // test of the pre loop above the point in the graph where it's pinned. This results in a broken graph. One way to avoid
1948 // it would be to not eliminate the check in the main loop. Instead, we prevent sinking of the node here so better code
1949 // is generated for the main loop.
1950 bool PhaseIdealLoop::would_sink_below_pre_loop_exit(IdealLoopTree* n_loop, Node* ctrl) {
1951 if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop()) {
1952 CountedLoopNode* pre_loop = n_loop->_head->as_CountedLoop();
1953 if (is_dominator(pre_loop->loopexit(), ctrl)) {
1954 return true;
1955 }
1956 }
1957 return false;
1958 }
1959
1960 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) {
1961 if (n->is_Load()) {
1962 ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl);
1963 }
1964 IdealLoopTree *u_loop = get_loop(ctrl);
1965 if (u_loop == n_loop) {
1966 return false; // Found loop-varying use
1967 }
1968 if (n_loop->is_member(u_loop)) {
1969 return false; // Found use in inner loop
1970 }
1971 if (would_sink_below_pre_loop_exit(n_loop, ctrl)) {
1972 return false;
1973 }
1974 return true;
1975 }
1976
1977 //------------------------------split_if_with_blocks---------------------------
1978 // Check for aggressive application of 'split-if' optimization,
1979 // using basic block level info.
1980 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
1981 Node* root = C->root();
1982 visited.set(root->_idx); // first, mark root as visited
1983 // Do pre-visit work for root
1984 Node* n = split_if_with_blocks_pre(root);
1985 uint cnt = n->outcnt();
1986 uint i = 0;
1987
1988 while (true) {
1989 // Visit all children
1990 if (i < cnt) {
1991 Node* use = n->raw_out(i);
1992 ++i;
1993 if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
1994 // Now do pre-visit work for this use
1995 use = split_if_with_blocks_pre(use);
1996 nstack.push(n, i); // Save parent and next use's index.
1997 n = use; // Process all children of current use.
1998 cnt = use->outcnt();
1999 i = 0;
2000 }
2001 }
2002 else {
2003 // All of n's children have been processed, complete post-processing.
2004 if (cnt != 0 && !n->is_Con()) {
2005 assert(has_node(n), "no dead nodes");
2006 split_if_with_blocks_post(n);
2007 if (C->failing()) {
2008 return;
2009 }
2010 }
2011 if (must_throttle_split_if()) {
2012 nstack.clear();
2013 }
2014 if (nstack.is_empty()) {
2015 // Finished all nodes on stack.
2016 break;
2017 }
2018 // Get saved parent node and next use's index. Visit the rest of uses.
2019 n = nstack.node();
2020 cnt = n->outcnt();
2021 i = nstack.index();
2022 nstack.pop();
2023 }
2024 }
2025 }
2026
2027
2028 //=============================================================================
2029 //
2030 // C L O N E A L O O P B O D Y
2031 //
2032
2033 //------------------------------clone_iff--------------------------------------
2034 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2035 // "Nearly" because all Nodes have been cloned from the original in the loop,
2036 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
2037 // through the Phi recursively, and return a Bool.
2038 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
2039
2040 // Convert this Phi into a Phi merging Bools
2041 uint i;
2042 for (i = 1; i < phi->req(); i++) {
2043 Node* b = phi->in(i);
2044 if (b->is_Phi()) {
2045 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2046 } else {
2047 assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(),
2048 "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node");
2049 }
2050 }
2051 Node* n = phi->in(1);
2052 Node* sample_opaque = nullptr;
2053 Node *sample_bool = nullptr;
2054 if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) {
2055 sample_opaque = n;
2056 sample_bool = n->in(1);
2057 assert(sample_bool->is_Bool(), "wrong type");
2058 } else {
2059 sample_bool = n;
2060 }
2061 Node *sample_cmp = sample_bool->in(1);
2062
2063 // Make Phis to merge the Cmp's inputs.
2064 PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP);
2065 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2066 for (i = 1; i < phi->req(); i++) {
2067 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2068 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2069 phi1->set_req(i, n1);
2070 phi2->set_req(i, n2);
2071 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2072 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2073 }
2074 // See if these Phis have been made before.
2075 // Register with optimizer
2076 Node *hit1 = _igvn.hash_find_insert(phi1);
2077 if (hit1) { // Hit, toss just made Phi
2078 _igvn.remove_dead_node(phi1); // Remove new phi
2079 assert(hit1->is_Phi(), "" );
2080 phi1 = (PhiNode*)hit1; // Use existing phi
2081 } else { // Miss
2082 _igvn.register_new_node_with_optimizer(phi1);
2083 }
2084 Node *hit2 = _igvn.hash_find_insert(phi2);
2085 if (hit2) { // Hit, toss just made Phi
2086 _igvn.remove_dead_node(phi2); // Remove new phi
2087 assert(hit2->is_Phi(), "" );
2088 phi2 = (PhiNode*)hit2; // Use existing phi
2089 } else { // Miss
2090 _igvn.register_new_node_with_optimizer(phi2);
2091 }
2092 // Register Phis with loop/block info
2093 set_ctrl(phi1, phi->in(0));
2094 set_ctrl(phi2, phi->in(0));
2095 // Make a new Cmp
2096 Node *cmp = sample_cmp->clone();
2097 cmp->set_req(1, phi1);
2098 cmp->set_req(2, phi2);
2099 _igvn.register_new_node_with_optimizer(cmp);
2100 set_ctrl(cmp, phi->in(0));
2101
2102 // Make a new Bool
2103 Node *b = sample_bool->clone();
2104 b->set_req(1,cmp);
2105 _igvn.register_new_node_with_optimizer(b);
2106 set_ctrl(b, phi->in(0));
2107
2108 if (sample_opaque != nullptr) {
2109 Node* opaque = sample_opaque->clone();
2110 opaque->set_req(1, b);
2111 _igvn.register_new_node_with_optimizer(opaque);
2112 set_ctrl(opaque, phi->in(0));
2113 return opaque;
2114 }
2115
2116 assert(b->is_Bool(), "");
2117 return b;
2118 }
2119
2120 //------------------------------clone_bool-------------------------------------
2121 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2122 // "Nearly" because all Nodes have been cloned from the original in the loop,
2123 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
2124 // through the Phi recursively, and return a Bool.
2125 CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) {
2126 uint i;
2127 // Convert this Phi into a Phi merging Bools
2128 for( i = 1; i < phi->req(); i++ ) {
2129 Node *b = phi->in(i);
2130 if( b->is_Phi() ) {
2131 _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi()));
2132 } else {
2133 assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
2134 }
2135 }
2136
2137 Node *sample_cmp = phi->in(1);
2138
2139 // Make Phis to merge the Cmp's inputs.
2140 PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
2141 PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
2142 for( uint j = 1; j < phi->req(); j++ ) {
2143 Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
2144 Node *n1, *n2;
2145 if( cmp_top->is_Cmp() ) {
2146 n1 = cmp_top->in(1);
2147 n2 = cmp_top->in(2);
2148 } else {
2149 n1 = n2 = cmp_top;
2150 }
2151 phi1->set_req( j, n1 );
2152 phi2->set_req( j, n2 );
2153 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2154 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2155 }
2156
2157 // See if these Phis have been made before.
2158 // Register with optimizer
2159 Node *hit1 = _igvn.hash_find_insert(phi1);
2160 if( hit1 ) { // Hit, toss just made Phi
2161 _igvn.remove_dead_node(phi1); // Remove new phi
2162 assert( hit1->is_Phi(), "" );
2163 phi1 = (PhiNode*)hit1; // Use existing phi
2164 } else { // Miss
2165 _igvn.register_new_node_with_optimizer(phi1);
2166 }
2167 Node *hit2 = _igvn.hash_find_insert(phi2);
2168 if( hit2 ) { // Hit, toss just made Phi
2169 _igvn.remove_dead_node(phi2); // Remove new phi
2170 assert( hit2->is_Phi(), "" );
2171 phi2 = (PhiNode*)hit2; // Use existing phi
2172 } else { // Miss
2173 _igvn.register_new_node_with_optimizer(phi2);
2174 }
2175 // Register Phis with loop/block info
2176 set_ctrl(phi1, phi->in(0));
2177 set_ctrl(phi2, phi->in(0));
2178 // Make a new Cmp
2179 Node *cmp = sample_cmp->clone();
2180 cmp->set_req( 1, phi1 );
2181 cmp->set_req( 2, phi2 );
2182 _igvn.register_new_node_with_optimizer(cmp);
2183 set_ctrl(cmp, phi->in(0));
2184
2185 assert( cmp->is_Cmp(), "" );
2186 return (CmpNode*)cmp;
2187 }
2188
2189 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
2190 IdealLoopTree* loop, IdealLoopTree* outer_loop,
2191 Node_List*& split_if_set, Node_List*& split_bool_set,
2192 Node_List*& split_cex_set, Node_List& worklist,
2193 uint new_counter, CloneLoopMode mode) {
2194 Node* nnn = old_new[old->_idx];
2195 // Copy uses to a worklist, so I can munge the def-use info
2196 // with impunity.
2197 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2198 worklist.push(old->fast_out(j));
2199
2200 while( worklist.size() ) {
2201 Node *use = worklist.pop();
2202 if (!has_node(use)) continue; // Ignore dead nodes
2203 if (use->in(0) == C->top()) continue;
2204 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2205 // Check for data-use outside of loop - at least one of OLD or USE
2206 // must not be a CFG node.
2207 #ifdef ASSERT
2208 if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) {
2209 Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
2210 assert(mode != IgnoreStripMined, "incorrect cloning mode");
2211 assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
2212 }
2213 #endif
2214 if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
2215
2216 // If the Data use is an IF, that means we have an IF outside the
2217 // loop that is switching on a condition that is set inside the
2218 // loop. Happens if people set a loop-exit flag; then test the flag
2219 // in the loop to break the loop, then test is again outside the
2220 // loop to determine which way the loop exited.
2221 //
2222 // For several uses we need to make sure that there is no phi between,
2223 // the use and the Bool/Cmp. We therefore clone the Bool/Cmp down here
2224 // to avoid such a phi in between.
2225 // For example, it is unexpected that there is a Phi between an
2226 // AllocateArray node and its ValidLengthTest input that could cause
2227 // split if to break.
2228 assert(!use->is_OpaqueTemplateAssertionPredicate(),
2229 "should not clone a Template Assertion Predicate which should be removed once it's useless");
2230 if (use->is_If() || use->is_CMove() || use->is_OpaqueNotNull() || use->is_OpaqueInitializedAssertionPredicate() ||
2231 (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) {
2232 // Since this code is highly unlikely, we lazily build the worklist
2233 // of such Nodes to go split.
2234 if (!split_if_set) {
2235 split_if_set = new Node_List();
2236 }
2237 split_if_set->push(use);
2238 }
2239 if (use->is_Bool()) {
2240 if (!split_bool_set) {
2241 split_bool_set = new Node_List();
2242 }
2243 split_bool_set->push(use);
2244 }
2245 if (use->Opcode() == Op_CreateEx) {
2246 if (!split_cex_set) {
2247 split_cex_set = new Node_List();
2248 }
2249 split_cex_set->push(use);
2250 }
2251
2252
2253 // Get "block" use is in
2254 uint idx = 0;
2255 while( use->in(idx) != old ) idx++;
2256 Node *prev = use->is_CFG() ? use : get_ctrl(use);
2257 assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
2258 Node* cfg = (prev->_idx >= new_counter && prev->is_Region())
2259 ? prev->in(2)
2260 : idom(prev);
2261 if( use->is_Phi() ) // Phi use is in prior block
2262 cfg = prev->in(idx); // NOT in block of Phi itself
2263 if (cfg->is_top()) { // Use is dead?
2264 _igvn.replace_input_of(use, idx, C->top());
2265 continue;
2266 }
2267
2268 // If use is referenced through control edge... (idx == 0)
2269 if (mode == IgnoreStripMined && idx == 0) {
2270 LoopNode *head = loop->_head->as_Loop();
2271 if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
2272 // That node is outside the inner loop, leave it outside the
2273 // outer loop as well to not confuse verification code.
2274 assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
2275 _igvn.replace_input_of(use, 0, head->outer_loop_exit());
2276 continue;
2277 }
2278 }
2279
2280 while(!outer_loop->is_member(get_loop(cfg))) {
2281 prev = cfg;
2282 cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg);
2283 }
2284 // If the use occurs after merging several exits from the loop, then
2285 // old value must have dominated all those exits. Since the same old
2286 // value was used on all those exits we did not need a Phi at this
2287 // merge point. NOW we do need a Phi here. Each loop exit value
2288 // is now merged with the peeled body exit; each exit gets its own
2289 // private Phi and those Phis need to be merged here.
2290 Node *phi;
2291 if( prev->is_Region() ) {
2292 if( idx == 0 ) { // Updating control edge?
2293 phi = prev; // Just use existing control
2294 } else { // Else need a new Phi
2295 phi = PhiNode::make( prev, old );
2296 // Now recursively fix up the new uses of old!
2297 for( uint i = 1; i < prev->req(); i++ ) {
2298 worklist.push(phi); // Onto worklist once for each 'old' input
2299 }
2300 }
2301 } else {
2302 // Get new RegionNode merging old and new loop exits
2303 prev = old_new[prev->_idx];
2304 assert( prev, "just made this in step 7" );
2305 if( idx == 0) { // Updating control edge?
2306 phi = prev; // Just use existing control
2307 } else { // Else need a new Phi
2308 // Make a new Phi merging data values properly
2309 phi = PhiNode::make( prev, old );
2310 phi->set_req( 1, nnn );
2311 }
2312 }
2313 // If inserting a new Phi, check for prior hits
2314 if( idx != 0 ) {
2315 Node *hit = _igvn.hash_find_insert(phi);
2316 if( hit == nullptr ) {
2317 _igvn.register_new_node_with_optimizer(phi); // Register new phi
2318 } else { // or
2319 // Remove the new phi from the graph and use the hit
2320 _igvn.remove_dead_node(phi);
2321 phi = hit; // Use existing phi
2322 }
2323 set_ctrl(phi, prev);
2324 }
2325 // Make 'use' use the Phi instead of the old loop body exit value
2326 assert(use->in(idx) == old, "old is still input of use");
2327 // We notify all uses of old, including use, and the indirect uses,
2328 // that may now be optimized because we have replaced old with phi.
2329 _igvn.add_users_to_worklist(old);
2330 if (idx == 0 &&
2331 use->depends_only_on_test()) {
2332 Node* pinned_clone = use->pin_array_access_node();
2333 if (pinned_clone != nullptr) {
2334 // Pin array access nodes: control is updated here to a region. If, after some transformations, only one path
2335 // into the region is left, an array load could become dependent on a condition that's not a range check for
2336 // that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
2337 // floating above its range check.
2338 pinned_clone->set_req(0, phi);
2339 register_new_node_with_ctrl_of(pinned_clone, use);
2340 _igvn.replace_node(use, pinned_clone);
2341 continue;
2342 }
2343 }
2344 _igvn.replace_input_of(use, idx, phi);
2345 if( use->_idx >= new_counter ) { // If updating new phis
2346 // Not needed for correctness, but prevents a weak assert
2347 // in AddPNode from tripping (when we end up with different
2348 // base & derived Phis that will become the same after
2349 // IGVN does CSE).
2350 Node *hit = _igvn.hash_find_insert(use);
2351 if( hit ) // Go ahead and re-hash for hits.
2352 _igvn.replace_node( use, hit );
2353 }
2354 }
2355 }
2356 }
2357
2358 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
2359 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
2360 bool check_old_new) {
2361 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2362 Node* u = n->fast_out(j);
2363 assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned");
2364 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) {
2365 assert(!phase->ctrl_is_member(loop, u) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
2366 if (!phase->ctrl_is_member(loop, u)) {
2367 if (phase->ctrl_is_member(outer_loop, u)) {
2368 wq.push(u);
2369 } else {
2370 // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of
2371 // the outer loop too
2372 Node* u_c = u->in(0);
2373 if (u_c != nullptr) {
2374 IdealLoopTree* u_c_loop = phase->get_loop(u_c);
2375 if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) {
2376 wq.push(u);
2377 }
2378 }
2379 }
2380 }
2381 }
2382 }
2383 }
2384
2385 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
2386 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
2387 Node_List& extra_data_nodes) {
2388 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2389 CountedLoopNode* cl = head->as_CountedLoop();
2390 Node* l = cl->outer_loop();
2391 Node* tail = cl->outer_loop_tail();
2392 IfNode* le = cl->outer_loop_end();
2393 Node* sfpt = cl->outer_safepoint();
2394 CountedLoopEndNode* cle = cl->loopexit();
2395 CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
2396 CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
2397 IfFalseNode* cle_out = cle->false_proj();
2398
2399 Node* new_sfpt = nullptr;
2400 Node* new_cle_out = cle_out->clone();
2401 old_new.map(cle_out->_idx, new_cle_out);
2402 if (mode == CloneIncludesStripMined) {
2403 // clone outer loop body
2404 Node* new_l = l->clone();
2405 Node* new_tail = tail->clone();
2406 IfNode* new_le = le->clone()->as_If();
2407 new_sfpt = sfpt->clone();
2408
2409 set_loop(new_l, outer_loop->_parent);
2410 set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
2411 set_loop(new_cle_out, outer_loop->_parent);
2412 set_idom(new_cle_out, new_cle, dd);
2413 set_loop(new_sfpt, outer_loop->_parent);
2414 set_idom(new_sfpt, new_cle_out, dd);
2415 set_loop(new_le, outer_loop->_parent);
2416 set_idom(new_le, new_sfpt, dd);
2417 set_loop(new_tail, outer_loop->_parent);
2418 set_idom(new_tail, new_le, dd);
2419 set_idom(new_cl, new_l, dd);
2420
2421 old_new.map(l->_idx, new_l);
2422 old_new.map(tail->_idx, new_tail);
2423 old_new.map(le->_idx, new_le);
2424 old_new.map(sfpt->_idx, new_sfpt);
2425
2426 new_l->set_req(LoopNode::LoopBackControl, new_tail);
2427 new_l->set_req(0, new_l);
2428 new_tail->set_req(0, new_le);
2429 new_le->set_req(0, new_sfpt);
2430 new_sfpt->set_req(0, new_cle_out);
2431 new_cle_out->set_req(0, new_cle);
2432 new_cl->set_req(LoopNode::EntryControl, new_l);
2433
2434 _igvn.register_new_node_with_optimizer(new_l);
2435 _igvn.register_new_node_with_optimizer(new_tail);
2436 _igvn.register_new_node_with_optimizer(new_le);
2437 } else {
2438 Node *newhead = old_new[loop->_head->_idx];
2439 newhead->as_Loop()->clear_strip_mined();
2440 _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
2441 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2442 }
2443 // Look at data node that were assigned a control in the outer
2444 // loop: they are kept in the outer loop by the safepoint so start
2445 // from the safepoint node's inputs.
2446 IdealLoopTree* outer_loop = get_loop(l);
2447 Node_Stack stack(2);
2448 stack.push(sfpt, 1);
2449 uint new_counter = C->unique();
2450 while (stack.size() > 0) {
2451 Node* n = stack.node();
2452 uint i = stack.index();
2453 while (i < n->req() &&
2454 (n->in(i) == nullptr ||
2455 !has_ctrl(n->in(i)) ||
2456 get_loop(get_ctrl(n->in(i))) != outer_loop ||
2457 (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
2458 i++;
2459 }
2460 if (i < n->req()) {
2461 stack.set_index(i+1);
2462 stack.push(n->in(i), 0);
2463 } else {
2464 assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
2465 Node* m = n == sfpt ? new_sfpt : n->clone();
2466 if (m != nullptr) {
2467 for (uint i = 0; i < n->req(); i++) {
2468 if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) {
2469 m->set_req(i, old_new[m->in(i)->_idx]);
2470 }
2471 }
2472 } else {
2473 assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
2474 }
2475 if (n != sfpt) {
2476 extra_data_nodes.push(n);
2477 _igvn.register_new_node_with_optimizer(m);
2478 assert(get_ctrl(n) == cle_out, "what other control?");
2479 set_ctrl(m, new_cle_out);
2480 old_new.map(n->_idx, m);
2481 }
2482 stack.pop();
2483 }
2484 }
2485 if (mode == CloneIncludesStripMined) {
2486 _igvn.register_new_node_with_optimizer(new_sfpt);
2487 _igvn.register_new_node_with_optimizer(new_cle_out);
2488 }
2489 // Some other transformation may have pessimistically assigned some
2490 // data nodes to the outer loop. Set their control so they are out
2491 // of the outer loop.
2492 ResourceMark rm;
2493 Unique_Node_List wq;
2494 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2495 Node* old = extra_data_nodes.at(i);
2496 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2497 }
2498
2499 for (uint i = 0; i < loop->_body.size(); i++) {
2500 Node* old = loop->_body.at(i);
2501 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2502 }
2503
2504 Node* inner_out = sfpt->in(0);
2505 if (inner_out->outcnt() > 1) {
2506 collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true);
2507 }
2508
2509 Node* new_ctrl = cl->outer_loop_exit();
2510 assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
2511 for (uint i = 0; i < wq.size(); i++) {
2512 Node* n = wq.at(i);
2513 set_ctrl(n, new_ctrl);
2514 if (n->in(0) != nullptr) {
2515 _igvn.replace_input_of(n, 0, new_ctrl);
2516 }
2517 collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false);
2518 }
2519 } else {
2520 Node *newhead = old_new[loop->_head->_idx];
2521 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2522 }
2523 }
2524
2525 //------------------------------clone_loop-------------------------------------
2526 //
2527 // C L O N E A L O O P B O D Y
2528 //
2529 // This is the basic building block of the loop optimizations. It clones an
2530 // entire loop body. It makes an old_new loop body mapping; with this mapping
2531 // you can find the new-loop equivalent to an old-loop node. All new-loop
2532 // nodes are exactly equal to their old-loop counterparts, all edges are the
2533 // same. All exits from the old-loop now have a RegionNode that merges the
2534 // equivalent new-loop path. This is true even for the normal "loop-exit"
2535 // condition. All uses of loop-invariant old-loop values now come from (one
2536 // or more) Phis that merge their new-loop equivalents.
2537 //
2538 // This operation leaves the graph in an illegal state: there are two valid
2539 // control edges coming from the loop pre-header to both loop bodies. I'll
2540 // definitely have to hack the graph after running this transform.
2541 //
2542 // From this building block I will further edit edges to perform loop peeling
2543 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2544 //
2545 // Parameter side_by_size_idom:
2546 // When side_by_size_idom is null, the dominator tree is constructed for
2547 // the clone loop to dominate the original. Used in construction of
2548 // pre-main-post loop sequence.
2549 // When nonnull, the clone and original are side-by-side, both are
2550 // dominated by the side_by_side_idom node. Used in construction of
2551 // unswitched loops.
2552 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2553 CloneLoopMode mode, Node* side_by_side_idom) {
2554
2555 LoopNode* head = loop->_head->as_Loop();
2556 head->verify_strip_mined(1);
2557
2558 if (C->do_vector_loop() && PrintOpto) {
2559 const char* mname = C->method()->name()->as_quoted_ascii();
2560 if (mname != nullptr) {
2561 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2562 }
2563 }
2564
2565 CloneMap& cm = C->clone_map();
2566 if (C->do_vector_loop()) {
2567 cm.set_clone_idx(cm.max_gen()+1);
2568 #ifndef PRODUCT
2569 if (PrintOpto) {
2570 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2571 loop->dump_head();
2572 }
2573 #endif
2574 }
2575
2576 // Step 1: Clone the loop body. Make the old->new mapping.
2577 clone_loop_body(loop->_body, old_new, &cm);
2578
2579 IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2580
2581 // Step 2: Fix the edges in the new body. If the old input is outside the
2582 // loop use it. If the old input is INside the loop, use the corresponding
2583 // new node instead.
2584 fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false);
2585
2586 Node_List extra_data_nodes; // data nodes in the outer strip mined loop
2587 clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2588
2589 // Step 3: Now fix control uses. Loop varying control uses have already
2590 // been fixed up (as part of all input edges in Step 2). Loop invariant
2591 // control uses must be either an IfFalse or an IfTrue. Make a merge
2592 // point to merge the old and new IfFalse/IfTrue nodes; make the use
2593 // refer to this.
2594 Node_List worklist;
2595 uint new_counter = C->unique();
2596 fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist);
2597
2598 // Step 4: If loop-invariant use is not control, it must be dominated by a
2599 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region
2600 // there if needed. Make a Phi there merging old and new used values.
2601 Node_List *split_if_set = nullptr;
2602 Node_List *split_bool_set = nullptr;
2603 Node_List *split_cex_set = nullptr;
2604 fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);
2605
2606 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2607 Node* old = extra_data_nodes.at(i);
2608 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2609 split_bool_set, split_cex_set, worklist, new_counter,
2610 mode);
2611 }
2612
2613 // Check for IFs that need splitting/cloning. Happens if an IF outside of
2614 // the loop uses a condition set in the loop. The original IF probably
2615 // takes control from one or more OLD Regions (which in turn get from NEW
2616 // Regions). In any case, there will be a set of Phis for each merge point
2617 // from the IF up to where the original BOOL def exists the loop.
2618 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
2619
2620 }
2621
2622 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) {
2623 if (split_if_set) {
2624 while (split_if_set->size()) {
2625 Node *iff = split_if_set->pop();
2626 uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1;
2627 if (iff->in(input)->is_Phi()) {
2628 Node *b = clone_iff(iff->in(input)->as_Phi());
2629 _igvn.replace_input_of(iff, input, b);
2630 }
2631 }
2632 }
2633 if (split_bool_set) {
2634 while (split_bool_set->size()) {
2635 Node *b = split_bool_set->pop();
2636 Node *phi = b->in(1);
2637 assert(phi->is_Phi(), "");
2638 CmpNode *cmp = clone_bool((PhiNode*) phi);
2639 _igvn.replace_input_of(b, 1, cmp);
2640 }
2641 }
2642 if (split_cex_set) {
2643 while (split_cex_set->size()) {
2644 Node *b = split_cex_set->pop();
2645 assert(b->in(0)->is_Region(), "");
2646 assert(b->in(1)->is_Phi(), "");
2647 assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2648 split_up(b, b->in(0), nullptr);
2649 }
2650 }
2651 }
2652
2653 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2654 uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set,
2655 Node_List*& split_bool_set, Node_List*& split_cex_set) {
2656 for(uint i = 0; i < body.size(); i++ ) {
2657 Node* old = body.at(i);
2658 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2659 split_bool_set, split_cex_set, worklist, new_counter,
2660 mode);
2661 }
2662 }
2663
2664 void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
2665 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) {
2666 LoopNode* head = loop->_head->as_Loop();
2667 for(uint i = 0; i < body.size(); i++ ) {
2668 Node* old = body.at(i);
2669 if( !old->is_CFG() ) continue;
2670
2671 // Copy uses to a worklist, so I can munge the def-use info
2672 // with impunity.
2673 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) {
2674 worklist.push(old->fast_out(j));
2675 }
2676
2677 while (worklist.size()) { // Visit all uses
2678 Node *use = worklist.pop();
2679 if (!has_node(use)) continue; // Ignore dead nodes
2680 IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use );
2681 if (!loop->is_member(use_loop) && use->is_CFG()) {
2682 // Both OLD and USE are CFG nodes here.
2683 assert(use->is_Proj(), "" );
2684 Node* nnn = old_new[old->_idx];
2685
2686 Node* newuse = nullptr;
2687 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2688 CountedLoopNode* cl = head->as_CountedLoop();
2689 CountedLoopEndNode* cle = cl->loopexit();
2690 // is use the projection that exits the loop from the CountedLoopEndNode?
2691 if (use->in(0) == cle) {
2692 IfFalseNode* cle_out = use->as_IfFalse();
2693 IfNode* le = cl->outer_loop_end();
2694 use = le->false_proj();
2695 use_loop = get_loop(use);
2696 if (mode == CloneIncludesStripMined) {
2697 nnn = old_new[le->_idx];
2698 } else {
2699 newuse = old_new[cle_out->_idx];
2700 }
2701 }
2702 }
2703 if (newuse == nullptr) {
2704 newuse = use->clone();
2705 }
2706
2707 // Clone the loop exit control projection
2708 if (C->do_vector_loop() && cm != nullptr) {
2709 cm->verify_insert_and_clone(use, newuse, cm->clone_idx());
2710 }
2711 newuse->set_req(0,nnn);
2712 _igvn.register_new_node_with_optimizer(newuse);
2713 set_loop(newuse, use_loop);
2714 set_idom(newuse, nnn, dom_depth(nnn) + 1 );
2715
2716 // We need a Region to merge the exit from the peeled body and the
2717 // exit from the old loop body.
2718 RegionNode *r = new RegionNode(3);
2719 uint dd_r = MIN2(dom_depth(newuse), dom_depth(use));
2720 assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" );
2721
2722 // The original user of 'use' uses 'r' instead.
2723 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
2724 Node* useuse = use->last_out(l);
2725 _igvn.rehash_node_delayed(useuse);
2726 uint uses_found = 0;
2727 if (useuse->in(0) == use) {
2728 useuse->set_req(0, r);
2729 uses_found++;
2730 if (useuse->is_CFG()) {
2731 // This is not a dom_depth > dd_r because when new
2732 // control flow is constructed by a loop opt, a node and
2733 // its dominator can end up at the same dom_depth
2734 assert(dom_depth(useuse) >= dd_r, "");
2735 set_idom(useuse, r, dom_depth(useuse));
2736 }
2737 }
2738 for (uint k = 1; k < useuse->req(); k++) {
2739 if( useuse->in(k) == use ) {
2740 useuse->set_req(k, r);
2741 uses_found++;
2742 if (useuse->is_Loop() && k == LoopNode::EntryControl) {
2743 // This is not a dom_depth > dd_r because when new
2744 // control flow is constructed by a loop opt, a node
2745 // and its dominator can end up at the same dom_depth
2746 assert(dom_depth(useuse) >= dd_r , "");
2747 set_idom(useuse, r, dom_depth(useuse));
2748 }
2749 }
2750 }
2751 l -= uses_found; // we deleted 1 or more copies of this edge
2752 }
2753
2754 assert(use->is_Proj(), "loop exit should be projection");
2755 // replace_node_and_forward_ctrl() below moves all nodes that are:
2756 // - control dependent on the loop exit or
2757 // - have control set to the loop exit
2758 // below the post-loop merge point.
2759 // replace_node_and_forward_ctrl() takes a dead control as first input.
2760 // To make it possible to use it, the loop exit projection is cloned and becomes the
2761 // new exit projection. The initial one becomes dead and is "replaced" by the region.
2762 Node* use_clone = use->clone();
2763 register_control(use_clone, use_loop, idom(use), dom_depth(use));
2764 // Now finish up 'r'
2765 r->set_req(1, newuse);
2766 r->set_req(2, use_clone);
2767 _igvn.register_new_node_with_optimizer(r);
2768 set_loop(r, use_loop);
2769 set_idom(r, (side_by_side_idom == nullptr) ? newuse->in(0) : side_by_side_idom, dd_r);
2770 replace_node_and_forward_ctrl(use, r);
2771 // Map the (cloned) old use to the new merge point
2772 old_new.map(use_clone->_idx, r);
2773 } // End of if a loop-exit test
2774 }
2775 }
2776 }
2777
2778 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
2779 IdealLoopTree* parent, bool partial) {
2780 for(uint i = 0; i < body.size(); i++ ) {
2781 Node *old = body.at(i);
2782 Node *nnn = old_new[old->_idx];
2783 // Fix CFG/Loop controlling the new node
2784 if (has_ctrl(old)) {
2785 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2786 } else {
2787 set_loop(nnn, parent);
2788 if (old->outcnt() > 0) {
2789 Node* dom = idom(old);
2790 if (old_new[dom->_idx] != nullptr) {
2791 dom = old_new[dom->_idx];
2792 set_idom(nnn, dom, dd );
2793 }
2794 }
2795 }
2796 // Correct edges to the new node
2797 for (uint j = 0; j < nnn->req(); j++) {
2798 Node *n = nnn->in(j);
2799 if (n != nullptr) {
2800 IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n);
2801 if (loop->is_member(old_in_loop)) {
2802 if (old_new[n->_idx] != nullptr) {
2803 nnn->set_req(j, old_new[n->_idx]);
2804 } else {
2805 assert(!body.contains(n), "");
2806 assert(partial, "node not cloned");
2807 }
2808 }
2809 }
2810 }
2811 _igvn.hash_find_insert(nnn);
2812 }
2813 }
2814
2815 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) {
2816 for (uint i = 0; i < body.size(); i++) {
2817 Node* old = body.at(i);
2818 Node* nnn = old->clone();
2819 old_new.map(old->_idx, nnn);
2820 if (C->do_vector_loop() && cm != nullptr) {
2821 cm->verify_insert_and_clone(old, nnn, cm->clone_idx());
2822 }
2823 _igvn.register_new_node_with_optimizer(nnn);
2824 }
2825 }
2826
2827
2828 //---------------------- stride_of_possible_iv -------------------------------------
2829 // Looks for an iff/bool/comp with one operand of the compare
2830 // being a cycle involving an add and a phi,
2831 // with an optional truncation (left-shift followed by a right-shift)
2832 // of the add. Returns zero if not an iv.
2833 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
2834 Node* trunc1 = nullptr;
2835 Node* trunc2 = nullptr;
2836 const TypeInteger* ttype = nullptr;
2837 if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) {
2838 return 0;
2839 }
2840 BoolNode* bl = iff->in(1)->as_Bool();
2841 Node* cmp = bl->in(1);
2842 if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
2843 return 0;
2844 }
2845 // Must have an invariant operand
2846 if (ctrl_is_member(get_loop(iff), cmp->in(2))) {
2847 return 0;
2848 }
2849 Node* add2 = nullptr;
2850 Node* cmp1 = cmp->in(1);
2851 if (cmp1->is_Phi()) {
2852 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
2853 Node* phi = cmp1;
2854 for (uint i = 1; i < phi->req(); i++) {
2855 Node* in = phi->in(i);
2856 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
2857 &trunc1, &trunc2, &ttype, T_INT);
2858 if (add && add->in(1) == phi) {
2859 add2 = add->in(2);
2860 break;
2861 }
2862 }
2863 } else {
2864 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
2865 Node* addtrunc = cmp1;
2866 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
2867 &trunc1, &trunc2, &ttype, T_INT);
2868 if (add && add->in(1)->is_Phi()) {
2869 Node* phi = add->in(1);
2870 for (uint i = 1; i < phi->req(); i++) {
2871 if (phi->in(i) == addtrunc) {
2872 add2 = add->in(2);
2873 break;
2874 }
2875 }
2876 }
2877 }
2878 if (add2 != nullptr) {
2879 const TypeInt* add2t = _igvn.type(add2)->is_int();
2880 if (add2t->is_con()) {
2881 return add2t->get_con();
2882 }
2883 }
2884 return 0;
2885 }
2886
2887
2888 //---------------------- stay_in_loop -------------------------------------
2889 // Return the (unique) control output node that's in the loop (if it exists.)
2890 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
2891 Node* unique = nullptr;
2892 if (!n) return nullptr;
2893 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2894 Node* use = n->fast_out(i);
2895 if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
2896 if (unique != nullptr) {
2897 return nullptr;
2898 }
2899 unique = use;
2900 }
2901 }
2902 return unique;
2903 }
2904
2905 //------------------------------ register_node -------------------------------------
2906 // Utility to register node "n" with PhaseIdealLoop
2907 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) {
2908 _igvn.register_new_node_with_optimizer(n);
2909 loop->_body.push(n);
2910 if (n->is_CFG()) {
2911 set_loop(n, loop);
2912 set_idom(n, pred, ddepth);
2913 } else {
2914 set_ctrl(n, pred);
2915 }
2916 }
2917
2918 //------------------------------ proj_clone -------------------------------------
2919 // Utility to create an if-projection
2920 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
2921 ProjNode* c = p->clone()->as_Proj();
2922 c->set_req(0, iff);
2923 return c;
2924 }
2925
2926 //------------------------------ short_circuit_if -------------------------------------
2927 // Force the iff control output to be the live_proj
2928 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
2929 guarantee(live_proj != nullptr, "null projection");
2930 int proj_con = live_proj->_con;
2931 assert(proj_con == 0 || proj_con == 1, "false or true projection");
2932 Node* con = intcon(proj_con);
2933 if (iff) {
2934 iff->set_req(1, con);
2935 }
2936 return con;
2937 }
2938
2939 //------------------------------ insert_if_before_proj -------------------------------------
2940 // Insert a new if before an if projection (* - new node)
2941 //
2942 // before
2943 // if(test)
2944 // / \
2945 // v v
2946 // other-proj proj (arg)
2947 //
2948 // after
2949 // if(test)
2950 // / \
2951 // / v
2952 // | * proj-clone
2953 // v |
2954 // other-proj v
2955 // * new_if(relop(cmp[IU](left,right)))
2956 // / \
2957 // v v
2958 // * new-proj proj
2959 // (returned)
2960 //
2961 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
2962 IfNode* iff = proj->in(0)->as_If();
2963 IdealLoopTree *loop = get_loop(proj);
2964 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
2965 uint ddepth = dom_depth(proj);
2966
2967 _igvn.rehash_node_delayed(iff);
2968 _igvn.rehash_node_delayed(proj);
2969
2970 proj->set_req(0, nullptr); // temporary disconnect
2971 ProjNode* proj2 = proj_clone(proj, iff);
2972 register_node(proj2, loop, iff, ddepth);
2973
2974 Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
2975 register_node(cmp, loop, proj2, ddepth);
2976
2977 BoolNode* bol = new BoolNode(cmp, relop);
2978 register_node(bol, loop, proj2, ddepth);
2979
2980 int opcode = iff->Opcode();
2981 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
2982 IfNode* new_if = IfNode::make_with_same_profile(iff, proj2, bol);
2983 register_node(new_if, loop, proj2, ddepth);
2984
2985 proj->set_req(0, new_if); // reattach
2986 set_idom(proj, new_if, ddepth);
2987
2988 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
2989 guarantee(new_exit != nullptr, "null exit node");
2990 register_node(new_exit, get_loop(other_proj), new_if, ddepth);
2991
2992 return new_exit;
2993 }
2994
2995 //------------------------------ insert_region_before_proj -------------------------------------
2996 // Insert a region before an if projection (* - new node)
2997 //
2998 // before
2999 // if(test)
3000 // / |
3001 // v |
3002 // proj v
3003 // other-proj
3004 //
3005 // after
3006 // if(test)
3007 // / |
3008 // v |
3009 // * proj-clone v
3010 // | other-proj
3011 // v
3012 // * new-region
3013 // |
3014 // v
3015 // * dum_if
3016 // / \
3017 // v \
3018 // * dum-proj v
3019 // proj
3020 //
3021 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
3022 IfNode* iff = proj->in(0)->as_If();
3023 IdealLoopTree *loop = get_loop(proj);
3024 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3025 uint ddepth = dom_depth(proj);
3026
3027 _igvn.rehash_node_delayed(iff);
3028 _igvn.rehash_node_delayed(proj);
3029
3030 proj->set_req(0, nullptr); // temporary disconnect
3031 ProjNode* proj2 = proj_clone(proj, iff);
3032 register_node(proj2, loop, iff, ddepth);
3033
3034 RegionNode* reg = new RegionNode(2);
3035 reg->set_req(1, proj2);
3036 register_node(reg, loop, iff, ddepth);
3037
3038 IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt);
3039 register_node(dum_if, loop, reg, ddepth);
3040
3041 proj->set_req(0, dum_if); // reattach
3042 set_idom(proj, dum_if, ddepth);
3043
3044 ProjNode* dum_proj = proj_clone(other_proj, dum_if);
3045 register_node(dum_proj, loop, dum_if, ddepth);
3046
3047 return reg;
3048 }
3049
3050 // Idea
3051 // ----
3052 // Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops
3053 // require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel
3054 // with. Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as new
3055 // loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we've used the unsigned
3056 // test alone instead:
3057 //
3058 // Before Partial Peeling:
3059 // Loop:
3060 // <peeled section>
3061 // Split off signed loop exit test
3062 // <-- CUT HERE -->
3063 // Unchanged unsigned loop exit test
3064 // <rest of unpeeled section>
3065 // goto Loop
3066 //
3067 // After Partial Peeling:
3068 // <cloned peeled section>
3069 // Cloned split off signed loop exit test
3070 // Loop:
3071 // Unchanged unsigned loop exit test
3072 // <rest of unpeeled section>
3073 // <peeled section>
3074 // Split off signed loop exit test
3075 // goto Loop
3076 //
3077 // Details
3078 // -------
3079 // Before:
3080 // if (i <u limit) Unsigned loop exit condition
3081 // / |
3082 // v v
3083 // exit-proj stay-in-loop-proj
3084 //
3085 // Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
3086 // before the CmpU on the stay-in-loop path and keep both tests:
3087 //
3088 // if (i <u limit) Signed loop exit test
3089 // / |
3090 // / if (i <u limit) Unsigned loop exit test
3091 // / / |
3092 // v v v
3093 // exit-region stay-in-loop-proj
3094 //
3095 // Implementation
3096 // --------------
3097 // We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
3098 // loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
3099 // exit tests is preserved, and their loop nesting is correct.
3100 //
3101 // To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit
3102 // test above it and kill the original unsigned loop exit test by setting it's condition to a constant
3103 // (i.e. stay-in-loop-const in graph below) such that IGVN can fold it later:
3104 //
3105 // if (stay-in-loop-const) Killed original unsigned loop exit test
3106 // / |
3107 // / v
3108 // / if (i < limit) Split off signed loop exit test
3109 // / / |
3110 // / / v
3111 // / / if (i <u limit) Cloned unsigned loop exit test
3112 // / / / |
3113 // v v v |
3114 // exit-region |
3115 // | |
3116 // dummy-if |
3117 // / | |
3118 // dead | |
3119 // v v
3120 // exit-proj stay-in-loop-proj
3121 //
3122 // Note: The dummy-if is inserted to create a region to merge the loop exits between the original to be killed unsigned
3123 // loop exit test and its exit projection while keeping the exit projection (also see insert_region_before_proj()).
3124 //
3125 // Requirements
3126 // ------------
3127 // Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
3128 // the same as before with only a single unsigned test. This is only possible if certain requirements are met.
3129 // Otherwise, we need to bail out (see comments in the code below).
3130 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
3131 const bool Signed = true;
3132 const bool Unsigned = false;
3133
3134 BoolNode* bol = if_cmpu->in(1)->as_Bool();
3135 if (bol->_test._test != BoolTest::lt) {
3136 return nullptr;
3137 }
3138 CmpNode* cmpu = bol->in(1)->as_Cmp();
3139 assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");
3140
3141 int stride = stride_of_possible_iv(if_cmpu);
3142 if (stride == 0) {
3143 return nullptr;
3144 }
3145
3146 Node* lp_proj = stay_in_loop(if_cmpu, loop);
3147 guarantee(lp_proj != nullptr, "null loop node");
3148
3149 ProjNode* lp_continue = lp_proj->as_Proj();
3150 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
3151 if (!lp_exit->is_IfFalse()) {
3152 // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit).
3153 // We therefore can't add a single exit condition.
3154 return nullptr;
3155 }
3156 // The unsigned loop exit condition is
3157 // !(i <u limit)
3158 // = i >=u limit
3159 //
3160 // First, we note that for any x for which
3161 // 0 <= x <= INT_MAX
3162 // we can convert x to an unsigned int and still get the same guarantee:
3163 // 0 <= (uint) x <= INT_MAX = (uint) INT_MAX
3164 // 0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX (LEMMA)
3165 //
3166 // With that in mind, if
3167 // limit >= 0 (COND)
3168 // then the unsigned loop exit condition
3169 // i >=u limit (ULE)
3170 // is equivalent to
3171 // i < 0 || i >= limit (SLE-full)
3172 // because either i is negative and therefore always greater than MAX_INT when converting to unsigned
3173 // (uint) i >=u MAX_INT >= limit >= 0
3174 // or otherwise
3175 // i >= limit >= 0
3176 // holds due to (LEMMA).
3177 //
3178 // For completeness, a counterexample with limit < 0:
3179 // Assume i = -3 and limit = -2:
3180 // i < 0
3181 // -2 < 0
3182 // is true and thus also "i < 0 || i >= limit". But
3183 // i >=u limit
3184 // -3 >=u -2
3185 // is false.
3186 Node* limit = cmpu->in(2);
3187 const TypeInt* type_limit = _igvn.type(limit)->is_int();
3188 if (type_limit->_lo < 0) {
3189 return nullptr;
3190 }
3191
3192 // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
3193 // stride < 0:
3194 // i < 0 (SLE = SLE-negative)
3195 // stride > 0:
3196 // i >= limit (SLE = SLE-positive)
3197 // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
3198 //
3199 // Loop:
3200 // <peeled section>
3201 // i >= limit (SLE-positive)
3202 // <-- CUT HERE -->
3203 // i >=u limit (ULE)
3204 // <rest of unpeeled section>
3205 // goto Loop
3206 //
3207 // We exit the loop if:
3208 // (SLE) is true OR (ULE) is true
3209 // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
3210 // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
3211 // (SLE) IMPLIES (ULE)
3212 // This indeed holds when (COND) is given:
3213 // - stride > 0:
3214 // i >= limit // (SLE = SLE-positive)
3215 // i >= limit >= 0 // (COND)
3216 // i >=u limit >= 0 // (LEMMA)
3217 // which is the unsigned loop exit condition (ULE).
3218 // - stride < 0:
3219 // i < 0 // (SLE = SLE-negative)
3220 // (uint) i >u MAX_INT // (NEG) all negative values are greater than MAX_INT when converted to unsigned
3221 // MAX_INT >= limit >= 0 // (COND)
3222 // MAX_INT >=u limit >= 0 // (LEMMA)
3223 // and thus from (NEG) and (LEMMA):
3224 // i >=u limit
3225 // which is the unsigned loop exit condition (ULE).
3226 //
3227 //
3228 // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0):
3229 // <cloned peeled section>
3230 // i >= limit (SLE-positive)
3231 // Loop:
3232 // i >=u limit (ULE)
3233 // <rest of unpeeled section>
3234 // <peeled section>
3235 // i >= limit (SLE-positive)
3236 // goto Loop
3237 Node* rhs_cmpi;
3238 if (stride > 0) {
3239 rhs_cmpi = limit; // For i >= limit
3240 } else {
3241 rhs_cmpi = makecon(TypeInt::ZERO); // For i < 0
3242 }
3243 // Create a new region on the exit path
3244 RegionNode* reg = insert_region_before_proj(lp_exit);
3245 guarantee(reg != nullptr, "null region node");
3246
3247 // Clone the if-cmpu-true-false using a signed compare
3248 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
3249 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue);
3250 reg->add_req(cmpi_exit);
3251
3252 // Clone the if-cmpu-true-false
3253 BoolTest::mask rel_u = bol->_test._test;
3254 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
3255 reg->add_req(cmpu_exit);
3256
3257 // Force original if to stay in loop.
3258 short_circuit_if(if_cmpu, lp_continue);
3259
3260 return cmpi_exit->in(0)->as_If();
3261 }
3262
3263 //------------------------------ remove_cmpi_loop_exit -------------------------------------
3264 // Remove a previously inserted signed compare loop exit.
3265 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
3266 Node* lp_proj = stay_in_loop(if_cmp, loop);
3267 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
3268 stay_in_loop(lp_proj, loop)->is_If() &&
3269 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
3270 Node* con = makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
3271 if_cmp->set_req(1, con);
3272 }
3273
3274 //------------------------------ scheduled_nodelist -------------------------------------
3275 // Create a post order schedule of nodes that are in the
3276 // "member" set. The list is returned in "sched".
3277 // The first node in "sched" is the loop head, followed by
3278 // nodes which have no inputs in the "member" set, and then
3279 // followed by the nodes that have an immediate input dependence
3280 // on a node in "sched".
3281 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {
3282
3283 assert(member.test(loop->_head->_idx), "loop head must be in member set");
3284 VectorSet visited;
3285 Node_Stack nstack(loop->_body.size());
3286
3287 Node* n = loop->_head; // top of stack is cached in "n"
3288 uint idx = 0;
3289 visited.set(n->_idx);
3290
3291 // Initially push all with no inputs from within member set
3292 for(uint i = 0; i < loop->_body.size(); i++ ) {
3293 Node *elt = loop->_body.at(i);
3294 if (member.test(elt->_idx)) {
3295 bool found = false;
3296 for (uint j = 0; j < elt->req(); j++) {
3297 Node* def = elt->in(j);
3298 if (def && member.test(def->_idx) && def != elt) {
3299 found = true;
3300 break;
3301 }
3302 }
3303 if (!found && elt != loop->_head) {
3304 nstack.push(n, idx);
3305 n = elt;
3306 assert(!visited.test(n->_idx), "not seen yet");
3307 visited.set(n->_idx);
3308 }
3309 }
3310 }
3311
3312 // traverse out's that are in the member set
3313 while (true) {
3314 if (idx < n->outcnt()) {
3315 Node* use = n->raw_out(idx);
3316 idx++;
3317 if (!visited.test_set(use->_idx)) {
3318 if (member.test(use->_idx)) {
3319 nstack.push(n, idx);
3320 n = use;
3321 idx = 0;
3322 }
3323 }
3324 } else {
3325 // All outputs processed
3326 sched.push(n);
3327 if (nstack.is_empty()) break;
3328 n = nstack.node();
3329 idx = nstack.index();
3330 nstack.pop();
3331 }
3332 }
3333 }
3334
3335
3336 //------------------------------ has_use_in_set -------------------------------------
3337 // Has a use in the vector set
3338 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
3339 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3340 Node* use = n->fast_out(j);
3341 if (vset.test(use->_idx)) {
3342 return true;
3343 }
3344 }
3345 return false;
3346 }
3347
3348
3349 //------------------------------ has_use_internal_to_set -------------------------------------
3350 // Has use internal to the vector set (ie. not in a phi at the loop head)
3351 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
3352 Node* head = loop->_head;
3353 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3354 Node* use = n->fast_out(j);
3355 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
3356 return true;
3357 }
3358 }
3359 return false;
3360 }
3361
3362
3363 //------------------------------ clone_for_use_outside_loop -------------------------------------
3364 // clone "n" for uses that are outside of loop
3365 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
3366 int cloned = 0;
3367 assert(worklist.size() == 0, "should be empty");
3368 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3369 Node* use = n->fast_out(j);
3370 if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
3371 worklist.push(use);
3372 }
3373 }
3374
3375 if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor,
3376 "Too many clones required in clone_for_use_outside_loop in partial peeling")) {
3377 return -1;
3378 }
3379
3380 while( worklist.size() ) {
3381 Node *use = worklist.pop();
3382 if (!has_node(use) || use->in(0) == C->top()) continue;
3383 uint j;
3384 for (j = 0; j < use->req(); j++) {
3385 if (use->in(j) == n) break;
3386 }
3387 assert(j < use->req(), "must be there");
3388
3389 // clone "n" and insert it between the inputs of "n" and the use outside the loop
3390 Node* n_clone = n->clone();
3391 _igvn.replace_input_of(use, j, n_clone);
3392 cloned++;
3393 Node* use_c;
3394 if (!use->is_Phi()) {
3395 use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
3396 } else {
3397 // Use in a phi is considered a use in the associated predecessor block
3398 use_c = use->in(0)->in(j);
3399 }
3400 set_ctrl(n_clone, use_c);
3401 assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
3402 get_loop(use_c)->_body.push(n_clone);
3403 _igvn.register_new_node_with_optimizer(n_clone);
3404 #ifndef PRODUCT
3405 if (TracePartialPeeling) {
3406 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
3407 }
3408 #endif
3409 }
3410 return cloned;
3411 }
3412
3413
3414 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
3415 // clone "n" for special uses that are in the not_peeled region.
3416 // If these def-uses occur in separate blocks, the code generator
3417 // marks the method as not compilable. For example, if a "BoolNode"
3418 // is in a different basic block than the "IfNode" that uses it, then
3419 // the compilation is aborted in the code generator.
3420 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
3421 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
3422 if (n->is_Phi() || n->is_Load()) {
3423 return;
3424 }
3425 assert(worklist.size() == 0, "should be empty");
3426 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3427 Node* use = n->fast_out(j);
3428 if ( not_peel.test(use->_idx) &&
3429 (use->is_If() || use->is_CMove() || use->is_Bool() || use->is_OpaqueInitializedAssertionPredicate()) &&
3430 use->in(1) == n) {
3431 worklist.push(use);
3432 }
3433 }
3434 if (worklist.size() > 0) {
3435 // clone "n" and insert it between inputs of "n" and the use
3436 Node* n_clone = n->clone();
3437 loop->_body.push(n_clone);
3438 _igvn.register_new_node_with_optimizer(n_clone);
3439 set_ctrl(n_clone, get_ctrl(n));
3440 sink_list.push(n_clone);
3441 not_peel.set(n_clone->_idx);
3442 #ifndef PRODUCT
3443 if (TracePartialPeeling) {
3444 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
3445 }
3446 #endif
3447 while( worklist.size() ) {
3448 Node *use = worklist.pop();
3449 _igvn.rehash_node_delayed(use);
3450 for (uint j = 1; j < use->req(); j++) {
3451 if (use->in(j) == n) {
3452 use->set_req(j, n_clone);
3453 }
3454 }
3455 }
3456 }
3457 }
3458
3459
3460 //------------------------------ insert_phi_for_loop -------------------------------------
3461 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
3462 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
3463 Node *phi = PhiNode::make(lp, back_edge_val);
3464 phi->set_req(LoopNode::EntryControl, lp_entry_val);
3465 // Use existing phi if it already exists
3466 Node *hit = _igvn.hash_find_insert(phi);
3467 if( hit == nullptr ) {
3468 _igvn.register_new_node_with_optimizer(phi);
3469 set_ctrl(phi, lp);
3470 } else {
3471 // Remove the new phi from the graph and use the hit
3472 _igvn.remove_dead_node(phi);
3473 phi = hit;
3474 }
3475 _igvn.replace_input_of(use, idx, phi);
3476 }
3477
3478 #ifdef ASSERT
3479 //------------------------------ is_valid_loop_partition -------------------------------------
3480 // Validate the loop partition sets: peel and not_peel
3481 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
3482 VectorSet& not_peel ) {
3483 uint i;
3484 // Check that peel_list entries are in the peel set
3485 for (i = 0; i < peel_list.size(); i++) {
3486 if (!peel.test(peel_list.at(i)->_idx)) {
3487 return false;
3488 }
3489 }
3490 // Check at loop members are in one of peel set or not_peel set
3491 for (i = 0; i < loop->_body.size(); i++ ) {
3492 Node *def = loop->_body.at(i);
3493 uint di = def->_idx;
3494 // Check that peel set elements are in peel_list
3495 if (peel.test(di)) {
3496 if (not_peel.test(di)) {
3497 return false;
3498 }
3499 // Must be in peel_list also
3500 bool found = false;
3501 for (uint j = 0; j < peel_list.size(); j++) {
3502 if (peel_list.at(j)->_idx == di) {
3503 found = true;
3504 break;
3505 }
3506 }
3507 if (!found) {
3508 return false;
3509 }
3510 } else if (not_peel.test(di)) {
3511 if (peel.test(di)) {
3512 return false;
3513 }
3514 } else {
3515 return false;
3516 }
3517 }
3518 return true;
3519 }
3520
3521 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
3522 // Ensure a use outside of loop is of the right form
3523 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
3524 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3525 return (use->is_Phi() &&
3526 use_c->is_Region() && use_c->req() == 3 &&
3527 (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
3528 use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
3529 use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
3530 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
3531 }
3532
3533 //------------------------------ is_valid_clone_loop_form -------------------------------------
3534 // Ensure that all uses outside of loop are of the right form
3535 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
3536 uint orig_exit_idx, uint clone_exit_idx) {
3537 uint len = peel_list.size();
3538 for (uint i = 0; i < len; i++) {
3539 Node *def = peel_list.at(i);
3540
3541 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3542 Node *use = def->fast_out(j);
3543 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3544 if (!loop->is_member(get_loop(use_c))) {
3545 // use is not in the loop, check for correct structure
3546 if (use->in(0) == def) {
3547 // Okay
3548 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
3549 return false;
3550 }
3551 }
3552 }
3553 }
3554 return true;
3555 }
3556 #endif
3557
3558 //------------------------------ partial_peel -------------------------------------
3559 // Partially peel (aka loop rotation) the top portion of a loop (called
3560 // the peel section below) by cloning it and placing one copy just before
3561 // the new loop head and the other copy at the bottom of the new loop.
3562 //
3563 // before after where it came from
3564 //
3565 // stmt1 stmt1
3566 // loop: stmt2 clone
3567 // stmt2 if condA goto exitA clone
3568 // if condA goto exitA new_loop: new
3569 // stmt3 stmt3 clone
3570 // if !condB goto loop if condB goto exitB clone
3571 // exitB: stmt2 orig
3572 // stmt4 if !condA goto new_loop orig
3573 // exitA: goto exitA
3574 // exitB:
3575 // stmt4
3576 // exitA:
3577 //
3578 // Step 1: find the cut point: an exit test on probable
3579 // induction variable.
3580 // Step 2: schedule (with cloning) operations in the peel
3581 // section that can be executed after the cut into
3582 // the section that is not peeled. This may need
3583 // to clone operations into exit blocks. For
3584 // instance, a reference to A[i] in the not-peel
3585 // section and a reference to B[i] in an exit block
3586 // may cause a left-shift of i by 2 to be placed
3587 // in the peel block. This step will clone the left
3588 // shift into the exit block and sink the left shift
3589 // from the peel to the not-peel section.
3590 // Step 3: clone the loop, retarget the control, and insert
3591 // phis for values that are live across the new loop
3592 // head. This is very dependent on the graph structure
3593 // from clone_loop. It creates region nodes for
3594 // exit control and associated phi nodes for values
3595 // flow out of the loop through that exit. The region
3596 // node is dominated by the clone's control projection.
3597 // So the clone's peel section is placed before the
3598 // new loop head, and the clone's not-peel section is
3599 // forms the top part of the new loop. The original
3600 // peel section forms the tail of the new loop.
3601 // Step 4: update the dominator tree and recompute the
3602 // dominator depth.
3603 //
3604 // orig
3605 //
3606 // stmt1
3607 // |
3608 // v
3609 // predicates
3610 // |
3611 // v
3612 // loop<----+
3613 // | |
3614 // stmt2 |
3615 // | |
3616 // v |
3617 // ifA |
3618 // / | |
3619 // v v |
3620 // false true ^ <-- last_peel
3621 // / | |
3622 // / ===|==cut |
3623 // / stmt3 | <-- first_not_peel
3624 // / | |
3625 // | v |
3626 // v ifB |
3627 // exitA: / \ |
3628 // / \ |
3629 // v v |
3630 // false true |
3631 // / \ |
3632 // / ----+
3633 // |
3634 // v
3635 // exitB:
3636 // stmt4
3637 //
3638 //
3639 // after clone loop
3640 //
3641 // stmt1
3642 // |
3643 // v
3644 // predicates
3645 // / \
3646 // clone / \ orig
3647 // / \
3648 // / \
3649 // v v
3650 // +---->loop loop<----+
3651 // | | | |
3652 // | stmt2 stmt2 |
3653 // | | | |
3654 // | v v |
3655 // | ifA ifA |
3656 // | | \ / | |
3657 // | v v v v |
3658 // ^ true false false true ^ <-- last_peel
3659 // | | ^ \ / | |
3660 // | cut==|== \ \ / ===|==cut |
3661 // | stmt3 \ \ / stmt3 | <-- first_not_peel
3662 // | | dom | | | |
3663 // | v \ 1v v2 v |
3664 // | ifB regionA ifB |
3665 // | / \ | / \ |
3666 // | / \ v / \ |
3667 // | v v exitA: v v |
3668 // | true false false true |
3669 // | / ^ \ / \ |
3670 // +---- \ \ / ----+
3671 // dom \ /
3672 // \ 1v v2
3673 // regionB
3674 // |
3675 // v
3676 // exitB:
3677 // stmt4
3678 //
3679 //
3680 // after partial peel
3681 //
3682 // stmt1
3683 // |
3684 // v
3685 // predicates
3686 // /
3687 // clone / orig
3688 // / TOP
3689 // / \
3690 // v v
3691 // TOP->loop loop----+
3692 // | | |
3693 // stmt2 stmt2 |
3694 // | | |
3695 // v v |
3696 // ifA ifA |
3697 // | \ / | |
3698 // v v v v |
3699 // true false false true | <-- last_peel
3700 // | ^ \ / +------|---+
3701 // +->newloop \ \ / === ==cut | |
3702 // | stmt3 \ \ / TOP | |
3703 // | | dom | | stmt3 | | <-- first_not_peel
3704 // | v \ 1v v2 v | |
3705 // | ifB regionA ifB ^ v
3706 // | / \ | / \ | |
3707 // | / \ v / \ | |
3708 // | v v exitA: v v | |
3709 // | true false false true | |
3710 // | / ^ \ / \ | |
3711 // | | \ \ / v | |
3712 // | | dom \ / TOP | |
3713 // | | \ 1v v2 | |
3714 // ^ v regionB | |
3715 // | | | | |
3716 // | | v ^ v
3717 // | | exitB: | |
3718 // | | stmt4 | |
3719 // | +------------>-----------------+ |
3720 // | |
3721 // +-----------------<---------------------+
3722 //
3723 //
3724 // final graph
3725 //
3726 // stmt1
3727 // |
3728 // v
3729 // predicates
3730 // |
3731 // v
3732 // stmt2 clone
3733 // |
3734 // v
3735 // ........> ifA clone
3736 // : / |
3737 // dom / |
3738 // : v v
3739 // : false true
3740 // : | |
3741 // : | v
3742 // : | newloop<-----+
3743 // : | | |
3744 // : | stmt3 clone |
3745 // : | | |
3746 // : | v |
3747 // : | ifB |
3748 // : | / \ |
3749 // : | v v |
3750 // : | false true |
3751 // : | | | |
3752 // : | v stmt2 |
3753 // : | exitB: | |
3754 // : | stmt4 v |
3755 // : | ifA orig |
3756 // : | / \ |
3757 // : | / \ |
3758 // : | v v |
3759 // : | false true |
3760 // : | / \ |
3761 // : v v -----+
3762 // RegionA
3763 // |
3764 // v
3765 // exitA
3766 //
3767 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
3768
3769 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3770 if (!loop->_head->is_Loop()) {
3771 return false;
3772 }
3773 LoopNode *head = loop->_head->as_Loop();
3774
3775 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3776 return false;
3777 }
3778
3779 // Check for complex exit control
3780 for (uint ii = 0; ii < loop->_body.size(); ii++) {
3781 Node *n = loop->_body.at(ii);
3782 int opc = n->Opcode();
3783 if (n->is_Call() ||
3784 opc == Op_Catch ||
3785 opc == Op_CatchProj ||
3786 opc == Op_Jump ||
3787 opc == Op_JumpProj) {
3788 #ifndef PRODUCT
3789 if (TracePartialPeeling) {
3790 tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3791 }
3792 #endif
3793 return false;
3794 }
3795 }
3796
3797 int dd = dom_depth(head);
3798
3799 // Step 1: find cut point
3800
3801 // Walk up dominators to loop head looking for first loop exit
3802 // which is executed on every path thru loop.
3803 IfNode *peel_if = nullptr;
3804 IfNode *peel_if_cmpu = nullptr;
3805
3806 Node *iff = loop->tail();
3807 while (iff != head) {
3808 if (iff->is_If()) {
3809 Node *ctrl = get_ctrl(iff->in(1));
3810 if (ctrl->is_top()) return false; // Dead test on live IF.
3811 // If loop-varying exit-test, check for induction variable
3812 if (loop->is_member(get_loop(ctrl)) &&
3813 loop->is_loop_exit(iff) &&
3814 is_possible_iv_test(iff)) {
3815 Node* cmp = iff->in(1)->in(1);
3816 if (cmp->Opcode() == Op_CmpI) {
3817 peel_if = iff->as_If();
3818 } else {
3819 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
3820 peel_if_cmpu = iff->as_If();
3821 }
3822 }
3823 }
3824 iff = idom(iff);
3825 }
3826
3827 // Prefer signed compare over unsigned compare.
3828 IfNode* new_peel_if = nullptr;
3829 if (peel_if == nullptr) {
3830 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) {
3831 return false; // No peel point found
3832 }
3833 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
3834 if (new_peel_if == nullptr) {
3835 return false; // No peel point found
3836 }
3837 peel_if = new_peel_if;
3838 }
3839 Node* last_peel = stay_in_loop(peel_if, loop);
3840 Node* first_not_peeled = stay_in_loop(last_peel, loop);
3841 if (first_not_peeled == nullptr || first_not_peeled == head) {
3842 return false;
3843 }
3844
3845 #ifndef PRODUCT
3846 if (TraceLoopOpts) {
3847 tty->print("PartialPeel ");
3848 loop->dump_head();
3849 }
3850
3851 if (TracePartialPeeling) {
3852 tty->print_cr("before partial peel one iteration");
3853 Node_List wl;
3854 Node* t = head->in(2);
3855 while (true) {
3856 wl.push(t);
3857 if (t == head) break;
3858 t = idom(t);
3859 }
3860 while (wl.size() > 0) {
3861 Node* tt = wl.pop();
3862 tt->dump();
3863 if (tt == last_peel) tty->print_cr("-- cut --");
3864 }
3865 }
3866 #endif
3867
3868 C->print_method(PHASE_BEFORE_PARTIAL_PEELING, 4, head);
3869
3870 VectorSet peel;
3871 VectorSet not_peel;
3872 Node_List peel_list;
3873 Node_List worklist;
3874 Node_List sink_list;
3875
3876 uint estimate = loop->est_loop_clone_sz(1);
3877 if (exceeding_node_budget(estimate)) {
3878 return false;
3879 }
3880
3881 // Set of cfg nodes to peel are those that are executable from
3882 // the head through last_peel.
3883 assert(worklist.size() == 0, "should be empty");
3884 worklist.push(head);
3885 peel.set(head->_idx);
3886 while (worklist.size() > 0) {
3887 Node *n = worklist.pop();
3888 if (n != last_peel) {
3889 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3890 Node* use = n->fast_out(j);
3891 if (use->is_CFG() &&
3892 loop->is_member(get_loop(use)) &&
3893 !peel.test_set(use->_idx)) {
3894 worklist.push(use);
3895 }
3896 }
3897 }
3898 }
3899
3900 // Set of non-cfg nodes to peel are those that are control
3901 // dependent on the cfg nodes.
3902 for (uint i = 0; i < loop->_body.size(); i++) {
3903 Node *n = loop->_body.at(i);
3904 Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
3905 if (peel.test(n_c->_idx)) {
3906 peel.set(n->_idx);
3907 } else {
3908 not_peel.set(n->_idx);
3909 }
3910 }
3911
3912 // Step 2: move operations from the peeled section down into the
3913 // not-peeled section
3914
3915 // Get a post order schedule of nodes in the peel region
3916 // Result in right-most operand.
3917 scheduled_nodelist(loop, peel, peel_list);
3918
3919 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3920
3921 // For future check for too many new phis
3922 uint old_phi_cnt = 0;
3923 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
3924 Node* use = head->fast_out(j);
3925 if (use->is_Phi()) old_phi_cnt++;
3926 }
3927
3928 #ifndef PRODUCT
3929 if (TracePartialPeeling) {
3930 tty->print_cr("\npeeled list");
3931 }
3932 #endif
3933
3934 // Evacuate nodes in peel region into the not_peeled region if possible
3935 bool too_many_clones = false;
3936 uint new_phi_cnt = 0;
3937 uint cloned_for_outside_use = 0;
3938 for (uint i = 0; i < peel_list.size();) {
3939 Node* n = peel_list.at(i);
3940 #ifndef PRODUCT
3941 if (TracePartialPeeling) n->dump();
3942 #endif
3943 bool incr = true;
3944 if (!n->is_CFG()) {
3945 if (has_use_in_set(n, not_peel)) {
3946 // If not used internal to the peeled region,
3947 // move "n" from peeled to not_peeled region.
3948 if (!has_use_internal_to_set(n, peel, loop)) {
3949 // if not pinned and not a load (which maybe anti-dependent on a store)
3950 // and not a CMove (Matcher expects only bool->cmove).
3951 if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) {
3952 int new_clones = clone_for_use_outside_loop(loop, n, worklist);
3953 if (C->failing()) return false;
3954 if (new_clones == -1) {
3955 too_many_clones = true;
3956 break;
3957 }
3958 cloned_for_outside_use += new_clones;
3959 sink_list.push(n);
3960 peel.remove(n->_idx);
3961 not_peel.set(n->_idx);
3962 peel_list.remove(i);
3963 incr = false;
3964 #ifndef PRODUCT
3965 if (TracePartialPeeling) {
3966 tty->print_cr("sink to not_peeled region: %d newbb: %d",
3967 n->_idx, get_ctrl(n)->_idx);
3968 }
3969 #endif
3970 }
3971 } else {
3972 // Otherwise check for special def-use cases that span
3973 // the peel/not_peel boundary such as bool->if
3974 clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
3975 new_phi_cnt++;
3976 }
3977 }
3978 }
3979 if (incr) i++;
3980 }
3981
3982 estimate += cloned_for_outside_use + new_phi_cnt;
3983 bool exceed_node_budget = !may_require_nodes(estimate);
3984 bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;
3985
3986 if (too_many_clones || exceed_node_budget || exceed_phi_limit) {
3987 #ifndef PRODUCT
3988 if (TracePartialPeeling && exceed_phi_limit) {
3989 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
3990 new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F');
3991 }
3992 #endif
3993 if (new_peel_if != nullptr) {
3994 remove_cmpi_loop_exit(new_peel_if, loop);
3995 }
3996 // Inhibit more partial peeling on this loop
3997 assert(!head->is_partial_peel_loop(), "not partial peeled");
3998 head->mark_partial_peel_failed();
3999 if (cloned_for_outside_use > 0) {
4000 // Terminate this round of loop opts because
4001 // the graph outside this loop was changed.
4002 C->set_major_progress();
4003 return true;
4004 }
4005 return false;
4006 }
4007
4008 // Step 3: clone loop, retarget control, and insert new phis
4009
4010 // Create new loop head for new phis and to hang
4011 // the nodes being moved (sinked) from the peel region.
4012 LoopNode* new_head = new LoopNode(last_peel, last_peel);
4013 new_head->set_unswitch_count(head->unswitch_count()); // Preserve
4014 _igvn.register_new_node_with_optimizer(new_head);
4015 assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
4016 _igvn.replace_input_of(first_not_peeled, 0, new_head);
4017 set_loop(new_head, loop);
4018 loop->_body.push(new_head);
4019 not_peel.set(new_head->_idx);
4020 set_idom(new_head, last_peel, dom_depth(first_not_peeled));
4021 set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
4022
4023 while (sink_list.size() > 0) {
4024 Node* n = sink_list.pop();
4025 set_ctrl(n, new_head);
4026 }
4027
4028 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4029
4030 clone_loop(loop, old_new, dd, IgnoreStripMined);
4031
4032 const uint clone_exit_idx = 1;
4033 const uint orig_exit_idx = 2;
4034 assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");
4035
4036 Node* head_clone = old_new[head->_idx];
4037 LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
4038 Node* orig_tail_clone = head_clone->in(2);
4039
4040 // Add phi if "def" node is in peel set and "use" is not
4041
4042 for (uint i = 0; i < peel_list.size(); i++) {
4043 Node *def = peel_list.at(i);
4044 if (!def->is_CFG()) {
4045 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
4046 Node *use = def->fast_out(j);
4047 if (has_node(use) && use->in(0) != C->top() &&
4048 (!peel.test(use->_idx) ||
4049 (use->is_Phi() && use->in(0) == head)) ) {
4050 worklist.push(use);
4051 }
4052 }
4053 while( worklist.size() ) {
4054 Node *use = worklist.pop();
4055 for (uint j = 1; j < use->req(); j++) {
4056 Node* n = use->in(j);
4057 if (n == def) {
4058
4059 // "def" is in peel set, "use" is not in peel set
4060 // or "use" is in the entry boundary (a phi) of the peel set
4061
4062 Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
4063
4064 if ( loop->is_member(get_loop( use_c )) ) {
4065 // use is in loop
4066 if (old_new[use->_idx] != nullptr) { // null for dead code
4067 Node* use_clone = old_new[use->_idx];
4068 _igvn.replace_input_of(use, j, C->top());
4069 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
4070 }
4071 } else {
4072 assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
4073 // use is not in the loop, check if the live range includes the cut
4074 Node* lp_if = use_c->in(orig_exit_idx)->in(0);
4075 if (not_peel.test(lp_if->_idx)) {
4076 assert(j == orig_exit_idx, "use from original loop");
4077 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
4078 }
4079 }
4080 }
4081 }
4082 }
4083 }
4084 }
4085
4086 // Step 3b: retarget control
4087
4088 // Redirect control to the new loop head if a cloned node in
4089 // the not_peeled region has control that points into the peeled region.
4090 // This necessary because the cloned peeled region will be outside
4091 // the loop.
4092 // from to
4093 // cloned-peeled <---+
4094 // new_head_clone: | <--+
4095 // cloned-not_peeled in(0) in(0)
4096 // orig-peeled
4097
4098 for (uint i = 0; i < loop->_body.size(); i++) {
4099 Node *n = loop->_body.at(i);
4100 if (!n->is_CFG() && n->in(0) != nullptr &&
4101 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
4102 Node* n_clone = old_new[n->_idx];
4103 if (n_clone->depends_only_on_test()) {
4104 // Pin array access nodes: control is updated here to the loop head. If, after some transformations, the
4105 // backedge is removed, an array load could become dependent on a condition that's not a range check for that
4106 // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
4107 // floating above its range check.
4108 Node* pinned_clone = n_clone->pin_array_access_node();
4109 if (pinned_clone != nullptr) {
4110 register_new_node_with_ctrl_of(pinned_clone, n_clone);
4111 old_new.map(n->_idx, pinned_clone);
4112 _igvn.replace_node(n_clone, pinned_clone);
4113 n_clone = pinned_clone;
4114 }
4115 }
4116 _igvn.replace_input_of(n_clone, 0, new_head_clone);
4117 }
4118 }
4119
4120 // Backedge of the surviving new_head (the clone) is original last_peel
4121 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
4122
4123 // Cut first node in original not_peel set
4124 _igvn.rehash_node_delayed(new_head); // Multiple edge updates:
4125 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of
4126 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls
4127
4128 // Copy head_clone back-branch info to original head
4129 // and remove original head's loop entry and
4130 // clone head's back-branch
4131 _igvn.rehash_node_delayed(head); // Multiple edge updates
4132 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
4133 head->set_req(LoopNode::LoopBackControl, C->top());
4134 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
4135
4136 // Similarly modify the phis
4137 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
4138 Node* use = head->fast_out(k);
4139 if (use->is_Phi() && use->outcnt() > 0) {
4140 Node* use_clone = old_new[use->_idx];
4141 _igvn.rehash_node_delayed(use); // Multiple edge updates
4142 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
4143 use->set_req(LoopNode::LoopBackControl, C->top());
4144 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
4145 }
4146 }
4147
4148 // Step 4: update dominator tree and dominator depth
4149
4150 set_idom(head, orig_tail_clone, dd);
4151 recompute_dom_depth();
4152
4153 // Inhibit more partial peeling on this loop
4154 new_head_clone->set_partial_peel_loop();
4155 C->set_major_progress();
4156 loop->record_for_igvn();
4157
4158 #ifndef PRODUCT
4159 if (TracePartialPeeling) {
4160 tty->print_cr("\nafter partial peel one iteration");
4161 Node_List wl;
4162 Node* t = last_peel;
4163 while (true) {
4164 wl.push(t);
4165 if (t == head_clone) break;
4166 t = idom(t);
4167 }
4168 while (wl.size() > 0) {
4169 Node* tt = wl.pop();
4170 if (tt == head) tty->print_cr("orig head");
4171 else if (tt == new_head_clone) tty->print_cr("new head");
4172 else if (tt == head_clone) tty->print_cr("clone head");
4173 tt->dump();
4174 }
4175 }
4176 #endif
4177
4178 C->print_method(PHASE_AFTER_PARTIAL_PEELING, 4, new_head_clone);
4179
4180 return true;
4181 }
4182
4183 #ifdef ASSERT
4184
4185 // Moves Template Assertion Predicates to a target loop by cloning and killing the old ones. The target loop is the
4186 // original, not-cloned loop. This is currently only used with StressLoopBackedge which is a develop flag only and
4187 // false with product builds. We can therefore guard it with an ifdef. More details can be found at the use-site.
4188 class MoveAssertionPredicatesVisitor : public PredicateVisitor {
4189 ClonePredicateToTargetLoop _clone_predicate_to_loop;
4190 PhaseIdealLoop* const _phase;
4191
4192 public:
4193 MoveAssertionPredicatesVisitor(LoopNode* target_loop_head,
4194 const NodeInSingleLoopBody &node_in_loop_body,
4195 PhaseIdealLoop* phase)
4196 : _clone_predicate_to_loop(target_loop_head, node_in_loop_body, phase),
4197 _phase(phase) {
4198 }
4199 NONCOPYABLE(MoveAssertionPredicatesVisitor);
4200
4201 using PredicateVisitor::visit;
4202
4203 void visit(const TemplateAssertionPredicate& template_assertion_predicate) override {
4204 _clone_predicate_to_loop.clone_template_assertion_predicate(template_assertion_predicate);
4205 template_assertion_predicate.kill(_phase->igvn());
4206 }
4207 };
4208 #endif // ASSERT
4209
4210 // Transform:
4211 //
4212 // loop<-----------------+
4213 // | |
4214 // stmt1 stmt2 .. stmtn |
4215 // | | | |
4216 // \ | / |
4217 // v v v |
4218 // region |
4219 // | |
4220 // shared_stmt |
4221 // | |
4222 // v |
4223 // if |
4224 // / \ |
4225 // | -----------+
4226 // v
4227 //
4228 // into:
4229 //
4230 // loop<-------------------+
4231 // | |
4232 // v |
4233 // +->loop |
4234 // | | |
4235 // | stmt1 stmt2 .. stmtn |
4236 // | | | | |
4237 // | | \ / |
4238 // | | v v |
4239 // | | region1 |
4240 // | | | |
4241 // | shared_stmt shared_stmt |
4242 // | | | |
4243 // | v v |
4244 // | if if |
4245 // | /\ / \ |
4246 // +-- | | -------+
4247 // \ /
4248 // v v
4249 // region2
4250 //
4251 // (region2 is shown to merge mirrored projections of the loop exit
4252 // ifs to make the diagram clearer but they really merge the same
4253 // projection)
4254 //
4255 // Conditions for this transformation to trigger:
4256 // - the path through stmt1 is frequent enough
4257 // - the inner loop will be turned into a counted loop after transformation
4258 bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) {
4259 if (!DuplicateBackedge) {
4260 return false;
4261 }
4262 assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only");
4263 if (!loop->_head->is_Loop()) {
4264 return false;
4265 }
4266
4267 uint estimate = loop->est_loop_clone_sz(1);
4268 if (exceeding_node_budget(estimate)) {
4269 return false;
4270 }
4271
4272 LoopNode *head = loop->_head->as_Loop();
4273
4274 Node* region = nullptr;
4275 IfNode* exit_test = nullptr;
4276 uint inner;
4277 float f;
4278 #ifdef ASSERT
4279 if (StressDuplicateBackedge) {
4280 if (head->is_strip_mined()) {
4281 return false;
4282 }
4283 Node* c = head->in(LoopNode::LoopBackControl);
4284
4285 while (c != head) {
4286 if (c->is_Region()) {
4287 region = c;
4288 }
4289 c = idom(c);
4290 }
4291
4292 if (region == nullptr) {
4293 return false;
4294 }
4295
4296 inner = 1;
4297 } else
4298 #endif //ASSERT
4299 {
4300 // Is the shape of the loop that of a counted loop...
4301 Node* back_control = loop_exit_control(head, loop);
4302 if (back_control == nullptr) {
4303 return false;
4304 }
4305
4306 BoolTest::mask bt = BoolTest::illegal;
4307 float cl_prob = 0;
4308 Node* incr = nullptr;
4309 Node* limit = nullptr;
4310 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
4311 if (cmp == nullptr || cmp->Opcode() != Op_CmpI) {
4312 return false;
4313 }
4314
4315 // With an extra phi for the candidate iv?
4316 // Or the region node is the loop head
4317 if (!incr->is_Phi() || incr->in(0) == head) {
4318 return false;
4319 }
4320
4321 PathFrequency pf(head, this);
4322 region = incr->in(0);
4323
4324 // Go over all paths for the extra phi's region and see if that
4325 // path is frequent enough and would match the expected iv shape
4326 // if the extra phi is removed
4327 inner = 0;
4328 for (uint i = 1; i < incr->req(); ++i) {
4329 Node* in = incr->in(i);
4330 Node* trunc1 = nullptr;
4331 Node* trunc2 = nullptr;
4332 const TypeInteger* iv_trunc_t = nullptr;
4333 Node* orig_in = in;
4334 if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) {
4335 continue;
4336 }
4337 assert(in->Opcode() == Op_AddI, "wrong increment code");
4338 Node* xphi = nullptr;
4339 Node* stride = loop_iv_stride(in, xphi);
4340
4341 if (stride == nullptr) {
4342 continue;
4343 }
4344
4345 PhiNode* phi = loop_iv_phi(xphi, nullptr, head);
4346 if (phi == nullptr ||
4347 (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
4348 (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
4349 return false;
4350 }
4351
4352 f = pf.to(region->in(i));
4353 if (f > 0.5) {
4354 inner = i;
4355 break;
4356 }
4357 }
4358
4359 if (inner == 0) {
4360 return false;
4361 }
4362
4363 exit_test = back_control->in(0)->as_If();
4364 }
4365
4366 if (idom(region)->is_Catch()) {
4367 return false;
4368 }
4369
4370 // Collect all control nodes that need to be cloned (shared_stmt in the diagram)
4371 Unique_Node_List wq;
4372 wq.push(head->in(LoopNode::LoopBackControl));
4373 for (uint i = 0; i < wq.size(); i++) {
4374 Node* c = wq.at(i);
4375 assert(get_loop(c) == loop, "not in the right loop?");
4376 if (c->is_Region()) {
4377 if (c != region) {
4378 for (uint j = 1; j < c->req(); ++j) {
4379 wq.push(c->in(j));
4380 }
4381 }
4382 } else {
4383 wq.push(c->in(0));
4384 }
4385 assert(!is_strict_dominator(c, region), "shouldn't go above region");
4386 }
4387
4388 Node* region_dom = idom(region);
4389
4390 // Can't do the transformation if this would cause a membar pair to
4391 // be split
4392 for (uint i = 0; i < wq.size(); i++) {
4393 Node* c = wq.at(i);
4394 if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) {
4395 assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair");
4396 if (!wq.member(c->as_MemBar()->leading_membar())) {
4397 return false;
4398 }
4399 }
4400 }
4401 C->print_method(PHASE_BEFORE_DUPLICATE_LOOP_BACKEDGE, 4, head);
4402
4403 // Collect data nodes that need to be clones as well
4404 int dd = dom_depth(head);
4405
4406 for (uint i = 0; i < loop->_body.size(); ++i) {
4407 Node* n = loop->_body.at(i);
4408 if (has_ctrl(n)) {
4409 Node* c = get_ctrl(n);
4410 if (wq.member(c)) {
4411 wq.push(n);
4412 }
4413 } else {
4414 set_idom(n, idom(n), dd);
4415 }
4416 }
4417
4418 // clone shared_stmt
4419 clone_loop_body(wq, old_new, nullptr);
4420
4421 Node* region_clone = old_new[region->_idx];
4422 region_clone->set_req(inner, C->top());
4423 set_idom(region, region->in(inner), dd);
4424
4425 // Prepare the outer loop
4426 Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]);
4427 register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl));
4428 _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head);
4429 set_idom(head, outer_head, dd);
4430
4431 fix_body_edges(wq, loop, old_new, dd, loop->_parent, true);
4432
4433 // Make one of the shared_stmt copies only reachable from stmt1, the
4434 // other only from stmt2..stmtn.
4435 Node* dom = nullptr;
4436 for (uint i = 1; i < region->req(); ++i) {
4437 if (i != inner) {
4438 _igvn.replace_input_of(region, i, C->top());
4439 }
4440 Node* in = region_clone->in(i);
4441 if (in->is_top()) {
4442 continue;
4443 }
4444 if (dom == nullptr) {
4445 dom = in;
4446 } else {
4447 dom = dom_lca(dom, in);
4448 }
4449 }
4450
4451 set_idom(region_clone, dom, dd);
4452
4453 // Set up the outer loop
4454 for (uint i = 0; i < head->outcnt(); i++) {
4455 Node* u = head->raw_out(i);
4456 if (u->is_Phi()) {
4457 Node* outer_phi = u->clone();
4458 outer_phi->set_req(0, outer_head);
4459 Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx];
4460 if (backedge == nullptr) {
4461 backedge = u->in(LoopNode::LoopBackControl);
4462 }
4463 outer_phi->set_req(LoopNode::LoopBackControl, backedge);
4464 register_new_node(outer_phi, outer_head);
4465 _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi);
4466 }
4467 }
4468
4469 // create control and data nodes for out of loop uses (including region2)
4470 Node_List worklist;
4471 uint new_counter = C->unique();
4472 fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist);
4473
4474 Node_List *split_if_set = nullptr;
4475 Node_List *split_bool_set = nullptr;
4476 Node_List *split_cex_set = nullptr;
4477 fix_data_uses(wq, loop, ControlAroundStripMined, loop->skip_strip_mined(), new_counter, old_new, worklist,
4478 split_if_set, split_bool_set, split_cex_set);
4479
4480 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
4481
4482 if (exit_test != nullptr) {
4483 float cnt = exit_test->_fcnt;
4484 if (cnt != COUNT_UNKNOWN) {
4485 exit_test->_fcnt = cnt * f;
4486 old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f);
4487 }
4488 }
4489
4490 #ifdef ASSERT
4491 if (StressDuplicateBackedge && head->is_CountedLoop()) {
4492 // The Template Assertion Predicates from the old counted loop are now at the new outer loop - clone them to
4493 // the inner counted loop and kill the old ones. We only need to do this with debug builds because
4494 // StressDuplicateBackedge is a devlop flag and false by default. Without StressDuplicateBackedge 'head' will be a
4495 // non-counted loop, and thus we have no Template Assertion Predicates above the old loop to move down.
4496 PredicateIterator predicate_iterator(outer_head->in(LoopNode::EntryControl));
4497 NodeInSingleLoopBody node_in_body(this, loop);
4498 MoveAssertionPredicatesVisitor move_assertion_predicates_visitor(head, node_in_body, this);
4499 predicate_iterator.for_each(move_assertion_predicates_visitor);
4500 }
4501 #endif // ASSERT
4502
4503 C->set_major_progress();
4504
4505 C->print_method(PHASE_AFTER_DUPLICATE_LOOP_BACKEDGE, 4, outer_head);
4506
4507 return true;
4508 }
4509
4510 // AutoVectorize the loop: replace scalar ops with vector ops.
4511 PhaseIdealLoop::AutoVectorizeStatus
4512 PhaseIdealLoop::auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared) {
4513 // Counted loop only
4514 if (!lpt->is_counted()) {
4515 return AutoVectorizeStatus::Impossible;
4516 }
4517
4518 // Main-loop only
4519 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4520 if (!cl->is_main_loop()) {
4521 return AutoVectorizeStatus::Impossible;
4522 }
4523
4524 VLoop vloop(lpt, false);
4525 if (!vloop.check_preconditions()) {
4526 return AutoVectorizeStatus::TriedAndFailed;
4527 }
4528
4529 // Ensure the shared data is cleared before each use
4530 vshared.clear();
4531
4532 const VLoopAnalyzer vloop_analyzer(vloop, vshared);
4533 if (!vloop_analyzer.success()) {
4534 return AutoVectorizeStatus::TriedAndFailed;
4535 }
4536
4537 SuperWord sw(vloop_analyzer);
4538 if (!sw.transform_loop()) {
4539 return AutoVectorizeStatus::TriedAndFailed;
4540 }
4541
4542 return AutoVectorizeStatus::Success;
4543 }
4544
4545 // Just before insert_pre_post_loops, we can multiversion the loop:
4546 //
4547 // multiversion_if
4548 // | |
4549 // fast_loop slow_loop
4550 //
4551 // In the fast_loop we can make speculative assumptions, and put the
4552 // conditions into the multiversion_if. If the conditions hold at runtime,
4553 // we enter the fast_loop, if the conditions fail, we take the slow_loop
4554 // instead which does not make any of the speculative assumptions.
4555 //
4556 // Note: we only multiversion the loop if the loop does not have any
4557 // auto vectorization check Predicate. If we have that predicate,
4558 // then we can simply add the speculative assumption checks to
4559 // that Predicate. This means we do not need to duplicate the
4560 // loop - we have a smaller graph and save compile time. Should
4561 // the conditions ever fail, then we deopt / trap at the Predicate
4562 // and recompile without that Predicate. At that point we will
4563 // multiversion the loop, so that we can still have speculative
4564 // runtime checks.
4565 //
4566 // We perform the multiversioning when the loop is still in its single
4567 // iteration form, even before we insert pre and post loops. This makes
4568 // the cloning much simpler. However, this means that both the fast
4569 // and the slow loop have to be optimized independently (adding pre
4570 // and post loops, unrolling the main loop, auto-vectorize etc.). And
4571 // we may end up not needing any speculative assumptions in the fast_loop
4572 // and then rejecting the slow_loop by constant folding the multiversion_if.
4573 //
4574 // Therefore, we "delay" the optimization of the slow_loop until we add
4575 // at least one speculative assumption for the fast_loop. If we never
4576 // add such a speculative runtime check, the OpaqueMultiversioningNode
4577 // of the multiversion_if constant folds to true after loop opts, and the
4578 // multiversion_if folds away the "delayed" slow_loop. If we add any
4579 // speculative assumption, then we notify the OpaqueMultiversioningNode
4580 // with "notify_slow_loop_that_it_can_resume_optimizations".
4581 //
4582 // Note: new runtime checks can be added to the multiversion_if with
4583 // PhaseIdealLoop::create_new_if_for_multiversion
4584 void PhaseIdealLoop::maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new) {
4585 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4586 LoopNode* outer_loop = cl->skip_strip_mined();
4587 Node* entry = outer_loop->in(LoopNode::EntryControl);
4588
4589 // Check we have multiversioning enabled, and are not already multiversioned.
4590 if (!LoopMultiversioning || cl->is_multiversion()) { return; }
4591
4592 // Check that we do not have a parse-predicate where we can add the runtime checks
4593 // during auto-vectorization.
4594 const Predicates predicates(entry);
4595 const PredicateBlock* predicate_block = predicates.auto_vectorization_check_block();
4596 if (predicate_block->has_parse_predicate()) { return; }
4597
4598 // Check node budget.
4599 uint estimate = lpt->est_loop_clone_sz(2);
4600 if (!may_require_nodes(estimate)) { return; }
4601
4602 do_multiversioning(lpt, old_new);
4603 }
4604
4605 void DataNodeGraph::clone_data_nodes(Node* new_ctrl) {
4606 for (uint i = 0; i < _data_nodes.size(); i++) {
4607 clone(_data_nodes[i], new_ctrl);
4608 }
4609 }
4610
4611 // Clone the given node and set it up properly. Set 'new_ctrl' as ctrl.
4612 void DataNodeGraph::clone(Node* node, Node* new_ctrl) {
4613 Node* clone = node->clone();
4614 _phase->igvn().register_new_node_with_optimizer(clone);
4615 _orig_to_new.put(node, clone);
4616 _phase->set_ctrl(clone, new_ctrl);
4617 if (node->is_CastII()) {
4618 clone->set_req(0, new_ctrl);
4619 }
4620 }
4621
4622 // Rewire the data inputs of all (unprocessed) cloned nodes, whose inputs are still pointing to the same inputs as their
4623 // corresponding orig nodes, to the newly cloned inputs to create a separate cloned graph.
4624 void DataNodeGraph::rewire_clones_to_cloned_inputs() {
4625 _orig_to_new.iterate_all([&](Node* node, Node* clone) {
4626 for (uint i = 1; i < node->req(); i++) {
4627 Node** cloned_input = _orig_to_new.get(node->in(i));
4628 if (cloned_input != nullptr) {
4629 // Input was also cloned -> rewire clone to the cloned input.
4630 _phase->igvn().replace_input_of(clone, i, *cloned_input);
4631 }
4632 }
4633 });
4634 }
4635
4636 // Clone all non-OpaqueLoop* nodes and apply the provided transformation strategy for OpaqueLoop* nodes.
4637 // Set 'new_ctrl' as ctrl for all cloned non-OpaqueLoop* nodes.
4638 void DataNodeGraph::clone_data_nodes_and_transform_opaque_loop_nodes(
4639 const TransformStrategyForOpaqueLoopNodes& transform_strategy,
4640 Node* new_ctrl) {
4641 for (uint i = 0; i < _data_nodes.size(); i++) {
4642 Node* data_node = _data_nodes[i];
4643 if (data_node->is_Opaque1()) {
4644 transform_opaque_node(transform_strategy, data_node);
4645 } else {
4646 clone(data_node, new_ctrl);
4647 }
4648 }
4649 }
4650
4651 void DataNodeGraph::transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node) {
4652 Node* transformed_node;
4653 if (node->is_OpaqueLoopInit()) {
4654 transformed_node = transform_strategy.transform_opaque_init(node->as_OpaqueLoopInit());
4655 } else {
4656 assert(node->is_OpaqueLoopStride(), "must be OpaqueLoopStrideNode");
4657 transformed_node = transform_strategy.transform_opaque_stride(node->as_OpaqueLoopStride());
4658 }
4659 // Add an orig->new mapping to correctly update the inputs of the copied graph in rewire_clones_to_cloned_inputs().
4660 _orig_to_new.put(node, transformed_node);
4661 }