1 /*
2 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/connode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/inlinetypenode.hpp"
35 #include "opto/loopnode.hpp"
36 #include "opto/matcher.hpp"
37 #include "opto/movenode.hpp"
38 #include "opto/mulnode.hpp"
39 #include "opto/opaquenode.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/subnode.hpp"
42 #include "opto/subtypenode.hpp"
43 #include "opto/superword.hpp"
44 #include "opto/vectornode.hpp"
45 #include "utilities/checkedCast.hpp"
46 #include "utilities/macros.hpp"
47
48 //=============================================================================
49 //------------------------------split_thru_phi---------------------------------
50 // Split Node 'n' through merge point if there is enough win.
51 Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
52 if ((n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) ||
53 (n->Opcode() == Op_ConvL2I && n->bottom_type() != TypeInt::INT)) {
54 // ConvI2L/ConvL2I may have type information on it which is unsafe to push up
55 // so disable this for now
56 return nullptr;
57 }
58
59 // Splitting range check CastIIs through a loop induction Phi can
60 // cause new Phis to be created that are left unrelated to the loop
61 // induction Phi and prevent optimizations (vectorization)
62 if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
63 n->in(1) == region->as_CountedLoop()->phi()) {
64 return nullptr;
65 }
66
67 // Inline types should not be split through Phis because they cannot be merged
68 // through Phi nodes but each value input needs to be merged individually.
69 if (n->is_InlineType()) {
70 return nullptr;
71 }
72
73 if (cannot_split_division(n, region)) {
74 return nullptr;
75 }
76
77 SplitThruPhiWins wins(region);
78 assert(!n->is_CFG(), "");
79 assert(region->is_Region(), "");
80
81 const Type* type = n->bottom_type();
82 const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
83 Node* phi;
84 if (t_oop != nullptr && t_oop->is_known_instance_field()) {
85 int iid = t_oop->instance_id();
86 int index = C->get_alias_index(t_oop);
87 int offset = t_oop->offset();
88 phi = new PhiNode(region, type, nullptr, iid, index, offset);
89 } else {
90 phi = PhiNode::make_blank(region, n);
91 }
92 uint old_unique = C->unique();
93 for (uint i = 1; i < region->req(); i++) {
94 Node* x;
95 Node* the_clone = nullptr;
96 if (region->in(i) == C->top()) {
97 x = C->top(); // Dead path? Use a dead data op
98 } else {
99 x = n->clone(); // Else clone up the data op
100 the_clone = x; // Remember for possible deletion.
101 // Alter data node to use pre-phi inputs
102 if (n->in(0) == region)
103 x->set_req( 0, region->in(i) );
104 for (uint j = 1; j < n->req(); j++) {
105 Node* in = n->in(j);
106 if (in->is_Phi() && in->in(0) == region)
107 x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
108 }
109 }
110 // Check for a 'win' on some paths
111 const Type* t = x->Value(&_igvn);
112
113 bool singleton = t->singleton();
114
115 // A TOP singleton indicates that there are no possible values incoming
116 // along a particular edge. In most cases, this is OK, and the Phi will
117 // be eliminated later in an Ideal call. However, we can't allow this to
118 // happen if the singleton occurs on loop entry, as the elimination of
119 // the PhiNode may cause the resulting node to migrate back to a previous
120 // loop iteration.
121 if (singleton && t == Type::TOP) {
122 // Is_Loop() == false does not confirm the absence of a loop (e.g., an
123 // irreducible loop may not be indicated by an affirmative is_Loop());
124 // therefore, the only top we can split thru a phi is on a backedge of
125 // a loop.
126 singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
127 }
128
129 if (singleton) {
130 wins.add_win(i);
131 x = makecon(t);
132 } else {
133 // We now call Identity to try to simplify the cloned node.
134 // Note that some Identity methods call phase->type(this).
135 // Make sure that the type array is big enough for
136 // our new node, even though we may throw the node away.
137 // (Note: This tweaking with igvn only works because x is a new node.)
138 _igvn.set_type(x, t);
139 // If x is a TypeNode, capture any more-precise type permanently into Node
140 // otherwise it will be not updated during igvn->transform since
141 // igvn->type(x) is set to x->Value() already.
142 x->raise_bottom_type(t);
143 Node* y = x->Identity(&_igvn);
144 if (y != x) {
145 wins.add_win(i);
146 x = y;
147 } else {
148 y = _igvn.hash_find(x);
149 if (y == nullptr) {
150 y = similar_subtype_check(x, region->in(i));
151 }
152 if (y) {
153 wins.add_win(i);
154 x = y;
155 } else {
156 // Else x is a new node we are keeping
157 // We do not need register_new_node_with_optimizer
158 // because set_type has already been called.
159 _igvn._worklist.push(x);
160 }
161 }
162 }
163
164 phi->set_req( i, x );
165
166 if (the_clone == nullptr) {
167 continue;
168 }
169
170 if (the_clone != x) {
171 _igvn.remove_dead_node(the_clone);
172 } else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
173 n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
174 // it is not a win if 'x' moved from an outer to an inner loop
175 // this edge case can only happen for Load nodes
176 wins.reset();
177 break;
178 }
179 }
180 // Too few wins?
181 if (!wins.profitable(policy)) {
182 _igvn.remove_dead_node(phi);
183 return nullptr;
184 }
185
186 // Record Phi
187 register_new_node( phi, region );
188
189 for (uint i2 = 1; i2 < phi->req(); i2++) {
190 Node *x = phi->in(i2);
191 // If we commoned up the cloned 'x' with another existing Node,
192 // the existing Node picks up a new use. We need to make the
193 // existing Node occur higher up so it dominates its uses.
194 Node *old_ctrl;
195 IdealLoopTree *old_loop;
196
197 if (x->is_Con()) {
198 assert(get_ctrl(x) == C->root(), "constant control is not root");
199 continue;
200 }
201 // The occasional new node
202 if (x->_idx >= old_unique) { // Found a new, unplaced node?
203 old_ctrl = nullptr;
204 old_loop = nullptr; // Not in any prior loop
205 } else {
206 old_ctrl = get_ctrl(x);
207 old_loop = get_loop(old_ctrl); // Get prior loop
208 }
209 // New late point must dominate new use
210 Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
211 if (new_ctrl == old_ctrl) // Nothing is changed
212 continue;
213
214 IdealLoopTree *new_loop = get_loop(new_ctrl);
215
216 // Don't move x into a loop if its uses are
217 // outside of loop. Otherwise x will be cloned
218 // for each use outside of this loop.
219 IdealLoopTree *use_loop = get_loop(region);
220 if (!new_loop->is_member(use_loop) &&
221 (old_loop == nullptr || !new_loop->is_member(old_loop))) {
222 // Take early control, later control will be recalculated
223 // during next iteration of loop optimizations.
224 new_ctrl = get_early_ctrl(x);
225 new_loop = get_loop(new_ctrl);
226 }
227 // Set new location
228 set_ctrl(x, new_ctrl);
229 // If changing loop bodies, see if we need to collect into new body
230 if (old_loop != new_loop) {
231 if (old_loop && !old_loop->_child)
232 old_loop->_body.yank(x);
233 if (!new_loop->_child)
234 new_loop->_body.push(x); // Collect body info
235 }
236 }
237
238 split_thru_phi_yank_old_nodes(n, region);
239 _igvn.replace_node(n, phi);
240
241 #ifndef PRODUCT
242 if (TraceLoopOpts) {
243 tty->print_cr("Split %d %s through %d Phi in %d %s",
244 n->_idx, n->Name(), phi->_idx, region->_idx, region->Name());
245 }
246 #endif // !PRODUCT
247
248 return phi;
249 }
250
251 // If the region is a Loop, we are removing the old n,
252 // and need to yank it from the _body. If any phi we
253 // just split through now has no use any more, it also
254 // has to be removed.
255 void PhaseIdealLoop::split_thru_phi_yank_old_nodes(Node* n, Node* region) {
256 IdealLoopTree* region_loop = get_loop(region);
257 if (region->is_Loop() && region_loop->is_innermost()) {
258 region_loop->_body.yank(n);
259 for (uint j = 1; j < n->req(); j++) {
260 PhiNode* phi = n->in(j)->isa_Phi();
261 // Check that phi belongs to the region and only has n as a use.
262 if (phi != nullptr &&
263 phi->in(0) == region &&
264 phi->unique_multiple_edges_out_or_null() == n) {
265 assert(get_ctrl(phi) == region, "sanity");
266 assert(get_ctrl(n) == region, "sanity");
267 region_loop->_body.yank(phi);
268 }
269 }
270 }
271 }
272
273 // Test whether node 'x' can move into an inner loop relative to node 'n'.
274 // Note: The test is not exact. Returns true if 'x' COULD end up in an inner loop,
275 // BUT it can also return true and 'x' is in the outer loop
276 bool PhaseIdealLoop::can_move_to_inner_loop(Node* n, LoopNode* n_loop, Node* x) {
277 IdealLoopTree* n_loop_tree = get_loop(n_loop);
278 IdealLoopTree* x_loop_tree = get_loop(get_early_ctrl(x));
279 // x_loop_tree should be outer or same loop as n_loop_tree
280 return !x_loop_tree->is_member(n_loop_tree);
281 }
282
283 // Subtype checks that carry profile data don't common so look for a replacement by following edges
284 Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
285 if (x->is_SubTypeCheck()) {
286 Node* in1 = x->in(1);
287 for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
288 Node* u = in1->fast_out(i);
289 if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
290 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
291 Node* bol = u->fast_out(j);
292 for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
293 Node* iff = bol->fast_out(k);
294 // Only dominating subtype checks are interesting: otherwise we risk replacing a subtype check by another with
295 // unrelated profile
296 if (iff->is_If() && is_dominator(iff, r_in)) {
297 return u;
298 }
299 }
300 }
301 }
302 }
303 }
304 return nullptr;
305 }
306
307 // Return true if 'n' is a Div or Mod node (without zero check If node which was removed earlier) with a loop phi divisor
308 // of a trip-counted (integer or long) loop with a backedge input that could be zero (include zero in its type range). In
309 // this case, we cannot split the division to the backedge as it could freely float above the loop exit check resulting in
310 // a division by zero. This situation is possible because the type of an increment node of an iv phi (trip-counter) could
311 // include zero while the iv phi does not (see PhiNode::Value() for trip-counted loops where we improve types of iv phis).
312 // We also need to check other loop phis as they could have been created in the same split-if pass when applying
313 // PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
314 bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
315 const Type* zero;
316 switch (n->Opcode()) {
317 case Op_DivI:
318 case Op_ModI:
319 case Op_UDivI:
320 case Op_UModI:
321 zero = TypeInt::ZERO;
322 break;
323 case Op_DivL:
324 case Op_ModL:
325 case Op_UDivL:
326 case Op_UModL:
327 zero = TypeLong::ZERO;
328 break;
329 default:
330 return false;
331 }
332
333 if (n->in(0) != nullptr) {
334 // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
335 return true;
336 }
337
338 Node* divisor = n->in(2);
339 return is_divisor_loop_phi(divisor, region) &&
340 loop_phi_backedge_type_contains_zero(divisor, zero);
341 }
342
343 bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) {
344 return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop;
345 }
346
347 bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
348 return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
349 }
350
351 //------------------------------dominated_by------------------------------------
352 // Replace the dominated test with an obvious true or false. Place it on the
353 // IGVN worklist for later cleanup. Move control-dependent data Nodes on the
354 // live path up to the dominating control.
355 void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool pin_array_access_nodes) {
356 if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }
357
358 // prevdom is the dominating projection of the dominating test.
359 assert(iff->Opcode() == Op_If ||
360 iff->Opcode() == Op_CountedLoopEnd ||
361 iff->Opcode() == Op_LongCountedLoopEnd ||
362 iff->Opcode() == Op_RangeCheck ||
363 iff->Opcode() == Op_ParsePredicate,
364 "Check this code when new subtype is added");
365
366 int pop = prevdom->Opcode();
367 assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
368 if (flip) {
369 if (pop == Op_IfTrue)
370 pop = Op_IfFalse;
371 else
372 pop = Op_IfTrue;
373 }
374 // 'con' is set to true or false to kill the dominated test.
375 Node* con = makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
376 // Hack the dominated test
377 _igvn.replace_input_of(iff, 1, con);
378
379 // If I don't have a reachable TRUE and FALSE path following the IfNode then
380 // I can assume this path reaches an infinite loop. In this case it's not
381 // important to optimize the data Nodes - either the whole compilation will
382 // be tossed or this path (and all data Nodes) will go dead.
383 if (iff->outcnt() != 2) {
384 return;
385 }
386
387 // Make control-dependent data Nodes on the live path (path that will remain
388 // once the dominated IF is removed) become control-dependent on the
389 // dominating projection.
390 Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);
391
392 if (dp == nullptr) {
393 return;
394 }
395
396 rewire_safe_outputs_to_dominator(dp, prevdom, pin_array_access_nodes);
397 }
398
399 void PhaseIdealLoop::rewire_safe_outputs_to_dominator(Node* source, Node* dominator, const bool pin_array_access_nodes) {
400 IdealLoopTree* old_loop = get_loop(source);
401
402 for (DUIterator_Fast imax, i = source->fast_outs(imax); i < imax; i++) {
403 Node* out = source->fast_out(i); // Control-dependent node
404 // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
405 if (out->depends_only_on_test() && _igvn.no_dependent_zero_check(out)) {
406 assert(out->in(0) == source, "must be control dependent on source");
407 _igvn.replace_input_of(out, 0, dominator);
408 if (pin_array_access_nodes) {
409 // Because of Loop Predication, Loads and range check Cast nodes that are control dependent on this range
410 // check (that is about to be removed) now depend on multiple dominating Hoisted Check Predicates. After the
411 // removal of this range check, these control dependent nodes end up at the lowest/nearest dominating predicate
412 // in the graph. To ensure that these Loads/Casts do not float above any of the dominating checks (even when the
413 // lowest dominating check is later replaced by yet another dominating check), we need to pin them at the lowest
414 // dominating check.
415 Node* clone = out->pin_array_access_node();
416 if (clone != nullptr) {
417 clone = _igvn.register_new_node_with_optimizer(clone, out);
418 _igvn.replace_node(out, clone);
419 out = clone;
420 }
421 }
422 set_early_ctrl(out, false);
423 IdealLoopTree* new_loop = get_loop(get_ctrl(out));
424 if (old_loop != new_loop) {
425 if (!old_loop->_child) {
426 old_loop->_body.yank(out);
427 }
428 if (!new_loop->_child) {
429 new_loop->_body.push(out);
430 }
431 }
432 --i;
433 --imax;
434 }
435 }
436 }
437
438 //------------------------------has_local_phi_input----------------------------
439 // Return TRUE if 'n' has Phi inputs from its local block and no other
440 // block-local inputs (all non-local-phi inputs come from earlier blocks)
441 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
442 Node *n_ctrl = get_ctrl(n);
443 // See if some inputs come from a Phi in this block, or from before
444 // this block.
445 uint i;
446 for( i = 1; i < n->req(); i++ ) {
447 Node *phi = n->in(i);
448 if( phi->is_Phi() && phi->in(0) == n_ctrl )
449 break;
450 }
451 if( i >= n->req() )
452 return nullptr; // No Phi inputs; nowhere to clone thru
453
454 // Check for inputs created between 'n' and the Phi input. These
455 // must split as well; they have already been given the chance
456 // (courtesy of a post-order visit) and since they did not we must
457 // recover the 'cost' of splitting them by being very profitable
458 // when splitting 'n'. Since this is unlikely we simply give up.
459 for( i = 1; i < n->req(); i++ ) {
460 Node *m = n->in(i);
461 if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
462 // We allow the special case of AddP's with no local inputs.
463 // This allows us to split-up address expressions.
464 if (m->is_AddP() &&
465 get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
466 get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
467 get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
468 // Move the AddP up to the dominating point. That's fine because control of m's inputs
469 // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
470 Node* c = find_non_split_ctrl(idom(n_ctrl));
471 if (c->is_OuterStripMinedLoop()) {
472 c->as_Loop()->verify_strip_mined(1);
473 c = c->in(LoopNode::EntryControl);
474 }
475 set_ctrl_and_loop(m, c);
476 continue;
477 }
478 return nullptr;
479 }
480 assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
481 }
482
483 return n_ctrl;
484 }
485
486 // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
487 Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
488 assert(bt == T_INT || bt == T_LONG, "only for integers");
489 int n_op = n->Opcode();
490
491 if (n_op == Op_LShift(bt)) {
492 // Scale is loop invariant
493 Node* scale = n->in(2);
494 Node* scale_ctrl = get_ctrl(scale);
495 IdealLoopTree* scale_loop = get_loop(scale_ctrl);
496 if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
497 return nullptr;
498 }
499 const TypeInt* scale_t = scale->bottom_type()->isa_int();
500 if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
501 return nullptr; // Dont bother with byte/short masking
502 }
503 // Add must vary with loop (else shift would be loop-invariant)
504 Node* add = n->in(1);
505 Node* add_ctrl = get_ctrl(add);
506 IdealLoopTree* add_loop = get_loop(add_ctrl);
507 if (n_loop != add_loop) {
508 return nullptr; // happens w/ evil ZKM loops
509 }
510
511 // Convert I-V into I+ (0-V); same for V-I
512 if (add->Opcode() == Op_Sub(bt) &&
513 _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
514 assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
515 Node* zero = integercon(0, bt);
516 Node* neg = SubNode::make(zero, add->in(2), bt);
517 register_new_node_with_ctrl_of(neg, add->in(2));
518 add = AddNode::make(add->in(1), neg, bt);
519 register_new_node(add, add_ctrl);
520 }
521 if (add->Opcode() != Op_Add(bt)) return nullptr;
522 assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
523 // See if one add input is loop invariant
524 Node* add_var = add->in(1);
525 Node* add_var_ctrl = get_ctrl(add_var);
526 IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
527 Node* add_invar = add->in(2);
528 Node* add_invar_ctrl = get_ctrl(add_invar);
529 IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
530 if (add_invar_loop == n_loop) {
531 // Swap to find the invariant part
532 add_invar = add_var;
533 add_invar_ctrl = add_var_ctrl;
534 add_invar_loop = add_var_loop;
535 add_var = add->in(2);
536 } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
537 return nullptr;
538 }
539 if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
540 return nullptr; // No invariant part of the add?
541 }
542
543 // Yes! Reshape address expression!
544 Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
545 Node* inv_scale_ctrl =
546 dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
547 add_invar_ctrl : scale_ctrl;
548 register_new_node(inv_scale, inv_scale_ctrl);
549 Node* var_scale = LShiftNode::make(add_var, scale, bt);
550 register_new_node(var_scale, n_ctrl);
551 Node* var_add = AddNode::make(var_scale, inv_scale, bt);
552 register_new_node(var_add, n_ctrl);
553 _igvn.replace_node(n, var_add);
554 return var_add;
555 }
556 return nullptr;
557 }
558
559 //------------------------------remix_address_expressions----------------------
560 // Rework addressing expressions to get the most loop-invariant stuff
561 // moved out. We'd like to do all associative operators, but it's especially
562 // important (common) to do address expressions.
563 Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
564 if (!has_ctrl(n)) return nullptr;
565 Node* n_ctrl = get_ctrl(n);
566 IdealLoopTree* n_loop = get_loop(n_ctrl);
567
568 // See if 'n' mixes loop-varying and loop-invariant inputs and
569 // itself is loop-varying.
570
571 // Only interested in binary ops (and AddP)
572 if (n->req() < 3 || n->req() > 4) return nullptr;
573
574 Node* n1_ctrl = get_ctrl(n->in( 1));
575 Node* n2_ctrl = get_ctrl(n->in( 2));
576 Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
577 IdealLoopTree* n1_loop = get_loop(n1_ctrl);
578 IdealLoopTree* n2_loop = get_loop(n2_ctrl);
579 IdealLoopTree* n3_loop = get_loop(n3_ctrl);
580
581 // Does one of my inputs spin in a tighter loop than self?
582 if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
583 (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
584 (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
585 return nullptr; // Leave well enough alone
586 }
587
588 // Is at least one of my inputs loop-invariant?
589 if (n1_loop == n_loop &&
590 n2_loop == n_loop &&
591 n3_loop == n_loop) {
592 return nullptr; // No loop-invariant inputs
593 }
594
595 Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
596 if (res != nullptr) {
597 return res;
598 }
599 res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
600 if (res != nullptr) {
601 return res;
602 }
603
604 int n_op = n->Opcode();
605 // Replace (I+V) with (V+I)
606 if (n_op == Op_AddI ||
607 n_op == Op_AddL ||
608 n_op == Op_AddF ||
609 n_op == Op_AddD ||
610 n_op == Op_MulI ||
611 n_op == Op_MulL ||
612 n_op == Op_MulF ||
613 n_op == Op_MulD) {
614 if (n2_loop == n_loop) {
615 assert(n1_loop != n_loop, "");
616 n->swap_edges(1, 2);
617 }
618 }
619
620 // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
621 // but not if I2 is a constant. Skip for irreducible loops.
622 if (n_op == Op_AddP && n_loop->_head->is_Loop()) {
623 if (n2_loop == n_loop && n3_loop != n_loop) {
624 if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
625 Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
626 Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
627 IdealLoopTree* n22loop = get_loop(n22_ctrl);
628 IdealLoopTree* n23_loop = get_loop(n23_ctrl);
629 if (n22loop != n_loop && n22loop->is_member(n_loop) &&
630 n23_loop == n_loop) {
631 Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3));
632 // Stuff new AddP in the loop preheader
633 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
634 Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3));
635 register_new_node(add2, n_ctrl);
636 _igvn.replace_node(n, add2);
637 return add2;
638 }
639 }
640 }
641
642 // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
643 if (n2_loop != n_loop && n3_loop == n_loop) {
644 if (n->in(3)->Opcode() == Op_AddX) {
645 Node* V = n->in(3)->in(1);
646 Node* I = n->in(3)->in(2);
647 if (ctrl_is_member(n_loop, V)) {
648 } else {
649 Node *tmp = V; V = I; I = tmp;
650 }
651 if (!ctrl_is_member(n_loop, I)) {
652 Node* add1 = new AddPNode(n->in(1), n->in(2), I);
653 // Stuff new AddP in the loop preheader
654 register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
655 Node* add2 = new AddPNode(n->in(1), add1, V);
656 register_new_node(add2, n_ctrl);
657 _igvn.replace_node(n, add2);
658 return add2;
659 }
660 }
661 }
662 }
663
664 return nullptr;
665 }
666
667 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
668 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
669 assert(n->Opcode() == Op_AddI, "sanity");
670 Node * nn = nullptr;
671 Node * in1 = n->in(1);
672 Node * in2 = n->in(2);
673 if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
674 IdealLoopTree* loop_n = get_loop(get_ctrl(n));
675 if (loop_n->is_counted() &&
676 loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
677 Matcher::match_rule_supported(Op_MulAddVS2VI) &&
678 Matcher::match_rule_supported(Op_MulAddS2I)) {
679 Node* mul_in1 = in1->in(1);
680 Node* mul_in2 = in1->in(2);
681 Node* mul_in3 = in2->in(1);
682 Node* mul_in4 = in2->in(2);
683 if (mul_in1->Opcode() == Op_LoadS &&
684 mul_in2->Opcode() == Op_LoadS &&
685 mul_in3->Opcode() == Op_LoadS &&
686 mul_in4->Opcode() == Op_LoadS) {
687 IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
688 IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
689 IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
690 IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
691 IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
692 IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
693 // All nodes should be in the same counted loop.
694 if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
695 loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
696 Node* adr1 = mul_in1->in(MemNode::Address);
697 Node* adr2 = mul_in2->in(MemNode::Address);
698 Node* adr3 = mul_in3->in(MemNode::Address);
699 Node* adr4 = mul_in4->in(MemNode::Address);
700 if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
701 if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
702 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
703 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
704 register_new_node_with_ctrl_of(nn, n);
705 _igvn.replace_node(n, nn);
706 return nn;
707 } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
708 (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
709 nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
710 register_new_node_with_ctrl_of(nn, n);
711 _igvn.replace_node(n, nn);
712 return nn;
713 }
714 }
715 }
716 }
717 }
718 }
719 return nn;
720 }
721
722 //------------------------------conditional_move-------------------------------
723 // Attempt to replace a Phi with a conditional move. We have some pretty
724 // strict profitability requirements. All Phis at the merge point must
725 // be converted, so we can remove the control flow. We need to limit the
726 // number of c-moves to a small handful. All code that was in the side-arms
727 // of the CFG diamond is now speculatively executed. This code has to be
728 // "cheap enough". We are pretty much limited to CFG diamonds that merge
729 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
730 Node *PhaseIdealLoop::conditional_move( Node *region ) {
731
732 assert(region->is_Region(), "sanity check");
733 if (region->req() != 3) return nullptr;
734
735 // Check for CFG diamond
736 Node *lp = region->in(1);
737 Node *rp = region->in(2);
738 if (!lp || !rp) return nullptr;
739 Node *lp_c = lp->in(0);
740 if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
741 IfNode *iff = lp_c->as_If();
742
743 // Check for ops pinned in an arm of the diamond.
744 // Can't remove the control flow in this case
745 if (lp->outcnt() > 1) return nullptr;
746 if (rp->outcnt() > 1) return nullptr;
747
748 IdealLoopTree* r_loop = get_loop(region);
749 assert(r_loop == get_loop(iff), "sanity");
750 // Always convert to CMOVE if all results are used only outside this loop.
751 bool used_inside_loop = (r_loop == _ltree_root);
752
753 // Check profitability
754 int cost = 0;
755 int phis = 0;
756 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
757 Node *out = region->fast_out(i);
758 if (!out->is_Phi()) continue; // Ignore other control edges, etc
759 phis++;
760 PhiNode* phi = out->as_Phi();
761 BasicType bt = phi->type()->basic_type();
762 switch (bt) {
763 case T_DOUBLE:
764 case T_FLOAT:
765 if (C->use_cmove()) {
766 continue; //TODO: maybe we want to add some cost
767 }
768 cost += Matcher::float_cmove_cost(); // Could be very expensive
769 break;
770 case T_LONG: {
771 cost += Matcher::long_cmove_cost(); // May encodes as 2 CMOV's
772 }
773 case T_INT: // These all CMOV fine
774 case T_ADDRESS: { // (RawPtr)
775 cost++;
776 break;
777 }
778 case T_NARROWOOP: // Fall through
779 case T_OBJECT: { // Base oops are OK, but not derived oops
780 const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
781 // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
782 // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
783 // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
784 // have a Phi for the base here that we convert to a CMOVE all is well
785 // and good. But if the base is dead, we'll not make a CMOVE. Later
786 // the allocator will have to produce a base by creating a CMOVE of the
787 // relevant bases. This puts the allocator in the business of
788 // manufacturing expensive instructions, generally a bad plan.
789 // Just Say No to Conditionally-Moved Derived Pointers.
790 if (tp && tp->offset() != 0)
791 return nullptr;
792 cost++;
793 break;
794 }
795 default:
796 return nullptr; // In particular, can't do memory or I/O
797 }
798 // Add in cost any speculative ops
799 for (uint j = 1; j < region->req(); j++) {
800 Node *proj = region->in(j);
801 Node *inp = phi->in(j);
802 if (inp->isa_InlineType()) {
803 // TODO 8302217 This prevents PhiNode::push_inline_types_through
804 return nullptr;
805 }
806 if (get_ctrl(inp) == proj) { // Found local op
807 cost++;
808 // Check for a chain of dependent ops; these will all become
809 // speculative in a CMOV.
810 for (uint k = 1; k < inp->req(); k++)
811 if (get_ctrl(inp->in(k)) == proj)
812 cost += ConditionalMoveLimit; // Too much speculative goo
813 }
814 }
815 // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
816 // This will likely Split-If, a higher-payoff operation.
817 for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
818 Node* use = phi->fast_out(k);
819 if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
820 cost += ConditionalMoveLimit;
821 // Is there a use inside the loop?
822 // Note: check only basic types since CMoveP is pinned.
823 if (!used_inside_loop && is_java_primitive(bt)) {
824 IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
825 if (r_loop == u_loop || r_loop->is_member(u_loop)) {
826 used_inside_loop = true;
827 }
828 }
829 }
830 }//for
831 Node* bol = iff->in(1);
832 assert(!bol->is_OpaqueInitializedAssertionPredicate(), "Initialized Assertion Predicates cannot form a diamond with Halt");
833 if (bol->is_OpaqueTemplateAssertionPredicate()) {
834 // Ignore Template Assertion Predicates with OpaqueTemplateAssertionPredicate nodes.
835 return nullptr;
836 }
837 if (bol->is_OpaqueMultiversioning()) {
838 assert(bol->as_OpaqueMultiversioning()->is_useless(), "Must be useless, i.e. fast main loop has already disappeared.");
839 // Ignore multiversion_if that just lost its loops. The OpaqueMultiversioning is marked useless,
840 // and will make the multiversion_if constant fold in the next IGVN round.
841 return nullptr;
842 }
843 if (!bol->is_Bool()) {
844 assert(false, "Expected Bool, but got %s", NodeClassNames[bol->Opcode()]);
845 return nullptr;
846 }
847 int cmp_op = bol->in(1)->Opcode();
848 if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
849 return nullptr;
850 }
851 // It is expensive to generate flags from a float compare.
852 // Avoid duplicated float compare.
853 if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;
854
855 float infrequent_prob = PROB_UNLIKELY_MAG(3);
856 // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
857 if (used_inside_loop) {
858 if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo
859
860 // BlockLayoutByFrequency optimization moves infrequent branch
861 // from hot path. No point in CMOV'ing in such case (110 is used
862 // instead of 100 to take into account not exactness of float value).
863 if (BlockLayoutByFrequency) {
864 infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
865 }
866 }
867 // Check for highly predictable branch. No point in CMOV'ing if
868 // we are going to predict accurately all the time.
869 if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
870 //keep going
871 } else if (iff->_prob < infrequent_prob ||
872 iff->_prob > (1.0f - infrequent_prob))
873 return nullptr;
874
875 // --------------
876 // Now replace all Phis with CMOV's
877 Node *cmov_ctrl = iff->in(0);
878 uint flip = (lp->Opcode() == Op_IfTrue);
879 Node_List wq;
880 while (1) {
881 PhiNode* phi = nullptr;
882 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
883 Node *out = region->fast_out(i);
884 if (out->is_Phi()) {
885 phi = out->as_Phi();
886 break;
887 }
888 }
889 if (phi == nullptr || _igvn.type(phi) == Type::TOP || !CMoveNode::supported(_igvn.type(phi))) {
890 break;
891 }
892 // Move speculative ops
893 wq.push(phi);
894 while (wq.size() > 0) {
895 Node *n = wq.pop();
896 for (uint j = 1; j < n->req(); j++) {
897 Node* m = n->in(j);
898 if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
899 set_ctrl(m, cmov_ctrl);
900 wq.push(m);
901 }
902 }
903 }
904 Node* cmov = CMoveNode::make(iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
905 register_new_node(cmov, cmov_ctrl);
906 _igvn.replace_node(phi, cmov);
907 #ifndef PRODUCT
908 if (TraceLoopOpts) {
909 tty->print("CMOV ");
910 r_loop->dump_head();
911 if (Verbose) {
912 bol->in(1)->dump(1);
913 cmov->dump(1);
914 }
915 }
916 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
917 #endif
918 }
919
920 // The useless CFG diamond will fold up later; see the optimization in
921 // RegionNode::Ideal.
922 _igvn._worklist.push(region);
923
924 return iff->in(1);
925 }
926
927 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
928 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
929 Node* u = m->fast_out(i);
930 if (u->is_CFG()) {
931 if (u->is_NeverBranch()) {
932 u = u->as_NeverBranch()->proj_out(0);
933 enqueue_cfg_uses(u, wq);
934 } else {
935 wq.push(u);
936 }
937 }
938 }
939 }
940
941 // Try moving a store out of a loop, right before the loop
942 Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
943 // Store has to be first in the loop body
944 IdealLoopTree *n_loop = get_loop(n_ctrl);
945 if (n->is_Store() && n_loop != _ltree_root &&
946 n_loop->is_loop() && n_loop->_head->is_Loop() &&
947 n->in(0) != nullptr) {
948 Node* address = n->in(MemNode::Address);
949 Node* value = n->in(MemNode::ValueIn);
950 Node* mem = n->in(MemNode::Memory);
951
952 // - address and value must be loop invariant
953 // - memory must be a memory Phi for the loop
954 // - Store must be the only store on this memory slice in the
955 // loop: if there's another store following this one then value
956 // written at iteration i by the second store could be overwritten
957 // at iteration i+n by the first store: it's not safe to move the
958 // first store out of the loop
959 // - nothing must observe the memory Phi: it guarantees no read
960 // before the store, we are also guaranteed the store post
961 // dominates the loop head (ignoring a possible early
962 // exit). Otherwise there would be extra Phi involved between the
963 // loop's Phi and the store.
964 // - there must be no early exit from the loop before the Store
965 // (such an exit most of the time would be an extra use of the
966 // memory Phi but sometimes is a bottom memory Phi that takes the
967 // store as input).
968
969 if (!ctrl_is_member(n_loop, address) &&
970 !ctrl_is_member(n_loop, value) &&
971 mem->is_Phi() && mem->in(0) == n_loop->_head &&
972 mem->outcnt() == 1 &&
973 mem->in(LoopNode::LoopBackControl) == n) {
974
975 assert(n_loop->_tail != nullptr, "need a tail");
976 assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");
977
978 // Verify that there's no early exit of the loop before the store.
979 bool ctrl_ok = false;
980 {
981 // Follow control from loop head until n, we exit the loop or
982 // we reach the tail
983 ResourceMark rm;
984 Unique_Node_List wq;
985 wq.push(n_loop->_head);
986
987 for (uint next = 0; next < wq.size(); ++next) {
988 Node *m = wq.at(next);
989 if (m == n->in(0)) {
990 ctrl_ok = true;
991 continue;
992 }
993 assert(!has_ctrl(m), "should be CFG");
994 if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
995 ctrl_ok = false;
996 break;
997 }
998 enqueue_cfg_uses(m, wq);
999 if (wq.size() > 10) {
1000 ctrl_ok = false;
1001 break;
1002 }
1003 }
1004 }
1005 if (ctrl_ok) {
1006 // move the Store
1007 _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
1008 _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
1009 _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
1010 // Disconnect the phi now. An empty phi can confuse other
1011 // optimizations in this pass of loop opts.
1012 _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
1013 n_loop->_body.yank(mem);
1014
1015 set_ctrl_and_loop(n, n->in(0));
1016
1017 return n;
1018 }
1019 }
1020 }
1021 return nullptr;
1022 }
1023
1024 // Try moving a store out of a loop, right after the loop
1025 void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
1026 if (n->is_Store() && n->in(0) != nullptr) {
1027 Node *n_ctrl = get_ctrl(n);
1028 IdealLoopTree *n_loop = get_loop(n_ctrl);
1029 // Store must be in a loop
1030 if (n_loop != _ltree_root && !n_loop->_irreducible) {
1031 Node* address = n->in(MemNode::Address);
1032 Node* value = n->in(MemNode::ValueIn);
1033 // address must be loop invariant
1034 if (!ctrl_is_member(n_loop, address)) {
1035 // Store must be last on this memory slice in the loop and
1036 // nothing in the loop must observe it
1037 Node* phi = nullptr;
1038 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1039 Node* u = n->fast_out(i);
1040 if (has_ctrl(u)) { // control use?
1041 if (!ctrl_is_member(n_loop, u)) {
1042 continue;
1043 }
1044 if (u->is_Phi() && u->in(0) == n_loop->_head) {
1045 assert(_igvn.type(u) == Type::MEMORY, "bad phi");
1046 // multiple phis on the same slice are possible
1047 if (phi != nullptr) {
1048 return;
1049 }
1050 phi = u;
1051 continue;
1052 }
1053 }
1054 return;
1055 }
1056 if (phi != nullptr) {
1057 // Nothing in the loop before the store (next iteration)
1058 // must observe the stored value
1059 bool mem_ok = true;
1060 {
1061 ResourceMark rm;
1062 Unique_Node_List wq;
1063 wq.push(phi);
1064 for (uint next = 0; next < wq.size() && mem_ok; ++next) {
1065 Node *m = wq.at(next);
1066 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
1067 Node* u = m->fast_out(i);
1068 if (u->is_Store() || u->is_Phi()) {
1069 if (u != n) {
1070 wq.push(u);
1071 mem_ok = (wq.size() <= 10);
1072 }
1073 } else {
1074 mem_ok = false;
1075 break;
1076 }
1077 }
1078 }
1079 }
1080 if (mem_ok) {
1081 // Move the store out of the loop if the LCA of all
1082 // users (except for the phi) is outside the loop.
1083 Node* hook = new Node(1);
1084 hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
1085 _igvn.rehash_node_delayed(phi);
1086 int count = phi->replace_edge(n, hook, &_igvn);
1087 assert(count > 0, "inconsistent phi");
1088
1089 // Compute latest point this store can go
1090 Node* lca = get_late_ctrl(n, get_ctrl(n));
1091 if (lca->is_OuterStripMinedLoop()) {
1092 lca = lca->in(LoopNode::EntryControl);
1093 }
1094 if (n_loop->is_member(get_loop(lca))) {
1095 // LCA is in the loop - bail out
1096 _igvn.replace_node(hook, n);
1097 return;
1098 }
1099 #ifdef ASSERT
1100 if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
1101 assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
1102 n_loop->_head->as_Loop()->verify_strip_mined(1);
1103 Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
1104 IdealLoopTree* outer_loop = get_loop(outer);
1105 assert(n_loop->_parent == outer_loop, "broken loop tree");
1106 assert(get_loop(lca) == outer_loop, "safepoint in outer loop consume all memory state");
1107 }
1108 #endif
1109 lca = place_outside_loop(lca, n_loop);
1110 assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
1111 assert(get_loop(lca)->_nest < n_loop->_nest || get_loop(lca)->_head->as_Loop()->is_in_infinite_subgraph(), "must not be moved into inner loop");
1112
1113 // Move store out of the loop
1114 _igvn.replace_node(hook, n->in(MemNode::Memory));
1115 _igvn.replace_input_of(n, 0, lca);
1116 set_ctrl_and_loop(n, lca);
1117
1118 // Disconnect the phi now. An empty phi can confuse other
1119 // optimizations in this pass of loop opts..
1120 if (phi->in(LoopNode::LoopBackControl) == phi) {
1121 _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
1122 n_loop->_body.yank(phi);
1123 }
1124 }
1125 }
1126 }
1127 }
1128 }
1129 }
1130
1131 // We can't use immutable memory for the flat array check because we are loading the mark word which is
1132 // mutable. Although the bits we are interested in are immutable (we check for markWord::unlocked_value),
1133 // we need to use raw memory to not break anti dependency analysis. Below code will attempt to still move
1134 // flat array checks out of loops, mainly to enable loop unswitching.
1135 void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
1136 // Skip checks for more than one array
1137 if (n->req() > 3) {
1138 return;
1139 }
1140 Node* mem = n->in(FlatArrayCheckNode::Memory);
1141 Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
1142 IdealLoopTree* check_loop = get_loop(get_ctrl(n));
1143 IdealLoopTree* ary_loop = get_loop(get_ctrl(array));
1144
1145 // Check if array is loop invariant
1146 if (!check_loop->is_member(ary_loop)) {
1147 // Walk up memory graph from the check until we leave the loop
1148 VectorSet wq;
1149 wq.set(mem->_idx);
1150 while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
1151 if (mem->is_Phi()) {
1152 mem = mem->in(1);
1153 } else if (mem->is_MergeMem()) {
1154 mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
1155 } else if (mem->is_Proj()) {
1156 mem = mem->in(0);
1157 } else if (mem->is_MemBar() || mem->is_SafePoint()) {
1158 mem = mem->in(TypeFunc::Memory);
1159 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1160 mem = mem->in(MemNode::Memory);
1161 } else {
1162 #ifdef ASSERT
1163 mem->dump();
1164 #endif
1165 ShouldNotReachHere();
1166 }
1167 if (wq.test_set(mem->_idx)) {
1168 return;
1169 }
1170 }
1171 // Replace memory input and re-compute ctrl to move the check out of the loop
1172 _igvn.replace_input_of(n, 1, mem);
1173 set_ctrl_and_loop(n, get_early_ctrl(n));
1174 Node* bol = n->unique_out();
1175 set_ctrl_and_loop(bol, get_early_ctrl(bol));
1176 }
1177 }
1178
1179 //------------------------------split_if_with_blocks_pre-----------------------
1180 // Do the real work in a non-recursive function. Data nodes want to be
1181 // cloned in the pre-order so they can feed each other nicely.
1182 Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
1183 // Cloning these guys is unlikely to win
1184 int n_op = n->Opcode();
1185 if (n_op == Op_MergeMem) {
1186 return n;
1187 }
1188 if (n->is_Proj()) {
1189 return n;
1190 }
1191
1192 if (n->isa_FlatArrayCheck()) {
1193 move_flat_array_check_out_of_loop(n);
1194 return n;
1195 }
1196
1197 // Do not clone-up CmpFXXX variations, as these are always
1198 // followed by a CmpI
1199 if (n->is_Cmp()) {
1200 return n;
1201 }
1202 // Attempt to use a conditional move instead of a phi/branch
1203 if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
1204 Node *cmov = conditional_move( n );
1205 if (cmov) {
1206 return cmov;
1207 }
1208 }
1209 if (n->is_CFG() || n->is_LoadStore()) {
1210 return n;
1211 }
1212 if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
1213 if (!C->major_progress()) { // If chance of no more loop opts...
1214 _igvn._worklist.push(n); // maybe we'll remove them
1215 }
1216 return n;
1217 }
1218
1219 if (n->is_Con()) {
1220 return n; // No cloning for Con nodes
1221 }
1222
1223 Node *n_ctrl = get_ctrl(n);
1224 if (!n_ctrl) {
1225 return n; // Dead node
1226 }
1227
1228 Node* res = try_move_store_before_loop(n, n_ctrl);
1229 if (res != nullptr) {
1230 return n;
1231 }
1232
1233 // Attempt to remix address expressions for loop invariants
1234 Node *m = remix_address_expressions( n );
1235 if( m ) return m;
1236
1237 if (n_op == Op_AddI) {
1238 Node *nn = convert_add_to_muladd( n );
1239 if ( nn ) return nn;
1240 }
1241
1242 if (n->is_ConstraintCast() && n->as_ConstraintCast()->dependency().narrows_type()) {
1243 Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
1244 // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
1245 // Node control inputs don't necessarily agree with loop control info (due to
1246 // transformations happened in between), thus additional dominance check is needed
1247 // to keep loop info valid.
1248 if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
1249 _igvn.replace_node(n, dom_cast);
1250 return dom_cast;
1251 }
1252 }
1253
1254 // Determine if the Node has inputs from some local Phi.
1255 // Returns the block to clone thru.
1256 Node *n_blk = has_local_phi_input( n );
1257 if( !n_blk ) return n;
1258
1259 // Do not clone the trip counter through on a CountedLoop
1260 // (messes up the canonical shape).
1261 if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
1262 (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
1263 return n;
1264 }
1265 // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
1266 if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
1267 n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
1268 return n;
1269 }
1270
1271 // Check for having no control input; not pinned. Allow
1272 // dominating control.
1273 if (n->in(0)) {
1274 Node *dom = idom(n_blk);
1275 if (dom_lca(n->in(0), dom) != n->in(0)) {
1276 return n;
1277 }
1278 }
1279 // Policy: when is it profitable. You must get more wins than
1280 // policy before it is considered profitable. Policy is usually 0,
1281 // so 1 win is considered profitable. Big merges will require big
1282 // cloning, so get a larger policy.
1283 int policy = n_blk->req() >> 2;
1284
1285 // If the loop is a candidate for range check elimination,
1286 // delay splitting through it's phi until a later loop optimization
1287 if (n_blk->is_BaseCountedLoop()) {
1288 IdealLoopTree *lp = get_loop(n_blk);
1289 if (lp && lp->_rce_candidate) {
1290 return n;
1291 }
1292 }
1293
1294 if (must_throttle_split_if()) return n;
1295
1296 // Split 'n' through the merge point if it is profitable, replacing it with a new phi.
1297 Node* phi = split_thru_phi(n, n_blk, policy);
1298 if (phi == nullptr) { return n; }
1299
1300 // Moved a load around the loop, 'en-registering' something.
1301 if (n_blk->is_Loop() && n->is_Load() &&
1302 !phi->in(LoopNode::LoopBackControl)->is_Load())
1303 C->set_major_progress();
1304
1305 return phi;
1306 }
1307
1308 static bool merge_point_too_heavy(Compile* C, Node* region) {
1309 // Bail out if the region and its phis have too many users.
1310 int weight = 0;
1311 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1312 weight += region->fast_out(i)->outcnt();
1313 }
1314 int nodes_left = C->max_node_limit() - C->live_nodes();
1315 if (weight * 8 > nodes_left) {
1316 if (PrintOpto) {
1317 tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
1318 }
1319 return true;
1320 } else {
1321 return false;
1322 }
1323 }
1324
1325 static bool merge_point_safe(Node* region) {
1326 // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1327 // having a PhiNode input. This sidesteps the dangerous case where the split
1328 // ConvI2LNode may become TOP if the input Value() does not
1329 // overlap the ConvI2L range, leaving a node which may not dominate its
1330 // uses.
1331 // A better fix for this problem can be found in the BugTraq entry, but
1332 // expediency for Mantis demands this hack.
1333 #ifdef _LP64
1334 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1335 Node* n = region->fast_out(i);
1336 if (n->is_Phi()) {
1337 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1338 Node* m = n->fast_out(j);
1339 if (m->Opcode() == Op_ConvI2L)
1340 return false;
1341 if (m->is_CastII()) {
1342 return false;
1343 }
1344 }
1345 }
1346 }
1347 #endif
1348 return true;
1349 }
1350
1351
1352 //------------------------------place_outside_loop---------------------------------
1353 // Place some computation outside of this loop on the path to the use passed as argument
1354 Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
1355 Node* head = loop->_head;
1356 assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
1357 if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
1358 loop = loop->_parent;
1359 assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
1360 }
1361
1362 // Pick control right outside the loop
1363 for (;;) {
1364 Node* dom = idom(useblock);
1365 if (loop->is_member(get_loop(dom))) {
1366 break;
1367 }
1368 useblock = dom;
1369 }
1370 assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
1371 return useblock;
1372 }
1373
1374
1375 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1376 if (!n->is_If() || n->is_BaseCountedLoopEnd()) {
1377 return false;
1378 }
1379 if (!n->in(0)->is_Region()) {
1380 return false;
1381 }
1382
1383 Node* region = n->in(0);
1384 Node* dom = idom(region);
1385 if (!dom->is_If() || !n->as_If()->same_condition(dom, &_igvn)) {
1386 return false;
1387 }
1388 IfNode* dom_if = dom->as_If();
1389 IfTrueNode* proj_true = dom_if->true_proj();
1390 IfFalseNode* proj_false = dom_if->false_proj();
1391
1392 for (uint i = 1; i < region->req(); i++) {
1393 if (is_dominator(proj_true, region->in(i))) {
1394 continue;
1395 }
1396 if (is_dominator(proj_false, region->in(i))) {
1397 continue;
1398 }
1399 return false;
1400 }
1401
1402 return true;
1403 }
1404
1405
1406 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1407 if (must_throttle_split_if()) {
1408 return false;
1409 }
1410
1411 // Do not do 'split-if' if irreducible loops are present.
1412 if (_has_irreducible_loops) {
1413 return false;
1414 }
1415
1416 if (merge_point_too_heavy(C, n_ctrl)) {
1417 return false;
1418 }
1419
1420 // Do not do 'split-if' if some paths are dead. First do dead code
1421 // elimination and then see if its still profitable.
1422 for (uint i = 1; i < n_ctrl->req(); i++) {
1423 if (n_ctrl->in(i) == C->top()) {
1424 return false;
1425 }
1426 }
1427
1428 // If trying to do a 'Split-If' at the loop head, it is only
1429 // profitable if the cmp folds up on BOTH paths. Otherwise we
1430 // risk peeling a loop forever.
1431
1432 // CNC - Disabled for now. Requires careful handling of loop
1433 // body selection for the cloned code. Also, make sure we check
1434 // for any input path not being in the same loop as n_ctrl. For
1435 // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1436 // because the alternative loop entry points won't be converted
1437 // into LoopNodes.
1438 IdealLoopTree *n_loop = get_loop(n_ctrl);
1439 for (uint j = 1; j < n_ctrl->req(); j++) {
1440 if (get_loop(n_ctrl->in(j)) != n_loop) {
1441 return false;
1442 }
1443 }
1444
1445 // Check for safety of the merge point.
1446 if (!merge_point_safe(n_ctrl)) {
1447 return false;
1448 }
1449
1450 return true;
1451 }
1452
1453 // Detect if the node is the inner strip-mined loop
1454 // Return: null if it's not the case, or the exit of outer strip-mined loop
1455 static Node* is_inner_of_stripmined_loop(const Node* out) {
1456 Node* out_le = nullptr;
1457
1458 if (out->is_CountedLoopEnd()) {
1459 const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1460
1461 if (loop != nullptr && loop->is_strip_mined()) {
1462 out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1463 }
1464 }
1465
1466 return out_le;
1467 }
1468
1469 bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
1470 // If the CmpP is a subtype check for a value that has just been
1471 // loaded from an array, the subtype check guarantees the value
1472 // can't be stored in a flat array and the load of the value
1473 // happens with a flat array check then: push the type check
1474 // through the phi of the flat array check. This needs special
1475 // logic because the subtype check's input is not a phi but a
1476 // LoadKlass that must first be cloned through the phi.
1477 if (n->Opcode() != Op_CmpP) {
1478 return false;
1479 }
1480
1481 Node* klassptr = n->in(1);
1482 Node* klasscon = n->in(2);
1483
1484 if (klassptr->is_DecodeNarrowPtr()) {
1485 klassptr = klassptr->in(1);
1486 }
1487
1488 if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
1489 return false;
1490 }
1491
1492 if (!klasscon->is_Con()) {
1493 return false;
1494 }
1495
1496 Node* addr = klassptr->in(MemNode::Address);
1497
1498 if (!addr->is_AddP()) {
1499 return false;
1500 }
1501
1502 intptr_t offset;
1503 Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);
1504
1505 if (obj == nullptr) {
1506 return false;
1507 }
1508
1509 assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
1510 if (obj->Opcode() == Op_CastPP) {
1511 obj = obj->in(1);
1512 }
1513
1514 if (!obj->is_Phi()) {
1515 return false;
1516 }
1517
1518 Node* region = obj->in(0);
1519
1520 Node* phi = PhiNode::make_blank(region, n->in(1));
1521 for (uint i = 1; i < region->req(); i++) {
1522 Node* in = obj->in(i);
1523 Node* ctrl = region->in(i);
1524 if (addr->in(AddPNode::Base) != obj) {
1525 Node* cast = addr->in(AddPNode::Base);
1526 assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
1527 Node* cast_clone = cast->clone();
1528 cast_clone->set_req(0, ctrl);
1529 cast_clone->set_req(1, in);
1530 register_new_node(cast_clone, ctrl);
1531 const Type* tcast = cast_clone->Value(&_igvn);
1532 _igvn.set_type(cast_clone, tcast);
1533 cast_clone->as_Type()->set_type(tcast);
1534 in = cast_clone;
1535 }
1536 Node* addr_clone = addr->clone();
1537 addr_clone->set_req(AddPNode::Base, in);
1538 addr_clone->set_req(AddPNode::Address, in);
1539 register_new_node(addr_clone, ctrl);
1540 _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
1541 Node* klassptr_clone = klassptr->clone();
1542 klassptr_clone->set_req(2, addr_clone);
1543 register_new_node(klassptr_clone, ctrl);
1544 _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
1545 if (klassptr != n->in(1)) {
1546 Node* decode = n->in(1);
1547 assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
1548 Node* decode_clone = decode->clone();
1549 decode_clone->set_req(1, klassptr_clone);
1550 register_new_node(decode_clone, ctrl);
1551 _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
1552 klassptr_clone = decode_clone;
1553 }
1554 phi->set_req(i, klassptr_clone);
1555 }
1556 register_new_node(phi, region);
1557 Node* orig = n->in(1);
1558 _igvn.replace_input_of(n, 1, phi);
1559 split_if_with_blocks_post(n);
1560 if (n->outcnt() != 0) {
1561 _igvn.replace_input_of(n, 1, orig);
1562 _igvn.remove_dead_node(phi);
1563 }
1564 return true;
1565 }
1566
1567 //------------------------------split_if_with_blocks_post----------------------
1568 // Do the real work in a non-recursive function. CFG hackery wants to be
1569 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1570 // info.
1571 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1572
1573 if (flat_array_element_type_check(n)) {
1574 return;
1575 }
1576
1577 // Cloning Cmp through Phi's involves the split-if transform.
1578 // FastLock is not used by an If
1579 if (n->is_Cmp() && !n->is_FastLock()) {
1580 Node *n_ctrl = get_ctrl(n);
1581 // Determine if the Node has inputs from some local Phi.
1582 // Returns the block to clone thru.
1583 Node *n_blk = has_local_phi_input(n);
1584 if (n_blk != n_ctrl) {
1585 return;
1586 }
1587
1588 if (!can_split_if(n_ctrl)) {
1589 return;
1590 }
1591
1592 if (n->outcnt() != 1) {
1593 return; // Multiple bool's from 1 compare?
1594 }
1595 Node *bol = n->unique_out();
1596 assert(bol->is_Bool(), "expect a bool here");
1597 if (bol->outcnt() != 1) {
1598 return;// Multiple branches from 1 compare?
1599 }
1600 Node *iff = bol->unique_out();
1601
1602 // Check some safety conditions
1603 if (iff->is_If()) { // Classic split-if?
1604 if (iff->in(0) != n_ctrl) {
1605 return; // Compare must be in same blk as if
1606 }
1607 } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1608 // Can't split CMove with different control.
1609 if (get_ctrl(iff) != n_ctrl) {
1610 return;
1611 }
1612 if (get_ctrl(iff->in(2)) == n_ctrl ||
1613 get_ctrl(iff->in(3)) == n_ctrl) {
1614 return; // Inputs not yet split-up
1615 }
1616 if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1617 return; // Loop-invar test gates loop-varying CMOVE
1618 }
1619 } else {
1620 return; // some other kind of node, such as an Allocate
1621 }
1622
1623 // When is split-if profitable? Every 'win' on means some control flow
1624 // goes dead, so it's almost always a win.
1625 int policy = 0;
1626 // Split compare 'n' through the merge point if it is profitable
1627 Node *phi = split_thru_phi( n, n_ctrl, policy);
1628 if (!phi) {
1629 return;
1630 }
1631
1632 // Now split the bool up thru the phi
1633 Node* bolphi = split_thru_phi(bol, n_ctrl, -1);
1634 guarantee(bolphi != nullptr, "null boolean phi node");
1635 assert(iff->in(1) == bolphi, "");
1636
1637 if (bolphi->Value(&_igvn)->singleton()) {
1638 return;
1639 }
1640
1641 // Conditional-move? Must split up now
1642 if (!iff->is_If()) {
1643 Node* cmovphi = split_thru_phi(iff, n_ctrl, -1);
1644 return;
1645 }
1646
1647 // Now split the IF
1648 C->print_method(PHASE_BEFORE_SPLIT_IF, 4, iff);
1649 if (TraceLoopOpts) {
1650 tty->print_cr("Split-If");
1651 }
1652 do_split_if(iff);
1653 C->print_method(PHASE_AFTER_SPLIT_IF, 4, iff);
1654 return;
1655 }
1656
1657 // Two identical ifs back to back can be merged
1658 if (try_merge_identical_ifs(n)) {
1659 return;
1660 }
1661
1662 // Check for an IF ready to split; one that has its
1663 // condition codes input coming from a Phi at the block start.
1664 int n_op = n->Opcode();
1665
1666 // Check for an IF being dominated by another IF same test
1667 if (n_op == Op_If ||
1668 n_op == Op_RangeCheck) {
1669 Node *bol = n->in(1);
1670 uint max = bol->outcnt();
1671 // Check for same test used more than once?
1672 if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
1673 // Search up IDOMs to see if this IF is dominated.
1674 Node* cmp = bol->in(1);
1675 Node *cutoff = cmp->is_SubTypeCheck() ? dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);
1676
1677 // Now search up IDOMs till cutoff, looking for a dominating test
1678 Node *prevdom = n;
1679 Node *dom = idom(prevdom);
1680 while (dom != cutoff) {
1681 if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
1682 safe_for_if_replacement(dom)) {
1683 // It's invalid to move control dependent data nodes in the inner
1684 // strip-mined loop, because:
1685 // 1) break validation of LoopNode::verify_strip_mined()
1686 // 2) move code with side-effect in strip-mined loop
1687 // Move to the exit of outer strip-mined loop in that case.
1688 Node* out_le = is_inner_of_stripmined_loop(dom);
1689 if (out_le != nullptr) {
1690 prevdom = out_le;
1691 }
1692 // Replace the dominated test with an obvious true or false.
1693 // Place it on the IGVN worklist for later cleanup.
1694 C->set_major_progress();
1695 // Split if: pin array accesses that are control dependent on a range check and moved to a regular if,
1696 // to prevent an array load from floating above its range check. There are three cases:
1697 // 1. Move from RangeCheck "a" to RangeCheck "b": don't need to pin. If we ever remove b, then we pin
1698 // all its array accesses at that point.
1699 // 2. We move from RangeCheck "a" to regular if "b": need to pin. If we ever remove b, then its array
1700 // accesses would start to float, since we don't pin at that point.
1701 // 3. If we move from regular if: don't pin. All array accesses are already assumed to be pinned.
1702 bool pin_array_access_nodes = n->Opcode() == Op_RangeCheck &&
1703 prevdom->in(0)->Opcode() != Op_RangeCheck;
1704 dominated_by(prevdom->as_IfProj(), n->as_If(), false, pin_array_access_nodes);
1705 DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1706 return;
1707 }
1708 prevdom = dom;
1709 dom = idom(prevdom);
1710 }
1711 }
1712 }
1713
1714 try_sink_out_of_loop(n);
1715 if (C->failing()) {
1716 return;
1717 }
1718
1719 try_move_store_after_loop(n);
1720
1721 // Remove multiple allocations of the same inline type
1722 if (n->is_InlineType()) {
1723 n->as_InlineType()->remove_redundant_allocations(this);
1724 }
1725 }
1726
1727 // Transform:
1728 //
1729 // if (some_condition) {
1730 // // body 1
1731 // } else {
1732 // // body 2
1733 // }
1734 // if (some_condition) {
1735 // // body 3
1736 // } else {
1737 // // body 4
1738 // }
1739 //
1740 // into:
1741 //
1742 //
1743 // if (some_condition) {
1744 // // body 1
1745 // // body 3
1746 // } else {
1747 // // body 2
1748 // // body 4
1749 // }
1750 bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
1751 if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
1752 Node *n_ctrl = n->in(0);
1753 IfNode* dom_if = idom(n_ctrl)->as_If();
1754 if (n->in(1) != dom_if->in(1)) {
1755 assert(n->in(1)->in(1)->is_SubTypeCheck() &&
1756 (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
1757 dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
1758 _igvn.replace_input_of(n, 1, dom_if->in(1));
1759 }
1760 IfTrueNode* dom_proj_true = dom_if->true_proj();
1761 IfFalseNode* dom_proj_false = dom_if->false_proj();
1762
1763 // Now split the IF
1764 RegionNode* new_false_region;
1765 RegionNode* new_true_region;
1766 do_split_if(n, &new_false_region, &new_true_region);
1767 assert(new_false_region->req() == new_true_region->req(), "");
1768 #ifdef ASSERT
1769 for (uint i = 1; i < new_false_region->req(); ++i) {
1770 assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if");
1771 assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if");
1772 }
1773 #endif
1774 assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test");
1775
1776 // We now have:
1777 // if (some_condition) {
1778 // // body 1
1779 // if (some_condition) {
1780 // body3: // new_true_region
1781 // // body3
1782 // } else {
1783 // goto body4;
1784 // }
1785 // } else {
1786 // // body 2
1787 // if (some_condition) {
1788 // goto body3;
1789 // } else {
1790 // body4: // new_false_region
1791 // // body4;
1792 // }
1793 // }
1794 //
1795
1796 // clone pinned nodes thru the resulting regions
1797 push_pinned_nodes_thru_region(dom_if, new_true_region);
1798 push_pinned_nodes_thru_region(dom_if, new_false_region);
1799
1800 // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent
1801 // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an
1802 // unrelated control dependency.
1803 for (uint i = 1; i < new_false_region->req(); i++) {
1804 if (is_dominator(dom_proj_true, new_false_region->in(i))) {
1805 dominated_by(dom_proj_true, new_false_region->in(i)->in(0)->as_If());
1806 } else {
1807 assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if");
1808 dominated_by(dom_proj_false, new_false_region->in(i)->in(0)->as_If());
1809 }
1810 }
1811 return true;
1812 }
1813 return false;
1814 }
1815
1816 void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) {
1817 for (DUIterator i = region->outs(); region->has_out(i); i++) {
1818 Node* u = region->out(i);
1819 if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test() || !_igvn.no_dependent_zero_check(u)) {
1820 continue;
1821 }
1822 assert(u->in(0) == region, "not a control dependent node?");
1823 uint j = 1;
1824 for (; j < u->req(); ++j) {
1825 Node* in = u->in(j);
1826 if (!is_dominator(ctrl_or_self(in), dom_if)) {
1827 break;
1828 }
1829 }
1830 if (j == u->req()) {
1831 Node *phi = PhiNode::make_blank(region, u);
1832 for (uint k = 1; k < region->req(); ++k) {
1833 Node* clone = u->clone();
1834 clone->set_req(0, region->in(k));
1835 register_new_node(clone, region->in(k));
1836 phi->init_req(k, clone);
1837 }
1838 register_new_node(phi, region);
1839 _igvn.replace_node(u, phi);
1840 --i;
1841 }
1842 }
1843 }
1844
1845 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const {
1846 if (!dom->is_CountedLoopEnd()) {
1847 return true;
1848 }
1849 CountedLoopEndNode* le = dom->as_CountedLoopEnd();
1850 CountedLoopNode* cl = le->loopnode();
1851 if (cl == nullptr) {
1852 return true;
1853 }
1854 if (!cl->is_main_loop()) {
1855 return true;
1856 }
1857 if (cl->is_canonical_loop_entry() == nullptr) {
1858 return true;
1859 }
1860 // Further unrolling is possible so loop exit condition might change
1861 return false;
1862 }
1863
1864 // See if a shared loop-varying computation has no loop-varying uses.
1865 // Happens if something is only used for JVM state in uncommon trap exits,
1866 // like various versions of induction variable+offset. Clone the
1867 // computation per usage to allow it to sink out of the loop.
1868 void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
1869 bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
1870 n->in(1)->bottom_type()->isa_rawptr() &&
1871 !n->bottom_type()->isa_rawptr();
1872
1873 if (has_ctrl(n) &&
1874 !n->is_Phi() &&
1875 !n->is_Bool() &&
1876 !n->is_Proj() &&
1877 !n->is_MergeMem() &&
1878 !n->is_CMove() &&
1879 !n->is_OpaqueNotNull() &&
1880 !n->is_OpaqueInitializedAssertionPredicate() &&
1881 !n->is_OpaqueTemplateAssertionPredicate() &&
1882 !is_raw_to_oop_cast && // don't extend live ranges of raw oops
1883 n->Opcode() != Op_CreateEx &&
1884 (KillPathsReachableByDeadTypeNode || !n->is_Type())
1885 ) {
1886 Node *n_ctrl = get_ctrl(n);
1887 IdealLoopTree *n_loop = get_loop(n_ctrl);
1888
1889 if (n->in(0) != nullptr) {
1890 IdealLoopTree* loop_ctrl = get_loop(n->in(0));
1891 if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) {
1892 // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example,
1893 // for Div nodes inside a loop (control input inside loop) without a use except for an UCT (outside the loop).
1894 // Rewire control of n to right outside of the loop, regardless if its input(s) are later sunk or not.
1895 Node* maybe_pinned_n = n;
1896 Node* outside_ctrl = place_outside_loop(n_ctrl, loop_ctrl);
1897 if (!would_sink_below_pre_loop_exit(loop_ctrl, outside_ctrl)) {
1898 if (n->depends_only_on_test()) {
1899 Node* pinned_clone = n->pin_array_access_node();
1900 if (pinned_clone != nullptr) {
1901 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a
1902 // range check for that access. If that condition is replaced by an identical dominating one, then an
1903 // unpinned load would risk floating above its range check.
1904 register_new_node(pinned_clone, n_ctrl);
1905 maybe_pinned_n = pinned_clone;
1906 _igvn.replace_node(n, pinned_clone);
1907 }
1908 }
1909 _igvn.replace_input_of(maybe_pinned_n, 0, outside_ctrl);
1910 }
1911 }
1912 }
1913 if (n_loop != _ltree_root && n->outcnt() > 1) {
1914 // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of
1915 // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us.
1916 Node* early_ctrl = compute_early_ctrl(n, n_ctrl);
1917 if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now
1918 ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops!
1919 if (n->is_Store() || n->is_LoadStore()) {
1920 assert(false, "no node with a side effect");
1921 C->record_failure("no node with a side effect");
1922 return;
1923 }
1924 Node* outer_loop_clone = nullptr;
1925 for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) {
1926 Node* u = n->last_out(j); // Clone private computation per use
1927 _igvn.rehash_node_delayed(u);
1928 Node* x = nullptr;
1929 if (n->depends_only_on_test()) {
1930 // Pin array access nodes: if this is an array load, it's going to be dependent on a condition that's not a
1931 // range check for that access. If that condition is replaced by an identical dominating one, then an
1932 // unpinned load would risk floating above its range check.
1933 x = n->pin_array_access_node();
1934 }
1935 if (x == nullptr) {
1936 x = n->clone();
1937 }
1938 Node* x_ctrl = nullptr;
1939 if (u->is_Phi()) {
1940 // Replace all uses of normal nodes. Replace Phi uses
1941 // individually, so the separate Nodes can sink down
1942 // different paths.
1943 uint k = 1;
1944 while (u->in(k) != n) k++;
1945 u->set_req(k, x);
1946 // x goes next to Phi input path
1947 x_ctrl = u->in(0)->in(k);
1948 // Find control for 'x' next to use but not inside inner loops.
1949 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1950 --j;
1951 } else { // Normal use
1952 if (has_ctrl(u)) {
1953 x_ctrl = get_ctrl(u);
1954 } else {
1955 x_ctrl = u->in(0);
1956 }
1957 // Find control for 'x' next to use but not inside inner loops.
1958 x_ctrl = place_outside_loop(x_ctrl, n_loop);
1959 // Replace all uses
1960 if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) {
1961 // If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary
1962 // anymore now that we're going to pin n as well
1963 _igvn.replace_node(u, x);
1964 --j;
1965 } else {
1966 int nb = u->replace_edge(n, x, &_igvn);
1967 j -= nb;
1968 }
1969 }
1970
1971 if (n->is_Load()) {
1972 // For loads, add a control edge to a CFG node outside of the loop
1973 // to force them to not combine and return back inside the loop
1974 // during GVN optimization (4641526).
1975 assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked");
1976
1977 IdealLoopTree* x_loop = get_loop(x_ctrl);
1978 Node* x_head = x_loop->_head;
1979 if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) {
1980 // Do not add duplicate LoadNodes to the outer strip mined loop
1981 if (outer_loop_clone != nullptr) {
1982 _igvn.replace_node(x, outer_loop_clone);
1983 continue;
1984 }
1985 outer_loop_clone = x;
1986 }
1987 x->set_req(0, x_ctrl);
1988 } else if (n->in(0) != nullptr){
1989 x->set_req(0, x_ctrl);
1990 }
1991 assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
1992 assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop");
1993 register_new_node(x, x_ctrl);
1994
1995 // Chain of AddP nodes: (AddP base (AddP base (AddP base )))
1996 // All AddP nodes must keep the same base after sinking so:
1997 // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk,
1998 // their bases remain the same.
1999 // (see 2- below)
2000 assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() ||
2001 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) ||
2002 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape");
2003 if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() &&
2004 !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) {
2005 assert(!x->is_Load(), "load should be pinned");
2006 // Use a cast node to pin clone out of loop
2007 Node* cast = nullptr;
2008 for (uint k = 0; k < x->req(); k++) {
2009 Node* in = x->in(k);
2010 if (in != nullptr && ctrl_is_member(n_loop, in)) {
2011 const Type* in_t = _igvn.type(in);
2012 cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
2013 ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr);
2014 }
2015 if (cast != nullptr) {
2016 Node* prev = _igvn.hash_find_insert(cast);
2017 if (prev != nullptr && get_ctrl(prev) == x_ctrl) {
2018 cast->destruct(&_igvn);
2019 cast = prev;
2020 } else {
2021 register_new_node(cast, x_ctrl);
2022 }
2023 x->replace_edge(in, cast);
2024 // Chain of AddP nodes:
2025 // 2- A CastPP of the base is only added now that all AddP nodes are sunk
2026 if (x->is_AddP() && k == AddPNode::Base) {
2027 update_addp_chain_base(x, n->in(AddPNode::Base), cast);
2028 }
2029 break;
2030 }
2031 }
2032 assert(cast != nullptr, "must have added a cast to pin the node");
2033 }
2034 }
2035 _igvn.remove_dead_node(n);
2036 }
2037 _dom_lca_tags_round = 0;
2038 }
2039 }
2040 }
2041
2042 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) {
2043 ResourceMark rm;
2044 Node_List wq;
2045 wq.push(x);
2046 while (wq.size() != 0) {
2047 Node* n = wq.pop();
2048 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2049 Node* u = n->fast_out(i);
2050 if (u->is_AddP() && u->in(AddPNode::Base) == old_base) {
2051 _igvn.replace_input_of(u, AddPNode::Base, new_base);
2052 wq.push(u);
2053 }
2054 }
2055 }
2056 }
2057
2058 // Compute the early control of a node by following its inputs until we reach
2059 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes.
2060 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) {
2061 Node* early_ctrl = nullptr;
2062 ResourceMark rm;
2063 Unique_Node_List wq;
2064 wq.push(n);
2065 for (uint i = 0; i < wq.size(); i++) {
2066 Node* m = wq.at(i);
2067 Node* c = nullptr;
2068 if (m->is_CFG()) {
2069 c = m;
2070 } else if (m->pinned()) {
2071 c = m->in(0);
2072 } else {
2073 for (uint j = 0; j < m->req(); j++) {
2074 Node* in = m->in(j);
2075 if (in != nullptr) {
2076 wq.push(in);
2077 }
2078 }
2079 }
2080 if (c != nullptr) {
2081 assert(is_dominator(c, n_ctrl), "control input must dominate current control");
2082 if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) {
2083 early_ctrl = c;
2084 }
2085 }
2086 }
2087 assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control");
2088 return early_ctrl;
2089 }
2090
2091 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) {
2092 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2093 Node* u = n->fast_out(i);
2094 if (u->is_Opaque1()) {
2095 return false; // Found loop limit, bugfix for 4677003
2096 }
2097 // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure calls to
2098 // get_late_ctrl_with_anti_dep() use their own tag
2099 _dom_lca_tags_round++;
2100 assert(_dom_lca_tags_round != 0, "shouldn't wrap around");
2101
2102 if (u->is_Phi()) {
2103 for (uint j = 1; j < u->req(); ++j) {
2104 if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) {
2105 return false;
2106 }
2107 }
2108 } else {
2109 Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0);
2110 if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) {
2111 return false;
2112 }
2113 }
2114 }
2115 return true;
2116 }
2117
2118 // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input
2119 // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit
2120 // test of the pre loop above the point in the graph where it's pinned. This results in a broken graph. One way to avoid
2121 // it would be to not eliminate the check in the main loop. Instead, we prevent sinking of the node here so better code
2122 // is generated for the main loop.
2123 bool PhaseIdealLoop::would_sink_below_pre_loop_exit(IdealLoopTree* n_loop, Node* ctrl) {
2124 if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop()) {
2125 CountedLoopNode* pre_loop = n_loop->_head->as_CountedLoop();
2126 if (is_dominator(pre_loop->loopexit(), ctrl)) {
2127 return true;
2128 }
2129 }
2130 return false;
2131 }
2132
2133 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) {
2134 if (n->is_Load()) {
2135 ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl);
2136 }
2137 IdealLoopTree *u_loop = get_loop(ctrl);
2138 if (u_loop == n_loop) {
2139 return false; // Found loop-varying use
2140 }
2141 if (n_loop->is_member(u_loop)) {
2142 return false; // Found use in inner loop
2143 }
2144 if (would_sink_below_pre_loop_exit(n_loop, ctrl)) {
2145 return false;
2146 }
2147 return true;
2148 }
2149
2150 //------------------------------split_if_with_blocks---------------------------
2151 // Check for aggressive application of 'split-if' optimization,
2152 // using basic block level info.
2153 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
2154 Node* root = C->root();
2155 visited.set(root->_idx); // first, mark root as visited
2156 // Do pre-visit work for root
2157 Node* n = split_if_with_blocks_pre(root);
2158 uint cnt = n->outcnt();
2159 uint i = 0;
2160
2161 while (true) {
2162 // Visit all children
2163 if (i < cnt) {
2164 Node* use = n->raw_out(i);
2165 ++i;
2166 if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
2167 // Now do pre-visit work for this use
2168 use = split_if_with_blocks_pre(use);
2169 nstack.push(n, i); // Save parent and next use's index.
2170 n = use; // Process all children of current use.
2171 cnt = use->outcnt();
2172 i = 0;
2173 }
2174 }
2175 else {
2176 // All of n's children have been processed, complete post-processing.
2177 if (cnt != 0 && !n->is_Con()) {
2178 assert(has_node(n), "no dead nodes");
2179 split_if_with_blocks_post(n);
2180 if (C->failing()) {
2181 return;
2182 }
2183 }
2184 if (must_throttle_split_if()) {
2185 nstack.clear();
2186 }
2187 if (nstack.is_empty()) {
2188 // Finished all nodes on stack.
2189 break;
2190 }
2191 // Get saved parent node and next use's index. Visit the rest of uses.
2192 n = nstack.node();
2193 cnt = n->outcnt();
2194 i = nstack.index();
2195 nstack.pop();
2196 }
2197 }
2198 }
2199
2200
2201 //=============================================================================
2202 //
2203 // C L O N E A L O O P B O D Y
2204 //
2205
2206 //------------------------------clone_iff--------------------------------------
2207 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2208 // "Nearly" because all Nodes have been cloned from the original in the loop,
2209 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
2210 // through the Phi recursively, and return a Bool.
2211 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
2212
2213 // Convert this Phi into a Phi merging Bools
2214 uint i;
2215 for (i = 1; i < phi->req(); i++) {
2216 Node* b = phi->in(i);
2217 if (b->is_Phi()) {
2218 _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2219 } else {
2220 assert(b->is_Bool() || b->is_OpaqueNotNull() || b->is_OpaqueInitializedAssertionPredicate(),
2221 "bool, non-null check with OpaqueNotNull or Initialized Assertion Predicate with its Opaque node");
2222 }
2223 }
2224 Node* n = phi->in(1);
2225 Node* sample_opaque = nullptr;
2226 Node *sample_bool = nullptr;
2227 if (n->is_OpaqueNotNull() || n->is_OpaqueInitializedAssertionPredicate()) {
2228 sample_opaque = n;
2229 sample_bool = n->in(1);
2230 assert(sample_bool->is_Bool(), "wrong type");
2231 } else {
2232 sample_bool = n;
2233 }
2234 Node* sample_cmp = sample_bool->in(1);
2235 const Type* t = Type::TOP;
2236 const TypePtr* at = nullptr;
2237 if (sample_cmp->is_FlatArrayCheck()) {
2238 // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
2239 assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2240 t = Type::MEMORY;
2241 at = TypeRawPtr::BOTTOM;
2242 }
2243
2244 // Make Phis to merge the Cmp's inputs.
2245 PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2246 PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2247 for (i = 1; i < phi->req(); i++) {
2248 Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2249 Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2250 phi1->set_req(i, n1);
2251 phi2->set_req(i, n2);
2252 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2253 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2254 }
2255 // See if these Phis have been made before.
2256 // Register with optimizer
2257 Node *hit1 = _igvn.hash_find_insert(phi1);
2258 if (hit1) { // Hit, toss just made Phi
2259 _igvn.remove_dead_node(phi1); // Remove new phi
2260 assert(hit1->is_Phi(), "" );
2261 phi1 = (PhiNode*)hit1; // Use existing phi
2262 } else { // Miss
2263 _igvn.register_new_node_with_optimizer(phi1);
2264 }
2265 Node *hit2 = _igvn.hash_find_insert(phi2);
2266 if (hit2) { // Hit, toss just made Phi
2267 _igvn.remove_dead_node(phi2); // Remove new phi
2268 assert(hit2->is_Phi(), "" );
2269 phi2 = (PhiNode*)hit2; // Use existing phi
2270 } else { // Miss
2271 _igvn.register_new_node_with_optimizer(phi2);
2272 }
2273 // Register Phis with loop/block info
2274 set_ctrl(phi1, phi->in(0));
2275 set_ctrl(phi2, phi->in(0));
2276 // Make a new Cmp
2277 Node *cmp = sample_cmp->clone();
2278 cmp->set_req(1, phi1);
2279 cmp->set_req(2, phi2);
2280 _igvn.register_new_node_with_optimizer(cmp);
2281 set_ctrl(cmp, phi->in(0));
2282
2283 // Make a new Bool
2284 Node *b = sample_bool->clone();
2285 b->set_req(1,cmp);
2286 _igvn.register_new_node_with_optimizer(b);
2287 set_ctrl(b, phi->in(0));
2288
2289 if (sample_opaque != nullptr) {
2290 Node* opaque = sample_opaque->clone();
2291 opaque->set_req(1, b);
2292 _igvn.register_new_node_with_optimizer(opaque);
2293 set_ctrl(opaque, phi->in(0));
2294 return opaque;
2295 }
2296
2297 assert(b->is_Bool(), "");
2298 return b;
2299 }
2300
2301 //------------------------------clone_bool-------------------------------------
2302 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2303 // "Nearly" because all Nodes have been cloned from the original in the loop,
2304 // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs
2305 // through the Phi recursively, and return a Bool.
2306 CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) {
2307 uint i;
2308 // Convert this Phi into a Phi merging Bools
2309 for( i = 1; i < phi->req(); i++ ) {
2310 Node *b = phi->in(i);
2311 if( b->is_Phi() ) {
2312 _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi()));
2313 } else {
2314 assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
2315 }
2316 }
2317
2318 Node *sample_cmp = phi->in(1);
2319
2320 // Make Phis to merge the Cmp's inputs.
2321 PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
2322 PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
2323 for( uint j = 1; j < phi->req(); j++ ) {
2324 Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
2325 Node *n1, *n2;
2326 if( cmp_top->is_Cmp() ) {
2327 n1 = cmp_top->in(1);
2328 n2 = cmp_top->in(2);
2329 } else {
2330 n1 = n2 = cmp_top;
2331 }
2332 phi1->set_req( j, n1 );
2333 phi2->set_req( j, n2 );
2334 phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2335 phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2336 }
2337
2338 // See if these Phis have been made before.
2339 // Register with optimizer
2340 Node *hit1 = _igvn.hash_find_insert(phi1);
2341 if( hit1 ) { // Hit, toss just made Phi
2342 _igvn.remove_dead_node(phi1); // Remove new phi
2343 assert( hit1->is_Phi(), "" );
2344 phi1 = (PhiNode*)hit1; // Use existing phi
2345 } else { // Miss
2346 _igvn.register_new_node_with_optimizer(phi1);
2347 }
2348 Node *hit2 = _igvn.hash_find_insert(phi2);
2349 if( hit2 ) { // Hit, toss just made Phi
2350 _igvn.remove_dead_node(phi2); // Remove new phi
2351 assert( hit2->is_Phi(), "" );
2352 phi2 = (PhiNode*)hit2; // Use existing phi
2353 } else { // Miss
2354 _igvn.register_new_node_with_optimizer(phi2);
2355 }
2356 // Register Phis with loop/block info
2357 set_ctrl(phi1, phi->in(0));
2358 set_ctrl(phi2, phi->in(0));
2359 // Make a new Cmp
2360 Node *cmp = sample_cmp->clone();
2361 cmp->set_req( 1, phi1 );
2362 cmp->set_req( 2, phi2 );
2363 _igvn.register_new_node_with_optimizer(cmp);
2364 set_ctrl(cmp, phi->in(0));
2365
2366 assert( cmp->is_Cmp(), "" );
2367 return (CmpNode*)cmp;
2368 }
2369
2370 void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
2371 IdealLoopTree* loop, IdealLoopTree* outer_loop,
2372 Node_List*& split_if_set, Node_List*& split_bool_set,
2373 Node_List*& split_cex_set, Node_List& worklist,
2374 uint new_counter, CloneLoopMode mode) {
2375 Node* nnn = old_new[old->_idx];
2376 // Copy uses to a worklist, so I can munge the def-use info
2377 // with impunity.
2378 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
2379 worklist.push(old->fast_out(j));
2380
2381 while( worklist.size() ) {
2382 Node *use = worklist.pop();
2383 if (!has_node(use)) continue; // Ignore dead nodes
2384 if (use->in(0) == C->top()) continue;
2385 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
2386 // Check for data-use outside of loop - at least one of OLD or USE
2387 // must not be a CFG node.
2388 #ifdef ASSERT
2389 if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) {
2390 Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
2391 assert(mode != IgnoreStripMined, "incorrect cloning mode");
2392 assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
2393 }
2394 #endif
2395 if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {
2396
2397 // If the Data use is an IF, that means we have an IF outside the
2398 // loop that is switching on a condition that is set inside the
2399 // loop. Happens if people set a loop-exit flag; then test the flag
2400 // in the loop to break the loop, then test is again outside the
2401 // loop to determine which way the loop exited.
2402 //
2403 // For several uses we need to make sure that there is no phi between,
2404 // the use and the Bool/Cmp. We therefore clone the Bool/Cmp down here
2405 // to avoid such a phi in between.
2406 // For example, it is unexpected that there is a Phi between an
2407 // AllocateArray node and its ValidLengthTest input that could cause
2408 // split if to break.
2409 assert(!use->is_OpaqueTemplateAssertionPredicate(),
2410 "should not clone a Template Assertion Predicate which should be removed once it's useless");
2411 if (use->is_If() || use->is_CMove() || use->is_OpaqueNotNull() || use->is_OpaqueInitializedAssertionPredicate() ||
2412 (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) {
2413 // Since this code is highly unlikely, we lazily build the worklist
2414 // of such Nodes to go split.
2415 if (!split_if_set) {
2416 split_if_set = new Node_List();
2417 }
2418 split_if_set->push(use);
2419 }
2420 if (use->is_Bool()) {
2421 if (!split_bool_set) {
2422 split_bool_set = new Node_List();
2423 }
2424 split_bool_set->push(use);
2425 }
2426 if (use->Opcode() == Op_CreateEx) {
2427 if (!split_cex_set) {
2428 split_cex_set = new Node_List();
2429 }
2430 split_cex_set->push(use);
2431 }
2432
2433
2434 // Get "block" use is in
2435 uint idx = 0;
2436 while( use->in(idx) != old ) idx++;
2437 Node *prev = use->is_CFG() ? use : get_ctrl(use);
2438 assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
2439 Node* cfg = (prev->_idx >= new_counter && prev->is_Region())
2440 ? prev->in(2)
2441 : idom(prev);
2442 if( use->is_Phi() ) // Phi use is in prior block
2443 cfg = prev->in(idx); // NOT in block of Phi itself
2444 if (cfg->is_top()) { // Use is dead?
2445 _igvn.replace_input_of(use, idx, C->top());
2446 continue;
2447 }
2448
2449 // If use is referenced through control edge... (idx == 0)
2450 if (mode == IgnoreStripMined && idx == 0) {
2451 LoopNode *head = loop->_head->as_Loop();
2452 if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
2453 // That node is outside the inner loop, leave it outside the
2454 // outer loop as well to not confuse verification code.
2455 assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
2456 _igvn.replace_input_of(use, 0, head->outer_loop_exit());
2457 continue;
2458 }
2459 }
2460
2461 while(!outer_loop->is_member(get_loop(cfg))) {
2462 prev = cfg;
2463 cfg = (cfg->_idx >= new_counter && cfg->is_Region()) ? cfg->in(2) : idom(cfg);
2464 }
2465 // If the use occurs after merging several exits from the loop, then
2466 // old value must have dominated all those exits. Since the same old
2467 // value was used on all those exits we did not need a Phi at this
2468 // merge point. NOW we do need a Phi here. Each loop exit value
2469 // is now merged with the peeled body exit; each exit gets its own
2470 // private Phi and those Phis need to be merged here.
2471 Node *phi;
2472 if( prev->is_Region() ) {
2473 if( idx == 0 ) { // Updating control edge?
2474 phi = prev; // Just use existing control
2475 } else { // Else need a new Phi
2476 phi = PhiNode::make( prev, old );
2477 // Now recursively fix up the new uses of old!
2478 for( uint i = 1; i < prev->req(); i++ ) {
2479 worklist.push(phi); // Onto worklist once for each 'old' input
2480 }
2481 }
2482 } else {
2483 // Get new RegionNode merging old and new loop exits
2484 prev = old_new[prev->_idx];
2485 assert( prev, "just made this in step 7" );
2486 if( idx == 0) { // Updating control edge?
2487 phi = prev; // Just use existing control
2488 } else { // Else need a new Phi
2489 // Make a new Phi merging data values properly
2490 phi = PhiNode::make( prev, old );
2491 phi->set_req( 1, nnn );
2492 }
2493 }
2494 // If inserting a new Phi, check for prior hits
2495 if( idx != 0 ) {
2496 Node *hit = _igvn.hash_find_insert(phi);
2497 if( hit == nullptr ) {
2498 _igvn.register_new_node_with_optimizer(phi); // Register new phi
2499 } else { // or
2500 // Remove the new phi from the graph and use the hit
2501 _igvn.remove_dead_node(phi);
2502 phi = hit; // Use existing phi
2503 }
2504 set_ctrl(phi, prev);
2505 }
2506 // Make 'use' use the Phi instead of the old loop body exit value
2507 assert(use->in(idx) == old, "old is still input of use");
2508 // We notify all uses of old, including use, and the indirect uses,
2509 // that may now be optimized because we have replaced old with phi.
2510 _igvn.add_users_to_worklist(old);
2511 if (idx == 0 &&
2512 use->depends_only_on_test()) {
2513 Node* pinned_clone = use->pin_array_access_node();
2514 if (pinned_clone != nullptr) {
2515 // Pin array access nodes: control is updated here to a region. If, after some transformations, only one path
2516 // into the region is left, an array load could become dependent on a condition that's not a range check for
2517 // that access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
2518 // floating above its range check.
2519 pinned_clone->set_req(0, phi);
2520 register_new_node_with_ctrl_of(pinned_clone, use);
2521 _igvn.replace_node(use, pinned_clone);
2522 continue;
2523 }
2524 }
2525 _igvn.replace_input_of(use, idx, phi);
2526 if( use->_idx >= new_counter ) { // If updating new phis
2527 // Not needed for correctness, but prevents a weak assert
2528 // in AddPNode from tripping (when we end up with different
2529 // base & derived Phis that will become the same after
2530 // IGVN does CSE).
2531 Node *hit = _igvn.hash_find_insert(use);
2532 if( hit ) // Go ahead and re-hash for hits.
2533 _igvn.replace_node( use, hit );
2534 }
2535 }
2536 }
2537 }
2538
2539 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
2540 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
2541 bool check_old_new) {
2542 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2543 Node* u = n->fast_out(j);
2544 assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned");
2545 if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) {
2546 assert(!phase->ctrl_is_member(loop, u) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
2547 if (!phase->ctrl_is_member(loop, u)) {
2548 if (phase->ctrl_is_member(outer_loop, u)) {
2549 wq.push(u);
2550 } else {
2551 // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of
2552 // the outer loop too
2553 Node* u_c = u->in(0);
2554 if (u_c != nullptr) {
2555 IdealLoopTree* u_c_loop = phase->get_loop(u_c);
2556 if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) {
2557 wq.push(u);
2558 }
2559 }
2560 }
2561 }
2562 }
2563 }
2564 }
2565
2566 void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
2567 IdealLoopTree* outer_loop, int dd, Node_List &old_new,
2568 Node_List& extra_data_nodes) {
2569 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2570 CountedLoopNode* cl = head->as_CountedLoop();
2571 Node* l = cl->outer_loop();
2572 Node* tail = cl->outer_loop_tail();
2573 IfNode* le = cl->outer_loop_end();
2574 Node* sfpt = cl->outer_safepoint();
2575 CountedLoopEndNode* cle = cl->loopexit();
2576 CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
2577 CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
2578 IfFalseNode* cle_out = cle->false_proj();
2579
2580 Node* new_sfpt = nullptr;
2581 Node* new_cle_out = cle_out->clone();
2582 old_new.map(cle_out->_idx, new_cle_out);
2583 if (mode == CloneIncludesStripMined) {
2584 // clone outer loop body
2585 Node* new_l = l->clone();
2586 Node* new_tail = tail->clone();
2587 IfNode* new_le = le->clone()->as_If();
2588 new_sfpt = sfpt->clone();
2589
2590 set_loop(new_l, outer_loop->_parent);
2591 set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
2592 set_loop(new_cle_out, outer_loop->_parent);
2593 set_idom(new_cle_out, new_cle, dd);
2594 set_loop(new_sfpt, outer_loop->_parent);
2595 set_idom(new_sfpt, new_cle_out, dd);
2596 set_loop(new_le, outer_loop->_parent);
2597 set_idom(new_le, new_sfpt, dd);
2598 set_loop(new_tail, outer_loop->_parent);
2599 set_idom(new_tail, new_le, dd);
2600 set_idom(new_cl, new_l, dd);
2601
2602 old_new.map(l->_idx, new_l);
2603 old_new.map(tail->_idx, new_tail);
2604 old_new.map(le->_idx, new_le);
2605 old_new.map(sfpt->_idx, new_sfpt);
2606
2607 new_l->set_req(LoopNode::LoopBackControl, new_tail);
2608 new_l->set_req(0, new_l);
2609 new_tail->set_req(0, new_le);
2610 new_le->set_req(0, new_sfpt);
2611 new_sfpt->set_req(0, new_cle_out);
2612 new_cle_out->set_req(0, new_cle);
2613 new_cl->set_req(LoopNode::EntryControl, new_l);
2614
2615 _igvn.register_new_node_with_optimizer(new_l);
2616 _igvn.register_new_node_with_optimizer(new_tail);
2617 _igvn.register_new_node_with_optimizer(new_le);
2618 } else {
2619 Node *newhead = old_new[loop->_head->_idx];
2620 newhead->as_Loop()->clear_strip_mined();
2621 _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
2622 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2623 }
2624 // Look at data node that were assigned a control in the outer
2625 // loop: they are kept in the outer loop by the safepoint so start
2626 // from the safepoint node's inputs.
2627 IdealLoopTree* outer_loop = get_loop(l);
2628 Node_Stack stack(2);
2629 stack.push(sfpt, 1);
2630 uint new_counter = C->unique();
2631 while (stack.size() > 0) {
2632 Node* n = stack.node();
2633 uint i = stack.index();
2634 while (i < n->req() &&
2635 (n->in(i) == nullptr ||
2636 !has_ctrl(n->in(i)) ||
2637 get_loop(get_ctrl(n->in(i))) != outer_loop ||
2638 (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
2639 i++;
2640 }
2641 if (i < n->req()) {
2642 stack.set_index(i+1);
2643 stack.push(n->in(i), 0);
2644 } else {
2645 assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
2646 Node* m = n == sfpt ? new_sfpt : n->clone();
2647 if (m != nullptr) {
2648 for (uint i = 0; i < n->req(); i++) {
2649 if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) {
2650 m->set_req(i, old_new[m->in(i)->_idx]);
2651 }
2652 }
2653 } else {
2654 assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
2655 }
2656 if (n != sfpt) {
2657 extra_data_nodes.push(n);
2658 _igvn.register_new_node_with_optimizer(m);
2659 assert(get_ctrl(n) == cle_out, "what other control?");
2660 set_ctrl(m, new_cle_out);
2661 old_new.map(n->_idx, m);
2662 }
2663 stack.pop();
2664 }
2665 }
2666 if (mode == CloneIncludesStripMined) {
2667 _igvn.register_new_node_with_optimizer(new_sfpt);
2668 _igvn.register_new_node_with_optimizer(new_cle_out);
2669 }
2670 // Some other transformation may have pessimistically assigned some
2671 // data nodes to the outer loop. Set their control so they are out
2672 // of the outer loop.
2673 ResourceMark rm;
2674 Unique_Node_List wq;
2675 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2676 Node* old = extra_data_nodes.at(i);
2677 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2678 }
2679
2680 for (uint i = 0; i < loop->_body.size(); i++) {
2681 Node* old = loop->_body.at(i);
2682 collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
2683 }
2684
2685 Node* inner_out = sfpt->in(0);
2686 if (inner_out->outcnt() > 1) {
2687 collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true);
2688 }
2689
2690 Node* new_ctrl = cl->outer_loop_exit();
2691 assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
2692 for (uint i = 0; i < wq.size(); i++) {
2693 Node* n = wq.at(i);
2694 set_ctrl(n, new_ctrl);
2695 if (n->in(0) != nullptr) {
2696 _igvn.replace_input_of(n, 0, new_ctrl);
2697 }
2698 collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false);
2699 }
2700 } else {
2701 Node *newhead = old_new[loop->_head->_idx];
2702 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
2703 }
2704 }
2705
2706 //------------------------------clone_loop-------------------------------------
2707 //
2708 // C L O N E A L O O P B O D Y
2709 //
2710 // This is the basic building block of the loop optimizations. It clones an
2711 // entire loop body. It makes an old_new loop body mapping; with this mapping
2712 // you can find the new-loop equivalent to an old-loop node. All new-loop
2713 // nodes are exactly equal to their old-loop counterparts, all edges are the
2714 // same. All exits from the old-loop now have a RegionNode that merges the
2715 // equivalent new-loop path. This is true even for the normal "loop-exit"
2716 // condition. All uses of loop-invariant old-loop values now come from (one
2717 // or more) Phis that merge their new-loop equivalents.
2718 //
2719 // This operation leaves the graph in an illegal state: there are two valid
2720 // control edges coming from the loop pre-header to both loop bodies. I'll
2721 // definitely have to hack the graph after running this transform.
2722 //
2723 // From this building block I will further edit edges to perform loop peeling
2724 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2725 //
2726 // Parameter side_by_size_idom:
2727 // When side_by_size_idom is null, the dominator tree is constructed for
2728 // the clone loop to dominate the original. Used in construction of
2729 // pre-main-post loop sequence.
2730 // When nonnull, the clone and original are side-by-side, both are
2731 // dominated by the side_by_side_idom node. Used in construction of
2732 // unswitched loops.
2733 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
2734 CloneLoopMode mode, Node* side_by_side_idom) {
2735
2736 LoopNode* head = loop->_head->as_Loop();
2737 head->verify_strip_mined(1);
2738
2739 if (C->do_vector_loop() && PrintOpto) {
2740 const char* mname = C->method()->name()->as_quoted_ascii();
2741 if (mname != nullptr) {
2742 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
2743 }
2744 }
2745
2746 CloneMap& cm = C->clone_map();
2747 if (C->do_vector_loop()) {
2748 cm.set_clone_idx(cm.max_gen()+1);
2749 #ifndef PRODUCT
2750 if (PrintOpto) {
2751 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
2752 loop->dump_head();
2753 }
2754 #endif
2755 }
2756
2757 // Step 1: Clone the loop body. Make the old->new mapping.
2758 clone_loop_body(loop->_body, old_new, &cm);
2759
2760 IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;
2761
2762 // Step 2: Fix the edges in the new body. If the old input is outside the
2763 // loop use it. If the old input is INside the loop, use the corresponding
2764 // new node instead.
2765 fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false);
2766
2767 Node_List extra_data_nodes; // data nodes in the outer strip mined loop
2768 clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);
2769
2770 // Step 3: Now fix control uses. Loop varying control uses have already
2771 // been fixed up (as part of all input edges in Step 2). Loop invariant
2772 // control uses must be either an IfFalse or an IfTrue. Make a merge
2773 // point to merge the old and new IfFalse/IfTrue nodes; make the use
2774 // refer to this.
2775 Node_List worklist;
2776 uint new_counter = C->unique();
2777 fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist);
2778
2779 // Step 4: If loop-invariant use is not control, it must be dominated by a
2780 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region
2781 // there if needed. Make a Phi there merging old and new used values.
2782 Node_List *split_if_set = nullptr;
2783 Node_List *split_bool_set = nullptr;
2784 Node_List *split_cex_set = nullptr;
2785 fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);
2786
2787 for (uint i = 0; i < extra_data_nodes.size(); i++) {
2788 Node* old = extra_data_nodes.at(i);
2789 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2790 split_bool_set, split_cex_set, worklist, new_counter,
2791 mode);
2792 }
2793
2794 // Check for IFs that need splitting/cloning. Happens if an IF outside of
2795 // the loop uses a condition set in the loop. The original IF probably
2796 // takes control from one or more OLD Regions (which in turn get from NEW
2797 // Regions). In any case, there will be a set of Phis for each merge point
2798 // from the IF up to where the original BOOL def exists the loop.
2799 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
2800
2801 }
2802
2803 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) {
2804 if (split_if_set) {
2805 while (split_if_set->size()) {
2806 Node *iff = split_if_set->pop();
2807 uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1;
2808 if (iff->in(input)->is_Phi()) {
2809 Node *b = clone_iff(iff->in(input)->as_Phi());
2810 _igvn.replace_input_of(iff, input, b);
2811 }
2812 }
2813 }
2814 if (split_bool_set) {
2815 while (split_bool_set->size()) {
2816 Node *b = split_bool_set->pop();
2817 Node *phi = b->in(1);
2818 assert(phi->is_Phi(), "");
2819 CmpNode *cmp = clone_bool((PhiNode*) phi);
2820 _igvn.replace_input_of(b, 1, cmp);
2821 }
2822 }
2823 if (split_cex_set) {
2824 while (split_cex_set->size()) {
2825 Node *b = split_cex_set->pop();
2826 assert(b->in(0)->is_Region(), "");
2827 assert(b->in(1)->is_Phi(), "");
2828 assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2829 split_up(b, b->in(0), nullptr);
2830 }
2831 }
2832 }
2833
2834 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2835 uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set,
2836 Node_List*& split_bool_set, Node_List*& split_cex_set) {
2837 for(uint i = 0; i < body.size(); i++ ) {
2838 Node* old = body.at(i);
2839 clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2840 split_bool_set, split_cex_set, worklist, new_counter,
2841 mode);
2842 }
2843 }
2844
2845 void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
2846 Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) {
2847 LoopNode* head = loop->_head->as_Loop();
2848 for(uint i = 0; i < body.size(); i++ ) {
2849 Node* old = body.at(i);
2850 if( !old->is_CFG() ) continue;
2851
2852 // Copy uses to a worklist, so I can munge the def-use info
2853 // with impunity.
2854 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) {
2855 worklist.push(old->fast_out(j));
2856 }
2857
2858 while (worklist.size()) { // Visit all uses
2859 Node *use = worklist.pop();
2860 if (!has_node(use)) continue; // Ignore dead nodes
2861 IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use );
2862 if (!loop->is_member(use_loop) && use->is_CFG()) {
2863 // Both OLD and USE are CFG nodes here.
2864 assert(use->is_Proj(), "" );
2865 Node* nnn = old_new[old->_idx];
2866
2867 Node* newuse = nullptr;
2868 if (head->is_strip_mined() && mode != IgnoreStripMined) {
2869 CountedLoopNode* cl = head->as_CountedLoop();
2870 CountedLoopEndNode* cle = cl->loopexit();
2871 // is use the projection that exits the loop from the CountedLoopEndNode?
2872 if (use->in(0) == cle) {
2873 IfFalseNode* cle_out = use->as_IfFalse();
2874 IfNode* le = cl->outer_loop_end();
2875 use = le->false_proj();
2876 use_loop = get_loop(use);
2877 if (mode == CloneIncludesStripMined) {
2878 nnn = old_new[le->_idx];
2879 } else {
2880 newuse = old_new[cle_out->_idx];
2881 }
2882 }
2883 }
2884 if (newuse == nullptr) {
2885 newuse = use->clone();
2886 }
2887
2888 // Clone the loop exit control projection
2889 if (C->do_vector_loop() && cm != nullptr) {
2890 cm->verify_insert_and_clone(use, newuse, cm->clone_idx());
2891 }
2892 newuse->set_req(0,nnn);
2893 _igvn.register_new_node_with_optimizer(newuse);
2894 set_loop(newuse, use_loop);
2895 set_idom(newuse, nnn, dom_depth(nnn) + 1 );
2896
2897 // We need a Region to merge the exit from the peeled body and the
2898 // exit from the old loop body.
2899 RegionNode *r = new RegionNode(3);
2900 uint dd_r = MIN2(dom_depth(newuse), dom_depth(use));
2901 assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" );
2902
2903 // The original user of 'use' uses 'r' instead.
2904 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
2905 Node* useuse = use->last_out(l);
2906 _igvn.rehash_node_delayed(useuse);
2907 uint uses_found = 0;
2908 if (useuse->in(0) == use) {
2909 useuse->set_req(0, r);
2910 uses_found++;
2911 if (useuse->is_CFG()) {
2912 // This is not a dom_depth > dd_r because when new
2913 // control flow is constructed by a loop opt, a node and
2914 // its dominator can end up at the same dom_depth
2915 assert(dom_depth(useuse) >= dd_r, "");
2916 set_idom(useuse, r, dom_depth(useuse));
2917 }
2918 }
2919 for (uint k = 1; k < useuse->req(); k++) {
2920 if( useuse->in(k) == use ) {
2921 useuse->set_req(k, r);
2922 uses_found++;
2923 if (useuse->is_Loop() && k == LoopNode::EntryControl) {
2924 // This is not a dom_depth > dd_r because when new
2925 // control flow is constructed by a loop opt, a node
2926 // and its dominator can end up at the same dom_depth
2927 assert(dom_depth(useuse) >= dd_r , "");
2928 set_idom(useuse, r, dom_depth(useuse));
2929 }
2930 }
2931 }
2932 l -= uses_found; // we deleted 1 or more copies of this edge
2933 }
2934
2935 assert(use->is_Proj(), "loop exit should be projection");
2936 // replace_node_and_forward_ctrl() below moves all nodes that are:
2937 // - control dependent on the loop exit or
2938 // - have control set to the loop exit
2939 // below the post-loop merge point.
2940 // replace_node_and_forward_ctrl() takes a dead control as first input.
2941 // To make it possible to use it, the loop exit projection is cloned and becomes the
2942 // new exit projection. The initial one becomes dead and is "replaced" by the region.
2943 Node* use_clone = use->clone();
2944 register_control(use_clone, use_loop, idom(use), dom_depth(use));
2945 // Now finish up 'r'
2946 r->set_req(1, newuse);
2947 r->set_req(2, use_clone);
2948 _igvn.register_new_node_with_optimizer(r);
2949 set_loop(r, use_loop);
2950 set_idom(r, (side_by_side_idom == nullptr) ? newuse->in(0) : side_by_side_idom, dd_r);
2951 replace_node_and_forward_ctrl(use, r);
2952 // Map the (cloned) old use to the new merge point
2953 old_new.map(use_clone->_idx, r);
2954 } // End of if a loop-exit test
2955 }
2956 }
2957 }
2958
2959 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
2960 IdealLoopTree* parent, bool partial) {
2961 for(uint i = 0; i < body.size(); i++ ) {
2962 Node *old = body.at(i);
2963 Node *nnn = old_new[old->_idx];
2964 // Fix CFG/Loop controlling the new node
2965 if (has_ctrl(old)) {
2966 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2967 } else {
2968 set_loop(nnn, parent);
2969 if (old->outcnt() > 0) {
2970 Node* dom = idom(old);
2971 if (old_new[dom->_idx] != nullptr) {
2972 dom = old_new[dom->_idx];
2973 set_idom(nnn, dom, dd );
2974 }
2975 }
2976 }
2977 // Correct edges to the new node
2978 for (uint j = 0; j < nnn->req(); j++) {
2979 Node *n = nnn->in(j);
2980 if (n != nullptr) {
2981 IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n);
2982 if (loop->is_member(old_in_loop)) {
2983 if (old_new[n->_idx] != nullptr) {
2984 nnn->set_req(j, old_new[n->_idx]);
2985 } else {
2986 assert(!body.contains(n), "");
2987 assert(partial, "node not cloned");
2988 }
2989 }
2990 }
2991 }
2992 _igvn.hash_find_insert(nnn);
2993 }
2994 }
2995
2996 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) {
2997 for (uint i = 0; i < body.size(); i++) {
2998 Node* old = body.at(i);
2999 Node* nnn = old->clone();
3000 old_new.map(old->_idx, nnn);
3001 if (C->do_vector_loop() && cm != nullptr) {
3002 cm->verify_insert_and_clone(old, nnn, cm->clone_idx());
3003 }
3004 _igvn.register_new_node_with_optimizer(nnn);
3005 }
3006 }
3007
3008
3009 //---------------------- stride_of_possible_iv -------------------------------------
3010 // Looks for an iff/bool/comp with one operand of the compare
3011 // being a cycle involving an add and a phi,
3012 // with an optional truncation (left-shift followed by a right-shift)
3013 // of the add. Returns zero if not an iv.
3014 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
3015 Node* trunc1 = nullptr;
3016 Node* trunc2 = nullptr;
3017 const TypeInteger* ttype = nullptr;
3018 if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) {
3019 return 0;
3020 }
3021 BoolNode* bl = iff->in(1)->as_Bool();
3022 Node* cmp = bl->in(1);
3023 if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
3024 return 0;
3025 }
3026 // Must have an invariant operand
3027 if (ctrl_is_member(get_loop(iff), cmp->in(2))) {
3028 return 0;
3029 }
3030 Node* add2 = nullptr;
3031 Node* cmp1 = cmp->in(1);
3032 if (cmp1->is_Phi()) {
3033 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
3034 Node* phi = cmp1;
3035 for (uint i = 1; i < phi->req(); i++) {
3036 Node* in = phi->in(i);
3037 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
3038 &trunc1, &trunc2, &ttype, T_INT);
3039 if (add && add->in(1) == phi) {
3040 add2 = add->in(2);
3041 break;
3042 }
3043 }
3044 } else {
3045 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
3046 Node* addtrunc = cmp1;
3047 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
3048 &trunc1, &trunc2, &ttype, T_INT);
3049 if (add && add->in(1)->is_Phi()) {
3050 Node* phi = add->in(1);
3051 for (uint i = 1; i < phi->req(); i++) {
3052 if (phi->in(i) == addtrunc) {
3053 add2 = add->in(2);
3054 break;
3055 }
3056 }
3057 }
3058 }
3059 if (add2 != nullptr) {
3060 const TypeInt* add2t = _igvn.type(add2)->is_int();
3061 if (add2t->is_con()) {
3062 return add2t->get_con();
3063 }
3064 }
3065 return 0;
3066 }
3067
3068
3069 //---------------------- stay_in_loop -------------------------------------
3070 // Return the (unique) control output node that's in the loop (if it exists.)
3071 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
3072 Node* unique = nullptr;
3073 if (!n) return nullptr;
3074 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3075 Node* use = n->fast_out(i);
3076 if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
3077 if (unique != nullptr) {
3078 return nullptr;
3079 }
3080 unique = use;
3081 }
3082 }
3083 return unique;
3084 }
3085
3086 //------------------------------ register_node -------------------------------------
3087 // Utility to register node "n" with PhaseIdealLoop
3088 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) {
3089 _igvn.register_new_node_with_optimizer(n);
3090 loop->_body.push(n);
3091 if (n->is_CFG()) {
3092 set_loop(n, loop);
3093 set_idom(n, pred, ddepth);
3094 } else {
3095 set_ctrl(n, pred);
3096 }
3097 }
3098
3099 //------------------------------ proj_clone -------------------------------------
3100 // Utility to create an if-projection
3101 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
3102 ProjNode* c = p->clone()->as_Proj();
3103 c->set_req(0, iff);
3104 return c;
3105 }
3106
3107 //------------------------------ short_circuit_if -------------------------------------
3108 // Force the iff control output to be the live_proj
3109 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
3110 guarantee(live_proj != nullptr, "null projection");
3111 int proj_con = live_proj->_con;
3112 assert(proj_con == 0 || proj_con == 1, "false or true projection");
3113 Node* con = intcon(proj_con);
3114 if (iff) {
3115 iff->set_req(1, con);
3116 }
3117 return con;
3118 }
3119
3120 //------------------------------ insert_if_before_proj -------------------------------------
3121 // Insert a new if before an if projection (* - new node)
3122 //
3123 // before
3124 // if(test)
3125 // / \
3126 // v v
3127 // other-proj proj (arg)
3128 //
3129 // after
3130 // if(test)
3131 // / \
3132 // / v
3133 // | * proj-clone
3134 // v |
3135 // other-proj v
3136 // * new_if(relop(cmp[IU](left,right)))
3137 // / \
3138 // v v
3139 // * new-proj proj
3140 // (returned)
3141 //
3142 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
3143 IfNode* iff = proj->in(0)->as_If();
3144 IdealLoopTree *loop = get_loop(proj);
3145 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3146 uint ddepth = dom_depth(proj);
3147
3148 _igvn.rehash_node_delayed(iff);
3149 _igvn.rehash_node_delayed(proj);
3150
3151 proj->set_req(0, nullptr); // temporary disconnect
3152 ProjNode* proj2 = proj_clone(proj, iff);
3153 register_node(proj2, loop, iff, ddepth);
3154
3155 Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
3156 register_node(cmp, loop, proj2, ddepth);
3157
3158 BoolNode* bol = new BoolNode(cmp, relop);
3159 register_node(bol, loop, proj2, ddepth);
3160
3161 int opcode = iff->Opcode();
3162 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
3163 IfNode* new_if = IfNode::make_with_same_profile(iff, proj2, bol);
3164 register_node(new_if, loop, proj2, ddepth);
3165
3166 proj->set_req(0, new_if); // reattach
3167 set_idom(proj, new_if, ddepth);
3168
3169 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
3170 guarantee(new_exit != nullptr, "null exit node");
3171 register_node(new_exit, get_loop(other_proj), new_if, ddepth);
3172
3173 return new_exit;
3174 }
3175
3176 //------------------------------ insert_region_before_proj -------------------------------------
3177 // Insert a region before an if projection (* - new node)
3178 //
3179 // before
3180 // if(test)
3181 // / |
3182 // v |
3183 // proj v
3184 // other-proj
3185 //
3186 // after
3187 // if(test)
3188 // / |
3189 // v |
3190 // * proj-clone v
3191 // | other-proj
3192 // v
3193 // * new-region
3194 // |
3195 // v
3196 // * dum_if
3197 // / \
3198 // v \
3199 // * dum-proj v
3200 // proj
3201 //
3202 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
3203 IfNode* iff = proj->in(0)->as_If();
3204 IdealLoopTree *loop = get_loop(proj);
3205 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
3206 uint ddepth = dom_depth(proj);
3207
3208 _igvn.rehash_node_delayed(iff);
3209 _igvn.rehash_node_delayed(proj);
3210
3211 proj->set_req(0, nullptr); // temporary disconnect
3212 ProjNode* proj2 = proj_clone(proj, iff);
3213 register_node(proj2, loop, iff, ddepth);
3214
3215 RegionNode* reg = new RegionNode(2);
3216 reg->set_req(1, proj2);
3217 register_node(reg, loop, iff, ddepth);
3218
3219 IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt);
3220 register_node(dum_if, loop, reg, ddepth);
3221
3222 proj->set_req(0, dum_if); // reattach
3223 set_idom(proj, dum_if, ddepth);
3224
3225 ProjNode* dum_proj = proj_clone(other_proj, dum_if);
3226 register_node(dum_proj, loop, dum_if, ddepth);
3227
3228 return reg;
3229 }
3230
3231 // Idea
3232 // ----
3233 // Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops
3234 // require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel
3235 // with. Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as new
3236 // loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we've used the unsigned
3237 // test alone instead:
3238 //
3239 // Before Partial Peeling:
3240 // Loop:
3241 // <peeled section>
3242 // Split off signed loop exit test
3243 // <-- CUT HERE -->
3244 // Unchanged unsigned loop exit test
3245 // <rest of unpeeled section>
3246 // goto Loop
3247 //
3248 // After Partial Peeling:
3249 // <cloned peeled section>
3250 // Cloned split off signed loop exit test
3251 // Loop:
3252 // Unchanged unsigned loop exit test
3253 // <rest of unpeeled section>
3254 // <peeled section>
3255 // Split off signed loop exit test
3256 // goto Loop
3257 //
3258 // Details
3259 // -------
3260 // Before:
3261 // if (i <u limit) Unsigned loop exit condition
3262 // / |
3263 // v v
3264 // exit-proj stay-in-loop-proj
3265 //
3266 // Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
3267 // before the CmpU on the stay-in-loop path and keep both tests:
3268 //
3269 // if (i <u limit) Signed loop exit test
3270 // / |
3271 // / if (i <u limit) Unsigned loop exit test
3272 // / / |
3273 // v v v
3274 // exit-region stay-in-loop-proj
3275 //
3276 // Implementation
3277 // --------------
3278 // We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
3279 // loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
3280 // exit tests is preserved, and their loop nesting is correct.
3281 //
3282 // To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit
3283 // test above it and kill the original unsigned loop exit test by setting it's condition to a constant
3284 // (i.e. stay-in-loop-const in graph below) such that IGVN can fold it later:
3285 //
3286 // if (stay-in-loop-const) Killed original unsigned loop exit test
3287 // / |
3288 // / v
3289 // / if (i < limit) Split off signed loop exit test
3290 // / / |
3291 // / / v
3292 // / / if (i <u limit) Cloned unsigned loop exit test
3293 // / / / |
3294 // v v v |
3295 // exit-region |
3296 // | |
3297 // dummy-if |
3298 // / | |
3299 // dead | |
3300 // v v
3301 // exit-proj stay-in-loop-proj
3302 //
3303 // Note: The dummy-if is inserted to create a region to merge the loop exits between the original to be killed unsigned
3304 // loop exit test and its exit projection while keeping the exit projection (also see insert_region_before_proj()).
3305 //
3306 // Requirements
3307 // ------------
3308 // Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
3309 // the same as before with only a single unsigned test. This is only possible if certain requirements are met.
3310 // Otherwise, we need to bail out (see comments in the code below).
3311 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
3312 const bool Signed = true;
3313 const bool Unsigned = false;
3314
3315 BoolNode* bol = if_cmpu->in(1)->as_Bool();
3316 if (bol->_test._test != BoolTest::lt) {
3317 return nullptr;
3318 }
3319 CmpNode* cmpu = bol->in(1)->as_Cmp();
3320 assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");
3321
3322 int stride = stride_of_possible_iv(if_cmpu);
3323 if (stride == 0) {
3324 return nullptr;
3325 }
3326
3327 Node* lp_proj = stay_in_loop(if_cmpu, loop);
3328 guarantee(lp_proj != nullptr, "null loop node");
3329
3330 ProjNode* lp_continue = lp_proj->as_Proj();
3331 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
3332 if (!lp_exit->is_IfFalse()) {
3333 // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit).
3334 // We therefore can't add a single exit condition.
3335 return nullptr;
3336 }
3337 // The unsigned loop exit condition is
3338 // !(i <u limit)
3339 // = i >=u limit
3340 //
3341 // First, we note that for any x for which
3342 // 0 <= x <= INT_MAX
3343 // we can convert x to an unsigned int and still get the same guarantee:
3344 // 0 <= (uint) x <= INT_MAX = (uint) INT_MAX
3345 // 0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX (LEMMA)
3346 //
3347 // With that in mind, if
3348 // limit >= 0 (COND)
3349 // then the unsigned loop exit condition
3350 // i >=u limit (ULE)
3351 // is equivalent to
3352 // i < 0 || i >= limit (SLE-full)
3353 // because either i is negative and therefore always greater than MAX_INT when converting to unsigned
3354 // (uint) i >=u MAX_INT >= limit >= 0
3355 // or otherwise
3356 // i >= limit >= 0
3357 // holds due to (LEMMA).
3358 //
3359 // For completeness, a counterexample with limit < 0:
3360 // Assume i = -3 and limit = -2:
3361 // i < 0
3362 // -2 < 0
3363 // is true and thus also "i < 0 || i >= limit". But
3364 // i >=u limit
3365 // -3 >=u -2
3366 // is false.
3367 Node* limit = cmpu->in(2);
3368 const TypeInt* type_limit = _igvn.type(limit)->is_int();
3369 if (type_limit->_lo < 0) {
3370 return nullptr;
3371 }
3372
3373 // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
3374 // stride < 0:
3375 // i < 0 (SLE = SLE-negative)
3376 // stride > 0:
3377 // i >= limit (SLE = SLE-positive)
3378 // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
3379 //
3380 // Loop:
3381 // <peeled section>
3382 // i >= limit (SLE-positive)
3383 // <-- CUT HERE -->
3384 // i >=u limit (ULE)
3385 // <rest of unpeeled section>
3386 // goto Loop
3387 //
3388 // We exit the loop if:
3389 // (SLE) is true OR (ULE) is true
3390 // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
3391 // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
3392 // (SLE) IMPLIES (ULE)
3393 // This indeed holds when (COND) is given:
3394 // - stride > 0:
3395 // i >= limit // (SLE = SLE-positive)
3396 // i >= limit >= 0 // (COND)
3397 // i >=u limit >= 0 // (LEMMA)
3398 // which is the unsigned loop exit condition (ULE).
3399 // - stride < 0:
3400 // i < 0 // (SLE = SLE-negative)
3401 // (uint) i >u MAX_INT // (NEG) all negative values are greater than MAX_INT when converted to unsigned
3402 // MAX_INT >= limit >= 0 // (COND)
3403 // MAX_INT >=u limit >= 0 // (LEMMA)
3404 // and thus from (NEG) and (LEMMA):
3405 // i >=u limit
3406 // which is the unsigned loop exit condition (ULE).
3407 //
3408 //
3409 // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0):
3410 // <cloned peeled section>
3411 // i >= limit (SLE-positive)
3412 // Loop:
3413 // i >=u limit (ULE)
3414 // <rest of unpeeled section>
3415 // <peeled section>
3416 // i >= limit (SLE-positive)
3417 // goto Loop
3418 Node* rhs_cmpi;
3419 if (stride > 0) {
3420 rhs_cmpi = limit; // For i >= limit
3421 } else {
3422 rhs_cmpi = makecon(TypeInt::ZERO); // For i < 0
3423 }
3424 // Create a new region on the exit path
3425 RegionNode* reg = insert_region_before_proj(lp_exit);
3426 guarantee(reg != nullptr, "null region node");
3427
3428 // Clone the if-cmpu-true-false using a signed compare
3429 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
3430 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue);
3431 reg->add_req(cmpi_exit);
3432
3433 // Clone the if-cmpu-true-false
3434 BoolTest::mask rel_u = bol->_test._test;
3435 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
3436 reg->add_req(cmpu_exit);
3437
3438 // Force original if to stay in loop.
3439 short_circuit_if(if_cmpu, lp_continue);
3440
3441 return cmpi_exit->in(0)->as_If();
3442 }
3443
3444 //------------------------------ remove_cmpi_loop_exit -------------------------------------
3445 // Remove a previously inserted signed compare loop exit.
3446 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
3447 Node* lp_proj = stay_in_loop(if_cmp, loop);
3448 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
3449 stay_in_loop(lp_proj, loop)->is_If() &&
3450 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
3451 Node* con = makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
3452 if_cmp->set_req(1, con);
3453 }
3454
3455 //------------------------------ scheduled_nodelist -------------------------------------
3456 // Create a post order schedule of nodes that are in the
3457 // "member" set. The list is returned in "sched".
3458 // The first node in "sched" is the loop head, followed by
3459 // nodes which have no inputs in the "member" set, and then
3460 // followed by the nodes that have an immediate input dependence
3461 // on a node in "sched".
3462 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {
3463
3464 assert(member.test(loop->_head->_idx), "loop head must be in member set");
3465 VectorSet visited;
3466 Node_Stack nstack(loop->_body.size());
3467
3468 Node* n = loop->_head; // top of stack is cached in "n"
3469 uint idx = 0;
3470 visited.set(n->_idx);
3471
3472 // Initially push all with no inputs from within member set
3473 for(uint i = 0; i < loop->_body.size(); i++ ) {
3474 Node *elt = loop->_body.at(i);
3475 if (member.test(elt->_idx)) {
3476 bool found = false;
3477 for (uint j = 0; j < elt->req(); j++) {
3478 Node* def = elt->in(j);
3479 if (def && member.test(def->_idx) && def != elt) {
3480 found = true;
3481 break;
3482 }
3483 }
3484 if (!found && elt != loop->_head) {
3485 nstack.push(n, idx);
3486 n = elt;
3487 assert(!visited.test(n->_idx), "not seen yet");
3488 visited.set(n->_idx);
3489 }
3490 }
3491 }
3492
3493 // traverse out's that are in the member set
3494 while (true) {
3495 if (idx < n->outcnt()) {
3496 Node* use = n->raw_out(idx);
3497 idx++;
3498 if (!visited.test_set(use->_idx)) {
3499 if (member.test(use->_idx)) {
3500 nstack.push(n, idx);
3501 n = use;
3502 idx = 0;
3503 }
3504 }
3505 } else {
3506 // All outputs processed
3507 sched.push(n);
3508 if (nstack.is_empty()) break;
3509 n = nstack.node();
3510 idx = nstack.index();
3511 nstack.pop();
3512 }
3513 }
3514 }
3515
3516
3517 //------------------------------ has_use_in_set -------------------------------------
3518 // Has a use in the vector set
3519 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
3520 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3521 Node* use = n->fast_out(j);
3522 if (vset.test(use->_idx)) {
3523 return true;
3524 }
3525 }
3526 return false;
3527 }
3528
3529
3530 //------------------------------ has_use_internal_to_set -------------------------------------
3531 // Has use internal to the vector set (ie. not in a phi at the loop head)
3532 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
3533 Node* head = loop->_head;
3534 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3535 Node* use = n->fast_out(j);
3536 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
3537 return true;
3538 }
3539 }
3540 return false;
3541 }
3542
3543
3544 //------------------------------ clone_for_use_outside_loop -------------------------------------
3545 // clone "n" for uses that are outside of loop
3546 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
3547 int cloned = 0;
3548 assert(worklist.size() == 0, "should be empty");
3549 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3550 Node* use = n->fast_out(j);
3551 if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
3552 worklist.push(use);
3553 }
3554 }
3555
3556 if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor,
3557 "Too many clones required in clone_for_use_outside_loop in partial peeling")) {
3558 return -1;
3559 }
3560
3561 while( worklist.size() ) {
3562 Node *use = worklist.pop();
3563 if (!has_node(use) || use->in(0) == C->top()) continue;
3564 uint j;
3565 for (j = 0; j < use->req(); j++) {
3566 if (use->in(j) == n) break;
3567 }
3568 assert(j < use->req(), "must be there");
3569
3570 // clone "n" and insert it between the inputs of "n" and the use outside the loop
3571 Node* n_clone = n->clone();
3572 _igvn.replace_input_of(use, j, n_clone);
3573 cloned++;
3574 Node* use_c;
3575 if (!use->is_Phi()) {
3576 use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
3577 } else {
3578 // Use in a phi is considered a use in the associated predecessor block
3579 use_c = use->in(0)->in(j);
3580 }
3581 set_ctrl(n_clone, use_c);
3582 assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
3583 get_loop(use_c)->_body.push(n_clone);
3584 _igvn.register_new_node_with_optimizer(n_clone);
3585 #ifndef PRODUCT
3586 if (TracePartialPeeling) {
3587 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
3588 }
3589 #endif
3590 }
3591 return cloned;
3592 }
3593
3594
3595 //------------------------------ clone_for_special_use_inside_loop -------------------------------------
3596 // clone "n" for special uses that are in the not_peeled region.
3597 // If these def-uses occur in separate blocks, the code generator
3598 // marks the method as not compilable. For example, if a "BoolNode"
3599 // is in a different basic block than the "IfNode" that uses it, then
3600 // the compilation is aborted in the code generator.
3601 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
3602 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
3603 if (n->is_Phi() || n->is_Load()) {
3604 return;
3605 }
3606 assert(worklist.size() == 0, "should be empty");
3607 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3608 Node* use = n->fast_out(j);
3609 if ( not_peel.test(use->_idx) &&
3610 (use->is_If() || use->is_CMove() || use->is_Bool() || use->is_OpaqueInitializedAssertionPredicate()) &&
3611 use->in(1) == n) {
3612 worklist.push(use);
3613 }
3614 }
3615 if (worklist.size() > 0) {
3616 // clone "n" and insert it between inputs of "n" and the use
3617 Node* n_clone = n->clone();
3618 loop->_body.push(n_clone);
3619 _igvn.register_new_node_with_optimizer(n_clone);
3620 set_ctrl(n_clone, get_ctrl(n));
3621 sink_list.push(n_clone);
3622 not_peel.set(n_clone->_idx);
3623 #ifndef PRODUCT
3624 if (TracePartialPeeling) {
3625 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
3626 }
3627 #endif
3628 while( worklist.size() ) {
3629 Node *use = worklist.pop();
3630 _igvn.rehash_node_delayed(use);
3631 for (uint j = 1; j < use->req(); j++) {
3632 if (use->in(j) == n) {
3633 use->set_req(j, n_clone);
3634 }
3635 }
3636 }
3637 }
3638 }
3639
3640
3641 //------------------------------ insert_phi_for_loop -------------------------------------
3642 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
3643 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
3644 Node *phi = PhiNode::make(lp, back_edge_val);
3645 phi->set_req(LoopNode::EntryControl, lp_entry_val);
3646 // Use existing phi if it already exists
3647 Node *hit = _igvn.hash_find_insert(phi);
3648 if( hit == nullptr ) {
3649 _igvn.register_new_node_with_optimizer(phi);
3650 set_ctrl(phi, lp);
3651 } else {
3652 // Remove the new phi from the graph and use the hit
3653 _igvn.remove_dead_node(phi);
3654 phi = hit;
3655 }
3656 _igvn.replace_input_of(use, idx, phi);
3657 }
3658
3659 #ifdef ASSERT
3660 //------------------------------ is_valid_loop_partition -------------------------------------
3661 // Validate the loop partition sets: peel and not_peel
3662 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
3663 VectorSet& not_peel ) {
3664 uint i;
3665 // Check that peel_list entries are in the peel set
3666 for (i = 0; i < peel_list.size(); i++) {
3667 if (!peel.test(peel_list.at(i)->_idx)) {
3668 return false;
3669 }
3670 }
3671 // Check at loop members are in one of peel set or not_peel set
3672 for (i = 0; i < loop->_body.size(); i++ ) {
3673 Node *def = loop->_body.at(i);
3674 uint di = def->_idx;
3675 // Check that peel set elements are in peel_list
3676 if (peel.test(di)) {
3677 if (not_peel.test(di)) {
3678 return false;
3679 }
3680 // Must be in peel_list also
3681 bool found = false;
3682 for (uint j = 0; j < peel_list.size(); j++) {
3683 if (peel_list.at(j)->_idx == di) {
3684 found = true;
3685 break;
3686 }
3687 }
3688 if (!found) {
3689 return false;
3690 }
3691 } else if (not_peel.test(di)) {
3692 if (peel.test(di)) {
3693 return false;
3694 }
3695 } else {
3696 return false;
3697 }
3698 }
3699 return true;
3700 }
3701
3702 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
3703 // Ensure a use outside of loop is of the right form
3704 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
3705 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3706 return (use->is_Phi() &&
3707 use_c->is_Region() && use_c->req() == 3 &&
3708 (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
3709 use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
3710 use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
3711 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
3712 }
3713
3714 //------------------------------ is_valid_clone_loop_form -------------------------------------
3715 // Ensure that all uses outside of loop are of the right form
3716 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
3717 uint orig_exit_idx, uint clone_exit_idx) {
3718 uint len = peel_list.size();
3719 for (uint i = 0; i < len; i++) {
3720 Node *def = peel_list.at(i);
3721
3722 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3723 Node *use = def->fast_out(j);
3724 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3725 if (!loop->is_member(get_loop(use_c))) {
3726 // use is not in the loop, check for correct structure
3727 if (use->in(0) == def) {
3728 // Okay
3729 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
3730 return false;
3731 }
3732 }
3733 }
3734 }
3735 return true;
3736 }
3737 #endif
3738
3739 //------------------------------ partial_peel -------------------------------------
3740 // Partially peel (aka loop rotation) the top portion of a loop (called
3741 // the peel section below) by cloning it and placing one copy just before
3742 // the new loop head and the other copy at the bottom of the new loop.
3743 //
3744 // before after where it came from
3745 //
3746 // stmt1 stmt1
3747 // loop: stmt2 clone
3748 // stmt2 if condA goto exitA clone
3749 // if condA goto exitA new_loop: new
3750 // stmt3 stmt3 clone
3751 // if !condB goto loop if condB goto exitB clone
3752 // exitB: stmt2 orig
3753 // stmt4 if !condA goto new_loop orig
3754 // exitA: goto exitA
3755 // exitB:
3756 // stmt4
3757 // exitA:
3758 //
3759 // Step 1: find the cut point: an exit test on probable
3760 // induction variable.
3761 // Step 2: schedule (with cloning) operations in the peel
3762 // section that can be executed after the cut into
3763 // the section that is not peeled. This may need
3764 // to clone operations into exit blocks. For
3765 // instance, a reference to A[i] in the not-peel
3766 // section and a reference to B[i] in an exit block
3767 // may cause a left-shift of i by 2 to be placed
3768 // in the peel block. This step will clone the left
3769 // shift into the exit block and sink the left shift
3770 // from the peel to the not-peel section.
3771 // Step 3: clone the loop, retarget the control, and insert
3772 // phis for values that are live across the new loop
3773 // head. This is very dependent on the graph structure
3774 // from clone_loop. It creates region nodes for
3775 // exit control and associated phi nodes for values
3776 // flow out of the loop through that exit. The region
3777 // node is dominated by the clone's control projection.
3778 // So the clone's peel section is placed before the
3779 // new loop head, and the clone's not-peel section is
3780 // forms the top part of the new loop. The original
3781 // peel section forms the tail of the new loop.
3782 // Step 4: update the dominator tree and recompute the
3783 // dominator depth.
3784 //
3785 // orig
3786 //
3787 // stmt1
3788 // |
3789 // v
3790 // predicates
3791 // |
3792 // v
3793 // loop<----+
3794 // | |
3795 // stmt2 |
3796 // | |
3797 // v |
3798 // ifA |
3799 // / | |
3800 // v v |
3801 // false true ^ <-- last_peel
3802 // / | |
3803 // / ===|==cut |
3804 // / stmt3 | <-- first_not_peel
3805 // / | |
3806 // | v |
3807 // v ifB |
3808 // exitA: / \ |
3809 // / \ |
3810 // v v |
3811 // false true |
3812 // / \ |
3813 // / ----+
3814 // |
3815 // v
3816 // exitB:
3817 // stmt4
3818 //
3819 //
3820 // after clone loop
3821 //
3822 // stmt1
3823 // |
3824 // v
3825 // predicates
3826 // / \
3827 // clone / \ orig
3828 // / \
3829 // / \
3830 // v v
3831 // +---->loop loop<----+
3832 // | | | |
3833 // | stmt2 stmt2 |
3834 // | | | |
3835 // | v v |
3836 // | ifA ifA |
3837 // | | \ / | |
3838 // | v v v v |
3839 // ^ true false false true ^ <-- last_peel
3840 // | | ^ \ / | |
3841 // | cut==|== \ \ / ===|==cut |
3842 // | stmt3 \ \ / stmt3 | <-- first_not_peel
3843 // | | dom | | | |
3844 // | v \ 1v v2 v |
3845 // | ifB regionA ifB |
3846 // | / \ | / \ |
3847 // | / \ v / \ |
3848 // | v v exitA: v v |
3849 // | true false false true |
3850 // | / ^ \ / \ |
3851 // +---- \ \ / ----+
3852 // dom \ /
3853 // \ 1v v2
3854 // regionB
3855 // |
3856 // v
3857 // exitB:
3858 // stmt4
3859 //
3860 //
3861 // after partial peel
3862 //
3863 // stmt1
3864 // |
3865 // v
3866 // predicates
3867 // /
3868 // clone / orig
3869 // / TOP
3870 // / \
3871 // v v
3872 // TOP->loop loop----+
3873 // | | |
3874 // stmt2 stmt2 |
3875 // | | |
3876 // v v |
3877 // ifA ifA |
3878 // | \ / | |
3879 // v v v v |
3880 // true false false true | <-- last_peel
3881 // | ^ \ / +------|---+
3882 // +->newloop \ \ / === ==cut | |
3883 // | stmt3 \ \ / TOP | |
3884 // | | dom | | stmt3 | | <-- first_not_peel
3885 // | v \ 1v v2 v | |
3886 // | ifB regionA ifB ^ v
3887 // | / \ | / \ | |
3888 // | / \ v / \ | |
3889 // | v v exitA: v v | |
3890 // | true false false true | |
3891 // | / ^ \ / \ | |
3892 // | | \ \ / v | |
3893 // | | dom \ / TOP | |
3894 // | | \ 1v v2 | |
3895 // ^ v regionB | |
3896 // | | | | |
3897 // | | v ^ v
3898 // | | exitB: | |
3899 // | | stmt4 | |
3900 // | +------------>-----------------+ |
3901 // | |
3902 // +-----------------<---------------------+
3903 //
3904 //
3905 // final graph
3906 //
3907 // stmt1
3908 // |
3909 // v
3910 // predicates
3911 // |
3912 // v
3913 // stmt2 clone
3914 // |
3915 // v
3916 // ........> ifA clone
3917 // : / |
3918 // dom / |
3919 // : v v
3920 // : false true
3921 // : | |
3922 // : | v
3923 // : | newloop<-----+
3924 // : | | |
3925 // : | stmt3 clone |
3926 // : | | |
3927 // : | v |
3928 // : | ifB |
3929 // : | / \ |
3930 // : | v v |
3931 // : | false true |
3932 // : | | | |
3933 // : | v stmt2 |
3934 // : | exitB: | |
3935 // : | stmt4 v |
3936 // : | ifA orig |
3937 // : | / \ |
3938 // : | / \ |
3939 // : | v v |
3940 // : | false true |
3941 // : | / \ |
3942 // : v v -----+
3943 // RegionA
3944 // |
3945 // v
3946 // exitA
3947 //
3948 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
3949
3950 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3951 if (!loop->_head->is_Loop()) {
3952 return false;
3953 }
3954 LoopNode *head = loop->_head->as_Loop();
3955
3956 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3957 return false;
3958 }
3959
3960 // Check for complex exit control
3961 for (uint ii = 0; ii < loop->_body.size(); ii++) {
3962 Node *n = loop->_body.at(ii);
3963 int opc = n->Opcode();
3964 if (n->is_Call() ||
3965 opc == Op_Catch ||
3966 opc == Op_CatchProj ||
3967 opc == Op_Jump ||
3968 opc == Op_JumpProj) {
3969 #ifndef PRODUCT
3970 if (TracePartialPeeling) {
3971 tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3972 }
3973 #endif
3974 return false;
3975 }
3976 }
3977
3978 int dd = dom_depth(head);
3979
3980 // Step 1: find cut point
3981
3982 // Walk up dominators to loop head looking for first loop exit
3983 // which is executed on every path thru loop.
3984 IfNode *peel_if = nullptr;
3985 IfNode *peel_if_cmpu = nullptr;
3986
3987 Node *iff = loop->tail();
3988 while (iff != head) {
3989 if (iff->is_If()) {
3990 Node *ctrl = get_ctrl(iff->in(1));
3991 if (ctrl->is_top()) return false; // Dead test on live IF.
3992 // If loop-varying exit-test, check for induction variable
3993 if (loop->is_member(get_loop(ctrl)) &&
3994 loop->is_loop_exit(iff) &&
3995 is_possible_iv_test(iff)) {
3996 Node* cmp = iff->in(1)->in(1);
3997 if (cmp->Opcode() == Op_CmpI) {
3998 peel_if = iff->as_If();
3999 } else {
4000 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
4001 peel_if_cmpu = iff->as_If();
4002 }
4003 }
4004 }
4005 iff = idom(iff);
4006 }
4007
4008 // Prefer signed compare over unsigned compare.
4009 IfNode* new_peel_if = nullptr;
4010 if (peel_if == nullptr) {
4011 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) {
4012 return false; // No peel point found
4013 }
4014 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
4015 if (new_peel_if == nullptr) {
4016 return false; // No peel point found
4017 }
4018 peel_if = new_peel_if;
4019 }
4020 Node* last_peel = stay_in_loop(peel_if, loop);
4021 Node* first_not_peeled = stay_in_loop(last_peel, loop);
4022 if (first_not_peeled == nullptr || first_not_peeled == head) {
4023 return false;
4024 }
4025
4026 #ifndef PRODUCT
4027 if (TraceLoopOpts) {
4028 tty->print("PartialPeel ");
4029 loop->dump_head();
4030 }
4031
4032 if (TracePartialPeeling) {
4033 tty->print_cr("before partial peel one iteration");
4034 Node_List wl;
4035 Node* t = head->in(2);
4036 while (true) {
4037 wl.push(t);
4038 if (t == head) break;
4039 t = idom(t);
4040 }
4041 while (wl.size() > 0) {
4042 Node* tt = wl.pop();
4043 tt->dump();
4044 if (tt == last_peel) tty->print_cr("-- cut --");
4045 }
4046 }
4047 #endif
4048
4049 C->print_method(PHASE_BEFORE_PARTIAL_PEELING, 4, head);
4050
4051 VectorSet peel;
4052 VectorSet not_peel;
4053 Node_List peel_list;
4054 Node_List worklist;
4055 Node_List sink_list;
4056
4057 uint estimate = loop->est_loop_clone_sz(1);
4058 if (exceeding_node_budget(estimate)) {
4059 return false;
4060 }
4061
4062 // Set of cfg nodes to peel are those that are executable from
4063 // the head through last_peel.
4064 assert(worklist.size() == 0, "should be empty");
4065 worklist.push(head);
4066 peel.set(head->_idx);
4067 while (worklist.size() > 0) {
4068 Node *n = worklist.pop();
4069 if (n != last_peel) {
4070 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
4071 Node* use = n->fast_out(j);
4072 if (use->is_CFG() &&
4073 loop->is_member(get_loop(use)) &&
4074 !peel.test_set(use->_idx)) {
4075 worklist.push(use);
4076 }
4077 }
4078 }
4079 }
4080
4081 // Set of non-cfg nodes to peel are those that are control
4082 // dependent on the cfg nodes.
4083 for (uint i = 0; i < loop->_body.size(); i++) {
4084 Node *n = loop->_body.at(i);
4085 Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
4086 if (peel.test(n_c->_idx)) {
4087 peel.set(n->_idx);
4088 } else {
4089 not_peel.set(n->_idx);
4090 }
4091 }
4092
4093 // Step 2: move operations from the peeled section down into the
4094 // not-peeled section
4095
4096 // Get a post order schedule of nodes in the peel region
4097 // Result in right-most operand.
4098 scheduled_nodelist(loop, peel, peel_list);
4099
4100 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4101
4102 // For future check for too many new phis
4103 uint old_phi_cnt = 0;
4104 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
4105 Node* use = head->fast_out(j);
4106 if (use->is_Phi()) old_phi_cnt++;
4107 }
4108
4109 #ifndef PRODUCT
4110 if (TracePartialPeeling) {
4111 tty->print_cr("\npeeled list");
4112 }
4113 #endif
4114
4115 // Evacuate nodes in peel region into the not_peeled region if possible
4116 bool too_many_clones = false;
4117 uint new_phi_cnt = 0;
4118 uint cloned_for_outside_use = 0;
4119 for (uint i = 0; i < peel_list.size();) {
4120 Node* n = peel_list.at(i);
4121 #ifndef PRODUCT
4122 if (TracePartialPeeling) n->dump();
4123 #endif
4124 bool incr = true;
4125 if (!n->is_CFG()) {
4126 if (has_use_in_set(n, not_peel)) {
4127 // If not used internal to the peeled region,
4128 // move "n" from peeled to not_peeled region.
4129 if (!has_use_internal_to_set(n, peel, loop)) {
4130 // if not pinned and not a load (which maybe anti-dependent on a store)
4131 // and not a CMove (Matcher expects only bool->cmove).
4132 if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) {
4133 int new_clones = clone_for_use_outside_loop(loop, n, worklist);
4134 if (C->failing()) return false;
4135 if (new_clones == -1) {
4136 too_many_clones = true;
4137 break;
4138 }
4139 cloned_for_outside_use += new_clones;
4140 sink_list.push(n);
4141 peel.remove(n->_idx);
4142 not_peel.set(n->_idx);
4143 peel_list.remove(i);
4144 incr = false;
4145 #ifndef PRODUCT
4146 if (TracePartialPeeling) {
4147 tty->print_cr("sink to not_peeled region: %d newbb: %d",
4148 n->_idx, get_ctrl(n)->_idx);
4149 }
4150 #endif
4151 }
4152 } else {
4153 // Otherwise check for special def-use cases that span
4154 // the peel/not_peel boundary such as bool->if
4155 clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
4156 new_phi_cnt++;
4157 }
4158 }
4159 }
4160 if (incr) i++;
4161 }
4162
4163 estimate += cloned_for_outside_use + new_phi_cnt;
4164 bool exceed_node_budget = !may_require_nodes(estimate);
4165 bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;
4166
4167 if (too_many_clones || exceed_node_budget || exceed_phi_limit) {
4168 #ifndef PRODUCT
4169 if (TracePartialPeeling && exceed_phi_limit) {
4170 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c",
4171 new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F');
4172 }
4173 #endif
4174 if (new_peel_if != nullptr) {
4175 remove_cmpi_loop_exit(new_peel_if, loop);
4176 }
4177 // Inhibit more partial peeling on this loop
4178 assert(!head->is_partial_peel_loop(), "not partial peeled");
4179 head->mark_partial_peel_failed();
4180 if (cloned_for_outside_use > 0) {
4181 // Terminate this round of loop opts because
4182 // the graph outside this loop was changed.
4183 C->set_major_progress();
4184 return true;
4185 }
4186 return false;
4187 }
4188
4189 // Step 3: clone loop, retarget control, and insert new phis
4190
4191 // Create new loop head for new phis and to hang
4192 // the nodes being moved (sinked) from the peel region.
4193 LoopNode* new_head = new LoopNode(last_peel, last_peel);
4194 new_head->set_unswitch_count(head->unswitch_count()); // Preserve
4195 _igvn.register_new_node_with_optimizer(new_head);
4196 assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
4197 _igvn.replace_input_of(first_not_peeled, 0, new_head);
4198 set_loop(new_head, loop);
4199 loop->_body.push(new_head);
4200 not_peel.set(new_head->_idx);
4201 set_idom(new_head, last_peel, dom_depth(first_not_peeled));
4202 set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
4203
4204 while (sink_list.size() > 0) {
4205 Node* n = sink_list.pop();
4206 set_ctrl(n, new_head);
4207 }
4208
4209 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
4210
4211 clone_loop(loop, old_new, dd, IgnoreStripMined);
4212
4213 const uint clone_exit_idx = 1;
4214 const uint orig_exit_idx = 2;
4215 assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");
4216
4217 Node* head_clone = old_new[head->_idx];
4218 LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop();
4219 Node* orig_tail_clone = head_clone->in(2);
4220
4221 // Add phi if "def" node is in peel set and "use" is not
4222
4223 for (uint i = 0; i < peel_list.size(); i++) {
4224 Node *def = peel_list.at(i);
4225 if (!def->is_CFG()) {
4226 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
4227 Node *use = def->fast_out(j);
4228 if (has_node(use) && use->in(0) != C->top() &&
4229 (!peel.test(use->_idx) ||
4230 (use->is_Phi() && use->in(0) == head)) ) {
4231 worklist.push(use);
4232 }
4233 }
4234 while( worklist.size() ) {
4235 Node *use = worklist.pop();
4236 for (uint j = 1; j < use->req(); j++) {
4237 Node* n = use->in(j);
4238 if (n == def) {
4239
4240 // "def" is in peel set, "use" is not in peel set
4241 // or "use" is in the entry boundary (a phi) of the peel set
4242
4243 Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
4244
4245 if ( loop->is_member(get_loop( use_c )) ) {
4246 // use is in loop
4247 if (old_new[use->_idx] != nullptr) { // null for dead code
4248 Node* use_clone = old_new[use->_idx];
4249 _igvn.replace_input_of(use, j, C->top());
4250 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
4251 }
4252 } else {
4253 assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
4254 // use is not in the loop, check if the live range includes the cut
4255 Node* lp_if = use_c->in(orig_exit_idx)->in(0);
4256 if (not_peel.test(lp_if->_idx)) {
4257 assert(j == orig_exit_idx, "use from original loop");
4258 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
4259 }
4260 }
4261 }
4262 }
4263 }
4264 }
4265 }
4266
4267 // Step 3b: retarget control
4268
4269 // Redirect control to the new loop head if a cloned node in
4270 // the not_peeled region has control that points into the peeled region.
4271 // This necessary because the cloned peeled region will be outside
4272 // the loop.
4273 // from to
4274 // cloned-peeled <---+
4275 // new_head_clone: | <--+
4276 // cloned-not_peeled in(0) in(0)
4277 // orig-peeled
4278
4279 for (uint i = 0; i < loop->_body.size(); i++) {
4280 Node *n = loop->_body.at(i);
4281 if (!n->is_CFG() && n->in(0) != nullptr &&
4282 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
4283 Node* n_clone = old_new[n->_idx];
4284 if (n_clone->depends_only_on_test()) {
4285 // Pin array access nodes: control is updated here to the loop head. If, after some transformations, the
4286 // backedge is removed, an array load could become dependent on a condition that's not a range check for that
4287 // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk
4288 // floating above its range check.
4289 Node* pinned_clone = n_clone->pin_array_access_node();
4290 if (pinned_clone != nullptr) {
4291 register_new_node_with_ctrl_of(pinned_clone, n_clone);
4292 old_new.map(n->_idx, pinned_clone);
4293 _igvn.replace_node(n_clone, pinned_clone);
4294 n_clone = pinned_clone;
4295 }
4296 }
4297 _igvn.replace_input_of(n_clone, 0, new_head_clone);
4298 }
4299 }
4300
4301 // Backedge of the surviving new_head (the clone) is original last_peel
4302 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
4303
4304 // Cut first node in original not_peel set
4305 _igvn.rehash_node_delayed(new_head); // Multiple edge updates:
4306 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of
4307 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls
4308
4309 // Copy head_clone back-branch info to original head
4310 // and remove original head's loop entry and
4311 // clone head's back-branch
4312 _igvn.rehash_node_delayed(head); // Multiple edge updates
4313 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
4314 head->set_req(LoopNode::LoopBackControl, C->top());
4315 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
4316
4317 // Similarly modify the phis
4318 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
4319 Node* use = head->fast_out(k);
4320 if (use->is_Phi() && use->outcnt() > 0) {
4321 Node* use_clone = old_new[use->_idx];
4322 _igvn.rehash_node_delayed(use); // Multiple edge updates
4323 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
4324 use->set_req(LoopNode::LoopBackControl, C->top());
4325 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
4326 }
4327 }
4328
4329 // Step 4: update dominator tree and dominator depth
4330
4331 set_idom(head, orig_tail_clone, dd);
4332 recompute_dom_depth();
4333
4334 // Inhibit more partial peeling on this loop
4335 new_head_clone->set_partial_peel_loop();
4336 C->set_major_progress();
4337 loop->record_for_igvn();
4338
4339 #ifndef PRODUCT
4340 if (TracePartialPeeling) {
4341 tty->print_cr("\nafter partial peel one iteration");
4342 Node_List wl;
4343 Node* t = last_peel;
4344 while (true) {
4345 wl.push(t);
4346 if (t == head_clone) break;
4347 t = idom(t);
4348 }
4349 while (wl.size() > 0) {
4350 Node* tt = wl.pop();
4351 if (tt == head) tty->print_cr("orig head");
4352 else if (tt == new_head_clone) tty->print_cr("new head");
4353 else if (tt == head_clone) tty->print_cr("clone head");
4354 tt->dump();
4355 }
4356 }
4357 #endif
4358
4359 C->print_method(PHASE_AFTER_PARTIAL_PEELING, 4, new_head_clone);
4360
4361 return true;
4362 }
4363
4364 #ifdef ASSERT
4365
4366 // Moves Template Assertion Predicates to a target loop by cloning and killing the old ones. The target loop is the
4367 // original, not-cloned loop. This is currently only used with StressLoopBackedge which is a develop flag only and
4368 // false with product builds. We can therefore guard it with an ifdef. More details can be found at the use-site.
4369 class MoveAssertionPredicatesVisitor : public PredicateVisitor {
4370 ClonePredicateToTargetLoop _clone_predicate_to_loop;
4371 PhaseIdealLoop* const _phase;
4372
4373 public:
4374 MoveAssertionPredicatesVisitor(LoopNode* target_loop_head,
4375 const NodeInSingleLoopBody &node_in_loop_body,
4376 PhaseIdealLoop* phase)
4377 : _clone_predicate_to_loop(target_loop_head, node_in_loop_body, phase),
4378 _phase(phase) {
4379 }
4380 NONCOPYABLE(MoveAssertionPredicatesVisitor);
4381
4382 using PredicateVisitor::visit;
4383
4384 void visit(const TemplateAssertionPredicate& template_assertion_predicate) override {
4385 _clone_predicate_to_loop.clone_template_assertion_predicate(template_assertion_predicate);
4386 template_assertion_predicate.kill(_phase->igvn());
4387 }
4388 };
4389 #endif // ASSERT
4390
4391 // Transform:
4392 //
4393 // loop<-----------------+
4394 // | |
4395 // stmt1 stmt2 .. stmtn |
4396 // | | | |
4397 // \ | / |
4398 // v v v |
4399 // region |
4400 // | |
4401 // shared_stmt |
4402 // | |
4403 // v |
4404 // if |
4405 // / \ |
4406 // | -----------+
4407 // v
4408 //
4409 // into:
4410 //
4411 // loop<-------------------+
4412 // | |
4413 // v |
4414 // +->loop |
4415 // | | |
4416 // | stmt1 stmt2 .. stmtn |
4417 // | | | | |
4418 // | | \ / |
4419 // | | v v |
4420 // | | region1 |
4421 // | | | |
4422 // | shared_stmt shared_stmt |
4423 // | | | |
4424 // | v v |
4425 // | if if |
4426 // | /\ / \ |
4427 // +-- | | -------+
4428 // \ /
4429 // v v
4430 // region2
4431 //
4432 // (region2 is shown to merge mirrored projections of the loop exit
4433 // ifs to make the diagram clearer but they really merge the same
4434 // projection)
4435 //
4436 // Conditions for this transformation to trigger:
4437 // - the path through stmt1 is frequent enough
4438 // - the inner loop will be turned into a counted loop after transformation
4439 bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) {
4440 if (!DuplicateBackedge) {
4441 return false;
4442 }
4443 assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only");
4444 if (!loop->_head->is_Loop()) {
4445 return false;
4446 }
4447
4448 uint estimate = loop->est_loop_clone_sz(1);
4449 if (exceeding_node_budget(estimate)) {
4450 return false;
4451 }
4452
4453 LoopNode *head = loop->_head->as_Loop();
4454
4455 Node* region = nullptr;
4456 IfNode* exit_test = nullptr;
4457 uint inner;
4458 float f;
4459 #ifdef ASSERT
4460 if (StressDuplicateBackedge) {
4461 if (head->is_strip_mined()) {
4462 return false;
4463 }
4464 Node* c = head->in(LoopNode::LoopBackControl);
4465
4466 while (c != head) {
4467 if (c->is_Region()) {
4468 region = c;
4469 }
4470 c = idom(c);
4471 }
4472
4473 if (region == nullptr) {
4474 return false;
4475 }
4476
4477 inner = 1;
4478 } else
4479 #endif //ASSERT
4480 {
4481 // Is the shape of the loop that of a counted loop...
4482 Node* back_control = loop_exit_control(head, loop);
4483 if (back_control == nullptr) {
4484 return false;
4485 }
4486
4487 BoolTest::mask bt = BoolTest::illegal;
4488 float cl_prob = 0;
4489 Node* incr = nullptr;
4490 Node* limit = nullptr;
4491 Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
4492 if (cmp == nullptr || cmp->Opcode() != Op_CmpI) {
4493 return false;
4494 }
4495
4496 // With an extra phi for the candidate iv?
4497 // Or the region node is the loop head
4498 if (!incr->is_Phi() || incr->in(0) == head) {
4499 return false;
4500 }
4501
4502 PathFrequency pf(head, this);
4503 region = incr->in(0);
4504
4505 // Go over all paths for the extra phi's region and see if that
4506 // path is frequent enough and would match the expected iv shape
4507 // if the extra phi is removed
4508 inner = 0;
4509 for (uint i = 1; i < incr->req(); ++i) {
4510 Node* in = incr->in(i);
4511 Node* trunc1 = nullptr;
4512 Node* trunc2 = nullptr;
4513 const TypeInteger* iv_trunc_t = nullptr;
4514 Node* orig_in = in;
4515 if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) {
4516 continue;
4517 }
4518 assert(in->Opcode() == Op_AddI, "wrong increment code");
4519 Node* xphi = nullptr;
4520 Node* stride = loop_iv_stride(in, xphi);
4521
4522 if (stride == nullptr) {
4523 continue;
4524 }
4525
4526 PhiNode* phi = loop_iv_phi(xphi, nullptr, head);
4527 if (phi == nullptr ||
4528 (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
4529 (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
4530 return false;
4531 }
4532
4533 f = pf.to(region->in(i));
4534 if (f > 0.5) {
4535 inner = i;
4536 break;
4537 }
4538 }
4539
4540 if (inner == 0) {
4541 return false;
4542 }
4543
4544 exit_test = back_control->in(0)->as_If();
4545 }
4546
4547 if (idom(region)->is_Catch()) {
4548 return false;
4549 }
4550
4551 // Collect all control nodes that need to be cloned (shared_stmt in the diagram)
4552 Unique_Node_List wq;
4553 wq.push(head->in(LoopNode::LoopBackControl));
4554 for (uint i = 0; i < wq.size(); i++) {
4555 Node* c = wq.at(i);
4556 assert(get_loop(c) == loop, "not in the right loop?");
4557 if (c->is_Region()) {
4558 if (c != region) {
4559 for (uint j = 1; j < c->req(); ++j) {
4560 wq.push(c->in(j));
4561 }
4562 }
4563 } else {
4564 wq.push(c->in(0));
4565 }
4566 assert(!is_strict_dominator(c, region), "shouldn't go above region");
4567 }
4568
4569 Node* region_dom = idom(region);
4570
4571 // Can't do the transformation if this would cause a membar pair to
4572 // be split
4573 for (uint i = 0; i < wq.size(); i++) {
4574 Node* c = wq.at(i);
4575 if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) {
4576 assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair");
4577 if (!wq.member(c->as_MemBar()->leading_membar())) {
4578 return false;
4579 }
4580 }
4581 }
4582 C->print_method(PHASE_BEFORE_DUPLICATE_LOOP_BACKEDGE, 4, head);
4583
4584 // Collect data nodes that need to be clones as well
4585 int dd = dom_depth(head);
4586
4587 for (uint i = 0; i < loop->_body.size(); ++i) {
4588 Node* n = loop->_body.at(i);
4589 if (has_ctrl(n)) {
4590 Node* c = get_ctrl(n);
4591 if (wq.member(c)) {
4592 wq.push(n);
4593 }
4594 } else {
4595 set_idom(n, idom(n), dd);
4596 }
4597 }
4598
4599 // clone shared_stmt
4600 clone_loop_body(wq, old_new, nullptr);
4601
4602 Node* region_clone = old_new[region->_idx];
4603 region_clone->set_req(inner, C->top());
4604 set_idom(region, region->in(inner), dd);
4605
4606 // Prepare the outer loop
4607 Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]);
4608 register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl));
4609 _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head);
4610 set_idom(head, outer_head, dd);
4611
4612 fix_body_edges(wq, loop, old_new, dd, loop->_parent, true);
4613
4614 // Make one of the shared_stmt copies only reachable from stmt1, the
4615 // other only from stmt2..stmtn.
4616 Node* dom = nullptr;
4617 for (uint i = 1; i < region->req(); ++i) {
4618 if (i != inner) {
4619 _igvn.replace_input_of(region, i, C->top());
4620 }
4621 Node* in = region_clone->in(i);
4622 if (in->is_top()) {
4623 continue;
4624 }
4625 if (dom == nullptr) {
4626 dom = in;
4627 } else {
4628 dom = dom_lca(dom, in);
4629 }
4630 }
4631
4632 set_idom(region_clone, dom, dd);
4633
4634 // Set up the outer loop
4635 for (uint i = 0; i < head->outcnt(); i++) {
4636 Node* u = head->raw_out(i);
4637 if (u->is_Phi()) {
4638 Node* outer_phi = u->clone();
4639 outer_phi->set_req(0, outer_head);
4640 Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx];
4641 if (backedge == nullptr) {
4642 backedge = u->in(LoopNode::LoopBackControl);
4643 }
4644 outer_phi->set_req(LoopNode::LoopBackControl, backedge);
4645 register_new_node(outer_phi, outer_head);
4646 _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi);
4647 }
4648 }
4649
4650 // create control and data nodes for out of loop uses (including region2)
4651 Node_List worklist;
4652 uint new_counter = C->unique();
4653 fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist);
4654
4655 Node_List *split_if_set = nullptr;
4656 Node_List *split_bool_set = nullptr;
4657 Node_List *split_cex_set = nullptr;
4658 fix_data_uses(wq, loop, ControlAroundStripMined, loop->skip_strip_mined(), new_counter, old_new, worklist,
4659 split_if_set, split_bool_set, split_cex_set);
4660
4661 finish_clone_loop(split_if_set, split_bool_set, split_cex_set);
4662
4663 if (exit_test != nullptr) {
4664 float cnt = exit_test->_fcnt;
4665 if (cnt != COUNT_UNKNOWN) {
4666 exit_test->_fcnt = cnt * f;
4667 old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f);
4668 }
4669 }
4670
4671 #ifdef ASSERT
4672 if (StressDuplicateBackedge && head->is_CountedLoop()) {
4673 // The Template Assertion Predicates from the old counted loop are now at the new outer loop - clone them to
4674 // the inner counted loop and kill the old ones. We only need to do this with debug builds because
4675 // StressDuplicateBackedge is a devlop flag and false by default. Without StressDuplicateBackedge 'head' will be a
4676 // non-counted loop, and thus we have no Template Assertion Predicates above the old loop to move down.
4677 PredicateIterator predicate_iterator(outer_head->in(LoopNode::EntryControl));
4678 NodeInSingleLoopBody node_in_body(this, loop);
4679 MoveAssertionPredicatesVisitor move_assertion_predicates_visitor(head, node_in_body, this);
4680 predicate_iterator.for_each(move_assertion_predicates_visitor);
4681 }
4682 #endif // ASSERT
4683
4684 C->set_major_progress();
4685
4686 C->print_method(PHASE_AFTER_DUPLICATE_LOOP_BACKEDGE, 4, outer_head);
4687
4688 return true;
4689 }
4690
4691 // AutoVectorize the loop: replace scalar ops with vector ops.
4692 PhaseIdealLoop::AutoVectorizeStatus
4693 PhaseIdealLoop::auto_vectorize(IdealLoopTree* lpt, VSharedData &vshared) {
4694 // Counted loop only
4695 if (!lpt->is_counted()) {
4696 return AutoVectorizeStatus::Impossible;
4697 }
4698
4699 // Main-loop only
4700 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4701 if (!cl->is_main_loop()) {
4702 return AutoVectorizeStatus::Impossible;
4703 }
4704
4705 VLoop vloop(lpt, false);
4706 if (!vloop.check_preconditions()) {
4707 return AutoVectorizeStatus::TriedAndFailed;
4708 }
4709
4710 // Ensure the shared data is cleared before each use
4711 vshared.clear();
4712
4713 const VLoopAnalyzer vloop_analyzer(vloop, vshared);
4714 if (!vloop_analyzer.success()) {
4715 return AutoVectorizeStatus::TriedAndFailed;
4716 }
4717
4718 SuperWord sw(vloop_analyzer);
4719 if (!sw.transform_loop()) {
4720 return AutoVectorizeStatus::TriedAndFailed;
4721 }
4722
4723 return AutoVectorizeStatus::Success;
4724 }
4725
4726 // Just before insert_pre_post_loops, we can multiversion the loop:
4727 //
4728 // multiversion_if
4729 // | |
4730 // fast_loop slow_loop
4731 //
4732 // In the fast_loop we can make speculative assumptions, and put the
4733 // conditions into the multiversion_if. If the conditions hold at runtime,
4734 // we enter the fast_loop, if the conditions fail, we take the slow_loop
4735 // instead which does not make any of the speculative assumptions.
4736 //
4737 // Note: we only multiversion the loop if the loop does not have any
4738 // auto vectorization check Predicate. If we have that predicate,
4739 // then we can simply add the speculative assumption checks to
4740 // that Predicate. This means we do not need to duplicate the
4741 // loop - we have a smaller graph and save compile time. Should
4742 // the conditions ever fail, then we deopt / trap at the Predicate
4743 // and recompile without that Predicate. At that point we will
4744 // multiversion the loop, so that we can still have speculative
4745 // runtime checks.
4746 //
4747 // We perform the multiversioning when the loop is still in its single
4748 // iteration form, even before we insert pre and post loops. This makes
4749 // the cloning much simpler. However, this means that both the fast
4750 // and the slow loop have to be optimized independently (adding pre
4751 // and post loops, unrolling the main loop, auto-vectorize etc.). And
4752 // we may end up not needing any speculative assumptions in the fast_loop
4753 // and then rejecting the slow_loop by constant folding the multiversion_if.
4754 //
4755 // Therefore, we "delay" the optimization of the slow_loop until we add
4756 // at least one speculative assumption for the fast_loop. If we never
4757 // add such a speculative runtime check, the OpaqueMultiversioningNode
4758 // of the multiversion_if constant folds to true after loop opts, and the
4759 // multiversion_if folds away the "delayed" slow_loop. If we add any
4760 // speculative assumption, then we notify the OpaqueMultiversioningNode
4761 // with "notify_slow_loop_that_it_can_resume_optimizations".
4762 //
4763 // Note: new runtime checks can be added to the multiversion_if with
4764 // PhaseIdealLoop::create_new_if_for_multiversion
4765 void PhaseIdealLoop::maybe_multiversion_for_auto_vectorization_runtime_checks(IdealLoopTree* lpt, Node_List& old_new) {
4766 CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4767 LoopNode* outer_loop = cl->skip_strip_mined();
4768 Node* entry = outer_loop->in(LoopNode::EntryControl);
4769
4770 // Check we have multiversioning enabled, and are not already multiversioned.
4771 if (!LoopMultiversioning || cl->is_multiversion()) { return; }
4772
4773 // Check that we do not have a parse-predicate where we can add the runtime checks
4774 // during auto-vectorization.
4775 const Predicates predicates(entry);
4776 const PredicateBlock* predicate_block = predicates.auto_vectorization_check_block();
4777 if (predicate_block->has_parse_predicate()) { return; }
4778
4779 // Check node budget.
4780 uint estimate = lpt->est_loop_clone_sz(2);
4781 if (!may_require_nodes(estimate)) { return; }
4782
4783 do_multiversioning(lpt, old_new);
4784 }
4785
4786 void DataNodeGraph::clone_data_nodes(Node* new_ctrl) {
4787 for (uint i = 0; i < _data_nodes.size(); i++) {
4788 clone(_data_nodes[i], new_ctrl);
4789 }
4790 }
4791
4792 // Clone the given node and set it up properly. Set 'new_ctrl' as ctrl.
4793 void DataNodeGraph::clone(Node* node, Node* new_ctrl) {
4794 Node* clone = node->clone();
4795 _phase->igvn().register_new_node_with_optimizer(clone);
4796 _orig_to_new.put(node, clone);
4797 _phase->set_ctrl(clone, new_ctrl);
4798 if (node->is_CastII()) {
4799 clone->set_req(0, new_ctrl);
4800 }
4801 }
4802
4803 // Rewire the data inputs of all (unprocessed) cloned nodes, whose inputs are still pointing to the same inputs as their
4804 // corresponding orig nodes, to the newly cloned inputs to create a separate cloned graph.
4805 void DataNodeGraph::rewire_clones_to_cloned_inputs() {
4806 _orig_to_new.iterate_all([&](Node* node, Node* clone) {
4807 for (uint i = 1; i < node->req(); i++) {
4808 Node** cloned_input = _orig_to_new.get(node->in(i));
4809 if (cloned_input != nullptr) {
4810 // Input was also cloned -> rewire clone to the cloned input.
4811 _phase->igvn().replace_input_of(clone, i, *cloned_input);
4812 }
4813 }
4814 });
4815 }
4816
4817 // Clone all non-OpaqueLoop* nodes and apply the provided transformation strategy for OpaqueLoop* nodes.
4818 // Set 'new_ctrl' as ctrl for all cloned non-OpaqueLoop* nodes.
4819 void DataNodeGraph::clone_data_nodes_and_transform_opaque_loop_nodes(
4820 const TransformStrategyForOpaqueLoopNodes& transform_strategy,
4821 Node* new_ctrl) {
4822 for (uint i = 0; i < _data_nodes.size(); i++) {
4823 Node* data_node = _data_nodes[i];
4824 if (data_node->is_Opaque1()) {
4825 transform_opaque_node(transform_strategy, data_node);
4826 } else {
4827 clone(data_node, new_ctrl);
4828 }
4829 }
4830 }
4831
4832 void DataNodeGraph::transform_opaque_node(const TransformStrategyForOpaqueLoopNodes& transform_strategy, Node* node) {
4833 Node* transformed_node;
4834 if (node->is_OpaqueLoopInit()) {
4835 transformed_node = transform_strategy.transform_opaque_init(node->as_OpaqueLoopInit());
4836 } else {
4837 assert(node->is_OpaqueLoopStride(), "must be OpaqueLoopStrideNode");
4838 transformed_node = transform_strategy.transform_opaque_stride(node->as_OpaqueLoopStride());
4839 }
4840 // Add an orig->new mapping to correctly update the inputs of the copied graph in rewire_clones_to_cloned_inputs().
4841 _orig_to_new.put(node, transformed_node);
4842 }