1 /*
   2  * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/callnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/connode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/divnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "opto/mulnode.hpp"
  40 #include "opto/movenode.hpp"
  41 #include "opto/opaquenode.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/subnode.hpp"
  44 #include "opto/subtypenode.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 //=============================================================================
  49 //------------------------------split_thru_phi---------------------------------
  50 // Split Node 'n' through merge point if there is enough win.
Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
  // Clone 'n' once per predecessor of 'region', placing each clone above the
  // merge point, and join the results with a new Phi. Only worthwhile if
  // enough clones simplify: 'policy' is the number of simplified ("winning")
  // paths that must be strictly exceeded, otherwise nullptr is returned and
  // nothing is changed.
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
    // ConvI2L may have type information on it which is unsafe to push up
    // so disable this for now
    return nullptr;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && region->is_CountedLoop() &&
      n->in(1) == region->as_CountedLoop()->phi()) {
    return nullptr;
  }

  // Inline types should not be split through Phis because they cannot be merged
  // through Phi nodes but each value input needs to be merged individually.
  if (n->is_InlineType()) {
    return nullptr;
  }

  // Divisions whose divisor might be zero on the backedge must not be split
  // (the clone could float above the loop-exit check); see cannot_split_division.
  if (cannot_split_division(n, region)) {
    return nullptr;
  }

  // Count of predecessor paths on which the pushed-up clone simplified.
  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr();
  Node* phi;
  if (t_oop != nullptr && t_oop->is_known_instance_field()) {
    // Preserve the precise instance/alias information on the new Phi.
    int iid    = t_oop->instance_id();
    int index  = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new PhiNode(region, type, nullptr, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  // Watermark: nodes with _idx >= old_unique were created by this split and
  // have no prior placement (used in the placement loop below).
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = nullptr;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path?  Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req( 0, region->in(i) );
      for (uint j = 1; j < n->req(); j++) {
        Node* in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type* t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      // The clone folded to a constant on this path: use the constant node.
      wins++;
      x = ((PhaseGVN&)_igvn).makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node
      // otherwise it will be not updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      Node* y = x->Identity(&_igvn);
      if (y != x) {
        // The clone simplified to an already-existing node on this path.
        wins++;
        x = y;
      } else {
        y = _igvn.hash_find(x);
        if (y == nullptr) {
          y = similar_subtype_check(x, region->in(i));
        }
        if (y) {
          // Commoned up with an equivalent node (or a dominating subtype check).
          wins++;
          x = y;
        } else {
          // Else x is a new node we are keeping
          // We do not need register_new_node_with_optimizer
          // because set_type has already been called.
          _igvn._worklist.push(x);
        }
      }
    }
    if (x != the_clone && the_clone != nullptr)
      _igvn.remove_dead_node(the_clone);
    phi->set_req( i, x );
  }
  // Too few wins?
  if (wins <= policy) {
    _igvn.remove_dead_node(phi);
    return nullptr;
  }

  // Record Phi
  register_new_node( phi, region );

  // Fix up the control placement and loop membership of every Phi input.
  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node *x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use.  We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node *old_ctrl;
    IdealLoopTree *old_loop;

    if (x->is_Con()) {
      // Constant's control is always root.
      set_ctrl(x, C->root());
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) {     // Found a new, unplaced node?
      old_ctrl = nullptr;
      old_loop = nullptr;               // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree *new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree *use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == nullptr || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x);  // Collect body info
    }
  }

  return phi;
}
 227 
 228 // Subtype checks that carry profile data don't common so look for a replacement by following edges
 229 Node* PhaseIdealLoop::similar_subtype_check(const Node* x, Node* r_in) {
 230   if (x->is_SubTypeCheck()) {
 231     Node* in1 = x->in(1);
 232     for (DUIterator_Fast imax, i = in1->fast_outs(imax); i < imax; i++) {
 233       Node* u = in1->fast_out(i);
 234       if (u != x && u->is_SubTypeCheck() && u->in(1) == x->in(1) && u->in(2) == x->in(2)) {
 235         for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
 236           Node* bol = u->fast_out(j);
 237           for (DUIterator_Fast kmax, k = bol->fast_outs(kmax); k < kmax; k++) {
 238             Node* iff = bol->fast_out(k);
 239             // Only dominating subtype checks are interesting: otherwise we risk replacing a subtype check by another with
 240             // unrelated profile
 241             if (iff->is_If() && is_dominator(iff, r_in)) {
 242               return u;
 243             }
 244           }
 245         }
 246       }
 247     }
 248   }
 249   return nullptr;
 250 }
 251 
 252 // Return true if 'n' is a Div or Mod node (without zero check If node which was removed earlier) with a loop phi divisor
 253 // of a trip-counted (integer or long) loop with a backedge input that could be zero (include zero in its type range). In
 254 // this case, we cannot split the division to the backedge as it could freely float above the loop exit check resulting in
 255 // a division by zero. This situation is possible because the type of an increment node of an iv phi (trip-counter) could
 256 // include zero while the iv phi does not (see PhiNode::Value() for trip-counted loops where we improve types of iv phis).
 257 // We also need to check other loop phis as they could have been created in the same split-if pass when applying
 258 // PhaseIdealLoop::split_thru_phi() to split nodes through an iv phi.
 259 bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) const {
 260   const Type* zero;
 261   switch (n->Opcode()) {
 262     case Op_DivI:
 263     case Op_ModI:
 264       zero = TypeInt::ZERO;
 265       break;
 266     case Op_DivL:
 267     case Op_ModL:
 268       zero = TypeLong::ZERO;
 269       break;
 270     default:
 271       return false;
 272   }
 273 
 274   assert(n->in(0) == nullptr, "divisions with zero check should already have bailed out earlier in split-if");
 275   Node* divisor = n->in(2);
 276   return is_divisor_counted_loop_phi(divisor, region) &&
 277          loop_phi_backedge_type_contains_zero(divisor, zero);
 278 }
 279 
 280 bool PhaseIdealLoop::is_divisor_counted_loop_phi(const Node* divisor, const Node* loop) {
 281   return loop->is_BaseCountedLoop() && divisor->is_Phi() && divisor->in(0) == loop;
 282 }
 283 
 284 bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const {
 285     return _igvn.type(phi_divisor->in(LoopNode::LoopBackControl))->filter_speculative(zero) != Type::TOP;
 286 }
 287 
 288 //------------------------------dominated_by------------------------------------
 289 // Replace the dominated test with an obvious true or false.  Place it on the
 290 // IGVN worklist for later cleanup.  Move control-dependent data Nodes on the
 291 // live path up to the dominating control.
void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, bool exclude_loop_predicate) {
  // 'prevdom' is the surviving projection of a dominating, equivalent test;
  // 'iff' is the dominated test to be folded away. If 'flip' is set, the two
  // tests have opposite senses, so the projection to keep is inverted.
  // 'exclude_loop_predicate' suppresses the data-node rewiring for
  // predicate/range-check paths (see below).
  if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }

  // prevdom is the dominating projection of the dominating test.
  assert(iff->Opcode() == Op_If ||
         iff->Opcode() == Op_CountedLoopEnd ||
         iff->Opcode() == Op_LongCountedLoopEnd ||
         iff->Opcode() == Op_RangeCheck ||
         iff->Opcode() == Op_ParsePredicate,
        "Check this code when new subtype is added");

  // Opcode (Op_IfTrue/Op_IfFalse) of the projection of 'iff' that will remain
  // reachable once the test is folded.
  int pop = prevdom->Opcode();
  assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
  if (flip) {
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  set_ctrl(con, C->root()); // Constant gets a new use
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I dont have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop.  In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) return;

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->proj_out_or_null(pop == Op_IfTrue);

  // Loop predicates may have depending checks which should not
  // be skipped. For example, range check predicate has two checks
  // for lower and upper bounds.
  if (dp == nullptr)
    return;

  ProjNode* dp_proj  = dp->as_Proj();
  // 'unc_proj' is the projection opposite the live path.
  ProjNode* unc_proj = iff->proj_out(1 - dp_proj->_con)->as_Proj();
  if (exclude_loop_predicate &&
      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != nullptr ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != nullptr ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != nullptr)) {
    // If this is a range check (IfNode::is_range_check), do not
    // reorder because Compile::allow_range_check_smearing might have
    // changed the check.
    return; // Let IGVN transformation change control dependence.
  }

  IdealLoopTree* old_loop = get_loop(dp);

  for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
    Node* cd = dp->fast_out(i); // Control-dependent node
    // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
    if (cd->depends_only_on_test() && _igvn.no_dependent_zero_check(cd)) {
      assert(cd->in(0) == dp, "");
      _igvn.replace_input_of(cd, 0, prevdom);
      set_early_ctrl(cd, false);
      // Keep loop bodies consistent if 'cd' migrated to another loop.
      IdealLoopTree* new_loop = get_loop(get_ctrl(cd));
      if (old_loop != new_loop) {
        if (!old_loop->_child) {
          old_loop->_body.yank(cd);
        }
        if (!new_loop->_child) {
          new_loop->_body.push(cd);
        }
      }
      // Rewiring removed a use of 'dp'; back up the DU iterator.
      --i;
      --imax;
    }
  }
}
 369 
 370 //------------------------------has_local_phi_input----------------------------
 371 // Return TRUE if 'n' has Phi inputs from its local block and no other
 372 // block-local inputs (all non-local-phi inputs come from earlier blocks)
 373 Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
 374   Node *n_ctrl = get_ctrl(n);
 375   // See if some inputs come from a Phi in this block, or from before
 376   // this block.
 377   uint i;
 378   for( i = 1; i < n->req(); i++ ) {
 379     Node *phi = n->in(i);
 380     if( phi->is_Phi() && phi->in(0) == n_ctrl )
 381       break;
 382   }
 383   if( i >= n->req() )
 384     return nullptr;                // No Phi inputs; nowhere to clone thru
 385 
 386   // Check for inputs created between 'n' and the Phi input.  These
 387   // must split as well; they have already been given the chance
 388   // (courtesy of a post-order visit) and since they did not we must
 389   // recover the 'cost' of splitting them by being very profitable
 390   // when splitting 'n'.  Since this is unlikely we simply give up.
 391   for( i = 1; i < n->req(); i++ ) {
 392     Node *m = n->in(i);
 393     if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
 394       // We allow the special case of AddP's with no local inputs.
 395       // This allows us to split-up address expressions.
 396       if (m->is_AddP() &&
 397           get_ctrl(m->in(AddPNode::Base)) != n_ctrl &&
 398           get_ctrl(m->in(AddPNode::Address)) != n_ctrl &&
 399           get_ctrl(m->in(AddPNode::Offset)) != n_ctrl) {
 400         // Move the AddP up to the dominating point. That's fine because control of m's inputs
 401         // must dominate get_ctrl(m) == n_ctrl and we just checked that the input controls are != n_ctrl.
 402         Node* c = find_non_split_ctrl(idom(n_ctrl));
 403         if (c->is_OuterStripMinedLoop()) {
 404           c->as_Loop()->verify_strip_mined(1);
 405           c = c->in(LoopNode::EntryControl);
 406         }
 407         set_ctrl_and_loop(m, c);
 408         continue;
 409       }
 410       return nullptr;
 411     }
 412     assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control");
 413   }
 414 
 415   return n_ctrl;
 416 }
 417 
 418 // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoopTree* n_loop, Node* n_ctrl, BasicType bt) {
  // Rewrites ((V + I) << scale) into ((V << scale) + (I << scale)) when I and
  // scale are loop-invariant, so the invariant shift can be hoisted out of
  // the loop. Returns the replacement node, or nullptr if no rewrite applies.
  assert(bt == T_INT || bt == T_LONG, "only for integers");
  int n_op = n->Opcode();

  if (n_op == Op_LShift(bt)) {
    // Scale is loop invariant
    Node* scale = n->in(2);
    Node* scale_ctrl = get_ctrl(scale);
    IdealLoopTree* scale_loop = get_loop(scale_ctrl);
    if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) {
      return nullptr;
    }
    const TypeInt* scale_t = scale->bottom_type()->isa_int();
    if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) {
      return nullptr;              // Dont bother with byte/short masking
    }
    // Add must vary with loop (else shift would be loop-invariant)
    Node* add = n->in(1);
    Node* add_ctrl = get_ctrl(add);
    IdealLoopTree* add_loop = get_loop(add_ctrl);
    if (n_loop != add_loop) {
      return nullptr;  // happens w/ evil ZKM loops
    }

    // Convert I-V into I+ (0-V); same for V-I
    if (add->Opcode() == Op_Sub(bt) &&
        _igvn.type(add->in(1)) != TypeInteger::zero(bt)) {
      assert(add->Opcode() == Op_SubI || add->Opcode() == Op_SubL, "");
      Node* zero = _igvn.integercon(0, bt);
      set_ctrl(zero, C->root());
      Node* neg = SubNode::make(zero, add->in(2), bt);
      register_new_node(neg, get_ctrl(add->in(2)));
      add = AddNode::make(add->in(1), neg, bt);
      register_new_node(add, add_ctrl);
    }
    if (add->Opcode() != Op_Add(bt)) return nullptr;
    assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, "");
    // See if one add input is loop invariant
    Node* add_var = add->in(1);
    Node* add_var_ctrl = get_ctrl(add_var);
    IdealLoopTree* add_var_loop = get_loop(add_var_ctrl);
    Node* add_invar = add->in(2);
    Node* add_invar_ctrl = get_ctrl(add_invar);
    IdealLoopTree* add_invar_loop = get_loop(add_invar_ctrl);
    if (add_invar_loop == n_loop) {
      // Swap to find the invariant part
      add_invar = add_var;
      add_invar_ctrl = add_var_ctrl;
      add_invar_loop = add_var_loop;
      add_var = add->in(2);
    } else if (add_var_loop != n_loop) { // Else neither input is loop invariant
      return nullptr;
    }
    if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) {
      return nullptr;              // No invariant part of the add?
    }

    // Yes!  Reshape address expression!
    Node* inv_scale = LShiftNode::make(add_invar, scale, bt);
    // Place the invariant shift at the deeper of its two input controls so it
    // is dominated by both inputs.
    Node* inv_scale_ctrl =
            dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
            add_invar_ctrl : scale_ctrl;
    register_new_node(inv_scale, inv_scale_ctrl);
    Node* var_scale = LShiftNode::make(add_var, scale, bt);
    register_new_node(var_scale, n_ctrl);
    Node* var_add = AddNode::make(var_scale, inv_scale, bt);
    register_new_node(var_add, n_ctrl);
    _igvn.replace_node(n, var_add);
    return var_add;
  }
  return nullptr;
}
 491 
 492 //------------------------------remix_address_expressions----------------------
 493 // Rework addressing expressions to get the most loop-invariant stuff
 494 // moved out.  We'd like to do all associative operators, but it's especially
 495 // important (common) to do address expressions.
Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
  // Returns the replacement for 'n' if a rewrite was applied, or nullptr.
  if (!has_ctrl(n))  return nullptr;
  Node* n_ctrl = get_ctrl(n);
  IdealLoopTree* n_loop = get_loop(n_ctrl);

  // See if 'n' mixes loop-varying and loop-invariant inputs and
  // itself is loop-varying.

  // Only interested in binary ops (and AddP)
  if (n->req() < 3 || n->req() > 4) return nullptr;

  // For binary ops (req == 3) the third slot duplicates input 2, so the
  // checks below degenerate to two inputs; for AddP it is the Offset input.
  Node* n1_ctrl = get_ctrl(n->in(                    1));
  Node* n2_ctrl = get_ctrl(n->in(                    2));
  Node* n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
  IdealLoopTree* n1_loop = get_loop(n1_ctrl);
  IdealLoopTree* n2_loop = get_loop(n2_ctrl);
  IdealLoopTree* n3_loop = get_loop(n3_ctrl);

  // Does one of my inputs spin in a tighter loop than self?
  if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) ||
      (n_loop->is_member(n2_loop) && n_loop != n2_loop) ||
      (n_loop->is_member(n3_loop) && n_loop != n3_loop)) {
    return nullptr;                // Leave well enough alone
  }

  // Is at least one of my inputs loop-invariant?
  if (n1_loop == n_loop &&
      n2_loop == n_loop &&
      n3_loop == n_loop) {
    return nullptr;                // No loop-invariant inputs
  }

  // Try the shift-distribution rewrite for both integer widths.
  Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT);
  if (res != nullptr) {
    return res;
  }
  res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG);
  if (res != nullptr) {
    return res;
  }

  int n_op = n->Opcode();
  // Replace (I+V) with (V+I)
  if (n_op == Op_AddI ||
      n_op == Op_AddL ||
      n_op == Op_AddF ||
      n_op == Op_AddD ||
      n_op == Op_MulI ||
      n_op == Op_MulL ||
      n_op == Op_MulF ||
      n_op == Op_MulD) {
    if (n2_loop == n_loop) {
      assert(n1_loop != n_loop, "");
      // Canonicalize: put the loop-variant operand first.
      n->swap_edges(1, 2);
    }
  }

  // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
  // but not if I2 is a constant.
  if (n_op == Op_AddP) {
    if (n2_loop == n_loop && n3_loop != n_loop) {
      if (n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con()) {
        Node* n22_ctrl = get_ctrl(n->in(2)->in(2));
        Node* n23_ctrl = get_ctrl(n->in(2)->in(3));
        IdealLoopTree* n22loop = get_loop(n22_ctrl);
        IdealLoopTree* n23_loop = get_loop(n23_ctrl);
        if (n22loop != n_loop && n22loop->is_member(n_loop) &&
            n23_loop == n_loop) {
          Node* add1 = new AddPNode(n->in(1), n->in(2)->in(2), n->in(3));
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = new AddPNode(n->in(1), add1, n->in(2)->in(3));
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }

    // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
    if (n2_loop != n_loop && n3_loop == n_loop) {
      if (n->in(3)->Opcode() == Op_AddX) {
        Node* V = n->in(3)->in(1);
        Node* I = n->in(3)->in(2);
        // Make V the loop-variant summand and I the invariant candidate.
        if (is_member(n_loop,get_ctrl(V))) {
        } else {
          Node *tmp = V; V = I; I = tmp;
        }
        if (!is_member(n_loop,get_ctrl(I))) {
          Node* add1 = new AddPNode(n->in(1), n->in(2), I);
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
          Node* add2 = new AddPNode(n->in(1), add1, V);
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }
  }

  return nullptr;
}
 599 
 600 // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1]))
 601 Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
 602   assert(n->Opcode() == Op_AddI, "sanity");
 603   Node * nn = nullptr;
 604   Node * in1 = n->in(1);
 605   Node * in2 = n->in(2);
 606   if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
 607     IdealLoopTree* loop_n = get_loop(get_ctrl(n));
 608     if (loop_n->is_counted() &&
 609         loop_n->_head->as_Loop()->is_valid_counted_loop(T_INT) &&
 610         Matcher::match_rule_supported(Op_MulAddVS2VI) &&
 611         Matcher::match_rule_supported(Op_MulAddS2I)) {
 612       Node* mul_in1 = in1->in(1);
 613       Node* mul_in2 = in1->in(2);
 614       Node* mul_in3 = in2->in(1);
 615       Node* mul_in4 = in2->in(2);
 616       if (mul_in1->Opcode() == Op_LoadS &&
 617           mul_in2->Opcode() == Op_LoadS &&
 618           mul_in3->Opcode() == Op_LoadS &&
 619           mul_in4->Opcode() == Op_LoadS) {
 620         IdealLoopTree* loop1 = get_loop(get_ctrl(mul_in1));
 621         IdealLoopTree* loop2 = get_loop(get_ctrl(mul_in2));
 622         IdealLoopTree* loop3 = get_loop(get_ctrl(mul_in3));
 623         IdealLoopTree* loop4 = get_loop(get_ctrl(mul_in4));
 624         IdealLoopTree* loop5 = get_loop(get_ctrl(in1));
 625         IdealLoopTree* loop6 = get_loop(get_ctrl(in2));
 626         // All nodes should be in the same counted loop.
 627         if (loop_n == loop1 && loop_n == loop2 && loop_n == loop3 &&
 628             loop_n == loop4 && loop_n == loop5 && loop_n == loop6) {
 629           Node* adr1 = mul_in1->in(MemNode::Address);
 630           Node* adr2 = mul_in2->in(MemNode::Address);
 631           Node* adr3 = mul_in3->in(MemNode::Address);
 632           Node* adr4 = mul_in4->in(MemNode::Address);
 633           if (adr1->is_AddP() && adr2->is_AddP() && adr3->is_AddP() && adr4->is_AddP()) {
 634             if ((adr1->in(AddPNode::Base) == adr3->in(AddPNode::Base)) &&
 635                 (adr2->in(AddPNode::Base) == adr4->in(AddPNode::Base))) {
 636               nn = new MulAddS2INode(mul_in1, mul_in2, mul_in3, mul_in4);
 637               register_new_node(nn, get_ctrl(n));
 638               _igvn.replace_node(n, nn);
 639               return nn;
 640             } else if ((adr1->in(AddPNode::Base) == adr4->in(AddPNode::Base)) &&
 641                        (adr2->in(AddPNode::Base) == adr3->in(AddPNode::Base))) {
 642               nn = new MulAddS2INode(mul_in1, mul_in2, mul_in4, mul_in3);
 643               register_new_node(nn, get_ctrl(n));
 644               _igvn.replace_node(n, nn);
 645               return nn;
 646             }
 647           }
 648         }
 649       }
 650     }
 651   }
 652   return nn;
 653 }
 654 
 655 //------------------------------conditional_move-------------------------------
 656 // Attempt to replace a Phi with a conditional move.  We have some pretty
 657 // strict profitability requirements.  All Phis at the merge point must
 658 // be converted, so we can remove the control flow.  We need to limit the
 659 // number of c-moves to a small handful.  All code that was in the side-arms
 660 // of the CFG diamond is now speculatively executed.  This code has to be
 661 // "cheap enough".  We are pretty much limited to CFG diamonds that merge
 662 // 1 or 2 items with a total of 1 or 2 ops executed speculatively.
 663 Node *PhaseIdealLoop::conditional_move( Node *region ) {
 664 
 665   assert(region->is_Region(), "sanity check");
 666   if (region->req() != 3) return nullptr;
 667 
 668   // Check for CFG diamond
 669   Node *lp = region->in(1);
 670   Node *rp = region->in(2);
 671   if (!lp || !rp) return nullptr;
 672   Node *lp_c = lp->in(0);
 673   if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr;
 674   IfNode *iff = lp_c->as_If();
 675 
 676   // Check for ops pinned in an arm of the diamond.
 677   // Can't remove the control flow in this case
 678   if (lp->outcnt() > 1) return nullptr;
 679   if (rp->outcnt() > 1) return nullptr;
 680 
 681   IdealLoopTree* r_loop = get_loop(region);
 682   assert(r_loop == get_loop(iff), "sanity");
 683   // Always convert to CMOVE if all results are used only outside this loop.
 684   bool used_inside_loop = (r_loop == _ltree_root);
 685 
 686   // Check profitability
 687   int cost = 0;
 688   int phis = 0;
 689   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
 690     Node *out = region->fast_out(i);
 691     if (!out->is_Phi()) continue; // Ignore other control edges, etc
 692     phis++;
 693     PhiNode* phi = out->as_Phi();
 694     BasicType bt = phi->type()->basic_type();
 695     switch (bt) {
 696     case T_DOUBLE:
 697     case T_FLOAT:
 698       if (C->use_cmove()) {
 699         continue; //TODO: maybe we want to add some cost
 700       }
 701       cost += Matcher::float_cmove_cost(); // Could be very expensive
 702       break;
 703     case T_LONG: {
 704       cost += Matcher::long_cmove_cost(); // May encodes as 2 CMOV's
 705     }
 706     case T_INT:                 // These all CMOV fine
 707     case T_ADDRESS: {           // (RawPtr)
 708       cost++;
 709       break;
 710     }
 711     case T_NARROWOOP: // Fall through
 712     case T_OBJECT: {            // Base oops are OK, but not derived oops
 713       const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
 714       // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
 715       // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
 716       // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
 717       // have a Phi for the base here that we convert to a CMOVE all is well
 718       // and good.  But if the base is dead, we'll not make a CMOVE.  Later
 719       // the allocator will have to produce a base by creating a CMOVE of the
 720       // relevant bases.  This puts the allocator in the business of
 721       // manufacturing expensive instructions, generally a bad plan.
 722       // Just Say No to Conditionally-Moved Derived Pointers.
 723       if (tp && tp->offset() != 0)
 724         return nullptr;
 725       cost++;
 726       break;
 727     }
 728     default:
 729       return nullptr;              // In particular, can't do memory or I/O
 730     }
 731     // Add in cost any speculative ops
 732     for (uint j = 1; j < region->req(); j++) {
 733       Node *proj = region->in(j);
 734       Node *inp = phi->in(j);
 735       if (inp->isa_InlineType()) {
 736         // TODO 8302217 This prevents PhiNode::push_inline_types_through
 737         return nullptr;
 738       }
 739       if (get_ctrl(inp) == proj) { // Found local op
 740         cost++;
 741         // Check for a chain of dependent ops; these will all become
 742         // speculative in a CMOV.
 743         for (uint k = 1; k < inp->req(); k++)
 744           if (get_ctrl(inp->in(k)) == proj)
 745             cost += ConditionalMoveLimit; // Too much speculative goo
 746       }
 747     }
 748     // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
 749     // This will likely Split-If, a higher-payoff operation.
 750     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
 751       Node* use = phi->fast_out(k);
 752       if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
 753         cost += ConditionalMoveLimit;
 754       // Is there a use inside the loop?
 755       // Note: check only basic types since CMoveP is pinned.
 756       if (!used_inside_loop && is_java_primitive(bt)) {
 757         IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
 758         if (r_loop == u_loop || r_loop->is_member(u_loop)) {
 759           used_inside_loop = true;
 760         }
 761       }
 762     }
 763   }//for
 764   Node* bol = iff->in(1);
 765   if (bol->Opcode() == Op_Opaque4) {
 766     return nullptr; // Ignore loop predicate checks (the Opaque4 ensures they will go away)
 767   }
 768   assert(bol->Opcode() == Op_Bool, "Unexpected node");
 769   int cmp_op = bol->in(1)->Opcode();
 770   if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode
 771     return nullptr;
 772   }
 773   // It is expensive to generate flags from a float compare.
 774   // Avoid duplicated float compare.
 775   if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;
 776 
 777   float infrequent_prob = PROB_UNLIKELY_MAG(3);
 778   // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
 779   if (used_inside_loop) {
 780     if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo
 781 
 782     // BlockLayoutByFrequency optimization moves infrequent branch
 783     // from hot path. No point in CMOV'ing in such case (110 is used
 784     // instead of 100 to take into account not exactness of float value).
 785     if (BlockLayoutByFrequency) {
 786       infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
 787     }
 788   }
 789   // Check for highly predictable branch.  No point in CMOV'ing if
 790   // we are going to predict accurately all the time.
 791   if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
 792     //keep going
 793   } else if (iff->_prob < infrequent_prob ||
 794       iff->_prob > (1.0f - infrequent_prob))
 795     return nullptr;
 796 
 797   // --------------
 798   // Now replace all Phis with CMOV's
 799   Node *cmov_ctrl = iff->in(0);
 800   uint flip = (lp->Opcode() == Op_IfTrue);
 801   Node_List wq;
 802   while (1) {
 803     PhiNode* phi = nullptr;
 804     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
 805       Node *out = region->fast_out(i);
 806       if (out->is_Phi()) {
 807         phi = out->as_Phi();
 808         break;
 809       }
 810     }
 811     if (phi == nullptr || _igvn.type(phi) == Type::TOP) {
 812       break;
 813     }
 814     if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
 815     // Move speculative ops
 816     wq.push(phi);
 817     while (wq.size() > 0) {
 818       Node *n = wq.pop();
 819       for (uint j = 1; j < n->req(); j++) {
 820         Node* m = n->in(j);
 821         if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) {
 822 #ifndef PRODUCT
 823           if (PrintOpto && VerifyLoopOptimizations) {
 824             tty->print("  speculate: ");
 825             m->dump();
 826           }
 827 #endif
 828           set_ctrl(m, cmov_ctrl);
 829           wq.push(m);
 830         }
 831       }
 832     }
 833     Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
 834     register_new_node( cmov, cmov_ctrl );
 835     _igvn.replace_node( phi, cmov );
 836 #ifndef PRODUCT
 837     if (TraceLoopOpts) {
 838       tty->print("CMOV  ");
 839       r_loop->dump_head();
 840       if (Verbose) {
 841         bol->in(1)->dump(1);
 842         cmov->dump(1);
 843       }
 844     }
 845     DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
 846 #endif
 847   }
 848 
 849   // The useless CFG diamond will fold up later; see the optimization in
 850   // RegionNode::Ideal.
 851   _igvn._worklist.push(region);
 852 
 853   return iff->in(1);
 854 }
 855 
 856 static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
 857   for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
 858     Node* u = m->fast_out(i);
 859     if (u->is_CFG()) {
 860       if (u->is_NeverBranch()) {
 861         u = u->as_NeverBranch()->proj_out(0);
 862         enqueue_cfg_uses(u, wq);
 863       } else {
 864         wq.push(u);
 865       }
 866     }
 867   }
 868 }
 869 
// Try moving a store out of a loop, right before the loop
// Returns the store if it was moved, nullptr otherwise.
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
  // Store has to be first in the loop body
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  if (n->is_Store() && n_loop != _ltree_root &&
      n_loop->is_loop() && n_loop->_head->is_Loop() &&
      n->in(0) != nullptr) {
    Node* address = n->in(MemNode::Address);
    Node* value = n->in(MemNode::ValueIn);
    Node* mem = n->in(MemNode::Memory);
    IdealLoopTree* address_loop = get_loop(get_ctrl(address));
    IdealLoopTree* value_loop = get_loop(get_ctrl(value));

    // - address and value must be loop invariant
    // - memory must be a memory Phi for the loop
    // - Store must be the only store on this memory slice in the
    // loop: if there's another store following this one then value
    // written at iteration i by the second store could be overwritten
    // at iteration i+n by the first store: it's not safe to move the
    // first store out of the loop
    // - nothing must observe the memory Phi: it guarantees no read
    // before the store, we are also guaranteed the store post
    // dominates the loop head (ignoring a possible early
    // exit). Otherwise there would be extra Phi involved between the
    // loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    // (such an exit most of the time would be an extra use of the
    // memory Phi but sometimes is a bottom memory Phi that takes the
    // store as input).

    if (!n_loop->is_member(address_loop) &&
        !n_loop->is_member(value_loop) &&
        mem->is_Phi() && mem->in(0) == n_loop->_head &&
        mem->outcnt() == 1 &&
        mem->in(LoopNode::LoopBackControl) == n) {

      assert(n_loop->_tail != nullptr, "need a tail");
      assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

      // Verify that there's no early exit of the loop before the store.
      bool ctrl_ok = false;
      {
        // Follow control from loop head until n, we exit the loop or
        // we reach the tail
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(n_loop->_head);

        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          if (m == n->in(0)) {
            // Reached the store's control: this path is fine, don't
            // look past the store.
            ctrl_ok = true;
            continue;
          }
          assert(!has_ctrl(m), "should be CFG");
          if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
            // Left the loop (early exit) or reached the tail before
            // reaching the store: not safe to move it.
            ctrl_ok = false;
            break;
          }
          enqueue_cfg_uses(m, wq);
          if (wq.size() > 10) {
            // Give up on large control flow graphs to keep this check cheap
            ctrl_ok = false;
            break;
          }
        }
      }
      if (ctrl_ok) {
        // move the Store
        // Make the memory Phi refer to itself on the backedge so it no
        // longer uses the store and can be disconnected below.
        _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
        // Pin the store right before the loop (skipping over an
        // OuterStripMinedLoop head if present).
        _igvn.replace_input_of(n, 0, n_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl));
        _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
        // Disconnect the phi now. An empty phi can confuse other
        // optimizations in this pass of loop opts.
        _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
        n_loop->_body.yank(mem);

        set_ctrl_and_loop(n, n->in(0));

        return n;
      }
    }
  }
  return nullptr;
}
 954 
// Try moving a store out of a loop, right after the loop
void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
  if (n->is_Store() && n->in(0) != nullptr) {
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    // Store must be in a loop
    if (n_loop != _ltree_root && !n_loop->_irreducible) {
      Node* address = n->in(MemNode::Address);
      Node* value = n->in(MemNode::ValueIn);
      IdealLoopTree* address_loop = get_loop(get_ctrl(address));
      // address must be loop invariant
      if (!n_loop->is_member(address_loop)) {
        // Store must be last on this memory slice in the loop and
        // nothing in the loop must observe it
        Node* phi = nullptr;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (has_ctrl(u)) { // control use?
            IdealLoopTree *u_loop = get_loop(get_ctrl(u));
            if (!n_loop->is_member(u_loop)) {
              continue; // uses outside the loop don't prevent the move
            }
            if (u->is_Phi() && u->in(0) == n_loop->_head) {
              assert(_igvn.type(u) == Type::MEMORY, "bad phi");
              // multiple phis on the same slice are possible
              if (phi != nullptr) {
                return;
              }
              phi = u;
              continue;
            }
          }
          return; // some other use inside the loop observes the store
        }
        if (phi != nullptr) {
          // Nothing in the loop before the store (next iteration)
          // must observe the stored value
          bool mem_ok = true;
          {
            ResourceMark rm;
            Unique_Node_List wq;
            wq.push(phi);
            // Walk memory uses transitively from the loop Phi: only
            // other stores and phis (eventually reaching n itself)
            // may consume this memory state; a load would observe it.
            for (uint next = 0; next < wq.size() && mem_ok; ++next) {
              Node *m = wq.at(next);
              for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
                Node* u = m->fast_out(i);
                if (u->is_Store() || u->is_Phi()) {
                  if (u != n) {
                    wq.push(u);
                    mem_ok = (wq.size() <= 10); // bound the walk to keep it cheap
                  }
                } else {
                  mem_ok = false;
                  break;
                }
              }
            }
          }
          if (mem_ok) {
            // Move the store out of the loop if the LCA of all
            // users (except for the phi) is outside the loop.
            // 'hook' temporarily stands in for n as the phi's input
            // while the late control of n is computed.
            Node* hook = new Node(1);
            hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
            _igvn.rehash_node_delayed(phi);
            int count = phi->replace_edge(n, hook, &_igvn);
            assert(count > 0, "inconsistent phi");

            // Compute latest point this store can go
            Node* lca = get_late_ctrl(n, get_ctrl(n));
            if (lca->is_OuterStripMinedLoop()) {
              lca = lca->in(LoopNode::EntryControl);
            }
            if (n_loop->is_member(get_loop(lca))) {
              // LCA is in the loop - bail out
              _igvn.replace_node(hook, n); // restore the phi's input
              return;
            }
#ifdef ASSERT
            if (n_loop->_head->is_Loop() && n_loop->_head->as_Loop()->is_strip_mined()) {
              assert(n_loop->_head->Opcode() == Op_CountedLoop, "outer loop is a strip mined");
              n_loop->_head->as_Loop()->verify_strip_mined(1);
              Node* outer = n_loop->_head->as_CountedLoop()->outer_loop();
              IdealLoopTree* outer_loop = get_loop(outer);
              assert(n_loop->_parent == outer_loop, "broken loop tree");
              assert(get_loop(lca) == outer_loop, "safepoint in outer loop consume all memory state");
            }
#endif
            lca = place_outside_loop(lca, n_loop);
            assert(!n_loop->is_member(get_loop(lca)), "control must not be back in the loop");
            assert(get_loop(lca)->_nest < n_loop->_nest || lca->in(0)->is_NeverBranch(), "must not be moved into inner loop");

            // Move store out of the loop
            _igvn.replace_node(hook, n->in(MemNode::Memory));
            _igvn.replace_input_of(n, 0, lca);
            set_ctrl_and_loop(n, lca);

            // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts..
            if (phi->in(LoopNode::LoopBackControl) == phi) {
              _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
              n_loop->_body.yank(phi);
            }
          }
        }
      }
    }
  }
}
1063 
// If UseArrayMarkWordCheck is enabled, we can't use immutable memory for the flat array check
// because we are loading the mark word which is mutable. Although the bits we are interested in
// are immutable (we check for markWord::unlocked_value), we need to use raw memory to not break
// anti dependency analysis. Below code will attempt to still move flat array checks out of loops,
// mainly to enable loop unswitching.
void PhaseIdealLoop::move_flat_array_check_out_of_loop(Node* n) {
  // Skip checks for more than one array
  if (n->req() > 3) {
    return;
  }
  Node* mem = n->in(FlatArrayCheckNode::Memory);
  Node* array = n->in(FlatArrayCheckNode::ArrayOrKlass)->uncast();
  IdealLoopTree* check_loop = get_loop(get_ctrl(n));
  IdealLoopTree* ary_loop = get_loop(get_ctrl(array));

  // Check if array is loop invariant
  if (!check_loop->is_member(ary_loop)) {
    // Walk up memory graph from the check until we leave the loop
    VectorSet wq; // visited set, guards against cycles in the memory graph
    wq.set(mem->_idx);
    while (check_loop->is_member(get_loop(ctrl_or_self(mem)))) {
      // Step to the previous memory state, depending on the node kind
      if (mem->is_Phi()) {
        mem = mem->in(1);
      } else if (mem->is_MergeMem()) {
        mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
      } else if (mem->is_Proj()) {
        mem = mem->in(0);
      } else if (mem->is_MemBar() || mem->is_SafePoint()) {
        mem = mem->in(TypeFunc::Memory);
      } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
        mem = mem->in(MemNode::Memory);
      } else {
#ifdef ASSERT
        mem->dump();
#endif
        ShouldNotReachHere();
      }
      if (wq.test_set(mem->_idx)) {
        return; // already visited: give up rather than loop forever
      }
    }
    // Replace memory input and re-compute ctrl to move the check out of the loop
    _igvn.replace_input_of(n, 1, mem);
    set_ctrl_and_loop(n, get_early_ctrl(n));
    Node* bol = n->unique_out();
    set_ctrl_and_loop(bol, get_early_ctrl(bol));
  }
}
1112 
//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function.  Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if (n_op == Op_MergeMem) {
    return n;
  }
  if (n->is_Proj()) {
    return n;
  }

  // Flat array checks load the (mutable) mark word: try to hoist them out
  // of loops instead of cloning (mainly to enable loop unswitching).
  if (UseArrayMarkWordCheck && n->isa_FlatArrayCheck()) {
    move_flat_array_check_out_of_loop(n);
    return n;
  }

  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if (n->is_Cmp()) {
    return n;
  }
  // Attempt to use a conditional move instead of a phi/branch
  if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
    Node *cmov = conditional_move( n );
    if (cmov) {
      return cmov;
    }
  }
  if (n->is_CFG() || n->is_LoadStore()) {
    return n;
  }
  if (n->is_Opaque1()) { // Opaque nodes cannot be mod'd
    if (!C->major_progress()) {   // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    }
    return n;
  }

  if (n->is_Con()) {
    return n;   // No cloning for Con nodes
  }

  Node *n_ctrl = get_ctrl(n);
  if (!n_ctrl) {
    return n;       // Dead node
  }

  // A store at the head of a loop body may be movable to right before the loop.
  Node* res = try_move_store_before_loop(n, n_ctrl);
  if (res != nullptr) {
    return n;
  }

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  if (n_op == Op_AddI) {
    // AddI of two multiplies may be turned into a MulAdd
    Node *nn = convert_add_to_muladd( n );
    if ( nn ) return nn;
  }

  if (n->is_ConstraintCast()) {
    Node* dom_cast = n->as_ConstraintCast()->dominating_cast(&_igvn, this);
    // ConstraintCastNode::dominating_cast() uses node control input to determine domination.
    // Node control inputs don't necessarily agree with loop control info (due to
    // transformations happened in between), thus additional dominance check is needed
    // to keep loop info valid.
    if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) {
      _igvn.replace_node(n, dom_cast);
      return dom_cast;
    }
  }

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;

  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if (((n_blk->is_CountedLoop() || (n_blk->is_Loop() && n_blk->as_Loop()->is_loop_nest_inner_loop())) && n->Opcode() == Op_AddI) ||
      (n_blk->is_LongCountedLoop() && n->Opcode() == Op_AddL)) {
    return n;
  }
  // Pushing a shift through the iv Phi can get in the way of addressing optimizations or range check elimination
  if (n_blk->is_BaseCountedLoop() && n->Opcode() == Op_LShift(n_blk->as_BaseCountedLoop()->bt()) &&
      n->in(1) == n_blk->as_BaseCountedLoop()->phi()) {
    return n;
  }

  // Check for having no control input; not pinned.  Allow
  // dominating control.
  if (n->in(0)) {
    Node *dom = idom(n_blk);
    if (dom_lca(n->in(0), dom) != n->in(0)) {
      return n; // pinned below the merge point: cannot clone up
    }
  }
  // Policy: when is it profitable.  You must get more wins than
  // policy before it is considered profitable.  Policy is usually 0,
  // so 1 win is considered profitable.  Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through it's phi until a later loop optimization
  if (n_blk->is_BaseCountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  if (must_throttle_split_if()) return n;

  // Split 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_blk, policy );
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  _igvn.replace_node( n, phi );
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  return phi;
}
1244 
1245 static bool merge_point_too_heavy(Compile* C, Node* region) {
1246   // Bail out if the region and its phis have too many users.
1247   int weight = 0;
1248   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1249     weight += region->fast_out(i)->outcnt();
1250   }
1251   int nodes_left = C->max_node_limit() - C->live_nodes();
1252   if (weight * 8 > nodes_left) {
1253     if (PrintOpto) {
1254       tty->print_cr("*** Split-if bails out:  %d nodes, region weight %d", C->unique(), weight);
1255     }
1256     return true;
1257   } else {
1258     return false;
1259   }
1260 }
1261 
1262 static bool merge_point_safe(Node* region) {
1263   // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
1264   // having a PhiNode input. This sidesteps the dangerous case where the split
1265   // ConvI2LNode may become TOP if the input Value() does not
1266   // overlap the ConvI2L range, leaving a node which may not dominate its
1267   // uses.
1268   // A better fix for this problem can be found in the BugTraq entry, but
1269   // expediency for Mantis demands this hack.
1270 #ifdef _LP64
1271   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1272     Node* n = region->fast_out(i);
1273     if (n->is_Phi()) {
1274       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
1275         Node* m = n->fast_out(j);
1276         if (m->Opcode() == Op_ConvI2L)
1277           return false;
1278         if (m->is_CastII()) {
1279           return false;
1280         }
1281       }
1282     }
1283   }
1284 #endif
1285   return true;
1286 }
1287 
1288 
1289 //------------------------------place_outside_loop---------------------------------
1290 // Place some computation outside of this loop on the path to the use passed as argument
1291 Node* PhaseIdealLoop::place_outside_loop(Node* useblock, IdealLoopTree* loop) const {
1292   Node* head = loop->_head;
1293   assert(!loop->is_member(get_loop(useblock)), "must be outside loop");
1294   if (head->is_Loop() && head->as_Loop()->is_strip_mined()) {
1295     loop = loop->_parent;
1296     assert(loop->_head->is_OuterStripMinedLoop(), "malformed strip mined loop");
1297   }
1298 
1299   // Pick control right outside the loop
1300   for (;;) {
1301     Node* dom = idom(useblock);
1302     if (loop->is_member(get_loop(dom)) ||
1303         // NeverBranch nodes are not assigned to the loop when constructed
1304         (dom->is_NeverBranch() && loop->is_member(get_loop(dom->in(0))))) {
1305       break;
1306     }
1307     useblock = dom;
1308   }
1309   assert(find_non_split_ctrl(useblock) == useblock, "should be non split control");
1310   return useblock;
1311 }
1312 
1313 
1314 bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
1315   if (!n->is_If() || n->is_BaseCountedLoopEnd()) {
1316     return false;
1317   }
1318   if (!n->in(0)->is_Region()) {
1319     return false;
1320   }
1321 
1322   Node* region = n->in(0);
1323   Node* dom = idom(region);
1324   if (!dom->is_If() ||  !n->as_If()->same_condition(dom, &_igvn)) {
1325     return false;
1326   }
1327   IfNode* dom_if = dom->as_If();
1328   Node* proj_true = dom_if->proj_out(1);
1329   Node* proj_false = dom_if->proj_out(0);
1330 
1331   for (uint i = 1; i < region->req(); i++) {
1332     if (is_dominator(proj_true, region->in(i))) {
1333       continue;
1334     }
1335     if (is_dominator(proj_false, region->in(i))) {
1336       continue;
1337     }
1338     return false;
1339   }
1340 
1341   return true;
1342 }
1343 
1344 
1345 bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
1346   if (must_throttle_split_if()) {
1347     return false;
1348   }
1349 
1350   // Do not do 'split-if' if irreducible loops are present.
1351   if (_has_irreducible_loops) {
1352     return false;
1353   }
1354 
1355   if (merge_point_too_heavy(C, n_ctrl)) {
1356     return false;
1357   }
1358 
1359   // Do not do 'split-if' if some paths are dead.  First do dead code
1360   // elimination and then see if its still profitable.
1361   for (uint i = 1; i < n_ctrl->req(); i++) {
1362     if (n_ctrl->in(i) == C->top()) {
1363       return false;
1364     }
1365   }
1366 
1367   // If trying to do a 'Split-If' at the loop head, it is only
1368   // profitable if the cmp folds up on BOTH paths.  Otherwise we
1369   // risk peeling a loop forever.
1370 
1371   // CNC - Disabled for now.  Requires careful handling of loop
1372   // body selection for the cloned code.  Also, make sure we check
1373   // for any input path not being in the same loop as n_ctrl.  For
1374   // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
1375   // because the alternative loop entry points won't be converted
1376   // into LoopNodes.
1377   IdealLoopTree *n_loop = get_loop(n_ctrl);
1378   for (uint j = 1; j < n_ctrl->req(); j++) {
1379     if (get_loop(n_ctrl->in(j)) != n_loop) {
1380       return false;
1381     }
1382   }
1383 
1384   // Check for safety of the merge point.
1385   if (!merge_point_safe(n_ctrl)) {
1386     return false;
1387   }
1388 
1389   return true;
1390 }
1391 
1392 // Detect if the node is the inner strip-mined loop
1393 // Return: null if it's not the case, or the exit of outer strip-mined loop
1394 static Node* is_inner_of_stripmined_loop(const Node* out) {
1395   Node* out_le = nullptr;
1396 
1397   if (out->is_CountedLoopEnd()) {
1398       const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode();
1399 
1400       if (loop != nullptr && loop->is_strip_mined()) {
1401         out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit();
1402       }
1403   }
1404 
1405   return out_le;
1406 }
1407 
bool PhaseIdealLoop::flat_array_element_type_check(Node *n) {
  // If the CmpP is a subtype check for a value that has just been
  // loaded from an array, the subtype check guarantees the value
  // can't be stored in a flat array and the load of the value
  // happens with a flat array check then: push the type check
  // through the phi of the flat array check. This needs special
  // logic because the subtype check's input is not a phi but a
  // LoadKlass that must first be cloned through the phi.
  // Returns true if the graph was transformed (or the attempt was
  // cleanly undone), false if the pattern does not match.
  if (n->Opcode() != Op_CmpP) {
    return false;
  }

  Node* klassptr = n->in(1);
  Node* klasscon = n->in(2);

  // Look through a narrow-klass decode to the LoadKlass.
  if (klassptr->is_DecodeNarrowPtr()) {
    klassptr = klassptr->in(1);
  }

  if (klassptr->Opcode() != Op_LoadKlass && klassptr->Opcode() != Op_LoadNKlass) {
    return false;
  }

  // The compared-against klass must be a constant.
  if (!klasscon->is_Con()) {
    return false;
  }

  Node* addr = klassptr->in(MemNode::Address);

  if (!addr->is_AddP()) {
    return false;
  }

  intptr_t offset;
  Node* obj = AddPNode::Ideal_base_and_offset(addr, &_igvn, offset);

  if (obj == nullptr) {
    return false;
  }

  assert(obj != nullptr && addr->in(AddPNode::Base) == addr->in(AddPNode::Address), "malformed AddP?");
  // Look through a CastPP on the base object.
  if (obj->Opcode() == Op_CastPP) {
    obj = obj->in(1);
  }

  // The base must be a phi (merging the flat array check's results).
  if (!obj->is_Phi()) {
    return false;
  }

  Node* region = obj->in(0);

  // Build a phi that merges, for each path into the region, a clone of
  // the CastPP (if any) / AddP / LoadKlass (/ Decode) chain applied to
  // that path's input of the object phi.
  Node* phi = PhiNode::make_blank(region, n->in(1));
  for (uint i = 1; i < region->req(); i++) {
    Node* in = obj->in(i);
    Node* ctrl = region->in(i);
    if (addr->in(AddPNode::Base) != obj) {
      // There was a CastPP between the phi and the AddP: clone it per path.
      Node* cast = addr->in(AddPNode::Base);
      assert(cast->Opcode() == Op_CastPP && cast->in(0) != nullptr, "inconsistent subgraph");
      Node* cast_clone = cast->clone();
      cast_clone->set_req(0, ctrl);
      cast_clone->set_req(1, in);
      register_new_node(cast_clone, ctrl);
      const Type* tcast = cast_clone->Value(&_igvn);
      _igvn.set_type(cast_clone, tcast);
      cast_clone->as_Type()->set_type(tcast);
      in = cast_clone;
    }
    // Clone the address computation with this path's base.
    Node* addr_clone = addr->clone();
    addr_clone->set_req(AddPNode::Base, in);
    addr_clone->set_req(AddPNode::Address, in);
    register_new_node(addr_clone, ctrl);
    _igvn.set_type(addr_clone, addr_clone->Value(&_igvn));
    // Clone the klass load from the cloned address (input 2 is MemNode::Address).
    Node* klassptr_clone = klassptr->clone();
    klassptr_clone->set_req(2, addr_clone);
    register_new_node(klassptr_clone, ctrl);
    _igvn.set_type(klassptr_clone, klassptr_clone->Value(&_igvn));
    if (klassptr != n->in(1)) {
      // Re-apply the narrow-klass decode on top of the cloned load.
      Node* decode = n->in(1);
      assert(decode->is_DecodeNarrowPtr(), "inconsistent subgraph");
      Node* decode_clone = decode->clone();
      decode_clone->set_req(1, klassptr_clone);
      register_new_node(decode_clone, ctrl);
      _igvn.set_type(decode_clone, decode_clone->Value(&_igvn));
      klassptr_clone = decode_clone;
    }
    phi->set_req(i, klassptr_clone);
  }
  register_new_node(phi, region);
  // Hook the new phi into the CmpP and retry the split-if post work.
  Node* orig = n->in(1);
  _igvn.replace_input_of(n, 1, phi);
  split_if_with_blocks_post(n);
  if (n->outcnt() != 0) {
    // The CmpP was not split through: undo the rewiring.
    _igvn.replace_input_of(n, 1, orig);
    _igvn.remove_dead_node(phi);
  }
  return true;
}
1505 
1506 //------------------------------split_if_with_blocks_post----------------------
1507 // Do the real work in a non-recursive function.  CFG hackery wants to be
1508 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
1509 // info.
1510 void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
1511 
1512   if (flat_array_element_type_check(n)) {
1513     return;
1514   }
1515 
1516   // Cloning Cmp through Phi's involves the split-if transform.
1517   // FastLock is not used by an If
1518   if (n->is_Cmp() && !n->is_FastLock()) {
1519     Node *n_ctrl = get_ctrl(n);
1520     // Determine if the Node has inputs from some local Phi.
1521     // Returns the block to clone thru.
1522     Node *n_blk = has_local_phi_input(n);
1523     if (n_blk != n_ctrl) {
1524       return;
1525     }
1526 
1527     if (!can_split_if(n_ctrl)) {
1528       return;
1529     }
1530 
1531     if (n->outcnt() != 1) {
1532       return; // Multiple bool's from 1 compare?
1533     }
1534     Node *bol = n->unique_out();
1535     assert(bol->is_Bool(), "expect a bool here");
1536     if (bol->outcnt() != 1) {
1537       return;// Multiple branches from 1 compare?
1538     }
1539     Node *iff = bol->unique_out();
1540 
1541     // Check some safety conditions
1542     if (iff->is_If()) {        // Classic split-if?
1543       if (iff->in(0) != n_ctrl) {
1544         return; // Compare must be in same blk as if
1545       }
1546     } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
1547       // Can't split CMove with different control.
1548       if (get_ctrl(iff) != n_ctrl) {
1549         return;
1550       }
1551       if (get_ctrl(iff->in(2)) == n_ctrl ||
1552           get_ctrl(iff->in(3)) == n_ctrl) {
1553         return;                 // Inputs not yet split-up
1554       }
1555       if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
1556         return;                 // Loop-invar test gates loop-varying CMOVE
1557       }
1558     } else {
1559       return;  // some other kind of node, such as an Allocate
1560     }
1561 
1562     // When is split-if profitable?  Every 'win' on means some control flow
1563     // goes dead, so it's almost always a win.
1564     int policy = 0;
1565     // Split compare 'n' through the merge point if it is profitable
1566     Node *phi = split_thru_phi( n, n_ctrl, policy);
1567     if (!phi) {
1568       return;
1569     }
1570 
1571     // Found a Phi to split thru!
1572     // Replace 'n' with the new phi
1573     _igvn.replace_node(n, phi);
1574 
1575     // Now split the bool up thru the phi
1576     Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
1577     guarantee(bolphi != nullptr, "null boolean phi node");
1578 
1579     _igvn.replace_node(bol, bolphi);
1580     assert(iff->in(1) == bolphi, "");
1581 
1582     if (bolphi->Value(&_igvn)->singleton()) {
1583       return;
1584     }
1585 
1586     // Conditional-move?  Must split up now
1587     if (!iff->is_If()) {
1588       Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
1589       _igvn.replace_node(iff, cmovphi);
1590       return;
1591     }
1592 
1593     // Now split the IF
1594     do_split_if(iff);
1595     return;
1596   }
1597 
1598   // Two identical ifs back to back can be merged
1599   if (try_merge_identical_ifs(n)) {
1600     return;
1601   }
1602 
1603   // Check for an IF ready to split; one that has its
1604   // condition codes input coming from a Phi at the block start.
1605   int n_op = n->Opcode();
1606 
1607   // Check for an IF being dominated by another IF same test
1608   if (n_op == Op_If ||
1609       n_op == Op_RangeCheck) {
1610     Node *bol = n->in(1);
1611     uint max = bol->outcnt();
1612     // Check for same test used more than once?
1613     if (bol->is_Bool() && (max > 1 || bol->in(1)->is_SubTypeCheck())) {
1614       // Search up IDOMs to see if this IF is dominated.
1615       Node* cmp = bol->in(1);
1616       Node *cutoff = cmp->is_SubTypeCheck() ? dom_lca(get_ctrl(cmp->in(1)), get_ctrl(cmp->in(2))) : get_ctrl(bol);
1617 
1618       // Now search up IDOMs till cutoff, looking for a dominating test
1619       Node *prevdom = n;
1620       Node *dom = idom(prevdom);
1621       while (dom != cutoff) {
1622         if (dom->req() > 1 && n->as_If()->same_condition(dom, &_igvn) && prevdom->in(0) == dom &&
1623             safe_for_if_replacement(dom)) {
1624           // It's invalid to move control dependent data nodes in the inner
1625           // strip-mined loop, because:
1626           //  1) break validation of LoopNode::verify_strip_mined()
1627           //  2) move code with side-effect in strip-mined loop
1628           // Move to the exit of outer strip-mined loop in that case.
1629           Node* out_le = is_inner_of_stripmined_loop(dom);
1630           if (out_le != nullptr) {
1631             prevdom = out_le;
1632           }
1633           // Replace the dominated test with an obvious true or false.
1634           // Place it on the IGVN worklist for later cleanup.
1635           C->set_major_progress();
1636           dominated_by(prevdom->as_IfProj(), n->as_If(), false, true);
1637           DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
1638           return;
1639         }
1640         prevdom = dom;
1641         dom = idom(prevdom);
1642       }
1643     }
1644   }
1645 
1646   try_sink_out_of_loop(n);
1647 
1648   try_move_store_after_loop(n);
1649 
1650   // Remove multiple allocations of the same inline type
1651   if (n->is_InlineType()) {
1652     n->as_InlineType()->remove_redundant_allocations(this);
1653   }
1654 }
1655 
1656 // Transform:
1657 //
1658 // if (some_condition) {
1659 //   // body 1
1660 // } else {
1661 //   // body 2
1662 // }
1663 // if (some_condition) {
1664 //   // body 3
1665 // } else {
1666 //   // body 4
1667 // }
1668 //
1669 // into:
1670 //
1671 //
1672 // if (some_condition) {
1673 //   // body 1
1674 //   // body 3
1675 // } else {
1676 //   // body 2
1677 //   // body 4
1678 // }
bool PhaseIdealLoop::try_merge_identical_ifs(Node* n) {
  if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
    Node *n_ctrl = n->in(0);
    IfNode* dom_if = idom(n_ctrl)->as_If();
    if (n->in(1) != dom_if->in(1)) {
      // The two ifs were accepted as identical even though their conditions
      // are distinct nodes: per the assert this only happens for subtype
      // checks with profile data attached. Use the dominating if's condition
      // so both tests share the exact same input.
      assert(n->in(1)->in(1)->is_SubTypeCheck() &&
             (n->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr ||
              dom_if->in(1)->in(1)->as_SubTypeCheck()->method() != nullptr), "only for subtype checks with profile data attached");
      _igvn.replace_input_of(n, 1, dom_if->in(1));
    }
    // Remember the dominating if's projections before the graph is reshaped.
    ProjNode* dom_proj_true = dom_if->proj_out(1);
    ProjNode* dom_proj_false = dom_if->proj_out(0);

    // Now split the IF
    RegionNode* new_false_region;
    RegionNode* new_true_region;
    do_split_if(n, &new_false_region, &new_true_region);
    assert(new_false_region->req() == new_true_region->req(), "");
#ifdef ASSERT
    for (uint i = 1; i < new_false_region->req(); ++i) {
      assert(new_false_region->in(i)->in(0) == new_true_region->in(i)->in(0), "unexpected shape following split if");
      assert(i == new_false_region->req() - 1 || new_false_region->in(i)->in(0)->in(1) == new_false_region->in(i + 1)->in(0)->in(1), "unexpected shape following split if");
    }
#endif
    assert(new_false_region->in(1)->in(0)->in(1) == dom_if->in(1), "dominating if and dominated if after split must share test");

    // We now have:
    // if (some_condition) {
    //   // body 1
    //   if (some_condition) {
    //     body3: // new_true_region
    //     // body3
    //   } else {
    //     goto body4;
    //   }
    // } else {
    //   // body 2
    //  if (some_condition) {
    //     goto body3;
    //   } else {
    //     body4:   // new_false_region
    //     // body4;
    //   }
    // }
    //

    // clone pinned nodes thru the resulting regions
    push_pinned_nodes_thru_region(dom_if, new_true_region);
    push_pinned_nodes_thru_region(dom_if, new_false_region);

    // Optimize out the cloned ifs. Because pinned nodes were cloned, this also allows a CastPP that would be dependent
    // on a projection of n to have the dom_if as a control dependency. We don't want the CastPP to end up with an
    // unrelated control dependency.
    for (uint i = 1; i < new_false_region->req(); i++) {
      if (is_dominator(dom_proj_true, new_false_region->in(i))) {
        dominated_by(dom_proj_true->as_IfProj(), new_false_region->in(i)->in(0)->as_If(), false, false);
      } else {
        assert(is_dominator(dom_proj_false, new_false_region->in(i)), "bad if");
        dominated_by(dom_proj_false->as_IfProj(), new_false_region->in(i)->in(0)->as_If(), false, false);
      }
    }
    return true;
  }
  return false;
}
1744 
// For each data node pinned at 'region' whose inputs are all available above
// 'dom_if', clone the node once per region path and merge the clones with a
// Phi at the region. This unpins the computation from the region so the
// cloned ifs merging there can subsequently be optimized out.
void PhaseIdealLoop::push_pinned_nodes_thru_region(IfNode* dom_if, Node* region) {
  for (DUIterator i = region->outs(); region->has_out(i); i++) {
    Node* u = region->out(i);
    // Only data nodes (not Phis) that merely depend on the test and carry no
    // zero-check dependency are candidates for cloning.
    if (!has_ctrl(u) || u->is_Phi() || !u->depends_only_on_test() || !_igvn.no_dependent_zero_check(u)) {
      continue;
    }
    assert(u->in(0) == region, "not a control dependent node?");
    uint j = 1;
    // Every data input must dominate dom_if, otherwise the clones placed on
    // the region's input paths could not see their inputs.
    for (; j < u->req(); ++j) {
      Node* in = u->in(j);
      if (!is_dominator(ctrl_or_self(in), dom_if)) {
        break;
      }
    }
    if (j == u->req()) {
      // All inputs qualify: clone u on each incoming path and merge with a Phi.
      Node *phi = PhiNode::make_blank(region, u);
      for (uint k = 1; k < region->req(); ++k) {
        Node* clone = u->clone();
        clone->set_req(0, region->in(k));
        register_new_node(clone, region->in(k));
        phi->init_req(k, clone);
      }
      register_new_node(phi, region);
      _igvn.replace_node(u, phi);
      --i; // replace_node() removed u from region's outputs: re-sync the iterator
    }
  }
}
1773 
1774 bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const {
1775   if (!dom->is_CountedLoopEnd()) {
1776     return true;
1777   }
1778   CountedLoopEndNode* le = dom->as_CountedLoopEnd();
1779   CountedLoopNode* cl = le->loopnode();
1780   if (cl == nullptr) {
1781     return true;
1782   }
1783   if (!cl->is_main_loop()) {
1784     return true;
1785   }
1786   if (cl->is_canonical_loop_entry() == nullptr) {
1787     return true;
1788   }
1789   // Further unrolling is possible so loop exit condition might change
1790   return false;
1791 }
1792 
1793 // See if a shared loop-varying computation has no loop-varying uses.
1794 // Happens if something is only used for JVM state in uncommon trap exits,
1795 // like various versions of induction variable+offset.  Clone the
1796 // computation per usage to allow it to sink out of the loop.
void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
  // Only freely-floating data nodes are candidates: Phis/Bools/Projs/
  // MergeMems/CMoves/Opaque4/Type nodes are excluded up front.
  if (has_ctrl(n) &&
      !n->is_Phi() &&
      !n->is_Bool() &&
      !n->is_Proj() &&
      !n->is_MergeMem() &&
      !n->is_CMove() &&
      n->Opcode() != Op_Opaque4 &&
      !n->is_Type()) {
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);

    if (n->in(0) != nullptr) {
      IdealLoopTree* loop_ctrl = get_loop(n->in(0));
      if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) {
        // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example,
        // for Div nodes inside a loop (control input inside loop) without a use except for an UCT (outside the loop).
        // Rewire control of n to right outside of the loop, regardless if its input(s) are later sunk or not.
        _igvn.replace_input_of(n, 0, place_outside_loop(n_ctrl, loop_ctrl));
      }
    }
    if (n_loop != _ltree_root && n->outcnt() > 1) {
      // Compute early control: needed for anti-dependence analysis. It's also possible that as a result of
      // previous transformations in this loop opts round, the node can be hoisted now: early control will tell us.
      Node* early_ctrl = compute_early_ctrl(n, n_ctrl);
      if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now
          ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops!
        assert(!n->is_Store() && !n->is_LoadStore(), "no node with a side effect");
        Node* outer_loop_clone = nullptr;
        // Walk uses from the back so index adjustments below stay valid.
        for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) {
          Node* u = n->last_out(j); // Clone private computation per use
          _igvn.rehash_node_delayed(u);
          Node* x = n->clone(); // Clone computation
          Node* x_ctrl = nullptr;
          if (u->is_Phi()) {
            // Replace all uses of normal nodes.  Replace Phi uses
            // individually, so the separate Nodes can sink down
            // different paths.
            uint k = 1;
            while (u->in(k) != n) k++;
            u->set_req(k, x);
            // x goes next to Phi input path
            x_ctrl = u->in(0)->in(k);
            // Find control for 'x' next to use but not inside inner loops.
            x_ctrl = place_outside_loop(x_ctrl, n_loop);
            --j;
          } else {              // Normal use
            if (has_ctrl(u)) {
              x_ctrl = get_ctrl(u);
            } else {
              x_ctrl = u->in(0);
            }
            // Find control for 'x' next to use but not inside inner loops.
            x_ctrl = place_outside_loop(x_ctrl, n_loop);
            // Replace all uses
            if (u->is_ConstraintCast() && _igvn.type(n)->higher_equal(u->bottom_type()) && u->in(0) == x_ctrl) {
              // If we're sinking a chain of data nodes, we might have inserted a cast to pin the use which is not necessary
              // anymore now that we're going to pin n as well
              _igvn.replace_node(u, x);
              --j;
            } else {
              int nb = u->replace_edge(n, x, &_igvn);
              j -= nb; // replace_edge may have rewired several edges at once
            }
          }

          if (n->is_Load()) {
            // For loads, add a control edge to a CFG node outside of the loop
            // to force them to not combine and return back inside the loop
            // during GVN optimization (4641526).
            assert(x_ctrl == get_late_ctrl_with_anti_dep(x->as_Load(), early_ctrl, x_ctrl), "anti-dependences were already checked");

            IdealLoopTree* x_loop = get_loop(x_ctrl);
            Node* x_head = x_loop->_head;
            if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) {
              // Do not add duplicate LoadNodes to the outer strip mined loop
              if (outer_loop_clone != nullptr) {
                _igvn.replace_node(x, outer_loop_clone);
                continue;
              }
              outer_loop_clone = x;
            }
            x->set_req(0, x_ctrl);
          } else if (n->in(0) != nullptr){
            x->set_req(0, x_ctrl);
          }
          assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");
          assert(!n_loop->is_member(get_loop(x_ctrl)), "should have moved out of loop");
          register_new_node(x, x_ctrl);

          // Chain of AddP nodes: (AddP base (AddP base (AddP base )))
          // All AddP nodes must keep the same base after sinking so:
          // 1- We don't add a CastPP here until the last one of the chain is sunk: if part of the chain is not sunk,
          // their bases remain the same.
          // (see 2- below)
          assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() ||
                 x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) ||
                 !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape");
          if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() &&
              !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) {
            assert(!x->is_Load(), "load should be pinned");
            // Use a cast node to pin clone out of loop
            Node* cast = nullptr;
            // Find the first input still controlled inside the loop and cast it.
            for (uint k = 0; k < x->req(); k++) {
              Node* in = x->in(k);
              if (in != nullptr && n_loop->is_member(get_loop(get_ctrl(in)))) {
                const Type* in_t = _igvn.type(in);
                cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t, ConstraintCastNode::UnconditionalDependency);
              }
              if (cast != nullptr) {
                Node* prev = _igvn.hash_find_insert(cast);
                if (prev != nullptr && get_ctrl(prev) == x_ctrl) {
                  // An equivalent cast already exists at this control: reuse it.
                  cast->destruct(&_igvn);
                  cast = prev;
                } else {
                  register_new_node(cast, x_ctrl);
                }
                x->replace_edge(in, cast);
                // Chain of AddP nodes:
                // 2- A CastPP of the base is only added now that all AddP nodes are sunk
                if (x->is_AddP() && k == AddPNode::Base) {
                  update_addp_chain_base(x, n->in(AddPNode::Base), cast);
                }
                break;
              }
            }
            assert(cast != nullptr, "must have added a cast to pin the node");
          }
        }
        // All uses now reference clones: the original node is dead.
        _igvn.remove_dead_node(n);
      }
      // Reset dom_lca tags used by get_late_ctrl_with_anti_dep() above.
      _dom_lca_tags_round = 0;
    }
  }
}
1932 
1933 void PhaseIdealLoop::update_addp_chain_base(Node* x, Node* old_base, Node* new_base) {
1934   ResourceMark rm;
1935   Node_List wq;
1936   wq.push(x);
1937   while (wq.size() != 0) {
1938     Node* n = wq.pop();
1939     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1940       Node* u = n->fast_out(i);
1941       if (u->is_AddP() && u->in(AddPNode::Base) == old_base) {
1942         _igvn.replace_input_of(u, AddPNode::Base, new_base);
1943         wq.push(u);
1944       }
1945     }
1946   }
1947 }
1948 
1949 // Compute the early control of a node by following its inputs until we reach
1950 // nodes that are pinned. Then compute the LCA of the control of all pinned nodes.
1951 Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) {
1952   Node* early_ctrl = nullptr;
1953   ResourceMark rm;
1954   Unique_Node_List wq;
1955   wq.push(n);
1956   for (uint i = 0; i < wq.size(); i++) {
1957     Node* m = wq.at(i);
1958     Node* c = nullptr;
1959     if (m->is_CFG()) {
1960       c = m;
1961     } else if (m->pinned()) {
1962       c = m->in(0);
1963     } else {
1964       for (uint j = 0; j < m->req(); j++) {
1965         Node* in = m->in(j);
1966         if (in != nullptr) {
1967           wq.push(in);
1968         }
1969       }
1970     }
1971     if (c != nullptr) {
1972       assert(is_dominator(c, n_ctrl), "control input must dominate current control");
1973       if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) {
1974         early_ctrl = c;
1975       }
1976     }
1977   }
1978   assert(is_dominator(early_ctrl, n_ctrl), "early control must dominate current control");
1979   return early_ctrl;
1980 }
1981 
1982 bool PhaseIdealLoop::ctrl_of_all_uses_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop) {
1983   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1984     Node* u = n->fast_out(i);
1985     if (u->is_Opaque1()) {
1986       return false;  // Found loop limit, bugfix for 4677003
1987     }
1988     // We can't reuse tags in PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal() so make sure calls to
1989     // get_late_ctrl_with_anti_dep() use their own tag
1990     _dom_lca_tags_round++;
1991     assert(_dom_lca_tags_round != 0, "shouldn't wrap around");
1992 
1993     if (u->is_Phi()) {
1994       for (uint j = 1; j < u->req(); ++j) {
1995         if (u->in(j) == n && !ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, u->in(0)->in(j))) {
1996           return false;
1997         }
1998       }
1999     } else {
2000       Node* ctrl = has_ctrl(u) ? get_ctrl(u) : u->in(0);
2001       if (!ctrl_of_use_out_of_loop(n, n_ctrl, n_loop, ctrl)) {
2002         return false;
2003       }
2004     }
2005   }
2006   return true;
2007 }
2008 
2009 bool PhaseIdealLoop::ctrl_of_use_out_of_loop(const Node* n, Node* n_ctrl, IdealLoopTree* n_loop, Node* ctrl) {
2010   if (n->is_Load()) {
2011     ctrl = get_late_ctrl_with_anti_dep(n->as_Load(), n_ctrl, ctrl);
2012   }
2013   IdealLoopTree *u_loop = get_loop(ctrl);
2014   if (u_loop == n_loop) {
2015     return false; // Found loop-varying use
2016   }
2017   if (n_loop->is_member(u_loop)) {
2018     return false; // Found use in inner loop
2019   }
2020   // Sinking a node from a pre loop to its main loop pins the node between the pre and main loops. If that node is input
2021   // to a check that's eliminated by range check elimination, it becomes input to an expression that feeds into the exit
2022   // test of the pre loop above the point in the graph where it's pinned.
2023   if (n_loop->_head->is_CountedLoop() && n_loop->_head->as_CountedLoop()->is_pre_loop() &&
2024       u_loop->_head->is_CountedLoop() && u_loop->_head->as_CountedLoop()->is_main_loop() &&
2025       n_loop->_next == get_loop(u_loop->_head->as_CountedLoop()->skip_strip_mined())) {
2026     return false;
2027   }
2028   return true;
2029 }
2030 
2031 //------------------------------split_if_with_blocks---------------------------
2032 // Check for aggressive application of 'split-if' optimization,
2033 // using basic block level info.
2034 void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
2035   Node* root = C->root();
2036   visited.set(root->_idx); // first, mark root as visited
2037   // Do pre-visit work for root
2038   Node* n   = split_if_with_blocks_pre(root);
2039   uint  cnt = n->outcnt();
2040   uint  i   = 0;
2041 
2042   while (true) {
2043     // Visit all children
2044     if (i < cnt) {
2045       Node* use = n->raw_out(i);
2046       ++i;
2047       if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
2048         // Now do pre-visit work for this use
2049         use = split_if_with_blocks_pre(use);
2050         nstack.push(n, i); // Save parent and next use's index.
2051         n   = use;         // Process all children of current use.
2052         cnt = use->outcnt();
2053         i   = 0;
2054       }
2055     }
2056     else {
2057       // All of n's children have been processed, complete post-processing.
2058       if (cnt != 0 && !n->is_Con()) {
2059         assert(has_node(n), "no dead nodes");
2060         split_if_with_blocks_post(n);
2061       }
2062       if (must_throttle_split_if()) {
2063         nstack.clear();
2064       }
2065       if (nstack.is_empty()) {
2066         // Finished all nodes on stack.
2067         break;
2068       }
2069       // Get saved parent node and next use's index. Visit the rest of uses.
2070       n   = nstack.node();
2071       cnt = n->outcnt();
2072       i   = nstack.index();
2073       nstack.pop();
2074     }
2075   }
2076 }
2077 
2078 
2079 //=============================================================================
2080 //
2081 //                   C L O N E   A   L O O P   B O D Y
2082 //
2083 
2084 //------------------------------clone_iff--------------------------------------
2085 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2086 // "Nearly" because all Nodes have been cloned from the original in the loop,
2087 // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
2088 // through the Phi recursively, and return a Bool.
2089 Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
2090 
2091   // Convert this Phi into a Phi merging Bools
2092   uint i;
2093   for (i = 1; i < phi->req(); i++) {
2094     Node *b = phi->in(i);
2095     if (b->is_Phi()) {
2096       _igvn.replace_input_of(phi, i, clone_iff(b->as_Phi()));
2097     } else {
2098       assert(b->is_Bool() || b->Opcode() == Op_Opaque4, "");
2099     }
2100   }
2101 
2102   Node* n = phi->in(1);
2103   Node* sample_opaque = nullptr;
2104   Node *sample_bool = nullptr;
2105   if (n->Opcode() == Op_Opaque4) {
2106     sample_opaque = n;
2107     sample_bool = n->in(1);
2108     assert(sample_bool->is_Bool(), "wrong type");
2109   } else {
2110     sample_bool = n;
2111   }
2112   Node* sample_cmp = sample_bool->in(1);
2113   const Type* t = Type::TOP;
2114   const TypePtr* at = nullptr;
2115   if (sample_cmp->is_FlatArrayCheck()) {
2116     // Left input of a FlatArrayCheckNode is memory, set the (adr) type of the phi accordingly
2117     assert(sample_cmp->in(1)->bottom_type() == Type::MEMORY, "unexpected input type");
2118     t = Type::MEMORY;
2119     at = TypeRawPtr::BOTTOM;
2120   }
2121 
2122   // Make Phis to merge the Cmp's inputs.
2123   PhiNode *phi1 = new PhiNode(phi->in(0), t, at);
2124   PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP);
2125   for (i = 1; i < phi->req(); i++) {
2126     Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1);
2127     Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2);
2128     phi1->set_req(i, n1);
2129     phi2->set_req(i, n2);
2130     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2131     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2132   }
2133   // See if these Phis have been made before.
2134   // Register with optimizer
2135   Node *hit1 = _igvn.hash_find_insert(phi1);
2136   if (hit1) {                   // Hit, toss just made Phi
2137     _igvn.remove_dead_node(phi1); // Remove new phi
2138     assert(hit1->is_Phi(), "" );
2139     phi1 = (PhiNode*)hit1;      // Use existing phi
2140   } else {                      // Miss
2141     _igvn.register_new_node_with_optimizer(phi1);
2142   }
2143   Node *hit2 = _igvn.hash_find_insert(phi2);
2144   if (hit2) {                   // Hit, toss just made Phi
2145     _igvn.remove_dead_node(phi2); // Remove new phi
2146     assert(hit2->is_Phi(), "" );
2147     phi2 = (PhiNode*)hit2;      // Use existing phi
2148   } else {                      // Miss
2149     _igvn.register_new_node_with_optimizer(phi2);
2150   }
2151   // Register Phis with loop/block info
2152   set_ctrl(phi1, phi->in(0));
2153   set_ctrl(phi2, phi->in(0));
2154   // Make a new Cmp
2155   Node *cmp = sample_cmp->clone();
2156   cmp->set_req(1, phi1);
2157   cmp->set_req(2, phi2);
2158   _igvn.register_new_node_with_optimizer(cmp);
2159   set_ctrl(cmp, phi->in(0));
2160 
2161   // Make a new Bool
2162   Node *b = sample_bool->clone();
2163   b->set_req(1,cmp);
2164   _igvn.register_new_node_with_optimizer(b);
2165   set_ctrl(b, phi->in(0));
2166 
2167   if (sample_opaque != nullptr) {
2168     Node* opaque = sample_opaque->clone();
2169     opaque->set_req(1, b);
2170     _igvn.register_new_node_with_optimizer(opaque);
2171     set_ctrl(opaque, phi->in(0));
2172     return opaque;
2173   }
2174 
2175   assert(b->is_Bool(), "");
2176   return b;
2177 }
2178 
2179 //------------------------------clone_bool-------------------------------------
2180 // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
2181 // "Nearly" because all Nodes have been cloned from the original in the loop,
2182 // but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
2183 // through the Phi recursively, and return a Bool.
2184 CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) {
2185   uint i;
2186   // Convert this Phi into a Phi merging Bools
2187   for( i = 1; i < phi->req(); i++ ) {
2188     Node *b = phi->in(i);
2189     if( b->is_Phi() ) {
2190       _igvn.replace_input_of(phi, i, clone_bool(b->as_Phi()));
2191     } else {
2192       assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
2193     }
2194   }
2195 
2196   Node *sample_cmp = phi->in(1);
2197 
2198   // Make Phis to merge the Cmp's inputs.
2199   PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
2200   PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
2201   for( uint j = 1; j < phi->req(); j++ ) {
2202     Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
2203     Node *n1, *n2;
2204     if( cmp_top->is_Cmp() ) {
2205       n1 = cmp_top->in(1);
2206       n2 = cmp_top->in(2);
2207     } else {
2208       n1 = n2 = cmp_top;
2209     }
2210     phi1->set_req( j, n1 );
2211     phi2->set_req( j, n2 );
2212     phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
2213     phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
2214   }
2215 
2216   // See if these Phis have been made before.
2217   // Register with optimizer
2218   Node *hit1 = _igvn.hash_find_insert(phi1);
2219   if( hit1 ) {                  // Hit, toss just made Phi
2220     _igvn.remove_dead_node(phi1); // Remove new phi
2221     assert( hit1->is_Phi(), "" );
2222     phi1 = (PhiNode*)hit1;      // Use existing phi
2223   } else {                      // Miss
2224     _igvn.register_new_node_with_optimizer(phi1);
2225   }
2226   Node *hit2 = _igvn.hash_find_insert(phi2);
2227   if( hit2 ) {                  // Hit, toss just made Phi
2228     _igvn.remove_dead_node(phi2); // Remove new phi
2229     assert( hit2->is_Phi(), "" );
2230     phi2 = (PhiNode*)hit2;      // Use existing phi
2231   } else {                      // Miss
2232     _igvn.register_new_node_with_optimizer(phi2);
2233   }
2234   // Register Phis with loop/block info
2235   set_ctrl(phi1, phi->in(0));
2236   set_ctrl(phi2, phi->in(0));
2237   // Make a new Cmp
2238   Node *cmp = sample_cmp->clone();
2239   cmp->set_req( 1, phi1 );
2240   cmp->set_req( 2, phi2 );
2241   _igvn.register_new_node_with_optimizer(cmp);
2242   set_ctrl(cmp, phi->in(0));
2243 
2244   assert( cmp->is_Cmp(), "" );
2245   return (CmpNode*)cmp;
2246 }
2247 
2248 //------------------------------sink_use---------------------------------------
2249 // If 'use' was in the loop-exit block, it now needs to be sunk
2250 // below the post-loop merge point.
2251 void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
2252   if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
2253     set_ctrl(use, post_loop);
2254     for (DUIterator j = use->outs(); use->has_out(j); j++)
2255       sink_use(use->out(j), post_loop);
2256   }
2257 }
2258 
// Fix up data uses of 'old' (a node of the just-cloned loop body) that live
// outside of both the loop and (for strip-mined loops) the outer loop. Such a
// use saw a single value before cloning; it must now see a Phi merging the
// old-loop and new-loop values. Condition-like uses (If, CMove, Opaque4, Bool,
// CreateEx, AllocateArray ValidLengthTest) cannot simply take a Phi input, so
// they are queued on the lazily-allocated split_if_set/split_bool_set/
// split_cex_set worklists for later cloning by finish_clone_loop().
void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
                                                 IdealLoopTree* loop, IdealLoopTree* outer_loop,
                                                 Node_List*& split_if_set, Node_List*& split_bool_set,
                                                 Node_List*& split_cex_set, Node_List& worklist,
                                                 uint new_counter, CloneLoopMode mode) {
  Node* nnn = old_new[old->_idx];
  // Copy uses to a worklist, so I can munge the def-use info
  // with impunity.
  for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++)
    worklist.push(old->fast_out(j));

  while( worklist.size() ) {
    Node *use = worklist.pop();
    if (!has_node(use))  continue; // Ignore dead nodes
    if (use->in(0) == C->top())  continue;
    IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use );
    // Check for data-use outside of loop - at least one of OLD or USE
    // must not be a CFG node.
#ifdef ASSERT
    // For strip-mined loops, a use in the outer (but not inner) loop that was
    // not itself cloned is only legal if it is the outer safepoint (with
    // ControlAroundStripMined cloning) or if it is already dead.
    if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) {
      Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint();
      assert(mode != IgnoreStripMined, "incorrect cloning mode");
      assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node");
    }
#endif
    if (!loop->is_member(use_loop) && !outer_loop->is_member(use_loop) && (!old->is_CFG() || !use->is_CFG())) {

      // If the Data use is an IF, that means we have an IF outside of the
      // loop that is switching on a condition that is set inside of the
      // loop.  Happens if people set a loop-exit flag; then test the flag
      // in the loop to break the loop, then test it again outside of the
      // loop to determine which way the loop exited.
      // Loop predicate If node connects to Bool node through Opaque1 node.
      //
      // If the use is an AllocateArray through its ValidLengthTest input,
      // make sure the Bool/Cmp input is cloned down to avoid a Phi between
      // the AllocateArray node and its ValidLengthTest input that could cause
      // split if to break.
      if (use->is_If() || use->is_CMove() || use->Opcode() == Op_Opaque4 ||
          (use->Opcode() == Op_AllocateArray && use->in(AllocateNode::ValidLengthTest) == old)) {
        // Since this code is highly unlikely, we lazily build the worklist
        // of such Nodes to go split.
        if (!split_if_set) {
          split_if_set = new Node_List();
        }
        split_if_set->push(use);
      }
      if (use->is_Bool()) {
        if (!split_bool_set) {
          split_bool_set = new Node_List();
        }
        split_bool_set->push(use);
      }
      if (use->Opcode() == Op_CreateEx) {
        if (!split_cex_set) {
          split_cex_set = new Node_List();
        }
        split_cex_set->push(use);
      }


      // Get "block" use is in
      uint idx = 0;
      while( use->in(idx) != old ) idx++;
      Node *prev = use->is_CFG() ? use : get_ctrl(use);
      assert(!loop->is_member(get_loop(prev)) && !outer_loop->is_member(get_loop(prev)), "" );
      // Nodes with _idx >= new_counter were created during this cloning pass:
      // they are the merge Regions made by fix_ctrl_uses(), whose input 2 is
      // the original-loop exit path (input 1 is the cloned-loop path).
      Node *cfg = prev->_idx >= new_counter
        ? prev->in(2)
        : idom(prev);
      if( use->is_Phi() )     // Phi use is in prior block
        cfg = prev->in(idx);  // NOT in block of Phi itself
      if (cfg->is_top()) {    // Use is dead?
        _igvn.replace_input_of(use, idx, C->top());
        continue;
      }

      // If use is referenced through control edge... (idx == 0)
      if (mode == IgnoreStripMined && idx == 0) {
        LoopNode *head = loop->_head->as_Loop();
        if (head->is_strip_mined() && is_dominator(head->outer_loop_exit(), prev)) {
          // That node is outside the inner loop, leave it outside the
          // outer loop as well to not confuse verification code.
          assert(!loop->_parent->is_member(use_loop), "should be out of the outer loop");
          _igvn.replace_input_of(use, 0, head->outer_loop_exit());
          continue;
        }
      }

      // Walk up the dominator tree (through newly created merge Regions via
      // their original-path input) until we are back inside the outer loop.
      while(!outer_loop->is_member(get_loop(cfg))) {
        prev = cfg;
        cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg);
      }
      // If the use occurs after merging several exits from the loop, then
      // old value must have dominated all those exits.  Since the same old
      // value was used on all those exits we did not need a Phi at this
      // merge point.  NOW we do need a Phi here.  Each loop exit value
      // is now merged with the peeled body exit; each exit gets its own
      // private Phi and those Phis need to be merged here.
      Node *phi;
      if( prev->is_Region() ) {
        if( idx == 0 ) {      // Updating control edge?
          phi = prev;         // Just use existing control
        } else {              // Else need a new Phi
          phi = PhiNode::make( prev, old );
          // Now recursively fix up the new uses of old!
          for( uint i = 1; i < prev->req(); i++ ) {
            worklist.push(phi); // Onto worklist once for each 'old' input
          }
        }
      } else {
        // Get new RegionNode merging old and new loop exits
        prev = old_new[prev->_idx];
        assert( prev, "just made this in step 7" );
        if( idx == 0) {      // Updating control edge?
          phi = prev;         // Just use existing control
        } else {              // Else need a new Phi
          // Make a new Phi merging data values properly
          phi = PhiNode::make( prev, old );
          phi->set_req( 1, nnn );
        }
      }
      // If inserting a new Phi, check for prior hits
      if( idx != 0 ) {
        Node *hit = _igvn.hash_find_insert(phi);
        if( hit == nullptr ) {
          _igvn.register_new_node_with_optimizer(phi); // Register new phi
        } else {                                      // or
          // Remove the new phi from the graph and use the hit
          _igvn.remove_dead_node(phi);
          phi = hit;                                  // Use existing phi
        }
        set_ctrl(phi, prev);
      }
      // Make 'use' use the Phi instead of the old loop body exit value
      assert(use->in(idx) == old, "old is still input of use");
      // We notify all uses of old, including use, and the indirect uses,
      // that may now be optimized because we have replaced old with phi.
      _igvn.add_users_to_worklist(old);
      _igvn.replace_input_of(use, idx, phi);
      if( use->_idx >= new_counter ) { // If updating new phis
        // Not needed for correctness, but prevents a weak assert
        // in AddPNode from tripping (when we end up with different
        // base & derived Phis that will become the same after
        // IGVN does CSE).
        Node *hit = _igvn.hash_find_insert(use);
        if( hit )             // Go ahead and re-hash for hits.
          _igvn.replace_node( use, hit );
      }

      // If 'use' was in the loop-exit block, it now needs to be sunk
      // below the post-loop merge point.
      sink_use( use, prev );
    }
  }
}
2414 
2415 static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const IdealLoopTree *loop, const IdealLoopTree* outer_loop,
2416                                                                 const Node_List &old_new, Unique_Node_List& wq, PhaseIdealLoop* phase,
2417                                                                 bool check_old_new) {
2418   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
2419     Node* u = n->fast_out(j);
2420     assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned");
2421     if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) {
2422       Node* c = phase->get_ctrl(u);
2423       IdealLoopTree* u_loop = phase->get_loop(c);
2424       assert(!loop->is_member(u_loop) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
2425       if (!loop->is_member(u_loop)) {
2426         if (outer_loop->is_member(u_loop)) {
2427           wq.push(u);
2428         } else {
2429           // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of
2430           // the outer loop too
2431           Node* u_c = u->in(0);
2432           if (u_c != nullptr) {
2433             IdealLoopTree* u_c_loop = phase->get_loop(u_c);
2434             if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) {
2435               wq.push(u);
2436             }
2437           }
2438         }
2439       }
2440     }
2441   }
2442 }
2443 
// For a strip-mined loop, clone (or strip) the outer loop skeleton (outer
// LoopNode, tail, loop-end If, safepoint) to match the cloned inner loop, and
// clone the data nodes that the outer safepoint keeps in the outer loop.
// With CloneIncludesStripMined the whole outer skeleton is cloned; otherwise
// the cloned inner loop is detached from any outer loop. Finally, data nodes
// that ended up in the outer loop but are not reachable from the safepoint
// are moved out of the loop nest entirely.
void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealLoopTree *loop,
                                      IdealLoopTree* outer_loop, int dd, Node_List &old_new,
                                      Node_List& extra_data_nodes) {
  if (head->is_strip_mined() && mode != IgnoreStripMined) {
    CountedLoopNode* cl = head->as_CountedLoop();
    Node* l = cl->outer_loop();
    Node* tail = cl->outer_loop_tail();
    IfNode* le = cl->outer_loop_end();
    Node* sfpt = cl->outer_safepoint();
    CountedLoopEndNode* cle = cl->loopexit();
    CountedLoopNode* new_cl = old_new[cl->_idx]->as_CountedLoop();
    CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null();
    Node* cle_out = cle->proj_out(false);

    Node* new_sfpt = nullptr;
    Node* new_cle_out = cle_out->clone();
    old_new.map(cle_out->_idx, new_cle_out);
    if (mode == CloneIncludesStripMined) {
      // clone outer loop body
      Node* new_l = l->clone();
      Node* new_tail = tail->clone();
      IfNode* new_le = le->clone()->as_If();
      new_sfpt = sfpt->clone();

      // Record loop membership and idom info for the cloned skeleton:
      // new_l -> new_cl ... new_cle -> new_cle_out -> new_sfpt -> new_le -> new_tail
      set_loop(new_l, outer_loop->_parent);
      set_idom(new_l, new_l->in(LoopNode::EntryControl), dd);
      set_loop(new_cle_out, outer_loop->_parent);
      set_idom(new_cle_out, new_cle, dd);
      set_loop(new_sfpt, outer_loop->_parent);
      set_idom(new_sfpt, new_cle_out, dd);
      set_loop(new_le, outer_loop->_parent);
      set_idom(new_le, new_sfpt, dd);
      set_loop(new_tail, outer_loop->_parent);
      set_idom(new_tail, new_le, dd);
      set_idom(new_cl, new_l, dd);

      old_new.map(l->_idx, new_l);
      old_new.map(tail->_idx, new_tail);
      old_new.map(le->_idx, new_le);
      old_new.map(sfpt->_idx, new_sfpt);

      // Wire the control edges of the cloned outer loop skeleton.
      new_l->set_req(LoopNode::LoopBackControl, new_tail);
      new_l->set_req(0, new_l);
      new_tail->set_req(0, new_le);
      new_le->set_req(0, new_sfpt);
      new_sfpt->set_req(0, new_cle_out);
      new_cle_out->set_req(0, new_cle);
      new_cl->set_req(LoopNode::EntryControl, new_l);

      _igvn.register_new_node_with_optimizer(new_l);
      _igvn.register_new_node_with_optimizer(new_tail);
      _igvn.register_new_node_with_optimizer(new_le);
    } else {
      // Not cloning the outer skeleton: strip the cloned inner loop out of
      // its outer loop by bypassing the outer loop head on entry.
      Node *newhead = old_new[loop->_head->_idx];
      newhead->as_Loop()->clear_strip_mined();
      _igvn.replace_input_of(newhead, LoopNode::EntryControl, newhead->in(LoopNode::EntryControl)->in(LoopNode::EntryControl));
      set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
    }
    // Look at data node that were assigned a control in the outer
    // loop: they are kept in the outer loop by the safepoint so start
    // from the safepoint node's inputs.
    // NOTE(review): this local shadows the 'outer_loop' parameter; both appear
    // to denote the same outer strip-mined loop (see the caller) — confirm.
    IdealLoopTree* outer_loop = get_loop(l);
    Node_Stack stack(2);
    stack.push(sfpt, 1);
    uint new_counter = C->unique();
    // Depth-first walk of the safepoint's input graph, cloning every data
    // node whose control is in the outer loop (post-order, so inputs are
    // cloned before users and edges can be redirected to the clones).
    while (stack.size() > 0) {
      Node* n = stack.node();
      uint i = stack.index();
      // Skip inputs that are absent, CFG nodes, outside the outer loop, or
      // already cloned during this pass (_idx >= new_counter).
      while (i < n->req() &&
             (n->in(i) == nullptr ||
              !has_ctrl(n->in(i)) ||
              get_loop(get_ctrl(n->in(i))) != outer_loop ||
              (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) {
        i++;
      }
      if (i < n->req()) {
        stack.set_index(i+1);
        stack.push(n->in(i), 0);
      } else {
        assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet");
        Node* m = n == sfpt ? new_sfpt : n->clone();
        if (m != nullptr) {
          for (uint i = 0; i < n->req(); i++) {
            if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) {
              m->set_req(i, old_new[m->in(i)->_idx]);
            }
          }
        } else {
          assert(n == sfpt && mode != CloneIncludesStripMined, "where's the safepoint clone?");
        }
        if (n != sfpt) {
          extra_data_nodes.push(n);
          _igvn.register_new_node_with_optimizer(m);
          assert(get_ctrl(n) == cle_out, "what other control?");
          set_ctrl(m, new_cle_out);
          old_new.map(n->_idx, m);
        }
        stack.pop();
      }
    }
    if (mode == CloneIncludesStripMined) {
      _igvn.register_new_node_with_optimizer(new_sfpt);
      _igvn.register_new_node_with_optimizer(new_cle_out);
    }
    // Some other transformation may have pessimistically assigned some
    // data nodes to the outer loop. Set their control so they are out
    // of the outer loop.
    ResourceMark rm;
    Unique_Node_List wq;
    for (uint i = 0; i < extra_data_nodes.size(); i++) {
      Node* old = extra_data_nodes.at(i);
      collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
    }

    for (uint i = 0; i < loop->_body.size(); i++) {
      Node* old = loop->_body.at(i);
      collect_nodes_in_outer_loop_not_reachable_from_sfpt(old, loop, outer_loop, old_new, wq, this, true);
    }

    Node* inner_out = sfpt->in(0);
    if (inner_out->outcnt() > 1) {
      collect_nodes_in_outer_loop_not_reachable_from_sfpt(inner_out, loop, outer_loop, old_new, wq, this, true);
    }

    // Move every collected node (and, transitively, its uses) out of the
    // loop nest, to the outer loop exit.
    Node* new_ctrl = cl->outer_loop_exit();
    assert(get_loop(new_ctrl) != outer_loop, "must be out of the loop nest");
    for (uint i = 0; i < wq.size(); i++) {
      Node* n = wq.at(i);
      set_ctrl(n, new_ctrl);
      if (n->in(0) != nullptr) {
        _igvn.replace_input_of(n, 0, new_ctrl);
      }
      collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false);
    }
  } else {
    // Not strip mined (or strip mining ignored): only fix the clone's idom.
    Node *newhead = old_new[loop->_head->_idx];
    set_idom(newhead, newhead->in(LoopNode::EntryControl), dd);
  }
}
2583 
2584 //------------------------------clone_loop-------------------------------------
2585 //
2586 //                   C L O N E   A   L O O P   B O D Y
2587 //
2588 // This is the basic building block of the loop optimizations.  It clones an
2589 // entire loop body.  It makes an old_new loop body mapping; with this mapping
2590 // you can find the new-loop equivalent to an old-loop node.  All new-loop
2591 // nodes are exactly equal to their old-loop counterparts, all edges are the
2592 // same.  All exits from the old-loop now have a RegionNode that merges the
2593 // equivalent new-loop path.  This is true even for the normal "loop-exit"
2594 // condition.  All uses of loop-invariant old-loop values now come from (one
2595 // or more) Phis that merge their new-loop equivalents.
2596 //
2597 // This operation leaves the graph in an illegal state: there are two valid
2598 // control edges coming from the loop pre-header to both loop bodies.  I'll
2599 // definitely have to hack the graph after running this transform.
2600 //
2601 // From this building block I will further edit edges to perform loop peeling
2602 // or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
2603 //
// Parameter side_by_side_idom:
//   When side_by_side_idom is null, the dominator tree is constructed for
2606 //      the clone loop to dominate the original.  Used in construction of
2607 //      pre-main-post loop sequence.
2608 //   When nonnull, the clone and original are side-by-side, both are
2609 //      dominated by the side_by_side_idom node.  Used in construction of
2610 //      unswitched loops.
void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
                                CloneLoopMode mode, Node* side_by_side_idom) {

  LoopNode* head = loop->_head->as_Loop();
  head->verify_strip_mined(1);

  if (C->do_vector_loop() && PrintOpto) {
    const char* mname = C->method()->name()->as_quoted_ascii();
    if (mname != nullptr) {
      tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
    }
  }

  // For vectorized loops, track clone generations in the clone map so nodes
  // of the original and cloned bodies can be related later.
  CloneMap& cm = C->clone_map();
  if (C->do_vector_loop()) {
    cm.set_clone_idx(cm.max_gen()+1);
#ifndef PRODUCT
    if (PrintOpto) {
      tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx());
      loop->dump_head();
    }
#endif
  }

  // Step 1: Clone the loop body.  Make the old->new mapping.
  clone_loop_body(loop->_body, old_new, &cm);

  // For strip-mined loops (unless strip mining is ignored) the relevant
  // enclosing loop is the outer strip-mined loop, not the inner loop itself.
  IdealLoopTree* outer_loop = (head->is_strip_mined() && mode != IgnoreStripMined) ? get_loop(head->as_CountedLoop()->outer_loop()) : loop;

  // Step 2: Fix the edges in the new body.  If the old input is outside the
  // loop use it.  If the old input is INside the loop, use the corresponding
  // new node instead.
  fix_body_edges(loop->_body, loop, old_new, dd, outer_loop->_parent, false);

  Node_List extra_data_nodes; // data nodes in the outer strip mined loop
  clone_outer_loop(head, mode, loop, outer_loop, dd, old_new, extra_data_nodes);

  // Step 3: Now fix control uses.  Loop varying control uses have already
  // been fixed up (as part of all input edges in Step 2).  Loop invariant
  // control uses must be either an IfFalse or an IfTrue.  Make a merge
  // point to merge the old and new IfFalse/IfTrue nodes; make the use
  // refer to this.
  Node_List worklist;
  // Nodes created from here on have _idx >= new_counter; Step 4 uses that to
  // tell freshly created merge Regions apart from pre-existing nodes.
  uint new_counter = C->unique();
  fix_ctrl_uses(loop->_body, loop, old_new, mode, side_by_side_idom, &cm, worklist);

  // Step 4: If loop-invariant use is not control, it must be dominated by a
  // loop exit IfFalse/IfTrue.  Find "proper" loop exit.  Make a Region
  // there if needed.  Make a Phi there merging old and new used values.
  Node_List *split_if_set = nullptr;
  Node_List *split_bool_set = nullptr;
  Node_List *split_cex_set = nullptr;
  fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);

  // Data nodes of the outer strip-mined loop need the same treatment.
  for (uint i = 0; i < extra_data_nodes.size(); i++) {
    Node* old = extra_data_nodes.at(i);
    clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
                                split_bool_set, split_cex_set, worklist, new_counter,
                                mode);
  }

  // Check for IFs that need splitting/cloning.  Happens if an IF outside of
  // the loop uses a condition set in the loop.  The original IF probably
  // takes control from one or more OLD Regions (which in turn get from NEW
  // Regions).  In any case, there will be a set of Phis for each merge point
  // from the IF up to where the original BOOL def exits the loop.
  finish_clone_loop(split_if_set, split_bool_set, split_cex_set);

}
2680 
2681 void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split_bool_set, Node_List* split_cex_set) {
2682   if (split_if_set) {
2683     while (split_if_set->size()) {
2684       Node *iff = split_if_set->pop();
2685       uint input = iff->Opcode() == Op_AllocateArray ? AllocateNode::ValidLengthTest : 1;
2686       if (iff->in(input)->is_Phi()) {
2687         Node *b = clone_iff(iff->in(input)->as_Phi());
2688         _igvn.replace_input_of(iff, input, b);
2689       }
2690     }
2691   }
2692   if (split_bool_set) {
2693     while (split_bool_set->size()) {
2694       Node *b = split_bool_set->pop();
2695       Node *phi = b->in(1);
2696       assert(phi->is_Phi(), "");
2697       CmpNode *cmp = clone_bool((PhiNode*) phi);
2698       _igvn.replace_input_of(b, 1, cmp);
2699     }
2700   }
2701   if (split_cex_set) {
2702     while (split_cex_set->size()) {
2703       Node *b = split_cex_set->pop();
2704       assert(b->in(0)->is_Region(), "");
2705       assert(b->in(1)->is_Phi(), "");
2706       assert(b->in(0)->in(0) == b->in(1)->in(0), "");
2707       split_up(b, b->in(0), nullptr);
2708     }
2709   }
2710 }
2711 
2712 void PhaseIdealLoop::fix_data_uses(Node_List& body, IdealLoopTree* loop, CloneLoopMode mode, IdealLoopTree* outer_loop,
2713                                    uint new_counter, Node_List &old_new, Node_List &worklist, Node_List*& split_if_set,
2714                                    Node_List*& split_bool_set, Node_List*& split_cex_set) {
2715   for(uint i = 0; i < body.size(); i++ ) {
2716     Node* old = body.at(i);
2717     clone_loop_handle_data_uses(old, old_new, loop, outer_loop, split_if_set,
2718                                 split_bool_set, split_cex_set, worklist, new_counter,
2719                                 mode);
2720   }
2721 }
2722 
// Fix control uses of loop-exit projections: each loop-invariant control use
// of an old-loop CFG node must be an If projection. Clone the projection for
// the new loop and insert a 2-input Region merging the old and new exits; all
// former users of the old projection are rewired to the Region. For strip
// mined loops the inner-loop exit projection is redirected to the outer loop
// end's false projection first.
void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* loop, Node_List &old_new, CloneLoopMode mode,
                                   Node* side_by_side_idom, CloneMap* cm, Node_List &worklist) {
  LoopNode* head = loop->_head->as_Loop();
  for(uint i = 0; i < body.size(); i++ ) {
    Node* old = body.at(i);
    if( !old->is_CFG() ) continue;

    // Copy uses to a worklist, so I can munge the def-use info
    // with impunity.
    for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) {
      worklist.push(old->fast_out(j));
    }

    while (worklist.size()) {  // Visit all uses
      Node *use = worklist.pop();
      if (!has_node(use))  continue; // Ignore dead nodes
      IdealLoopTree *use_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use );
      if (!loop->is_member(use_loop) && use->is_CFG()) {
        // Both OLD and USE are CFG nodes here.
        assert(use->is_Proj(), "" );
        Node* nnn = old_new[old->_idx];

        Node* newuse = nullptr;
        if (head->is_strip_mined() && mode != IgnoreStripMined) {
          // The inner counted loop's exit projection is not the real loop
          // exit: redirect the merge to the outer loop end's false proj.
          CountedLoopNode* cl = head->as_CountedLoop();
          CountedLoopEndNode* cle = cl->loopexit();
          Node* cle_out = cle->proj_out_or_null(false);
          if (use == cle_out) {
            IfNode* le = cl->outer_loop_end();
            use = le->proj_out(false);
            use_loop = get_loop(use);
            if (mode == CloneIncludesStripMined) {
              nnn = old_new[le->_idx];
            } else {
              // cle_out's clone was already made by clone_outer_loop().
              newuse = old_new[cle_out->_idx];
            }
          }
        }
        if (newuse == nullptr) {
          newuse = use->clone();
        }

        // Clone the loop exit control projection
        if (C->do_vector_loop() && cm != nullptr) {
          cm->verify_insert_and_clone(use, newuse, cm->clone_idx());
        }
        newuse->set_req(0,nnn);
        _igvn.register_new_node_with_optimizer(newuse);
        set_loop(newuse, use_loop);
        set_idom(newuse, nnn, dom_depth(nnn) + 1 );

        // We need a Region to merge the exit from the peeled body and the
        // exit from the old loop body.
        RegionNode *r = new RegionNode(3);
        // Map the old use to the new merge point
        old_new.map( use->_idx, r );
        uint dd_r = MIN2(dom_depth(newuse), dom_depth(use));
        assert(dd_r >= dom_depth(dom_lca(newuse, use)), "" );

        // The original user of 'use' uses 'r' instead.
        for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
          Node* useuse = use->last_out(l);
          _igvn.rehash_node_delayed(useuse);
          uint uses_found = 0;
          if (useuse->in(0) == use) {
            useuse->set_req(0, r);
            uses_found++;
            if (useuse->is_CFG()) {
              // This is not a dom_depth > dd_r because when new
              // control flow is constructed by a loop opt, a node and
              // its dominator can end up at the same dom_depth
              assert(dom_depth(useuse) >= dd_r, "");
              set_idom(useuse, r, dom_depth(useuse));
            }
          }
          for (uint k = 1; k < useuse->req(); k++) {
            if( useuse->in(k) == use ) {
              useuse->set_req(k, r);
              uses_found++;
              if (useuse->is_Loop() && k == LoopNode::EntryControl) {
                // This is not a dom_depth > dd_r because when new
                // control flow is constructed by a loop opt, a node
                // and its dominator can end up at the same dom_depth
                assert(dom_depth(useuse) >= dd_r , "");
                set_idom(useuse, r, dom_depth(useuse));
              }
            }
          }
          l -= uses_found;    // we deleted 1 or more copies of this edge
        }

        // Now finish up 'r': input 1 is the cloned-loop exit, input 2 the
        // original-loop exit (clone_loop_handle_data_uses() relies on this).
        r->set_req(1, newuse);
        r->set_req(2,    use);
        _igvn.register_new_node_with_optimizer(r);
        set_loop(r, use_loop);
        set_idom(r, (side_by_side_idom == nullptr) ? newuse->in(0) : side_by_side_idom, dd_r);
      } // End of if a loop-exit test
    }
  }
}
2824 
2825 void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, const Node_List &old_new, int dd,
2826                                     IdealLoopTree* parent, bool partial) {
2827   for(uint i = 0; i < body.size(); i++ ) {
2828     Node *old = body.at(i);
2829     Node *nnn = old_new[old->_idx];
2830     // Fix CFG/Loop controlling the new node
2831     if (has_ctrl(old)) {
2832       set_ctrl(nnn, old_new[get_ctrl(old)->_idx]);
2833     } else {
2834       set_loop(nnn, parent);
2835       if (old->outcnt() > 0) {
2836         Node* dom = idom(old);
2837         if (old_new[dom->_idx] != nullptr) {
2838           dom = old_new[dom->_idx];
2839           set_idom(nnn, dom, dd );
2840         }
2841       }
2842     }
2843     // Correct edges to the new node
2844     for (uint j = 0; j < nnn->req(); j++) {
2845         Node *n = nnn->in(j);
2846         if (n != nullptr) {
2847           IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n);
2848           if (loop->is_member(old_in_loop)) {
2849             if (old_new[n->_idx] != nullptr) {
2850               nnn->set_req(j, old_new[n->_idx]);
2851             } else {
2852               assert(!body.contains(n), "");
2853               assert(partial, "node not cloned");
2854             }
2855           }
2856         }
2857     }
2858     _igvn.hash_find_insert(nnn);
2859   }
2860 }
2861 
2862 void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, CloneMap* cm) {
2863   for (uint i = 0; i < body.size(); i++) {
2864     Node* old = body.at(i);
2865     Node* nnn = old->clone();
2866     old_new.map(old->_idx, nnn);
2867     if (C->do_vector_loop() && cm != nullptr) {
2868       cm->verify_insert_and_clone(old, nnn, cm->clone_idx());
2869     }
2870     _igvn.register_new_node_with_optimizer(nnn);
2871   }
2872 }
2873 
2874 
2875 //---------------------- stride_of_possible_iv -------------------------------------
2876 // Looks for an iff/bool/comp with one operand of the compare
2877 // being a cycle involving an add and a phi,
2878 // with an optional truncation (left-shift followed by a right-shift)
2879 // of the add. Returns zero if not an iv.
2880 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
2881   Node* trunc1 = nullptr;
2882   Node* trunc2 = nullptr;
2883   const TypeInteger* ttype = nullptr;
2884   if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) {
2885     return 0;
2886   }
2887   BoolNode* bl = iff->in(1)->as_Bool();
2888   Node* cmp = bl->in(1);
2889   if (!cmp || (cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU)) {
2890     return 0;
2891   }
2892   // Must have an invariant operand
2893   if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) {
2894     return 0;
2895   }
2896   Node* add2 = nullptr;
2897   Node* cmp1 = cmp->in(1);
2898   if (cmp1->is_Phi()) {
2899     // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) )))
2900     Node* phi = cmp1;
2901     for (uint i = 1; i < phi->req(); i++) {
2902       Node* in = phi->in(i);
2903       Node* add = CountedLoopNode::match_incr_with_optional_truncation(in,
2904                                 &trunc1, &trunc2, &ttype, T_INT);
2905       if (add && add->in(1) == phi) {
2906         add2 = add->in(2);
2907         break;
2908       }
2909     }
2910   } else {
2911     // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) )))
2912     Node* addtrunc = cmp1;
2913     Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc,
2914                                 &trunc1, &trunc2, &ttype, T_INT);
2915     if (add && add->in(1)->is_Phi()) {
2916       Node* phi = add->in(1);
2917       for (uint i = 1; i < phi->req(); i++) {
2918         if (phi->in(i) == addtrunc) {
2919           add2 = add->in(2);
2920           break;
2921         }
2922       }
2923     }
2924   }
2925   if (add2 != nullptr) {
2926     const TypeInt* add2t = _igvn.type(add2)->is_int();
2927     if (add2t->is_con()) {
2928       return add2t->get_con();
2929     }
2930   }
2931   return 0;
2932 }
2933 
2934 
2935 //---------------------- stay_in_loop -------------------------------------
2936 // Return the (unique) control output node that's in the loop (if it exists.)
2937 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) {
2938   Node* unique = nullptr;
2939   if (!n) return nullptr;
2940   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2941     Node* use = n->fast_out(i);
2942     if (!has_ctrl(use) && loop->is_member(get_loop(use))) {
2943       if (unique != nullptr) {
2944         return nullptr;
2945       }
2946       unique = use;
2947     }
2948   }
2949   return unique;
2950 }
2951 
2952 //------------------------------ register_node -------------------------------------
2953 // Utility to register node "n" with PhaseIdealLoop
2954 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree* loop, Node* pred, uint ddepth) {
2955   _igvn.register_new_node_with_optimizer(n);
2956   loop->_body.push(n);
2957   if (n->is_CFG()) {
2958     set_loop(n, loop);
2959     set_idom(n, pred, ddepth);
2960   } else {
2961     set_ctrl(n, pred);
2962   }
2963 }
2964 
2965 //------------------------------ proj_clone -------------------------------------
2966 // Utility to create an if-projection
2967 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) {
2968   ProjNode* c = p->clone()->as_Proj();
2969   c->set_req(0, iff);
2970   return c;
2971 }
2972 
2973 //------------------------------ short_circuit_if -------------------------------------
2974 // Force the iff control output to be the live_proj
2975 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) {
2976   guarantee(live_proj != nullptr, "null projection");
2977   int proj_con = live_proj->_con;
2978   assert(proj_con == 0 || proj_con == 1, "false or true projection");
2979   Node *con = _igvn.intcon(proj_con);
2980   set_ctrl(con, C->root());
2981   if (iff) {
2982     iff->set_req(1, con);
2983   }
2984   return con;
2985 }
2986 
//------------------------------ insert_if_before_proj -------------------------------------
// Insert a new if before an if projection (* - new node)
//
// Builds Cmp[IU](left, right) feeding a Bool with mask "relop", and a new
// If (or RangeCheck, matching the original if's opcode) wired in between the
// original if and "proj", so both tests must pass to reach "proj".
// Returns the exit projection of the newly inserted if.
//
// before
//           if(test)
//           /     \
//          v       v
//    other-proj   proj (arg)
//
// after
//           if(test)
//           /     \
//          /       v
//         |      * proj-clone
//         v          |
//    other-proj      v
//                * new_if(relop(cmp[IU](left,right)))
//                  /  \
//                 v    v
//         * new-proj  proj
//         (returned)
//
ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) {
  IfNode* iff = proj->in(0)->as_If();
  IdealLoopTree *loop = get_loop(proj);
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  uint ddepth = dom_depth(proj);

  // Both nodes get their edges rewritten below; notify IGVN before mutating.
  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, nullptr);  // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
  register_node(proj2, loop, iff, ddepth);

  // "Signed" selects CmpI, otherwise an unsigned CmpU is built.
  Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right);
  register_node(cmp, loop, proj2, ddepth);

  BoolNode* bol = new BoolNode(cmp, relop);
  register_node(bol, loop, proj2, ddepth);

  // Preserve the original if's kind (If vs. RangeCheck) as well as its
  // probability and frequency.
  int opcode = iff->Opcode();
  assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
  IfNode* new_if = (opcode == Op_If) ? new IfNode(proj2, bol, iff->_prob, iff->_fcnt):
    new RangeCheckNode(proj2, bol, iff->_prob, iff->_fcnt);
  register_node(new_if, loop, proj2, ddepth);

  proj->set_req(0, new_if); // reattach
  set_idom(proj, new_if, ddepth);

  // The exit projection of the new if mirrors the original's "other" projection
  // and keeps the other projection's loop membership.
  ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj();
  guarantee(new_exit != nullptr, "null exit node");
  register_node(new_exit, get_loop(other_proj), new_if, ddepth);

  return new_exit;
}
3043 
//------------------------------ insert_region_before_proj -------------------------------------
// Insert a region before an if projection (* - new node)
//
// The returned region initially merges only the cloned projection; callers
// can add_req() further control inputs to route extra paths to "proj".
//
// before
//           if(test)
//          /      |
//         v       |
//       proj      v
//               other-proj
//
// after
//           if(test)
//          /      |
//         v       |
// * proj-clone    v
//         |     other-proj
//         v
// * new-region
//         |
//         v
// *      dum_if
//       /     \
//      v       \
// * dum-proj    v
//              proj
//
RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
  IfNode* iff = proj->in(0)->as_If();
  IdealLoopTree *loop = get_loop(proj);
  ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
  uint ddepth = dom_depth(proj);

  // Both nodes get their edges rewritten below; notify IGVN before mutating.
  _igvn.rehash_node_delayed(iff);
  _igvn.rehash_node_delayed(proj);

  proj->set_req(0, nullptr);  // temporary disconnect
  ProjNode* proj2 = proj_clone(proj, iff);
  register_node(proj2, loop, iff, ddepth);

  RegionNode* reg = new RegionNode(2);
  reg->set_req(1, proj2);
  register_node(reg, loop, iff, ddepth);

  // Dummy if whose condition is the constant matching "proj", so "proj"
  // remains the always-taken path (short_circuit_if with a null iff just
  // returns that constant).
  IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt);
  register_node(dum_if, loop, reg, ddepth);

  proj->set_req(0, dum_if); // reattach
  set_idom(proj, dum_if, ddepth);

  // Never-taken projection for the other side of the dummy if.
  ProjNode* dum_proj = proj_clone(other_proj, dum_if);
  register_node(dum_proj, loop, dum_if, ddepth);

  return reg;
}
3098 
//------------------------------ insert_cmpi_loop_exit -------------------------------------
// Clone a signed compare loop exit from an unsigned compare and
// insert it before the unsigned cmp on the stay-in-loop path.
// All new nodes inserted in the dominator tree between the original
// if and its projections.  The original if test is replaced with
// a constant to force the stay-in-loop path.
//
// This is done to make sure that the original if and its projections
// still dominate the same set of control nodes, that the ctrl() relation
// from data nodes to them is preserved, and that their loop nesting is
// preserved.
//
// Returns the new signed IfNode, or null when the transformation does not apply.
//
// before
//          if(i <u limit)    unsigned compare loop exit
//         /       |
//        v        v
//   exit-proj   stay-in-loop-proj
//
// after
//          if(stay-in-loop-const)  original if
//         /       |
//        /        v
//       /  if(i <  limit)    new signed test
//      /  /       |
//     /  /        v
//    /  /  if(i <u limit)    new cloned unsigned test
//   /  /   /      |
//   v  v  v       |
//    region       |
//        |        |
//      dum-if     |
//     /  |        |
// ether  |        |
//        v        v
//   exit-proj   stay-in-loop-proj
//
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) {
  const bool Signed   = true;
  const bool Unsigned = false;

  // Only handle the canonical shape: Bool(lt) over a CmpU, on a test with a
  // recognizable induction-variable stride.
  BoolNode* bol = if_cmpu->in(1)->as_Bool();
  if (bol->_test._test != BoolTest::lt) return nullptr;
  CmpNode* cmpu = bol->in(1)->as_Cmp();
  if (cmpu->Opcode() != Op_CmpU) return nullptr;
  int stride = stride_of_possible_iv(if_cmpu);
  if (stride == 0) return nullptr;

  Node* lp_proj = stay_in_loop(if_cmpu, loop);
  guarantee(lp_proj != nullptr, "null loop node");

  ProjNode* lp_continue = lp_proj->as_Proj();
  ProjNode* lp_exit     = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj();
  if (!lp_exit->is_IfFalse()) {
    // The loop exit condition is (i <u limit) ==> (i >= 0 && i < limit).
    // We therefore can't add a single exit condition.
    return nullptr;
  }
  // The loop exit condition is !(i <u limit) ==> (i < 0 || i >= limit).
  // Split out the exit condition (i < 0) for stride < 0 or (i >= limit) for stride > 0.
  Node* limit = nullptr;
  if (stride > 0) {
    limit = cmpu->in(2);
  } else {
    limit = _igvn.makecon(TypeInt::ZERO);
    set_ctrl(limit, C->root());
  }
  // Create a new region on the exit path
  RegionNode* reg = insert_region_before_proj(lp_exit);
  guarantee(reg != nullptr, "null region node");

  // Clone the if-cmpu-true-false using a signed compare
  BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
  ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue);
  reg->add_req(cmpi_exit);

  // Clone the if-cmpu-true-false
  BoolTest::mask rel_u = bol->_test._test;
  ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue);
  reg->add_req(cmpu_exit);

  // Force original if to stay in loop.
  short_circuit_if(if_cmpu, lp_continue);

  return cmpi_exit->in(0)->as_If();
}
3184 
//------------------------------ remove_cmpi_loop_exit -------------------------------------
// Remove a previously inserted signed compare loop exit.
// "if_cmp" is the signed test created by insert_cmpi_loop_exit; its condition
// is replaced by a constant so the stay-in-loop projection is always taken,
// letting IGVN fold the exit away.
void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) {
  Node* lp_proj = stay_in_loop(if_cmp, loop);
  // Sanity check the shape built by insert_cmpi_loop_exit: a CmpI test whose
  // stay-in-loop successor is the CmpU test.
  assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI &&
         stay_in_loop(lp_proj, loop)->is_If() &&
         stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu");
  // Constant condition selecting the stay-in-loop projection.
  Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO);
  set_ctrl(con, C->root());
  if_cmp->set_req(1, con);
}
3196 
//------------------------------ scheduled_nodelist -------------------------------------
// Create a post order schedule of nodes that are in the
// "member" set.  The list is returned in "sched".
// The first node in "sched" is the loop head, followed by
// nodes which have no inputs in the "member" set, and then
// followed by the nodes that have an immediate input dependence
// on a node in "sched".
void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) {

  assert(member.test(loop->_head->_idx), "loop head must be in member set");
  VectorSet visited;
  Node_Stack nstack(loop->_body.size());

  // The depth-first walk keeps the current node and its next out-edge index
  // in "n"/"idx" instead of on top of nstack.
  Node* n  = loop->_head;  // top of stack is cached in "n"
  uint idx = 0;
  visited.set(n->_idx);

  // Initially push all with no inputs from within member set
  for(uint i = 0; i < loop->_body.size(); i++ ) {
    Node *elt = loop->_body.at(i);
    if (member.test(elt->_idx)) {
      // Does "elt" have any input (other than itself) inside the member set?
      bool found = false;
      for (uint j = 0; j < elt->req(); j++) {
        Node* def = elt->in(j);
        if (def && member.test(def->_idx) && def != elt) {
          found = true;
          break;
        }
      }
      if (!found && elt != loop->_head) {
        // No member inputs: seed the DFS with this node.
        nstack.push(n, idx);
        n = elt;
        assert(!visited.test(n->_idx), "not seen yet");
        visited.set(n->_idx);
      }
    }
  }

  // traverse out's that are in the member set
  while (true) {
    if (idx < n->outcnt()) {
      Node* use = n->raw_out(idx);
      idx++;
      if (!visited.test_set(use->_idx)) {
        if (member.test(use->_idx)) {
          // Descend depth-first into an unvisited member use.
          nstack.push(n, idx);
          n = use;
          idx = 0;
        }
      }
    } else {
      // All outputs processed
      // Post-order: a node is emitted only after all of its member uses.
      sched.push(n);
      if (nstack.is_empty()) break;
      n   = nstack.node();
      idx = nstack.index();
      nstack.pop();
    }
  }
}
3257 
3258 
3259 //------------------------------ has_use_in_set -------------------------------------
3260 // Has a use in the vector set
3261 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) {
3262   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3263     Node* use = n->fast_out(j);
3264     if (vset.test(use->_idx)) {
3265       return true;
3266     }
3267   }
3268   return false;
3269 }
3270 
3271 
3272 //------------------------------ has_use_internal_to_set -------------------------------------
3273 // Has use internal to the vector set (ie. not in a phi at the loop head)
3274 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) {
3275   Node* head  = loop->_head;
3276   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3277     Node* use = n->fast_out(j);
3278     if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) {
3279       return true;
3280     }
3281   }
3282   return false;
3283 }
3284 
3285 
//------------------------------ clone_for_use_outside_loop -------------------------------------
// clone "n" for uses that are outside of loop
// Returns the number of clones created, or -1 if the compile's node budget
// would be exceeded.
int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
  int cloned = 0;
  assert(worklist.size() == 0, "should be empty");
  // Collect every user of "n" whose controlling block lies outside the loop.
  for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    Node* use = n->fast_out(j);
    if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) {
      worklist.push(use);
    }
  }

  // Bail out when one clone per use could blow the node limit.
  if (C->check_node_count(worklist.size() + NodeLimitFudgeFactor,
                          "Too many clones required in clone_for_use_outside_loop in partial peeling")) {
    return -1;
  }

  while( worklist.size() ) {
    Node *use = worklist.pop();
    if (!has_node(use) || use->in(0) == C->top()) continue;  // skip dead uses
    // Locate the input edge of "use" that refers to "n".
    uint j;
    for (j = 0; j < use->req(); j++) {
      if (use->in(j) == n) break;
    }
    assert(j < use->req(), "must be there");

    // clone "n" and insert it between the inputs of "n" and the use outside the loop
    Node* n_clone = n->clone();
    _igvn.replace_input_of(use, j, n_clone);
    cloned++;
    Node* use_c;
    if (!use->is_Phi()) {
      use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
    } else {
      // Use in a phi is considered a use in the associated predecessor block
      use_c = use->in(0)->in(j);
    }
    // The clone lives at its use's control, which must be outside the loop.
    set_ctrl(n_clone, use_c);
    assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
    get_loop(use_c)->_body.push(n_clone);
    _igvn.register_new_node_with_optimizer(n_clone);
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx);
    }
#endif
  }
  return cloned;
}
3335 
3336 
//------------------------------ clone_for_special_use_inside_loop -------------------------------------
// clone "n" for special uses that are in the not_peeled region.
// If these def-uses occur in separate blocks, the code generator
// marks the method as not compilable.  For example, if a "BoolNode"
// is in a different basic block than the "IfNode" that uses it, then
// the compilation is aborted in the code generator.
void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                                        VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) {
  // Phis and loads are never cloned by this routine.
  if (n->is_Phi() || n->is_Load()) {
    return;
  }
  assert(worklist.size() == 0, "should be empty");
  // Gather If/CMove/Bool users in the not-peeled region that consume "n"
  // through their test input (input 1).
  for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    Node* use = n->fast_out(j);
    if ( not_peel.test(use->_idx) &&
         (use->is_If() || use->is_CMove() || use->is_Bool()) &&
         use->in(1) == n)  {
      worklist.push(use);
    }
  }
  if (worklist.size() > 0) {
    // clone "n" and insert it between inputs of "n" and the use
    Node* n_clone = n->clone();
    loop->_body.push(n_clone);
    _igvn.register_new_node_with_optimizer(n_clone);
    set_ctrl(n_clone, get_ctrl(n));
    // Record the clone as sunk into the not-peeled region.
    sink_list.push(n_clone);
    not_peel.set(n_clone->_idx);
#ifndef PRODUCT
    if (TracePartialPeeling) {
      tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx);
    }
#endif
    // Rewire every edge from "n" in the collected users to the clone.
    while( worklist.size() ) {
      Node *use = worklist.pop();
      _igvn.rehash_node_delayed(use);
      for (uint j = 1; j < use->req(); j++) {
        if (use->in(j) == n) {
          use->set_req(j, n_clone);
        }
      }
    }
  }
}
3381 
3382 
//------------------------------ insert_phi_for_loop -------------------------------------
// Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) {
  // Build a candidate phi merging the entry value with the back-edge value.
  Node *phi = PhiNode::make(lp, back_edge_val);
  phi->set_req(LoopNode::EntryControl, lp_entry_val);
  // Use existing phi if it already exists
  Node *hit = _igvn.hash_find_insert(phi);
  if( hit == nullptr ) {
    // No identical phi in the hash table: keep ours and pin it at the loop head.
    _igvn.register_new_node_with_optimizer(phi);
    set_ctrl(phi, lp);
  } else {
    // Remove the new phi from the graph and use the hit
    _igvn.remove_dead_node(phi);
    phi = hit;
  }
  _igvn.replace_input_of(use, idx, phi);
}
3400 
3401 #ifdef ASSERT
3402 //------------------------------ is_valid_loop_partition -------------------------------------
3403 // Validate the loop partition sets: peel and not_peel
3404 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list,
3405                                               VectorSet& not_peel ) {
3406   uint i;
3407   // Check that peel_list entries are in the peel set
3408   for (i = 0; i < peel_list.size(); i++) {
3409     if (!peel.test(peel_list.at(i)->_idx)) {
3410       return false;
3411     }
3412   }
3413   // Check at loop members are in one of peel set or not_peel set
3414   for (i = 0; i < loop->_body.size(); i++ ) {
3415     Node *def  = loop->_body.at(i);
3416     uint di = def->_idx;
3417     // Check that peel set elements are in peel_list
3418     if (peel.test(di)) {
3419       if (not_peel.test(di)) {
3420         return false;
3421       }
3422       // Must be in peel_list also
3423       bool found = false;
3424       for (uint j = 0; j < peel_list.size(); j++) {
3425         if (peel_list.at(j)->_idx == di) {
3426           found = true;
3427           break;
3428         }
3429       }
3430       if (!found) {
3431         return false;
3432       }
3433     } else if (not_peel.test(di)) {
3434       if (peel.test(di)) {
3435         return false;
3436       }
3437     } else {
3438       return false;
3439     }
3440   }
3441   return true;
3442 }
3443 
3444 //------------------------------ is_valid_clone_loop_exit_use -------------------------------------
3445 // Ensure a use outside of loop is of the right form
3446 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) {
3447   Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3448   return (use->is_Phi() &&
3449           use_c->is_Region() && use_c->req() == 3 &&
3450           (use_c->in(exit_idx)->Opcode() == Op_IfTrue ||
3451            use_c->in(exit_idx)->Opcode() == Op_IfFalse ||
3452            use_c->in(exit_idx)->Opcode() == Op_JumpProj) &&
3453           loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) );
3454 }
3455 
3456 //------------------------------ is_valid_clone_loop_form -------------------------------------
3457 // Ensure that all uses outside of loop are of the right form
3458 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
3459                                                uint orig_exit_idx, uint clone_exit_idx) {
3460   uint len = peel_list.size();
3461   for (uint i = 0; i < len; i++) {
3462     Node *def = peel_list.at(i);
3463 
3464     for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3465       Node *use = def->fast_out(j);
3466       Node *use_c = has_ctrl(use) ? get_ctrl(use) : use;
3467       if (!loop->is_member(get_loop(use_c))) {
3468         // use is not in the loop, check for correct structure
3469         if (use->in(0) == def) {
3470           // Okay
3471         } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) {
3472           return false;
3473         }
3474       }
3475     }
3476   }
3477   return true;
3478 }
3479 #endif
3480 
3481 //------------------------------ partial_peel -------------------------------------
3482 // Partially peel (aka loop rotation) the top portion of a loop (called
3483 // the peel section below) by cloning it and placing one copy just before
3484 // the new loop head and the other copy at the bottom of the new loop.
3485 //
3486 //    before                       after                where it came from
3487 //
3488 //    stmt1                        stmt1
3489 //  loop:                          stmt2                     clone
3490 //    stmt2                        if condA goto exitA       clone
3491 //    if condA goto exitA        new_loop:                   new
3492 //    stmt3                        stmt3                     clone
3493 //    if !condB goto loop          if condB goto exitB       clone
3494 //  exitB:                         stmt2                     orig
3495 //    stmt4                        if !condA goto new_loop   orig
3496 //  exitA:                         goto exitA
3497 //                               exitB:
3498 //                                 stmt4
3499 //                               exitA:
3500 //
3501 // Step 1: find the cut point: an exit test on probable
3502 //         induction variable.
3503 // Step 2: schedule (with cloning) operations in the peel
3504 //         section that can be executed after the cut into
3505 //         the section that is not peeled.  This may need
3506 //         to clone operations into exit blocks.  For
3507 //         instance, a reference to A[i] in the not-peel
3508 //         section and a reference to B[i] in an exit block
3509 //         may cause a left-shift of i by 2 to be placed
3510 //         in the peel block.  This step will clone the left
3511 //         shift into the exit block and sink the left shift
3512 //         from the peel to the not-peel section.
3513 // Step 3: clone the loop, retarget the control, and insert
3514 //         phis for values that are live across the new loop
3515 //         head.  This is very dependent on the graph structure
3516 //         from clone_loop.  It creates region nodes for
3517 //         exit control and associated phi nodes for values
3518 //         flow out of the loop through that exit.  The region
3519 //         node is dominated by the clone's control projection.
3520 //         So the clone's peel section is placed before the
//         new loop head, and the clone's not-peel section
//         forms the top part of the new loop.  The original
3523 //         peel section forms the tail of the new loop.
3524 // Step 4: update the dominator tree and recompute the
3525 //         dominator depth.
3526 //
3527 //                   orig
3528 //
3529 //                   stmt1
3530 //                     |
3531 //                     v
3532 //                 predicates
3533 //                     |
3534 //                     v
3535 //                   loop<----+
3536 //                     |      |
3537 //                   stmt2    |
3538 //                     |      |
3539 //                     v      |
3540 //                    ifA     |
3541 //                   / |      |
3542 //                  v  v      |
3543 //               false true   ^  <-- last_peel
3544 //               /     |      |
3545 //              /   ===|==cut |
3546 //             /     stmt3    |  <-- first_not_peel
3547 //            /        |      |
3548 //            |        v      |
3549 //            v       ifB     |
3550 //          exitA:   / \      |
3551 //                  /   \     |
3552 //                 v     v    |
3553 //               false true   |
3554 //               /       \    |
3555 //              /         ----+
3556 //             |
3557 //             v
3558 //           exitB:
3559 //           stmt4
3560 //
3561 //
3562 //            after clone loop
3563 //
3564 //                   stmt1
3565 //                     |
3566 //                     v
3567 //                predicates
3568 //                 /       \
3569 //        clone   /         \   orig
3570 //               /           \
3571 //              /             \
3572 //             v               v
3573 //   +---->loop                loop<----+
3574 //   |      |                    |      |
3575 //   |    stmt2                stmt2    |
3576 //   |      |                    |      |
3577 //   |      v                    v      |
3578 //   |      ifA                 ifA     |
3579 //   |      | \                / |      |
3580 //   |      v  v              v  v      |
3581 //   ^    true  false      false true   ^  <-- last_peel
3582 //   |      |   ^   \       /    |      |
3583 //   | cut==|==  \   \     /  ===|==cut |
3584 //   |    stmt3   \   \   /    stmt3    |  <-- first_not_peel
3585 //   |      |    dom   | |       |      |
3586 //   |      v      \  1v v2      v      |
3587 //   |      ifB     regionA     ifB     |
3588 //   |      / \        |       / \      |
3589 //   |     /   \       v      /   \     |
3590 //   |    v     v    exitA:  v     v    |
3591 //   |    true  false      false true   |
3592 //   |    /     ^   \      /       \    |
3593 //   +----       \   \    /         ----+
3594 //               dom  \  /
3595 //                 \  1v v2
3596 //                  regionB
3597 //                     |
3598 //                     v
3599 //                   exitB:
3600 //                   stmt4
3601 //
3602 //
3603 //           after partial peel
3604 //
3605 //                  stmt1
3606 //                     |
3607 //                     v
3608 //                predicates
3609 //                 /
3610 //        clone   /             orig
3611 //               /          TOP
3612 //              /             \
3613 //             v               v
3614 //    TOP->loop                loop----+
3615 //          |                    |      |
3616 //        stmt2                stmt2    |
3617 //          |                    |      |
3618 //          v                    v      |
3619 //          ifA                 ifA     |
3620 //          | \                / |      |
3621 //          v  v              v  v      |
3622 //        true  false      false true   |     <-- last_peel
3623 //          |   ^   \       /    +------|---+
3624 //  +->newloop   \   \     /  === ==cut |   |
3625 //  |     stmt3   \   \   /     TOP     |   |
3626 //  |       |    dom   | |      stmt3   |   | <-- first_not_peel
3627 //  |       v      \  1v v2      v      |   |
3628 //  |       ifB     regionA     ifB     ^   v
3629 //  |       / \        |       / \      |   |
3630 //  |      /   \       v      /   \     |   |
3631 //  |     v     v    exitA:  v     v    |   |
3632 //  |     true  false      false true   |   |
3633 //  |     /     ^   \      /       \    |   |
3634 //  |    |       \   \    /         v   |   |
3635 //  |    |       dom  \  /         TOP  |   |
3636 //  |    |         \  1v v2             |   |
3637 //  ^    v          regionB             |   |
3638 //  |    |             |                |   |
3639 //  |    |             v                ^   v
3640 //  |    |           exitB:             |   |
3641 //  |    |           stmt4              |   |
3642 //  |    +------------>-----------------+   |
3643 //  |                                       |
3644 //  +-----------------<---------------------+
3645 //
3646 //
3647 //              final graph
3648 //
3649 //                  stmt1
3650 //                    |
3651 //                    v
3652 //                predicates
3653 //                    |
3654 //                    v
3655 //                  stmt2 clone
3656 //                    |
3657 //                    v
3658 //         ........> ifA clone
3659 //         :        / |
3660 //        dom      /  |
3661 //         :      v   v
3662 //         :  false   true
3663 //         :  |       |
3664 //         :  |       v
3665 //         :  |    newloop<-----+
3666 //         :  |        |        |
3667 //         :  |     stmt3 clone |
3668 //         :  |        |        |
3669 //         :  |        v        |
3670 //         :  |       ifB       |
3671 //         :  |      / \        |
3672 //         :  |     v   v       |
3673 //         :  |  false true     |
3674 //         :  |   |     |       |
3675 //         :  |   v    stmt2    |
3676 //         :  | exitB:  |       |
3677 //         :  | stmt4   v       |
3678 //         :  |       ifA orig  |
3679 //         :  |      /  \       |
3680 //         :  |     /    \      |
3681 //         :  |    v     v      |
3682 //         :  |  false  true    |
3683 //         :  |  /        \     |
3684 //         :  v  v         -----+
3685 //          RegionA
3686 //             |
3687 //             v
3688 //           exitA
3689 //
3690 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
3691 
3692   assert(!loop->_head->is_CountedLoop(), "Non-counted loop only");
3693   if (!loop->_head->is_Loop()) {
3694     return false;
3695   }
3696   LoopNode *head = loop->_head->as_Loop();
3697 
3698   if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) {
3699     return false;
3700   }
3701 
3702   // Check for complex exit control
3703   for (uint ii = 0; ii < loop->_body.size(); ii++) {
3704     Node *n = loop->_body.at(ii);
3705     int opc = n->Opcode();
3706     if (n->is_Call()        ||
3707         opc == Op_Catch     ||
3708         opc == Op_CatchProj ||
3709         opc == Op_Jump      ||
3710         opc == Op_JumpProj) {
3711 #ifndef PRODUCT
3712       if (TracePartialPeeling) {
3713         tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
3714       }
3715 #endif
3716       return false;
3717     }
3718   }
3719 
3720   int dd = dom_depth(head);
3721 
3722   // Step 1: find cut point
3723 
3724   // Walk up dominators to loop head looking for first loop exit
3725   // which is executed on every path thru loop.
3726   IfNode *peel_if = nullptr;
3727   IfNode *peel_if_cmpu = nullptr;
3728 
3729   Node *iff = loop->tail();
3730   while (iff != head) {
3731     if (iff->is_If()) {
3732       Node *ctrl = get_ctrl(iff->in(1));
3733       if (ctrl->is_top()) return false; // Dead test on live IF.
3734       // If loop-varying exit-test, check for induction variable
3735       if (loop->is_member(get_loop(ctrl)) &&
3736           loop->is_loop_exit(iff) &&
3737           is_possible_iv_test(iff)) {
3738         Node* cmp = iff->in(1)->in(1);
3739         if (cmp->Opcode() == Op_CmpI) {
3740           peel_if = iff->as_If();
3741         } else {
3742           assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU");
3743           peel_if_cmpu = iff->as_If();
3744         }
3745       }
3746     }
3747     iff = idom(iff);
3748   }
3749 
3750   // Prefer signed compare over unsigned compare.
3751   IfNode* new_peel_if = nullptr;
3752   if (peel_if == nullptr) {
3753     if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) {
3754       return false;   // No peel point found
3755     }
3756     new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop);
3757     if (new_peel_if == nullptr) {
3758       return false;   // No peel point found
3759     }
3760     peel_if = new_peel_if;
3761   }
3762   Node* last_peel        = stay_in_loop(peel_if, loop);
3763   Node* first_not_peeled = stay_in_loop(last_peel, loop);
3764   if (first_not_peeled == nullptr || first_not_peeled == head) {
3765     return false;
3766   }
3767 
3768 #ifndef PRODUCT
3769   if (TraceLoopOpts) {
3770     tty->print("PartialPeel  ");
3771     loop->dump_head();
3772   }
3773 
3774   if (TracePartialPeeling) {
3775     tty->print_cr("before partial peel one iteration");
3776     Node_List wl;
3777     Node* t = head->in(2);
3778     while (true) {
3779       wl.push(t);
3780       if (t == head) break;
3781       t = idom(t);
3782     }
3783     while (wl.size() > 0) {
3784       Node* tt = wl.pop();
3785       tt->dump();
3786       if (tt == last_peel) tty->print_cr("-- cut --");
3787     }
3788   }
3789 #endif
3790   VectorSet peel;
3791   VectorSet not_peel;
3792   Node_List peel_list;
3793   Node_List worklist;
3794   Node_List sink_list;
3795 
3796   uint estimate = loop->est_loop_clone_sz(1);
3797   if (exceeding_node_budget(estimate)) {
3798     return false;
3799   }
3800 
3801   // Set of cfg nodes to peel are those that are executable from
3802   // the head through last_peel.
3803   assert(worklist.size() == 0, "should be empty");
3804   worklist.push(head);
3805   peel.set(head->_idx);
3806   while (worklist.size() > 0) {
3807     Node *n = worklist.pop();
3808     if (n != last_peel) {
3809       for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
3810         Node* use = n->fast_out(j);
3811         if (use->is_CFG() &&
3812             loop->is_member(get_loop(use)) &&
3813             !peel.test_set(use->_idx)) {
3814           worklist.push(use);
3815         }
3816       }
3817     }
3818   }
3819 
3820   // Set of non-cfg nodes to peel are those that are control
3821   // dependent on the cfg nodes.
3822   for (uint i = 0; i < loop->_body.size(); i++) {
3823     Node *n = loop->_body.at(i);
3824     Node *n_c = has_ctrl(n) ? get_ctrl(n) : n;
3825     if (peel.test(n_c->_idx)) {
3826       peel.set(n->_idx);
3827     } else {
3828       not_peel.set(n->_idx);
3829     }
3830   }
3831 
3832   // Step 2: move operations from the peeled section down into the
3833   //         not-peeled section
3834 
3835   // Get a post order schedule of nodes in the peel region
3836   // Result in right-most operand.
3837   scheduled_nodelist(loop, peel, peel_list);
3838 
3839   assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3840 
3841   // For future check for too many new phis
3842   uint old_phi_cnt = 0;
3843   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
3844     Node* use = head->fast_out(j);
3845     if (use->is_Phi()) old_phi_cnt++;
3846   }
3847 
3848 #ifndef PRODUCT
3849   if (TracePartialPeeling) {
3850     tty->print_cr("\npeeled list");
3851   }
3852 #endif
3853 
3854   // Evacuate nodes in peel region into the not_peeled region if possible
3855   bool too_many_clones = false;
3856   uint new_phi_cnt = 0;
3857   uint cloned_for_outside_use = 0;
3858   for (uint i = 0; i < peel_list.size();) {
3859     Node* n = peel_list.at(i);
3860 #ifndef PRODUCT
3861   if (TracePartialPeeling) n->dump();
3862 #endif
3863     bool incr = true;
3864     if (!n->is_CFG()) {
3865       if (has_use_in_set(n, not_peel)) {
3866         // If not used internal to the peeled region,
3867         // move "n" from peeled to not_peeled region.
3868         if (!has_use_internal_to_set(n, peel, loop)) {
3869           // if not pinned and not a load (which maybe anti-dependent on a store)
3870           // and not a CMove (Matcher expects only bool->cmove).
3871           if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) {
3872             int new_clones = clone_for_use_outside_loop(loop, n, worklist);
3873             if (new_clones == -1) {
3874               too_many_clones = true;
3875               break;
3876             }
3877             cloned_for_outside_use += new_clones;
3878             sink_list.push(n);
3879             peel.remove(n->_idx);
3880             not_peel.set(n->_idx);
3881             peel_list.remove(i);
3882             incr = false;
3883 #ifndef PRODUCT
3884             if (TracePartialPeeling) {
3885               tty->print_cr("sink to not_peeled region: %d newbb: %d",
3886                             n->_idx, get_ctrl(n)->_idx);
3887             }
3888 #endif
3889           }
3890         } else {
3891           // Otherwise check for special def-use cases that span
3892           // the peel/not_peel boundary such as bool->if
3893           clone_for_special_use_inside_loop(loop, n, not_peel, sink_list, worklist);
3894           new_phi_cnt++;
3895         }
3896       }
3897     }
3898     if (incr) i++;
3899   }
3900 
3901   estimate += cloned_for_outside_use + new_phi_cnt;
3902   bool exceed_node_budget = !may_require_nodes(estimate);
3903   bool exceed_phi_limit = new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta;
3904 
3905   if (too_many_clones || exceed_node_budget || exceed_phi_limit) {
3906 #ifndef PRODUCT
3907     if (TracePartialPeeling && exceed_phi_limit) {
3908       tty->print_cr("\nToo many new phis: %d  old %d new cmpi: %c",
3909                     new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F');
3910     }
3911 #endif
3912     if (new_peel_if != nullptr) {
3913       remove_cmpi_loop_exit(new_peel_if, loop);
3914     }
3915     // Inhibit more partial peeling on this loop
3916     assert(!head->is_partial_peel_loop(), "not partial peeled");
3917     head->mark_partial_peel_failed();
3918     if (cloned_for_outside_use > 0) {
3919       // Terminate this round of loop opts because
3920       // the graph outside this loop was changed.
3921       C->set_major_progress();
3922       return true;
3923     }
3924     return false;
3925   }
3926 
3927   // Step 3: clone loop, retarget control, and insert new phis
3928 
3929   // Create new loop head for new phis and to hang
3930   // the nodes being moved (sinked) from the peel region.
3931   LoopNode* new_head = new LoopNode(last_peel, last_peel);
3932   new_head->set_unswitch_count(head->unswitch_count()); // Preserve
3933   _igvn.register_new_node_with_optimizer(new_head);
3934   assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
3935   _igvn.replace_input_of(first_not_peeled, 0, new_head);
3936   set_loop(new_head, loop);
3937   loop->_body.push(new_head);
3938   not_peel.set(new_head->_idx);
3939   set_idom(new_head, last_peel, dom_depth(first_not_peeled));
3940   set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled));
3941 
3942   while (sink_list.size() > 0) {
3943     Node* n = sink_list.pop();
3944     set_ctrl(n, new_head);
3945   }
3946 
3947   assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition");
3948 
3949   clone_loop(loop, old_new, dd, IgnoreStripMined);
3950 
3951   const uint clone_exit_idx = 1;
3952   const uint orig_exit_idx  = 2;
3953   assert(is_valid_clone_loop_form(loop, peel_list, orig_exit_idx, clone_exit_idx), "bad clone loop");
3954 
3955   Node* head_clone             = old_new[head->_idx];
3956   LoopNode* new_head_clone     = old_new[new_head->_idx]->as_Loop();
3957   Node* orig_tail_clone        = head_clone->in(2);
3958 
3959   // Add phi if "def" node is in peel set and "use" is not
3960 
3961   for (uint i = 0; i < peel_list.size(); i++) {
3962     Node *def  = peel_list.at(i);
3963     if (!def->is_CFG()) {
3964       for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) {
3965         Node *use = def->fast_out(j);
3966         if (has_node(use) && use->in(0) != C->top() &&
3967             (!peel.test(use->_idx) ||
3968              (use->is_Phi() && use->in(0) == head)) ) {
3969           worklist.push(use);
3970         }
3971       }
3972       while( worklist.size() ) {
3973         Node *use = worklist.pop();
3974         for (uint j = 1; j < use->req(); j++) {
3975           Node* n = use->in(j);
3976           if (n == def) {
3977 
3978             // "def" is in peel set, "use" is not in peel set
3979             // or "use" is in the entry boundary (a phi) of the peel set
3980 
3981             Node* use_c = has_ctrl(use) ? get_ctrl(use) : use;
3982 
3983             if ( loop->is_member(get_loop( use_c )) ) {
3984               // use is in loop
3985               if (old_new[use->_idx] != nullptr) { // null for dead code
3986                 Node* use_clone = old_new[use->_idx];
3987                 _igvn.replace_input_of(use, j, C->top());
3988                 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
3989               }
3990             } else {
3991               assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format");
3992               // use is not in the loop, check if the live range includes the cut
3993               Node* lp_if = use_c->in(orig_exit_idx)->in(0);
3994               if (not_peel.test(lp_if->_idx)) {
3995                 assert(j == orig_exit_idx, "use from original loop");
3996                 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone );
3997               }
3998             }
3999           }
4000         }
4001       }
4002     }
4003   }
4004 
4005   // Step 3b: retarget control
4006 
4007   // Redirect control to the new loop head if a cloned node in
4008   // the not_peeled region has control that points into the peeled region.
4009   // This necessary because the cloned peeled region will be outside
4010   // the loop.
4011   //                            from    to
4012   //          cloned-peeled    <---+
4013   //    new_head_clone:            |    <--+
4014   //          cloned-not_peeled  in(0)    in(0)
4015   //          orig-peeled
4016 
4017   for (uint i = 0; i < loop->_body.size(); i++) {
4018     Node *n = loop->_body.at(i);
4019     if (!n->is_CFG()           && n->in(0) != nullptr        &&
4020         not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
4021       Node* n_clone = old_new[n->_idx];
4022       _igvn.replace_input_of(n_clone, 0, new_head_clone);
4023     }
4024   }
4025 
4026   // Backedge of the surviving new_head (the clone) is original last_peel
4027   _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
4028 
4029   // Cut first node in original not_peel set
4030   _igvn.rehash_node_delayed(new_head);                     // Multiple edge updates:
4031   new_head->set_req(LoopNode::EntryControl,    C->top());  //   use rehash_node_delayed / set_req instead of
4032   new_head->set_req(LoopNode::LoopBackControl, C->top());  //   multiple replace_input_of calls
4033 
4034   // Copy head_clone back-branch info to original head
4035   // and remove original head's loop entry and
4036   // clone head's back-branch
4037   _igvn.rehash_node_delayed(head); // Multiple edge updates
4038   head->set_req(LoopNode::EntryControl,    head_clone->in(LoopNode::LoopBackControl));
4039   head->set_req(LoopNode::LoopBackControl, C->top());
4040   _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
4041 
4042   // Similarly modify the phis
4043   for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
4044     Node* use = head->fast_out(k);
4045     if (use->is_Phi() && use->outcnt() > 0) {
4046       Node* use_clone = old_new[use->_idx];
4047       _igvn.rehash_node_delayed(use); // Multiple edge updates
4048       use->set_req(LoopNode::EntryControl,    use_clone->in(LoopNode::LoopBackControl));
4049       use->set_req(LoopNode::LoopBackControl, C->top());
4050       _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
4051     }
4052   }
4053 
4054   // Step 4: update dominator tree and dominator depth
4055 
4056   set_idom(head, orig_tail_clone, dd);
4057   recompute_dom_depth();
4058 
4059   // Inhibit more partial peeling on this loop
4060   new_head_clone->set_partial_peel_loop();
4061   C->set_major_progress();
4062   loop->record_for_igvn();
4063 
4064 #ifndef PRODUCT
4065   if (TracePartialPeeling) {
4066     tty->print_cr("\nafter partial peel one iteration");
4067     Node_List wl;
4068     Node* t = last_peel;
4069     while (true) {
4070       wl.push(t);
4071       if (t == head_clone) break;
4072       t = idom(t);
4073     }
4074     while (wl.size() > 0) {
4075       Node* tt = wl.pop();
4076       if (tt == head) tty->print_cr("orig head");
4077       else if (tt == new_head_clone) tty->print_cr("new head");
4078       else if (tt == head_clone) tty->print_cr("clone head");
4079       tt->dump();
4080     }
4081   }
4082 #endif
4083   return true;
4084 }
4085 
4086 // Transform:
4087 //
4088 // loop<-----------------+
4089 //  |                    |
4090 // stmt1 stmt2 .. stmtn  |
4091 //  |     |        |     |
4092 //  \     |       /      |
4093 //    v   v     v        |
4094 //       region          |
4095 //         |             |
4096 //     shared_stmt       |
4097 //         |             |
4098 //         v             |
4099 //         if            |
4100 //         / \           |
4101 //        |   -----------+
4102 //        v
4103 //
4104 // into:
4105 //
4106 //    loop<-------------------+
4107 //     |                      |
4108 //     v                      |
4109 // +->loop                    |
4110 // |   |                      |
4111 // |  stmt1 stmt2 .. stmtn    |
4112 // |   |     |        |       |
4113 // |   |      \       /       |
4114 // |   |       v     v        |
4115 // |   |        region1       |
4116 // |   |           |          |
4117 // |  shared_stmt shared_stmt |
4118 // |   |           |          |
4119 // |   v           v          |
4120 // |   if          if         |
4121 // |   /\          / \        |
4122 // +--   |         |   -------+
4123 //       \         /
4124 //        v       v
4125 //         region2
4126 //
4127 // (region2 is shown to merge mirrored projections of the loop exit
4128 // ifs to make the diagram clearer but they really merge the same
4129 // projection)
4130 //
4131 // Conditions for this transformation to trigger:
4132 // - the path through stmt1 is frequent enough
4133 // - the inner loop will be turned into a counted loop after transformation
bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old_new) {
  // Transformation is gated by the DuplicateBackedge flag.
  if (!DuplicateBackedge) {
    return false;
  }
  // Counted loops already have the desired shape; only plain Loop heads are
  // candidates (except under StressDuplicateBackedge, for wider test coverage).
  assert(!loop->_head->is_CountedLoop() || StressDuplicateBackedge, "Non-counted loop only");
  if (!loop->_head->is_Loop()) {
    return false;
  }

  // Bail out early if cloning one loop body's worth of nodes would exceed
  // the remaining node budget.
  uint estimate = loop->est_loop_clone_sz(1);
  if (exceeding_node_budget(estimate)) {
    return false;
  }

  LoopNode *head = loop->_head->as_Loop();

  Node* region = nullptr;      // Merge point of stmt1..stmtn (region in the diagram above)
  IfNode* exit_test = nullptr; // Loop exit test whose profile count is split between the copies
  uint inner;                  // Index of region's input on the frequent path
  float f;                     // Frequency of that path (only meaningful when exit_test != nullptr)
  if (StressDuplicateBackedge) {
    // Stress mode: ignore profile data and pick the region closest to the
    // loop head on the idom chain walking up from the backedge.
    if (head->is_strip_mined()) {
      return false;
    }
    Node* c = head->in(LoopNode::LoopBackControl);

    while (c != head) {
      if (c->is_Region()) {
        region = c;
      }
      c = idom(c);
    }

    if (region == nullptr) {
      return false;
    }

    // Arbitrarily treat input 1 as the "frequent" path in stress mode.
    inner = 1;
  } else {
    // Is the shape of the loop that of a counted loop...
    Node* back_control = loop_exit_control(head, loop);
    if (back_control == nullptr) {
      return false;
    }

    BoolTest::mask bt = BoolTest::illegal;
    float cl_prob = 0;
    Node* incr = nullptr;
    Node* limit = nullptr;
    Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
    if (cmp == nullptr || cmp->Opcode() != Op_CmpI) {
      return false;
    }

    // With an extra phi for the candidate iv?
    // Or the region node is the loop head
    if (!incr->is_Phi() || incr->in(0) == head) {
      return false;
    }

    PathFrequency pf(head, this);
    region = incr->in(0);

    // Go over all paths for the extra phi's region and see if that
    // path is frequent enough and would match the expected iv shape
    // if the extra phi is removed
    inner = 0;
    for (uint i = 1; i < incr->req(); ++i) {
      Node* in = incr->in(i);
      Node* trunc1 = nullptr;
      Node* trunc2 = nullptr;
      const TypeInteger* iv_trunc_t = nullptr;
      Node* orig_in = in;
      // Candidate increment may be wrapped in a truncation (byte/short/char ivs).
      if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) {
        continue;
      }
      assert(in->Opcode() == Op_AddI, "wrong increment code");
      Node* xphi = nullptr;
      Node* stride = loop_iv_stride(in, loop, xphi);

      if (stride == nullptr) {
        continue;
      }

      // The increment (through the truncation, if any) must feed a proper
      // iv phi at the loop head.
      PhiNode* phi = loop_iv_phi(xphi, nullptr, head, loop);
      if (phi == nullptr ||
          (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
          (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
        return false;
      }

      // Accept the first path that is taken more than half the time.
      f = pf.to(region->in(i));
      if (f > 0.5) {
        inner = i;
        break;
      }
    }

    if (inner == 0) {
      return false;
    }

    exit_test = back_control->in(0)->as_If();
  }

  // NOTE(review): bail out when the region is immediately below a Catch —
  // presumably control cannot be cloned/redirected across an exception
  // dispatch; confirm exact rationale.
  if (idom(region)->is_Catch()) {
    return false;
  }

  // Collect all control nodes that need to be cloned (shared_stmt in the diagram)
  Unique_Node_List wq;
  wq.push(head->in(LoopNode::LoopBackControl));
  for (uint i = 0; i < wq.size(); i++) {
    Node* c = wq.at(i);
    assert(get_loop(c) == loop, "not in the right loop?");
    if (c->is_Region()) {
      if (c != region) {
        // Regions other than the candidate merge point: walk all inputs.
        for (uint j = 1; j < c->req(); ++j) {
          wq.push(c->in(j));
        }
      }
    } else {
      wq.push(c->in(0));
    }
    assert(!is_dominator(c, region) || c == region, "shouldn't go above region");
  }

  // NOTE(review): region_dom appears unused in the remainder of this
  // function — candidate for removal.
  Node* region_dom = idom(region);

  // Can't do the transformation if this would cause a membar pair to
  // be split
  for (uint i = 0; i < wq.size(); i++) {
    Node* c = wq.at(i);
    if (c->is_MemBar() && (c->as_MemBar()->trailing_store() || c->as_MemBar()->trailing_load_store())) {
      assert(c->as_MemBar()->leading_membar()->trailing_membar() == c, "bad membar pair");
      if (!wq.member(c->as_MemBar()->leading_membar())) {
        return false;
      }
    }
  }

  // Collect data nodes that need to be cloned as well
  int dd = dom_depth(head);

  for (uint i = 0; i < loop->_body.size(); ++i) {
    Node* n = loop->_body.at(i);
    if (has_ctrl(n)) {
      Node* c = get_ctrl(n);
      if (wq.member(c)) {
        // Data node pinned in the cloned control region: clone it too.
        wq.push(n);
      }
    } else {
      // Refresh dominator depth of control nodes that stay in place.
      set_idom(n, idom(n), dd);
    }
  }

  // clone shared_stmt
  clone_loop_body(wq, old_new, nullptr);

  // Disconnect the frequent path from the clone; the original region keeps it.
  Node* region_clone = old_new[region->_idx];
  region_clone->set_req(inner, C->top());
  set_idom(region, region->in(inner), dd);

  // Prepare the outer loop
  Node* outer_head = new LoopNode(head->in(LoopNode::EntryControl), old_new[head->in(LoopNode::LoopBackControl)->_idx]);
  register_control(outer_head, loop->_parent, outer_head->in(LoopNode::EntryControl));
  _igvn.replace_input_of(head, LoopNode::EntryControl, outer_head);
  set_idom(head, outer_head, dd);

  fix_body_edges(wq, loop, old_new, dd, loop->_parent, true);

  // Make one of the shared_stmt copies only reachable from stmt1, the
  // other only from stmt2..stmtn.
  Node* dom = nullptr;
  for (uint i = 1; i < region->req(); ++i) {
    if (i != inner) {
      _igvn.replace_input_of(region, i, C->top());
    }
    Node* in = region_clone->in(i);
    if (in->is_top()) {
      continue;
    }
    // New idom of the cloned region is the lca of its surviving inputs.
    if (dom == nullptr) {
      dom = in;
    } else {
      dom = dom_lca(dom, in);
    }
  }

  set_idom(region_clone, dom, dd);

  // Set up the outer loop
  for (uint i = 0; i < head->outcnt(); i++) {
    Node* u = head->raw_out(i);
    if (u->is_Phi()) {
      // For each phi at the inner loop head, add a matching phi at the
      // outer head whose backedge value is the cloned backedge value
      // (or the original one if it was not cloned).
      Node* outer_phi = u->clone();
      outer_phi->set_req(0, outer_head);
      Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx];
      if (backedge == nullptr) {
        backedge = u->in(LoopNode::LoopBackControl);
      }
      outer_phi->set_req(LoopNode::LoopBackControl, backedge);
      register_new_node(outer_phi, outer_head);
      _igvn.replace_input_of(u, LoopNode::EntryControl, outer_phi);
    }
  }

  // create control and data nodes for out of loop uses (including region2)
  Node_List worklist;
  uint new_counter = C->unique();
  fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist);

  Node_List *split_if_set = nullptr;
  Node_List *split_bool_set = nullptr;
  Node_List *split_cex_set = nullptr;
  fix_data_uses(wq, loop, ControlAroundStripMined, head->is_strip_mined() ? loop->_parent : loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set);

  finish_clone_loop(split_if_set, split_bool_set, split_cex_set);

  // Split the exit test's profile count between original and clone in
  // proportion to the measured frequency f of the duplicated path.
  if (exit_test != nullptr) {
    float cnt = exit_test->_fcnt;
    if (cnt != COUNT_UNKNOWN) {
      exit_test->_fcnt = cnt * f;
      old_new[exit_test->_idx]->as_If()->_fcnt = cnt * (1 - f);
    }
  }

  C->set_major_progress();

  return true;
}
4365 
4366 // Having ReductionNodes in the loop is expensive. They need to recursively
4367 // fold together the vector values, for every vectorized loop iteration. If
4368 // we encounter the following pattern, we can vector accumulate the values
4369 // inside the loop, and only have a single UnorderedReduction after the loop.
4370 //
4371 // CountedLoop     init
4372 //          |        |
4373 //          +------+ | +-----------------------+
4374 //                 | | |                       |
4375 //                PhiNode (s)                  |
4376 //                  |                          |
4377 //                  |          Vector          |
4378 //                  |            |             |
4379 //               UnorderedReduction (first_ur) |
4380 //                  |                          |
4381 //                 ...         Vector          |
4382 //                  |            |             |
4383 //               UnorderedReduction (last_ur)  |
4384 //                       |                     |
4385 //                       +---------------------+
4386 //
4387 // We patch the graph to look like this:
4388 //
4389 // CountedLoop   identity_vector
4390 //         |         |
4391 //         +-------+ | +---------------+
4392 //                 | | |               |
4393 //                PhiNode (v)          |
4394 //                   |                 |
4395 //                   |         Vector  |
4396 //                   |           |     |
4397 //                 VectorAccumulator   |
4398 //                   |                 |
4399 //                  ...        Vector  |
4400 //                   |           |     |
4401 //      init       VectorAccumulator   |
4402 //        |          |     |           |
4403 //     UnorderedReduction  +-----------+
4404 //
4405 // We turned the scalar (s) Phi into a vectorized one (v). In the loop, we
4406 // use vector_accumulators, which do the same reductions, but only element
4407 // wise. This is a single operation per vector_accumulator, rather than many
4408 // for a UnorderedReduction. We can then reduce the last vector_accumulator
4409 // after the loop, and also reduce the init value into it.
// We cannot do this for all reductions. Some reductions do not allow the
// reordering of operations (for example float addition).
void PhaseIdealLoop::move_unordered_reduction_out_of_loop(IdealLoopTree* loop) {
  assert(!C->major_progress() && loop->is_counted() && loop->is_innermost(), "sanity");

  // Find all Phi nodes with UnorderedReduction on backedge.
  CountedLoopNode* cl = loop->_head->as_CountedLoop();
  for (DUIterator_Fast jmax, j = cl->fast_outs(jmax); j < jmax; j++) {
    Node* phi = cl->fast_out(j);
    // We have a phi with a single use, and a UnorderedReduction on the backedge.
    // (phi->in(2) is the value flowing around the loop backedge.)
    if (!phi->is_Phi() || phi->outcnt() != 1 || !phi->in(2)->is_UnorderedReduction()) {
      continue;
    }

    UnorderedReductionNode* last_ur = phi->in(2)->as_UnorderedReduction();

    // Determine types
    const TypeVect* vec_t = last_ur->vect_type();
    uint vector_length    = vec_t->length();
    BasicType bt          = vec_t->element_basic_type();
    const Type* bt_t      = Type::get_const_basic_type(bt);

    // Convert opcode from vector-reduction -> scalar -> normal-vector-op
    const int sopc        = VectorNode::scalar_opcode(last_ur->Opcode(), bt);
    const int vopc        = VectorNode::opcode(sopc, bt);
    if (!Matcher::match_rule_supported_vector(vopc, vector_length, bt)) {
        DEBUG_ONLY( last_ur->dump(); )
        assert(false, "do not have normal vector op for this reduction");
        continue; // not implemented -> fails
    }

    // Traverse up the chain of UnorderedReductions, checking that it loops back to
    // the phi. Check that all UnorderedReductions only have a single use, except for
    // the last (last_ur), which only has phi as a use in the loop, and all other uses
    // are outside the loop.
    UnorderedReductionNode* current = last_ur;
    UnorderedReductionNode* first_ur = nullptr;
    while (true) {
      assert(current->is_UnorderedReduction(), "sanity");

      // Expect no ctrl and a vector_input from within the loop.
      Node* ctrl = current->in(0);
      Node* vector_input = current->in(2);
      if (ctrl != nullptr || get_ctrl(vector_input) != cl) {
        DEBUG_ONLY( current->dump(1); )
        assert(false, "reduction has ctrl or bad vector_input");
        break; // Chain traversal fails.
      }

      assert(current->vect_type() != nullptr, "must have vector type");
      if (current->vect_type() != last_ur->vect_type()) {
        // Reductions do not have the same vector type (length and element type).
        break; // Chain traversal fails.
      }

      // Expect single use of UnorderedReduction, except for last_ur.
      if (current == last_ur) {
        // Expect all uses to be outside the loop, except phi.
        for (DUIterator_Fast kmax, k = current->fast_outs(kmax); k < kmax; k++) {
          Node* use = current->fast_out(k);
          if (use != phi && ctrl_or_self(use) == cl) {
            DEBUG_ONLY( current->dump(-1); )
            assert(false, "reduction has use inside loop");
            // Should not be allowed by SuperWord::mark_reductions
            return; // bail out of optimization
          }
        }
      } else {
        if (current->outcnt() != 1) {
          break; // Chain traversal fails.
        }
      }

      // Expect another UnorderedReduction or phi as the scalar input.
      Node* scalar_input = current->in(1);
      if (scalar_input->is_UnorderedReduction() &&
          scalar_input->Opcode() == current->Opcode()) {
        // Move up the UnorderedReduction chain.
        current = scalar_input->as_UnorderedReduction();
      } else if (scalar_input == phi) {
        // Chain terminates at phi.
        first_ur = current;
        current = nullptr;
        break; // Success.
      } else {
        // scalar_input is neither phi nor a matching reduction
        // Can for example be scalar reduction when we have
        // partial vectorization.
        break; // Chain traversal fails.
      }
    }
    if (current != nullptr) {
      // Chain traversal was not successful.
      continue;
    }
    assert(first_ur != nullptr, "must have successfully terminated chain traversal");

    // Build the identity value of the reduction (e.g. 0 for add, 1 for mul),
    // broadcast to a vector; it becomes the new phi's init value.
    Node* identity_scalar = ReductionNode::make_identity_con_scalar(_igvn, sopc, bt);
    set_ctrl(identity_scalar, C->root());
    VectorNode* identity_vector = VectorNode::scalar2vector(identity_scalar, vector_length, bt_t);
    register_new_node(identity_vector, C->root());
    assert(vec_t == identity_vector->vect_type(), "matching vector type");
    VectorNode::trace_new_vector(identity_vector, "UnorderedReduction");

    // Turn the scalar phi into a vector phi.
    _igvn.rehash_node_delayed(phi);
    Node* init = phi->in(1); // Remember init before replacing it.
    phi->set_req_X(1, identity_vector, &_igvn);
    phi->as_Type()->set_type(vec_t);
    _igvn.set_type(phi, vec_t);

    // Traverse down the chain of UnorderedReductions, and replace them with vector_accumulators.
    current = first_ur;
    while (true) {
      // Create vector_accumulator to replace current.
      Node* last_vector_accumulator = current->in(1);
      Node* vector_input            = current->in(2);
      VectorNode* vector_accumulator = VectorNode::make(vopc, last_vector_accumulator, vector_input, vec_t);
      register_new_node(vector_accumulator, cl);
      _igvn.replace_node(current, vector_accumulator);
      VectorNode::trace_new_vector(vector_accumulator, "UnorderedReduction");
      if (current == last_ur) {
        break;
      }
      // After replace_node, the next reduction in the (single-use) chain is
      // the unique out of the new accumulator.
      current = vector_accumulator->unique_out()->as_UnorderedReduction();
    }

    // Create post-loop reduction.
    // Reduces the final vector accumulator and folds in the original scalar init.
    Node* last_accumulator = phi->in(2);
    Node* post_loop_reduction = ReductionNode::make(sopc, nullptr, init, last_accumulator, bt);

    // Take over uses of last_accumulator that are not in the loop.
    for (DUIterator i = last_accumulator->outs(); last_accumulator->has_out(i); i++) {
      Node* use = last_accumulator->out(i);
      if (use != phi && use != post_loop_reduction) {
        assert(ctrl_or_self(use) != cl, "use must be outside loop");
        use->replace_edge(last_accumulator, post_loop_reduction,  &_igvn);
        // replace_edge removed an out edge of last_accumulator; step the
        // iterator back so no use is skipped.
        --i;
      }
    }
    register_new_node(post_loop_reduction, get_late_ctrl(post_loop_reduction, cl));
    VectorNode::trace_new_vector(post_loop_reduction, "UnorderedReduction");

    assert(last_accumulator->outcnt() == 2, "last_accumulator has 2 uses: phi and post_loop_reduction");
    assert(post_loop_reduction->outcnt() > 0, "should have taken over all non loop uses of last_accumulator");
    assert(phi->outcnt() == 1, "accumulator is the only use of phi");
  }
}