1 /*
   2  * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciMethodData.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "gc/shared/barrierSet.hpp"
  28 #include "gc/shared/c2/barrierSetC2.hpp"
  29 #include "libadt/vectset.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/connode.hpp"
  37 #include "opto/convertnode.hpp"
  38 #include "opto/divnode.hpp"
  39 #include "opto/idealGraphPrinter.hpp"
  40 #include "opto/loopnode.hpp"
  41 #include "opto/movenode.hpp"
  42 #include "opto/mulnode.hpp"
  43 #include "opto/opaquenode.hpp"
  44 #include "opto/predicates.hpp"
  45 #include "opto/rootnode.hpp"
  46 #include "opto/runtime.hpp"
  47 #include "opto/vectorization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "utilities/checkedCast.hpp"
  50 #include "utilities/powerOfTwo.hpp"
  51 
  52 //=============================================================================
  53 //--------------------------is_cloop_ind_var-----------------------------------
  54 // Determine if a node is a counted loop induction variable.
  55 // NOTE: The method is declared in "node.hpp".
  56 bool Node::is_cloop_ind_var() const {
  57   return (is_Phi() &&
  58           as_Phi()->region()->is_CountedLoop() &&
  59           as_Phi()->region()->as_CountedLoop()->phi() == this);
  60 }
  61 
  62 //=============================================================================
  63 //------------------------------dump_spec--------------------------------------
  64 // Dump special per-node info
  65 #ifndef PRODUCT
  66 void LoopNode::dump_spec(outputStream *st) const {
  67   RegionNode::dump_spec(st);
  68   if (is_inner_loop()) st->print( "inner " );
  69   if (is_partial_peel_loop()) st->print( "partial_peel " );
  70   if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
  71 }
  72 #endif
  73 
  74 //------------------------------is_valid_counted_loop-------------------------
  75 bool LoopNode::is_valid_counted_loop(BasicType bt) const {
  76   if (is_BaseCountedLoop() && as_BaseCountedLoop()->bt() == bt) {
  77     BaseCountedLoopNode*    l  = as_BaseCountedLoop();
  78     BaseCountedLoopEndNode* le = l->loopexit_or_null();
  79     if (le != nullptr &&
  80         le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
  81       Node* phi  = l->phi();
  82       Node* exit = le->proj_out_or_null(0 /* false */);
  83       if (exit != nullptr && exit->Opcode() == Op_IfFalse &&
  84           phi != nullptr && phi->is_Phi() &&
  85           phi->in(LoopNode::LoopBackControl) == l->incr() &&
  86           le->loopnode() == l && le->stride_is_con()) {
  87         return true;
  88       }
  89     }
  90   }
  91   return false;
  92 }
  93 
  94 //------------------------------get_early_ctrl---------------------------------
  95 // Compute earliest legal control
  96 Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
  97   assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
  98   uint i;
  99   Node *early;
 100   if (n->in(0) && !n->is_expensive()) {
 101     early = n->in(0);
 102     if (!early->is_CFG()) // Might be a non-CFG multi-def
 103       early = get_ctrl(early);        // So treat input as a straight data input
 104     i = 1;
 105   } else {
 106     early = get_ctrl(n->in(1));
 107     i = 2;
 108   }
 109   uint e_d = dom_depth(early);
 110   assert( early, "" );
 111   for (; i < n->req(); i++) {
 112     Node *cin = get_ctrl(n->in(i));
 113     assert( cin, "" );
 114     // Keep deepest dominator depth
 115     uint c_d = dom_depth(cin);
 116     if (c_d > e_d) {           // Deeper guy?
 117       early = cin;              // Keep deepest found so far
 118       e_d = c_d;
 119     } else if (c_d == e_d &&    // Same depth?
 120                early != cin) { // If not equal, must use slower algorithm
 121       // If same depth but not equal, one _must_ dominate the other
 122       // and we want the deeper (i.e., dominated) guy.
 123       Node *n1 = early;
 124       Node *n2 = cin;
 125       while (1) {
 126         n1 = idom(n1);          // Walk up until break cycle
 127         n2 = idom(n2);
 128         if (n1 == cin ||        // Walked early up to cin
 129             dom_depth(n2) < c_d)
 130           break;                // early is deeper; keep him
 131         if (n2 == early ||      // Walked cin up to early
 132             dom_depth(n1) < c_d) {
 133           early = cin;          // cin is deeper; keep him
 134           break;
 135         }
 136       }
 137       e_d = dom_depth(early);   // Reset depth register cache
 138     }
 139   }
 140 
 141   // Return earliest legal location
 142   assert(early == find_non_split_ctrl(early), "unexpected early control");
 143 
 144   if (n->is_expensive() && !_verify_only && !_verify_me) {
 145     assert(n->in(0), "should have control input");
 146     early = get_early_ctrl_for_expensive(n, early);
 147   }
 148 
 149   return early;
 150 }
 151 
 152 //------------------------------get_early_ctrl_for_expensive---------------------------------
 153 // Move node up the dominator tree as high as legal while still beneficial
 154 Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
 155   assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
 156   assert(OptimizeExpensiveOps, "optimization off?");
 157 
 158   Node* ctl = n->in(0);
 159   assert(ctl->is_CFG(), "expensive input 0 must be cfg");
 160   uint min_dom_depth = dom_depth(earliest);
 161 #ifdef ASSERT
 162   if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
 163     dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
 164     assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
 165   }
 166 #endif
 167   if (dom_depth(ctl) < min_dom_depth) {
 168     return earliest;
 169   }
 170 
 171   while (true) {
 172     Node* next = ctl;
 173     // Moving the node out of a loop on the projection of an If
 174     // confuses Loop Predication. So, once we hit a loop in an If branch
 175     // that doesn't branch to an UNC, we stop. The code that process
 176     // expensive nodes will notice the loop and skip over it to try to
 177     // move the node further up.
 178     if (ctl->is_CountedLoop() && ctl->in(1) != nullptr && ctl->in(1)->in(0) != nullptr && ctl->in(1)->in(0)->is_If()) {
 179       if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern()) {
 180         break;
 181       }
 182       next = idom(ctl->in(1)->in(0));
 183     } else if (ctl->is_Proj()) {
 184       // We only move it up along a projection if the projection is
 185       // the single control projection for its parent: same code path,
 186       // if it's a If with UNC or fallthrough of a call.
 187       Node* parent_ctl = ctl->in(0);
 188       if (parent_ctl == nullptr) {
 189         break;
 190       } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != nullptr) {
 191         next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
 192       } else if (parent_ctl->is_If()) {
 193         if (!ctl->as_Proj()->is_uncommon_trap_if_pattern()) {
 194           break;
 195         }
 196         assert(idom(ctl) == parent_ctl, "strange");
 197         next = idom(parent_ctl);
 198       } else if (ctl->is_CatchProj()) {
 199         if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
 200           break;
 201         }
 202         assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
 203         next = parent_ctl->in(0)->in(0)->in(0);
 204       } else {
 205         // Check if parent control has a single projection (this
 206         // control is the only possible successor of the parent
 207         // control). If so, we can try to move the node above the
 208         // parent control.
 209         int nb_ctl_proj = 0;
 210         for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
 211           Node *p = parent_ctl->fast_out(i);
 212           if (p->is_Proj() && p->is_CFG()) {
 213             nb_ctl_proj++;
 214             if (nb_ctl_proj > 1) {
 215               break;
 216             }
 217           }
 218         }
 219 
 220         if (nb_ctl_proj > 1) {
 221           break;
 222         }
 223         assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() ||
 224                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node");
 225         assert(idom(ctl) == parent_ctl, "strange");
 226         next = idom(parent_ctl);
 227       }
 228     } else {
 229       next = idom(ctl);
 230     }
 231     if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
 232       break;
 233     }
 234     ctl = next;
 235   }
 236 
 237   if (ctl != n->in(0)) {
 238     _igvn.replace_input_of(n, 0, ctl);
 239     _igvn.hash_insert(n);
 240   }
 241 
 242   return ctl;
 243 }
 244 
 245 
 246 //------------------------------set_early_ctrl---------------------------------
 247 // Set earliest legal control
 248 void PhaseIdealLoop::set_early_ctrl(Node* n, bool update_body) {
 249   Node *early = get_early_ctrl(n);
 250 
 251   // Record earliest legal location
 252   set_ctrl(n, early);
 253   IdealLoopTree *loop = get_loop(early);
 254   if (update_body && loop->_child == nullptr) {
 255     loop->_body.push(n);
 256   }
 257 }
 258 
 259 //------------------------------set_subtree_ctrl-------------------------------
 260 // set missing _ctrl entries on new nodes
 261 void PhaseIdealLoop::set_subtree_ctrl(Node* n, bool update_body) {
 262   // Already set?  Get out.
 263   if (_loop_or_ctrl[n->_idx]) return;
 264   // Recursively set _loop_or_ctrl array to indicate where the Node goes
 265   uint i;
 266   for (i = 0; i < n->req(); ++i) {
 267     Node *m = n->in(i);
 268     if (m && m != C->root()) {
 269       set_subtree_ctrl(m, update_body);
 270     }
 271   }
 272 
 273   // Fixup self
 274   set_early_ctrl(n, update_body);
 275 }
 276 
 277 IdealLoopTree* PhaseIdealLoop::insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift) {
 278   IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift);
 279   IdealLoopTree* parent = loop->_parent;
 280   IdealLoopTree* sibling = parent->_child;
 281   if (sibling == loop) {
 282     parent->_child = outer_ilt;
 283   } else {
 284     while (sibling->_next != loop) {
 285       sibling = sibling->_next;
 286     }
 287     sibling->_next = outer_ilt;
 288   }
 289   outer_ilt->_next = loop->_next;
 290   outer_ilt->_parent = parent;
 291   outer_ilt->_child = loop;
 292   outer_ilt->_nest = loop->_nest;
 293   loop->_parent = outer_ilt;
 294   loop->_next = nullptr;
 295   loop->_nest++;
 296   assert(loop->_nest <= SHRT_MAX, "sanity");
 297   return outer_ilt;
 298 }
 299 
 300 // Create a skeleton strip mined outer loop: a Loop head before the
 301 // inner strip mined loop, a safepoint and an exit condition guarded
 302 // by an opaque node after the inner strip mined loop with a backedge
 303 // to the loop head. The inner strip mined loop is left as it is. Only
 304 // once loop optimizations are over, do we adjust the inner loop exit
 305 // condition to limit its number of iterations, set the outer loop
 306 // exit condition and add Phis to the outer loop head. Some loop
 307 // optimizations that operate on the inner strip mined loop need to be
 308 // aware of the outer strip mined loop: loop unswitching needs to
 309 // clone the outer loop as well as the inner, unrolling needs to only
 310 // clone the inner loop etc. No optimizations need to change the outer
 311 // strip mined loop as it is only a skeleton.
IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
                                                             IdealLoopTree* loop, float cl_prob, float le_fcnt,
                                                             Node*& entry_control, Node*& iffalse) {
  // Placeholder exit condition (constant 0); the real outer-loop exit
  // condition is only installed once loop optimizations are over (see the
  // comment above this function).
  Node* outer_test = intcon(0);
  Node *orig = iffalse;
  // Clone the inner loop's exit projection: the clone becomes the inner
  // loop's exit and feeds the new outer loop end.
  iffalse = iffalse->clone();
  _igvn.register_new_node_with_optimizer(iffalse);
  set_idom(iffalse, idom(orig), dom_depth(orig));

  IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt);
  Node *outer_ift = new IfTrueNode (outer_le);
  // The original projection is reused for the outer loop, now hanging off
  // the outer loop end.
  Node* outer_iff = orig;
  _igvn.replace_input_of(outer_iff, 0, outer_le);

  // New outer loop head: entry from init_control, backedge from outer_ift.
  LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift);
  entry_control = outer_l;

  // Wire the new outer loop into the loop tree, between 'loop' and its parent.
  IdealLoopTree* outer_ilt = insert_outer_loop(loop, outer_l, outer_ift);

  set_loop(iffalse, outer_ilt);
  // When this code runs, loop bodies have not yet been populated.
  const bool body_populated = false;
  register_control(outer_le, outer_ilt, iffalse, body_populated);
  register_control(outer_ift, outer_ilt, outer_le, body_populated);
  set_idom(outer_iff, outer_le, dom_depth(outer_le));
  _igvn.register_new_node_with_optimizer(outer_l);
  set_loop(outer_l, outer_ilt);
  set_idom(outer_l, init_control, dom_depth(init_control)+1);

  return outer_ilt;
}
 343 
// Add a loop-limit-check predicate: create a new If from the Loop Limit
// Check Parse Predicate and install the caller-built cmp_limit/bol test as
// its condition. Failing the check at runtime deoptimizes with
// Reason_loop_limit_check.
void PhaseIdealLoop::insert_loop_limit_check_predicate(ParsePredicateSuccessProj* loop_limit_check_parse_proj,
                                                       Node* cmp_limit, Node* bol) {
  assert(loop_limit_check_parse_proj->in(0)->is_ParsePredicate(), "must be parse predicate");
  Node* new_predicate_proj = create_new_if_for_predicate(loop_limit_check_parse_proj, nullptr,
                                                         Deoptimization::Reason_loop_limit_check,
                                                         Op_If);
  Node* iff = new_predicate_proj->in(0);
  // cmp_limit and bol were created by the caller but not yet registered:
  // register them here, place them, and make bol the If condition.
  cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
  bol = _igvn.register_new_node_with_optimizer(bol);
  set_subtree_ctrl(bol, false);
  _igvn.replace_input_of(iff, 1, bol);

#ifndef PRODUCT
  // report that the loop predication has been actually performed
  // for this loop
  if (TraceLoopLimitCheck) {
    tty->print_cr("Counted Loop Limit Check generated:");
    debug_only( bol->dump(2); )
  }
#endif
}
 365 
 366 Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) {
 367   // Counted loop head must be a good RegionNode with only 3 not null
 368   // control input edges: Self, Entry, LoopBack.
 369   if (x->in(LoopNode::Self) == nullptr || x->req() != 3 || loop->_irreducible) {
 370     return nullptr;
 371   }
 372   Node *init_control = x->in(LoopNode::EntryControl);
 373   Node *back_control = x->in(LoopNode::LoopBackControl);
 374   if (init_control == nullptr || back_control == nullptr) {   // Partially dead
 375     return nullptr;
 376   }
 377   // Must also check for TOP when looking for a dead loop
 378   if (init_control->is_top() || back_control->is_top()) {
 379     return nullptr;
 380   }
 381 
 382   // Allow funny placement of Safepoint
 383   if (back_control->Opcode() == Op_SafePoint) {
 384     back_control = back_control->in(TypeFunc::Control);
 385   }
 386 
 387   // Controlling test for loop
 388   Node *iftrue = back_control;
 389   uint iftrue_op = iftrue->Opcode();
 390   if (iftrue_op != Op_IfTrue &&
 391       iftrue_op != Op_IfFalse) {
 392     // I have a weird back-control.  Probably the loop-exit test is in
 393     // the middle of the loop and I am looking at some trailing control-flow
 394     // merge point.  To fix this I would have to partially peel the loop.
 395     return nullptr; // Obscure back-control
 396   }
 397 
 398   // Get boolean guarding loop-back test
 399   Node *iff = iftrue->in(0);
 400   if (get_loop(iff) != loop || !iff->in(1)->is_Bool()) {
 401     return nullptr;
 402   }
 403   return iftrue;
 404 }
 405 
// Given the backedge projection of a candidate counted loop, extract the
// components of its exit test: the trip-counter increment ('incr'), the
// loop limit ('limit'), the canonicalized test mask ('bt'), and the
// probability of taking the backedge ('cl_prob'). Returns the backedge
// compare node, or null if the test does not fit the expected shape.
Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob) {
  Node* iftrue = back_control;
  uint iftrue_op = iftrue->Opcode();
  Node* iff = iftrue->in(0);
  BoolNode* test = iff->in(1)->as_Bool();
  bt = test->_test._test;
  cl_prob = iff->as_If()->_prob;
  // Canonicalize so that bt/cl_prob describe the stay-in-loop direction even
  // when the backedge is the false projection.
  if (iftrue_op == Op_IfFalse) {
    bt = BoolTest(bt).negate();
    cl_prob = 1.0 - cl_prob;
  }
  // Get backedge compare
  Node* cmp = test->in(1);
  if (!cmp->is_Cmp()) {
    return nullptr;
  }

  // Find the trip-counter increment & limit.  Limit must be loop invariant.
  incr  = cmp->in(1);
  limit = cmp->in(2);

  // ---------
  // need 'loop()' test to tell if limit is loop invariant
  // ---------

  if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
    Node* tmp = incr;            // Then reverse order into the CmpI
    incr = limit;
    limit = tmp;
    bt = BoolTest(bt).commute(); // And commute the exit test
  }
  if (is_member(loop, get_ctrl(limit))) { // Limit must be loop-invariant
    return nullptr;
  }
  if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
    return nullptr;
  }
  return cmp;
}
 445 
 446 Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr) {
 447   if (incr->is_Phi()) {
 448     if (incr->as_Phi()->region() != x || incr->req() != 3) {
 449       return nullptr; // Not simple trip counter expression
 450     }
 451     phi_incr = incr;
 452     incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
 453     if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
 454       return nullptr;
 455     }
 456   }
 457   return incr;
 458 }
 459 
 460 Node* PhaseIdealLoop::loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi) {
 461   assert(incr->Opcode() == Op_AddI || incr->Opcode() == Op_AddL, "caller resp.");
 462   // Get merge point
 463   xphi = incr->in(1);
 464   Node *stride = incr->in(2);
 465   if (!stride->is_Con()) {     // Oops, swap these
 466     if (!xphi->is_Con()) {     // Is the other guy a constant?
 467       return nullptr;          // Nope, unknown stride, bail out
 468     }
 469     Node *tmp = xphi;          // 'incr' is commutative, so ok to swap
 470     xphi = stride;
 471     stride = tmp;
 472   }
 473   return stride;
 474 }
 475 
 476 PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop) {
 477   if (!xphi->is_Phi()) {
 478     return nullptr; // Too much math on the trip counter
 479   }
 480   if (phi_incr != nullptr && phi_incr != xphi) {
 481     return nullptr;
 482   }
 483   PhiNode *phi = xphi->as_Phi();
 484 
 485   // Phi must be of loop header; backedge must wrap to increment
 486   if (phi->region() != x) {
 487     return nullptr;
 488   }
 489   return phi;
 490 }
 491 
 492 static int check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt) {
 493   if (final_correction > 0) {
 494     if (limit_t->lo_as_long() > (max_signed_integer(bt) - final_correction)) {
 495       return -1;
 496     }
 497     if (limit_t->hi_as_long() > (max_signed_integer(bt) - final_correction)) {
 498       return 1;
 499     }
 500   } else {
 501     if (limit_t->hi_as_long() < (min_signed_integer(bt) - final_correction)) {
 502       return -1;
 503     }
 504     if (limit_t->lo_as_long() < (min_signed_integer(bt) - final_correction)) {
 505       return 1;
 506     }
 507   }
 508   return 0;
 509 }
 510 
 511 static bool condition_stride_ok(BoolTest::mask bt, jlong stride_con) {
 512   // If the condition is inverted and we will be rolling
 513   // through MININT to MAXINT, then bail out.
 514   if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
 515       // Odd stride
 516       (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) ||
 517       // Count down loop rolls through MAXINT
 518       ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) ||
 519       // Count up loop rolls through MININT
 520       ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) {
 521     return false; // Bail out
 522   }
 523   return true;
 524 }
 525 
// After turning a loop into a nest, replace every use of the original iv
// 'iv_to_replace' with outer_phi + inner_iv. For a long iv the int inner iv
// is first widened with ConvI2L. Returns the replacement node.
Node* PhaseIdealLoop::loop_nest_replace_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head,
                                           BasicType bt) {
  Node* iv_as_long;
  if (bt == T_LONG) {
    // Widen the int inner iv to long before adding it to outer_phi.
    iv_as_long = new ConvI2LNode(inner_iv, TypeLong::INT);
    register_new_node(iv_as_long, inner_head);
  } else {
    iv_as_long = inner_iv;
  }
  Node* iv_replacement = AddNode::make(outer_phi, iv_as_long, bt);
  register_new_node(iv_replacement, inner_head);
  // Walk the uses backwards: replace_edge() removes entries from the list as
  // we go, and the iterator index is adjusted by the number replaced.
  for (DUIterator_Last imin, i = iv_to_replace->last_outs(imin); i >= imin;) {
    Node* u = iv_to_replace->last_out(i);
#ifdef ASSERT
    // Every use must be dominated by the inner loop head (i.e. be inside the
    // loop), except for Phis that merge the value on paths leaving the loop.
    if (!is_dominator(inner_head, ctrl_or_self(u))) {
      assert(u->is_Phi(), "should be a Phi");
      for (uint j = 1; j < u->req(); j++) {
        if (u->in(j) == iv_to_replace) {
          assert(is_dominator(inner_head, u->in(0)->in(j)), "iv use above loop?");
        }
      }
    }
#endif
    _igvn.rehash_node_delayed(u);
    int nb = u->replace_edge(iv_to_replace, iv_replacement, &_igvn);
    i -= nb;
  }
  return iv_replacement;
}
 555 
 556 // Add a Parse Predicate with an uncommon trap on the failing/false path. Normal control will continue on the true path.
 557 void PhaseIdealLoop::add_parse_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop,
 558                                          SafePointNode* sfpt) {
 559   if (!C->too_many_traps(reason)) {
 560     ParsePredicateNode* parse_predicate = new ParsePredicateNode(inner_head->in(LoopNode::EntryControl), reason, &_igvn);
 561     register_control(parse_predicate, loop, inner_head->in(LoopNode::EntryControl));
 562     Node* if_false = new IfFalseNode(parse_predicate);
 563     register_control(if_false, _ltree_root, parse_predicate);
 564     Node* if_true = new IfTrueNode(parse_predicate);
 565     register_control(if_true, loop, parse_predicate);
 566 
 567     int trap_request = Deoptimization::make_trap_request(reason, Deoptimization::Action_maybe_recompile);
 568     address call_addr = OptoRuntime::uncommon_trap_blob()->entry_point();
 569     const TypePtr* no_memory_effects = nullptr;
 570     JVMState* jvms = sfpt->jvms();
 571     CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap",
 572                                            no_memory_effects);
 573 
 574     Node* mem = nullptr;
 575     Node* i_o = nullptr;
 576     if (sfpt->is_Call()) {
 577       mem = sfpt->proj_out(TypeFunc::Memory);
 578       i_o = sfpt->proj_out(TypeFunc::I_O);
 579     } else {
 580       mem = sfpt->memory();
 581       i_o = sfpt->i_o();
 582     }
 583 
 584     Node *frame = new ParmNode(C->start(), TypeFunc::FramePtr);
 585     register_new_node(frame, C->start());
 586     Node *ret = new ParmNode(C->start(), TypeFunc::ReturnAdr);
 587     register_new_node(ret, C->start());
 588 
 589     unc->init_req(TypeFunc::Control, if_false);
 590     unc->init_req(TypeFunc::I_O, i_o);
 591     unc->init_req(TypeFunc::Memory, mem); // may gc ptrs
 592     unc->init_req(TypeFunc::FramePtr, frame);
 593     unc->init_req(TypeFunc::ReturnAdr, ret);
 594     unc->init_req(TypeFunc::Parms+0, _igvn.intcon(trap_request));
 595     unc->set_cnt(PROB_UNLIKELY_MAG(4));
 596     unc->copy_call_debug_info(&_igvn, sfpt);
 597 
 598     for (uint i = TypeFunc::Parms; i < unc->req(); i++) {
 599       set_subtree_ctrl(unc->in(i), false);
 600     }
 601     register_control(unc, _ltree_root, if_false);
 602 
 603     Node* ctrl = new ProjNode(unc, TypeFunc::Control);
 604     register_control(ctrl, _ltree_root, unc);
 605     Node* halt = new HaltNode(ctrl, frame, "uncommon trap returned which should never happen" PRODUCT_ONLY(COMMA /*reachable*/false));
 606     register_control(halt, _ltree_root, ctrl);
 607     _igvn.add_input_to(C->root(), halt);
 608 
 609     _igvn.replace_input_of(inner_head, LoopNode::EntryControl, if_true);
 610     set_idom(inner_head, if_true, dom_depth(inner_head));
 611   }
 612 }
 613 
// Find a safepoint node that dominates the back edge. We need a
// SafePointNode so we can use its jvm state to create empty
// predicates. (Implemented by find_safepoint() below; the static helper
// that follows first checks that no memory side effect happened between
// the safepoint and the backedge.)
 617 static bool no_side_effect_since_safepoint(Compile* C, Node* x, Node* mem, MergeMemNode* mm, PhaseIdealLoop* phase) {
 618   SafePointNode* safepoint = nullptr;
 619   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
 620     Node* u = x->fast_out(i);
 621     if (u->is_memory_phi()) {
 622       Node* m = u->in(LoopNode::LoopBackControl);
 623       if (u->adr_type() == TypePtr::BOTTOM) {
 624         if (m->is_MergeMem() && mem->is_MergeMem()) {
 625           if (m != mem DEBUG_ONLY(|| true)) {
 626             // MergeMemStream can modify m, for example to adjust the length to mem.
 627             // This is unfortunate, and probably unnecessary. But as it is, we need
 628             // to add m to the igvn worklist, else we may have a modified node that
 629             // is not on the igvn worklist.
 630             phase->igvn()._worklist.push(m);
 631             for (MergeMemStream mms(m->as_MergeMem(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
 632               if (!mms.is_empty()) {
 633                 if (mms.memory() != mms.memory2()) {
 634                   return false;
 635                 }
 636 #ifdef ASSERT
 637                 if (mms.alias_idx() != Compile::AliasIdxBot) {
 638                   mm->set_memory_at(mms.alias_idx(), mem->as_MergeMem()->base_memory());
 639                 }
 640 #endif
 641               }
 642             }
 643           }
 644         } else if (mem->is_MergeMem()) {
 645           if (m != mem->as_MergeMem()->base_memory()) {
 646             return false;
 647           }
 648         } else {
 649           return false;
 650         }
 651       } else {
 652         if (mem->is_MergeMem()) {
 653           if (m != mem->as_MergeMem()->memory_at(C->get_alias_index(u->adr_type()))) {
 654             return false;
 655           }
 656 #ifdef ASSERT
 657           mm->set_memory_at(C->get_alias_index(u->adr_type()), mem->as_MergeMem()->base_memory());
 658 #endif
 659         } else {
 660           if (m != mem) {
 661             return false;
 662           }
 663         }
 664       }
 665     }
 666   }
 667   return true;
 668 }
 669 
// Find a safepoint whose jvm state can be used for this loop: either the
// exit test hangs directly off a safepoint that only it uses, or we walk
// the idom chain from the backedge up to the loop head looking for one. A
// safepoint found by walking is only usable if memory is unchanged between
// it and the backedge. Returns null if no suitable safepoint exists.
SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop) {
  IfNode* exit_test = back_control->in(0)->as_If();
  SafePointNode* safepoint = nullptr;
  if (exit_test->in(0)->is_SafePoint() && exit_test->in(0)->outcnt() == 1) {
    // Exit test immediately follows a safepoint used only by the test.
    safepoint = exit_test->in(0)->as_SafePoint();
  } else {
    // Walk the dominator chain from the backedge toward the loop head,
    // looking for a safepoint.
    Node* c = back_control;
    while (c != x && c->Opcode() != Op_SafePoint) {
      c = idom(c);
    }

    if (c->Opcode() == Op_SafePoint) {
      safepoint = c->as_SafePoint();
    }

    if (safepoint == nullptr) {
      return nullptr;
    }

    Node* mem = safepoint->in(TypeFunc::Memory);

    // We can only use that safepoint if there's no side effect between the backedge and the safepoint.

    // mm is the memory state at the safepoint (when it's a MergeMem)
    // no_side_effect_since_safepoint() goes over the memory state at the backedge. It resets the mm input for each
    // component of the memory state it encounters so it points to the base memory. Once no_side_effect_since_safepoint()
    // is done, if no side effect after the safepoint was found, mm should transform to the base memory: the states at
    // the backedge and safepoint are the same so all components of the memory state at the safepoint should have been
    // reset.
    MergeMemNode* mm = nullptr;
#ifdef ASSERT
    if (mem->is_MergeMem()) {
      mm = mem->clone()->as_MergeMem();
      _igvn._worklist.push(mm);
      for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
        // Loop invariant memory state won't be reset by no_side_effect_since_safepoint(). Do it here.
        // Escape Analysis can add state to mm that it doesn't add to the backedge memory Phis, breaking verification
        // code that relies on mm. Clear that extra state here.
        if (mms.alias_idx() != Compile::AliasIdxBot &&
            (loop != get_loop(ctrl_or_self(mms.memory())) ||
             (mms.adr_type()->isa_oop_ptr() && mms.adr_type()->is_known_instance()))) {
          mm->set_memory_at(mms.alias_idx(), mem->as_MergeMem()->base_memory());
        }
      }
    }
#endif
    if (!no_side_effect_since_safepoint(C, x, mem, mm, this)) {
      // Memory was modified between safepoint and backedge: can't use it.
      safepoint = nullptr;
    } else {
      assert(mm == nullptr|| _igvn.transform(mm) == mem->as_MergeMem()->base_memory(), "all memory state should have been processed");
    }
#ifdef ASSERT
    if (mm != nullptr) {
      // Drop the verification clone.
      _igvn.remove_dead_node(mm);
    }
#endif
  }
  return safepoint;
}
 729 
 730 // If the loop has the shape of a counted loop but with a long
 731 // induction variable, transform the loop in a loop nest: an inner
 732 // loop that iterates for at most max int iterations with an integer
 733 // induction variable and an outer loop that iterates over the full
 734 // range of long values from the initial loop in (at most) max int
 735 // steps. That is:
 736 //
 737 // x: for (long phi = init; phi < limit; phi += stride) {
 738 //   // phi := Phi(L, init, incr)
 739 //   // incr := AddL(phi, longcon(stride))
 740 //   long incr = phi + stride;
 741 //   ... use phi and incr ...
 742 // }
 743 //
 744 // OR:
 745 //
 746 // x: for (long phi = init; (phi += stride) < limit; ) {
 747 //   // phi := Phi(L, AddL(init, stride), incr)
 748 //   // incr := AddL(phi, longcon(stride))
 749 //   long incr = phi + stride;
 750 //   ... use phi and (phi + stride) ...
 751 // }
 752 //
 753 // ==transform=>
 754 //
 755 // const ulong inner_iters_limit = INT_MAX - stride - 1;  //near 0x7FFFFFF0
 756 // assert(stride <= inner_iters_limit);  // else abort transform
 757 // assert((extralong)limit + stride <= LONG_MAX);  // else deopt
 758 // outer_head: for (long outer_phi = init;;) {
 759 //   // outer_phi := Phi(outer_head, init, AddL(outer_phi, I2L(inner_phi)))
 760 //   ulong inner_iters_max = (ulong) MAX(0, ((extralong)limit + stride - outer_phi));
 761 //   long inner_iters_actual = MIN(inner_iters_limit, inner_iters_max);
 762 //   assert(inner_iters_actual == (int)inner_iters_actual);
 763 //   int inner_phi, inner_incr;
 764 //   x: for (inner_phi = 0;; inner_phi = inner_incr) {
 765 //     // inner_phi := Phi(x, intcon(0), inner_incr)
 766 //     // inner_incr := AddI(inner_phi, intcon(stride))
 767 //     inner_incr = inner_phi + stride;
 768 //     if (inner_incr < inner_iters_actual) {
 769 //       ... use phi=>(outer_phi+inner_phi) ...
 770 //       continue;
 771 //     }
 772 //     else break;
 773 //   }
 774 //   if ((outer_phi+inner_phi) < limit)  //OR (outer_phi+inner_incr) < limit
 775 //     continue;
 776 //   else break;
 777 // }
 778 //
 779 // The same logic is used to transform an int counted loop that contains long range checks into a loop nest of 2 int
 780 // loops with long range checks transformed to int range checks in the inner loop.
 781 bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
 782   Node* x = loop->_head;
 783   // Only for inner loops
 784   if (loop->_child != nullptr || !x->is_BaseCountedLoop() || x->as_Loop()->is_loop_nest_outer_loop()) {
 785     return false;
 786   }
 787 
 788   if (x->is_CountedLoop() && !x->as_CountedLoop()->is_main_loop() && !x->as_CountedLoop()->is_normal_loop()) {
 789     return false;
 790   }
 791 
 792   BaseCountedLoopNode* head = x->as_BaseCountedLoop();
 793   BasicType bt = x->as_BaseCountedLoop()->bt();
 794 
 795   check_counted_loop_shape(loop, x, bt);
 796 
 797 #ifndef PRODUCT
 798   if (bt == T_LONG) {
 799     Atomic::inc(&_long_loop_candidates);
 800   }
 801 #endif
 802 
 803   jlong stride_con_long = head->stride_con();
 804   assert(stride_con_long != 0, "missed some peephole opt");
 805   // We can't iterate for more than max int at a time.
 806   if (stride_con_long != (jint)stride_con_long || stride_con_long == min_jint) {
 807     assert(bt == T_LONG, "only for long loops");
 808     return false;
 809   }
 810   jint stride_con = checked_cast<jint>(stride_con_long);
 811   // The number of iterations for the integer count loop: guarantee no
 812   // overflow: max_jint - stride_con max. -1 so there's no need for a
 813   // loop limit check if the exit test is <= or >=.
 814   int iters_limit = max_jint - ABS(stride_con) - 1;
 815 #ifdef ASSERT
 816   if (bt == T_LONG && StressLongCountedLoop > 0) {
 817     iters_limit = iters_limit / StressLongCountedLoop;
 818   }
 819 #endif
 820   // At least 2 iterations so counted loop construction doesn't fail
 821   if (iters_limit/ABS(stride_con) < 2) {
 822     return false;
 823   }
 824 
 825   PhiNode* phi = head->phi()->as_Phi();
 826   Node* incr = head->incr();
 827 
 828   Node* back_control = head->in(LoopNode::LoopBackControl);
 829 
 830   // data nodes on back branch not supported
 831   if (back_control->outcnt() > 1) {
 832     return false;
 833   }
 834 
 835   Node* limit = head->limit();
 836   // We'll need to use the loop limit before the inner loop is entered
 837   if (!is_dominator(get_ctrl(limit), x)) {
 838     return false;
 839   }
 840 
 841   IfNode* exit_test = head->loopexit();
 842 
 843   assert(back_control->Opcode() == Op_IfTrue, "wrong projection for back edge");
 844 
 845   Node_List range_checks;
 846   iters_limit = extract_long_range_checks(loop, stride_con, iters_limit, phi, range_checks);
 847 
 848   if (bt == T_INT) {
 849     // The only purpose of creating a loop nest is to handle long range checks. If there are none, do not proceed further.
 850     if (range_checks.size() == 0) {
 851       return false;
 852     }
 853   }
 854 
 855   // Take what we know about the number of iterations of the long counted loop into account when computing the limit of
 856   // the inner loop.
 857   const Node* init = head->init_trip();
 858   const TypeInteger* lo = _igvn.type(init)->is_integer(bt);
 859   const TypeInteger* hi = _igvn.type(limit)->is_integer(bt);
 860   if (stride_con < 0) {
 861     swap(lo, hi);
 862   }
 863   if (hi->hi_as_long() <= lo->lo_as_long()) {
 864     // not a loop after all
 865     return false;
 866   }
 867 
 868   if (range_checks.size() > 0) {
 869     // This transformation requires peeling one iteration. Also, if it has range checks and they are eliminated by Loop
 870     // Predication, then 2 Hoisted Check Predicates are added for one range check. Finally, transforming a long range
 871     // check requires extra logic to be executed before the loop is entered and for the outer loop. As a result, the
 872     // transformations can't pay off for a small number of iterations: roughly, if the loop runs for 3 iterations, it's
 873     // going to execute as many range checks once transformed with range checks eliminated (1 peeled iteration with
 874     // range checks + 2 predicates per range checks) as it would have not transformed. It also has to pay for the extra
 875     // logic on loop entry and for the outer loop.
 876     loop->compute_trip_count(this);
 877     if (head->is_CountedLoop() && head->as_CountedLoop()->has_exact_trip_count()) {
 878       if (head->as_CountedLoop()->trip_count() <= 3) {
 879         return false;
 880       }
 881     } else {
 882       loop->compute_profile_trip_cnt(this);
 883       if (!head->is_profile_trip_failed() && head->profile_trip_cnt() <= 3) {
 884         return false;
 885       }
 886     }
 887   }
 888 
 889   julong orig_iters = (julong)hi->hi_as_long() - lo->lo_as_long();
 890   iters_limit = checked_cast<int>(MIN2((julong)iters_limit, orig_iters));
 891 
 892   // We need a safepoint to insert Parse Predicates for the inner loop.
 893   SafePointNode* safepoint;
 894   if (bt == T_INT && head->as_CountedLoop()->is_strip_mined()) {
 895     // Loop is strip mined: use the safepoint of the outer strip mined loop
 896     OuterStripMinedLoopNode* outer_loop = head->as_CountedLoop()->outer_loop();
 897     assert(outer_loop != nullptr, "no outer loop");
 898     safepoint = outer_loop->outer_safepoint();
 899     outer_loop->transform_to_counted_loop(&_igvn, this);
 900     exit_test = head->loopexit();
 901   } else {
 902     safepoint = find_safepoint(back_control, x, loop);
 903   }
 904 
 905   Node* exit_branch = exit_test->proj_out(false);
 906   Node* entry_control = head->in(LoopNode::EntryControl);
 907 
 908   // Clone the control flow of the loop to build an outer loop
 909   Node* outer_back_branch = back_control->clone();
 910   Node* outer_exit_test = new IfNode(exit_test->in(0), exit_test->in(1), exit_test->_prob, exit_test->_fcnt);
 911   Node* inner_exit_branch = exit_branch->clone();
 912 
 913   LoopNode* outer_head = new LoopNode(entry_control, outer_back_branch);
 914   IdealLoopTree* outer_ilt = insert_outer_loop(loop, outer_head, outer_back_branch);
 915 
 916   const bool body_populated = true;
 917   register_control(outer_head, outer_ilt, entry_control, body_populated);
 918 
 919   _igvn.register_new_node_with_optimizer(inner_exit_branch);
 920   set_loop(inner_exit_branch, outer_ilt);
 921   set_idom(inner_exit_branch, exit_test, dom_depth(exit_branch));
 922 
 923   outer_exit_test->set_req(0, inner_exit_branch);
 924   register_control(outer_exit_test, outer_ilt, inner_exit_branch, body_populated);
 925 
 926   _igvn.replace_input_of(exit_branch, 0, outer_exit_test);
 927   set_idom(exit_branch, outer_exit_test, dom_depth(exit_branch));
 928 
 929   outer_back_branch->set_req(0, outer_exit_test);
 930   register_control(outer_back_branch, outer_ilt, outer_exit_test, body_populated);
 931 
 932   _igvn.replace_input_of(x, LoopNode::EntryControl, outer_head);
 933   set_idom(x, outer_head, dom_depth(x));
 934 
 935   // add an iv phi to the outer loop and use it to compute the inner
 936   // loop iteration limit
 937   Node* outer_phi = phi->clone();
 938   outer_phi->set_req(0, outer_head);
 939   register_new_node(outer_phi, outer_head);
 940 
 941   Node* inner_iters_max = nullptr;
 942   if (stride_con > 0) {
 943     inner_iters_max = MaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn);
 944   } else {
 945     inner_iters_max = MaxNode::max_diff_with_zero(outer_phi, limit, TypeInteger::bottom(bt), _igvn);
 946   }
 947 
 948   Node* inner_iters_limit = _igvn.integercon(iters_limit, bt);
 949   // inner_iters_max may not fit in a signed integer (iterating from
 950   // Long.MIN_VALUE to Long.MAX_VALUE for instance). Use an unsigned
 951   // min.
 952   const TypeInteger* inner_iters_actual_range = TypeInteger::make(0, iters_limit, Type::WidenMin, bt);
 953   Node* inner_iters_actual = MaxNode::unsigned_min(inner_iters_max, inner_iters_limit, inner_iters_actual_range, _igvn);
 954 
 955   Node* inner_iters_actual_int;
 956   if (bt == T_LONG) {
 957     inner_iters_actual_int = new ConvL2INode(inner_iters_actual);
 958     _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
 959     // When the inner loop is transformed to a counted loop, a loop limit check is not expected to be needed because
 960     // the loop limit is less or equal to max_jint - stride - 1 (if stride is positive but a similar argument exists for
 961     // a negative stride). We add a CastII here to guarantee that, when the counted loop is created in a subsequent loop
 962     // opts pass, an accurate range of values for the limits is found.
 963     const TypeInt* inner_iters_actual_int_range = TypeInt::make(0, iters_limit, Type::WidenMin);
 964     inner_iters_actual_int = new CastIINode(outer_head, inner_iters_actual_int, inner_iters_actual_int_range, ConstraintCastNode::UnconditionalDependency);
 965     _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
 966   } else {
 967     inner_iters_actual_int = inner_iters_actual;
 968   }
 969 
 970   Node* int_zero = intcon(0);
 971   if (stride_con < 0) {
 972     inner_iters_actual_int = new SubINode(int_zero, inner_iters_actual_int);
 973     _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
 974   }
 975 
 976   // Clone the iv data nodes as an integer iv
 977   Node* int_stride = intcon(stride_con);
 978   Node* inner_phi = new PhiNode(x->in(0), TypeInt::INT);
 979   Node* inner_incr = new AddINode(inner_phi, int_stride);
 980   Node* inner_cmp = nullptr;
 981   inner_cmp = new CmpINode(inner_incr, inner_iters_actual_int);
 982   Node* inner_bol = new BoolNode(inner_cmp, exit_test->in(1)->as_Bool()->_test._test);
 983   inner_phi->set_req(LoopNode::EntryControl, int_zero);
 984   inner_phi->set_req(LoopNode::LoopBackControl, inner_incr);
 985   register_new_node(inner_phi, x);
 986   register_new_node(inner_incr, x);
 987   register_new_node(inner_cmp, x);
 988   register_new_node(inner_bol, x);
 989 
 990   _igvn.replace_input_of(exit_test, 1, inner_bol);
 991 
 992   // Clone inner loop phis to outer loop
 993   for (uint i = 0; i < head->outcnt(); i++) {
 994     Node* u = head->raw_out(i);
 995     if (u->is_Phi() && u != inner_phi && u != phi) {
 996       assert(u->in(0) == head, "inconsistent");
 997       Node* clone = u->clone();
 998       clone->set_req(0, outer_head);
 999       register_new_node(clone, outer_head);
1000       _igvn.replace_input_of(u, LoopNode::EntryControl, clone);
1001     }
1002   }
1003 
1004   // Replace inner loop long iv phi as inner loop int iv phi + outer
1005   // loop iv phi
1006   Node* iv_add = loop_nest_replace_iv(phi, inner_phi, outer_phi, head, bt);
1007 
1008   set_subtree_ctrl(inner_iters_actual_int, body_populated);
1009 
1010   LoopNode* inner_head = create_inner_head(loop, head, exit_test);
1011 
1012   // Summary of steps from initial loop to loop nest:
1013   //
1014   // == old IR nodes =>
1015   //
1016   // entry_control: {...}
1017   // x:
1018   // for (long phi = init;;) {
1019   //   // phi := Phi(x, init, incr)
1020   //   // incr := AddL(phi, longcon(stride))
1021   //   exit_test:
1022   //   if (phi < limit)
1023   //     back_control: fallthrough;
1024   //   else
1025   //     exit_branch: break;
1026   //   long incr = phi + stride;
1027   //   ... use phi and incr ...
1028   //   phi = incr;
1029   // }
1030   //
1031   // == new IR nodes (just before final peel) =>
1032   //
1033   // entry_control: {...}
1034   // long adjusted_limit = limit + stride;  //because phi_incr != nullptr
1035   // assert(!limit_check_required || (extralong)limit + stride == adjusted_limit);  // else deopt
1036   // ulong inner_iters_limit = max_jint - ABS(stride) - 1;  //near 0x7FFFFFF0
1037   // outer_head:
1038   // for (long outer_phi = init;;) {
1039   //   // outer_phi := phi->clone(), in(0):=outer_head, => Phi(outer_head, init, incr)
1040   //   // REPLACE phi  => AddL(outer_phi, I2L(inner_phi))
1041   //   // REPLACE incr => AddL(outer_phi, I2L(inner_incr))
1042   //   // SO THAT outer_phi := Phi(outer_head, init, AddL(outer_phi, I2L(inner_incr)))
1043   //   ulong inner_iters_max = (ulong) MAX(0, ((extralong)adjusted_limit - outer_phi) * SGN(stride));
1044   //   int inner_iters_actual_int = (int) MIN(inner_iters_limit, inner_iters_max) * SGN(stride);
1045   //   inner_head: x: //in(1) := outer_head
1046   //   int inner_phi;
1047   //   for (inner_phi = 0;;) {
1048   //     // inner_phi := Phi(x, intcon(0), inner_phi + stride)
1049   //     int inner_incr = inner_phi + stride;
1050   //     bool inner_bol = (inner_incr < inner_iters_actual_int);
1051   //     exit_test: //exit_test->in(1) := inner_bol;
1052   //     if (inner_bol) // WAS (phi < limit)
1053   //       back_control: fallthrough;
1054   //     else
1055   //       inner_exit_branch: break;  //exit_branch->clone()
1056   //     ... use phi=>(outer_phi+inner_phi) ...
1057   //     inner_phi = inner_phi + stride;  // inner_incr
1058   //   }
1059   //   outer_exit_test:  //exit_test->clone(), in(0):=inner_exit_branch
1060   //   if ((outer_phi+inner_phi) < limit)  // WAS (phi < limit)
1061   //     outer_back_branch: fallthrough;  //back_control->clone(), in(0):=outer_exit_test
1062   //   else
1063   //     exit_branch: break;  //in(0) := outer_exit_test
1064   // }
1065 
1066   if (bt == T_INT) {
1067     outer_phi = new ConvI2LNode(outer_phi);
1068     register_new_node(outer_phi, outer_head);
1069   }
1070 
1071   transform_long_range_checks(stride_con, range_checks, outer_phi, inner_iters_actual_int,
1072                               inner_phi, iv_add, inner_head);
1073   // Peel one iteration of the loop and use the safepoint at the end
1074   // of the peeled iteration to insert Parse Predicates. If no well
1075   // positioned safepoint peel to guarantee a safepoint in the outer
1076   // loop.
1077   if (safepoint != nullptr || !loop->_has_call) {
1078     old_new.clear();
1079     do_peeling(loop, old_new);
1080   } else {
1081     C->set_major_progress();
1082   }
1083 
1084   if (safepoint != nullptr) {
1085     SafePointNode* cloned_sfpt = old_new[safepoint->_idx]->as_SafePoint();
1086 
1087     if (UseLoopPredicate) {
1088       add_parse_predicate(Deoptimization::Reason_predicate, inner_head, outer_ilt, cloned_sfpt);
1089     }
1090     if (UseProfiledLoopPredicate) {
1091       add_parse_predicate(Deoptimization::Reason_profile_predicate, inner_head, outer_ilt, cloned_sfpt);
1092     }
1093     add_parse_predicate(Deoptimization::Reason_loop_limit_check, inner_head, outer_ilt, cloned_sfpt);
1094   }
1095 
1096 #ifndef PRODUCT
1097   if (bt == T_LONG) {
1098     Atomic::inc(&_long_loop_nests);
1099   }
1100 #endif
1101 
1102   inner_head->mark_loop_nest_inner_loop();
1103   outer_head->mark_loop_nest_outer_loop();
1104 
1105   return true;
1106 }
1107 
1108 int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jint stride_con, int iters_limit, PhiNode* phi,
1109                                               Node_List& range_checks) {
1110   const jlong min_iters = 2;
1111   jlong reduced_iters_limit = iters_limit;
1112   jlong original_iters_limit = iters_limit;
1113   for (uint i = 0; i < loop->_body.size(); i++) {
1114     Node* c = loop->_body.at(i);
1115     if (c->is_IfProj() && c->in(0)->is_RangeCheck()) {
1116       IfProjNode* if_proj = c->as_IfProj();
1117       CallStaticJavaNode* call = if_proj->is_uncommon_trap_if_pattern();
1118       if (call != nullptr) {
1119         Node* range = nullptr;
1120         Node* offset = nullptr;
1121         jlong scale = 0;
1122         if (loop->is_range_check_if(if_proj, this, T_LONG, phi, range, offset, scale) &&
1123             loop->is_invariant(range) && loop->is_invariant(offset) &&
1124             scale != min_jlong &&
1125             original_iters_limit / ABS(scale) >= min_iters * ABS(stride_con)) {
1126           assert(scale == (jint)scale, "scale should be an int");
1127           reduced_iters_limit = MIN2(reduced_iters_limit, original_iters_limit/ABS(scale));
1128           range_checks.push(c);
1129         }
1130       }
1131     }
1132   }
1133 
1134   return checked_cast<int>(reduced_iters_limit);
1135 }
1136 
1137 // One execution of the inner loop covers a sub-range of the entire iteration range of the loop: [A,Z), aka [A=init,
1138 // Z=limit). If the loop has at least one trip (which is the case here), the iteration variable i always takes A as its
1139 // first value, followed by A+S (S is the stride), next A+2S, etc. The limit is exclusive, so that the final value B of
1140 // i is never Z.  It will be B=Z-1 if S=1, or B=Z+1 if S=-1.
1141 
1142 // If |S|>1 the formula for the last value B would require a floor operation, specifically B=floor((Z-sgn(S)-A)/S)*S+A,
1143 // which is B=Z-sgn(S)U for some U in [1,|S|].  So when S>0, i ranges as i:[A,Z) or i:[A,B=Z-U], or else (in reverse)
1144 // as i:(Z,A] or i:[B=Z+U,A].  It will become important to reason about this inclusive range [A,B] or [B,A].
1145 
1146 // Within the loop there may be many range checks.  Each such range check (R.C.) is of the form 0 <= i*K+L < R, where K
1147 // is a scale factor applied to the loop iteration variable i, and L is some offset; K, L, and R are loop-invariant.
1148 // Because R is never negative (see below), this check can always be simplified to an unsigned check i*K+L <u R.
1149 
1150 // When a long loop over a 64-bit variable i (outer_iv) is decomposed into a series of shorter sub-loops over a 32-bit
1151 // variable j (inner_iv), j ranges over a shorter interval j:[0,B_2] or [0,Z_2) (assuming S > 0), where the limit is
1152 // chosen to prevent various cases of 32-bit overflow (including multiplications j*K below).  In the sub-loop the
1153 // logical value i is offset from j by a 64-bit constant C, so i ranges in i:C+[0,Z_2).
1154 
1155 // For S<0, j ranges (in reverse!) through j:[-|B_2|,0] or (-|Z_2|,0].  For either sign of S, we can say i=j+C and j
1156 // ranges through 32-bit ranges [A_2,B_2] or [B_2,A_2] (A_2=0 of course).
1157 
1158 // The disjoint union of all the C+[A_2,B_2] ranges from the sub-loops must be identical to the whole range [A,B].
1159 // Assuming S>0, the first C must be A itself, and the next C value is the previous C+B_2, plus S.  If |S|=1, the next
1160 // C value is also the previous C+Z_2.  In each sub-loop, j counts from j=A_2=0 and i counts from C+0 and exits at
1161 // j=B_2 (i=C+B_2), just before it gets to i=C+Z_2.  Both i and j count up (from C and 0) if S>0; otherwise they count
1162 // down (from C and 0 again).
1163 
1164 // Returning to range checks, we see that each i*K+L <u R expands to (C+j)*K+L <u R, or j*K+Q <u R, where Q=(C*K+L).
1165 // (Recall that K and L and R are loop-invariant scale, offset and range values for a particular R.C.)  This is still a
1166 // 64-bit comparison, so the range check elimination logic will not apply to it.  (The R.C.E. transforms operate only on
1167 // 32-bit indexes and comparisons, because they use 64-bit temporary values to avoid overflow; see
1168 // PhaseIdealLoop::add_constraint.)
1169 
1170 // We must transform this comparison so that it gets the same answer, but by means of a 32-bit R.C. (using j not i) of
1171 // the form j*K+L_2 <u32 R_2.  Note that L_2 and R_2 must be loop-invariant, but only with respect to the sub-loop.  Thus, the
1172 // problem reduces to computing values for L_2 and R_2 (for each R.C. in the loop) in the loop header for the sub-loop.
1173 // Then the standard R.C.E. transforms can take those as inputs and further compute the necessary minimum and maximum
1174 // values for the 32-bit counter j within which the range checks can be eliminated.
1175 
1176 // So, given j*K+Q <u R, we need to find some j*K+L_2 <u32 R_2, where L_2 and R_2 fit in 32 bits, and the 32-bit operations do
1177 // not overflow. We also need to cover the cases where i*K+L (= j*K+Q) overflows to a 64-bit negative, since that is
1178 // allowed as an input to the R.C., as long as the R.C. as a whole fails.
1179 
1180 // If 32-bit multiplication j*K might overflow, we adjust the sub-loop limit Z_2 closer to zero to reduce j's range.
1181 
1182 // For each R.C. j*K+Q <u32 R, the range of mathematical values of j*K+Q in the sub-loop is [Q_min, Q_max], where
1183 // Q_min=Q and Q_max=B_2*K+Q (if S>0 and K>0), Q_min=A_2*K+Q and Q_max=Q (if S<0 and K>0),
1184 // Q_min=B_2*K+Q and Q_max=Q if (S>0 and K<0), Q_min=Q and Q_max=A_2*K+Q (if S<0 and K<0)
1185 
1186 // Note that the first R.C. value is always Q=(S*K>0 ? Q_min : Q_max).  Also Q_{min,max} = Q + {min,max}(A_2*K,B_2*K).
1187 // If S*K>0 then, as the loop iterations progress, each R.C. value i*K+L = j*K+Q goes up from Q=Q_min towards Q_max.
1188 // If S*K<0 then j*K+Q starts at Q=Q_max and goes down towards Q_min.
1189 
1190 // Case A: Some Negatives (but no overflow).
1191 // Number line:
1192 // |s64_min   .    .    .    0    .    .    .   s64_max|
1193 // |    .  Q_min..Q_max .    0    .    .    .     .    |  s64 negative
1194 // |    .     .    .    .    R=0  R<   R<   R<    R<   |  (against R values)
1195 // |    .     .    .  Q_min..0..Q_max  .    .     .    |  small mixed
1196 // |    .     .    .    .    R    R    R<   R<    R<   |  (against R values)
1197 //
1198 // R values which are out of range (>Q_max+1) are reduced to max(0,Q_max+1).  They are marked on the number line as R<.
1199 //
1200 // So, if Q_min <s64 0, then use this test:
1201 // j*K + s32_trunc(Q_min) <u32 clamp(R, 0, Q_max+1) if S*K>0 (R.C.E. steps upward)
1202 // j*K + s32_trunc(Q_max) <u32 clamp(R, 0, Q_max+1) if S*K<0 (R.C.E. steps downward)
1203 // Both formulas reduce to adding j*K to the 32-bit truncated value of the first R.C. expression value, Q:
1204 // j*K + s32_trunc(Q) <u32 clamp(R, 0, Q_max+1) for all S,K
1205 
1206 // If the 32-bit truncation loses information, no harm is done, since certainly the clamp also will return R_2=zero.
1207 
1208 // Case B: No Negatives.
1209 // Number line:
1210 // |s64_min   .    .    .    0    .    .    .   s64_max|
1211 // |    .     .    .    .    0 Q_min..Q_max .     .    |  small positive
1212 // |    .     .    .    .    R>   R    R    R<    R<   |  (against R values)
1213 // |    .     .    .    .    0    . Q_min..Q_max  .    |  s64 positive
1214 // |    .     .    .    .    R>   R>   R    R     R<   |  (against R values)
1215 //
1216 // R values which are out of range (<Q_min or >Q_max+1) are reduced as marked: R> up to Q_min, R< down to Q_max+1.
1217 // Then the whole comparison is shifted left by Q_min, so it can take place at zero, which is a nice 32-bit value.
1218 //
1219 // So, if both Q_min, Q_max+1 >=s64 0, then use this test:
1220 // j*K + 0         <u32 clamp(R, Q_min, Q_max+1) - Q_min if S*K>0
1221 // More generally:
1222 // j*K + Q - Q_min <u32 clamp(R, Q_min, Q_max+1) - Q_min for all S,K
1223 
1224 // Case C: Overflow in the 64-bit domain
1225 // Number line:
1226 // |..Q_max-2^64   .    .    0    .    .    .   Q_min..|  s64 overflow
1227 // |    .     .    .    .    R>   R>   R>   R>    R    |  (against R values)
1228 //
1229 // In this case, Q_min >s64 Q_max+1, even though the mathematical values of Q_min and Q_max+1 are correctly ordered.
1230 // The formulas from the previous case can be used, except that the bad upper bound Q_max is replaced by max_jlong.
1231 // (In fact, we could use any replacement bound from R to max_jlong inclusive, as the input to the clamp function.)
1232 //
1233 // So if Q_min >=s64 0 but Q_max+1 <s64 0, use this test:
1234 // j*K + 0         <u32 clamp(R, Q_min, max_jlong) - Q_min if S*K>0
1235 // More generally:
1236 // j*K + Q - Q_min <u32 clamp(R, Q_min, max_jlong) - Q_min for all S,K
1237 //
1238 // Dropping the bad bound means only Q_min is used to reduce the range of R:
1239 // j*K + Q - Q_min <u32 max(Q_min, R) - Q_min for all S,K
1240 //
1241 // Here the clamp function is a 64-bit min/max that reduces the dynamic range of its R operand to the required [L,H]:
1242 //     clamp(X, L, H) := max(L, min(X, H))
1243 // When degenerately L > H, it returns L not H.
1244 //
1245 // All of the formulas above can be merged into a single one:
1246 //     L_clamp = Q_min < 0 ? 0 : Q_min        --whether and how far to left-shift
1247 //     H_clamp = Q_max+1 < Q_min ? max_jlong : Q_max+1
1248 //             = Q_max+1 < 0 && Q_min >= 0 ? max_jlong : Q_max+1
1249 //     Q_first = Q = (S*K>0 ? Q_min : Q_max) = (C*K+L)
1250 //     R_clamp = clamp(R, L_clamp, H_clamp)   --reduced dynamic range
1251 //     replacement R.C.:
1252 //       j*K + Q_first - L_clamp <u32 R_clamp - L_clamp
1253 //     or equivalently:
1254 //       j*K + L_2 <u32 R_2
1255 //     where
1256 //       L_2 = Q_first - L_clamp
1257 //       R_2 = R_clamp - L_clamp
1258 //
1259 // Note on why R is never negative:
1260 //
1261 // Various details of this transformation would break badly if R could be negative, so this transformation only
1262 // operates after obtaining hard evidence that R<0 is impossible.  For example, if R comes from a LoadRange node, we
1263 // know R cannot be negative.  For explicit checks (of both int and long) a proof is constructed in
1264 // inline_preconditions_checkIndex, which triggers an uncommon trap if R<0, then wraps R in a ConstraintCastNode with a
1265 // non-negative type.  Later on, when IdealLoopTree::is_range_check_if looks for an optimizable R.C., it checks that
1266 // the type of that R node is non-negative.  Any "wild" R node that could be negative is not treated as an optimizable
1267 // R.C., but R values from a.length and inside checkIndex are good to go.
1268 //
1269 void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List &range_checks, Node* outer_phi,
1270                                                  Node* inner_iters_actual_int, Node* inner_phi,
1271                                                  Node* iv_add, LoopNode* inner_head) {
1272   Node* long_zero = longcon(0);
1273   Node* int_zero = intcon(0);
1274   Node* long_one = longcon(1);
1275   Node* int_stride = intcon(checked_cast<int>(stride_con));
1276 
1277   for (uint i = 0; i < range_checks.size(); i++) {
1278     ProjNode* proj = range_checks.at(i)->as_Proj();
1279     ProjNode* unc_proj = proj->other_if_proj();
1280     RangeCheckNode* rc = proj->in(0)->as_RangeCheck();
1281     jlong scale = 0;
1282     Node* offset = nullptr;
1283     Node* rc_bol = rc->in(1);
1284     Node* rc_cmp = rc_bol->in(1);
1285     if (rc_cmp->Opcode() == Op_CmpU) {
1286       // could be shared and have already been taken care of
1287       continue;
1288     }
1289     bool short_scale = false;
1290     bool ok = is_scaled_iv_plus_offset(rc_cmp->in(1), iv_add, T_LONG, &scale, &offset, &short_scale);
1291     assert(ok, "inconsistent: was tested before");
1292     Node* range = rc_cmp->in(2);
1293     Node* c = rc->in(0);
1294     Node* entry_control = inner_head->in(LoopNode::EntryControl);
1295 
1296     Node* R = range;
1297     Node* K = longcon(scale);
1298 
1299     Node* L = offset;
1300 
1301     if (short_scale) {
1302       // This converts:
1303       // (int)i*K + L <u64 R
1304       // with K an int into:
1305       // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R)
1306       // to protect against an overflow of (int)i*K
1307       //
1308       // Because if (int)i*K overflows, there are K,L where:
1309       // (int)i*K + L <u64 R is false because (int)i*K+L overflows to a negative which becomes a huge u64 value.
1310       // But if i*(long)K + L is >u64 (long)max_jint and still is <u64 R, then
1311       // i*(long)K + L <u64 R is true.
1312       //
1313       // As a consequence simply converting i*K + L <u64 R to i*(long)K + L <u64 R could cause incorrect execution.
1314       //
1315       // It's always true that:
1316       // (int)i*K <u64 (long)max_jint + 1
1317       // which implies (int)i*K + L <u64 (long)max_jint + 1 + L
1318       // As a consequence:
1319       // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R)
1320       // is always false in case of overflow of i*K
1321       //
1322       // Note, there are also K,L where i*K overflows and
1323       // i*K + L <u64 R is true, but
1324       // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R) is false
1325       // So this transformation could cause spurious deoptimizations and failed range check elimination
1326       // (but not incorrect execution) for unlikely corner cases with overflow.
1327       // If this causes problems in practice, we could maybe direct execution to a post-loop, instead of deoptimizing.
1328       Node* max_jint_plus_one_long = longcon((jlong)max_jint + 1);
1329       Node* max_range = new AddLNode(max_jint_plus_one_long, L);
1330       register_new_node(max_range, entry_control);
1331       R = MaxNode::unsigned_min(R, max_range, TypeLong::POS, _igvn);
1332       set_subtree_ctrl(R, true);
1333     }
1334 
1335     Node* C = outer_phi;
1336 
1337     // Start with 64-bit values:
1338     //   i*K + L <u64 R
1339     //   (C+j)*K + L <u64 R
1340     //   j*K + Q <u64 R    where Q = Q_first = C*K+L
1341     Node* Q_first = new MulLNode(C, K);
1342     register_new_node(Q_first, entry_control);
1343     Q_first = new AddLNode(Q_first, L);
1344     register_new_node(Q_first, entry_control);
1345 
1346     // Compute endpoints of the range of values j*K + Q.
1347     //  Q_min = (j=0)*K + Q;  Q_max = (j=B_2)*K + Q
1348     Node* Q_min = Q_first;
1349 
1350     // Compute the exact ending value B_2 (which is really A_2 if S < 0)
1351     Node* B_2 = new LoopLimitNode(this->C, int_zero, inner_iters_actual_int, int_stride);
1352     register_new_node(B_2, entry_control);
1353     B_2 = new SubINode(B_2, int_stride);
1354     register_new_node(B_2, entry_control);
1355     B_2 = new ConvI2LNode(B_2);
1356     register_new_node(B_2, entry_control);
1357 
1358     Node* Q_max = new MulLNode(B_2, K);
1359     register_new_node(Q_max, entry_control);
1360     Q_max = new AddLNode(Q_max, Q_first);
1361     register_new_node(Q_max, entry_control);
1362 
1363     if (scale * stride_con < 0) {
1364       swap(Q_min, Q_max);
1365     }
1366     // Now, mathematically, Q_max > Q_min, and they are close enough so that (Q_max-Q_min) fits in 32 bits.
1367 
1368     // L_clamp = Q_min < 0 ? 0 : Q_min
1369     Node* Q_min_cmp = new CmpLNode(Q_min, long_zero);
1370     register_new_node(Q_min_cmp, entry_control);
1371     Node* Q_min_bool = new BoolNode(Q_min_cmp, BoolTest::lt);
1372     register_new_node(Q_min_bool, entry_control);
1373     Node* L_clamp = new CMoveLNode(Q_min_bool, Q_min, long_zero, TypeLong::LONG);
1374     register_new_node(L_clamp, entry_control);
1375     // (This could also be coded bitwise as L_clamp = Q_min & ~(Q_min>>63).)
1376 
1377     Node* Q_max_plus_one = new AddLNode(Q_max, long_one);
1378     register_new_node(Q_max_plus_one, entry_control);
1379 
1380     // H_clamp = Q_max+1 < Q_min ? max_jlong : Q_max+1
1381     // (Because Q_min and Q_max are close, the overflow check could also be encoded as Q_max+1 < 0 & Q_min >= 0.)
1382     Node* max_jlong_long = longcon(max_jlong);
1383     Node* Q_max_cmp = new CmpLNode(Q_max_plus_one, Q_min);
1384     register_new_node(Q_max_cmp, entry_control);
1385     Node* Q_max_bool = new BoolNode(Q_max_cmp, BoolTest::lt);
1386     register_new_node(Q_max_bool, entry_control);
1387     Node* H_clamp = new CMoveLNode(Q_max_bool, Q_max_plus_one, max_jlong_long, TypeLong::LONG);
1388     register_new_node(H_clamp, entry_control);
1389     // (This could also be coded bitwise as H_clamp = ((Q_max+1)<<1 | M)>>>1 where M = (Q_max+1)>>63 & ~Q_min>>63.)
1390 
1391     // R_2 = clamp(R, L_clamp, H_clamp) - L_clamp
1392     // that is:  R_2 = clamp(R, L_clamp=0, H_clamp=Q_max)      if Q_min < 0
1393     // or else:  R_2 = clamp(R, L_clamp,   H_clamp) - Q_min    if Q_min >= 0
1394     // and also: R_2 = clamp(R, L_clamp,   Q_max+1) - L_clamp  if Q_min < Q_max+1 (no overflow)
1395     // or else:  R_2 = clamp(R, L_clamp, *no limit*)- L_clamp  if Q_max+1 < Q_min (overflow)
1396     Node* R_2 = clamp(R, L_clamp, H_clamp);
1397     R_2 = new SubLNode(R_2, L_clamp);
1398     register_new_node(R_2, entry_control);
1399     R_2 = new ConvL2INode(R_2, TypeInt::POS);
1400     register_new_node(R_2, entry_control);
1401 
1402     // L_2 = Q_first - L_clamp
1403     // We are subtracting L_clamp from both sides of the <u32 comparison.
    // If S*K>0, then Q_first == 0 and the R.C. expression starts at -L_clamp and steps upward to Q_max-L_clamp.
1405     // If S*K<0, then Q_first != 0 and the R.C. expression starts high and steps downward to Q_min-L_clamp.
1406     Node* L_2 = new SubLNode(Q_first, L_clamp);
1407     register_new_node(L_2, entry_control);
1408     L_2 = new ConvL2INode(L_2, TypeInt::INT);
1409     register_new_node(L_2, entry_control);
1410 
1411     // Transform the range check using the computed values L_2/R_2
1412     // from:   i*K + L   <u64 R
1413     // to:     j*K + L_2 <u32 R_2
1414     // that is:
1415     //   (j*K + Q_first) - L_clamp <u32 clamp(R, L_clamp, H_clamp) - L_clamp
1416     K = intcon(checked_cast<int>(scale));
1417     Node* scaled_iv = new MulINode(inner_phi, K);
1418     register_new_node(scaled_iv, c);
1419     Node* scaled_iv_plus_offset = new AddINode(scaled_iv, L_2);
1420     register_new_node(scaled_iv_plus_offset, c);
1421 
1422     Node* new_rc_cmp = new CmpUNode(scaled_iv_plus_offset, R_2);
1423     register_new_node(new_rc_cmp, c);
1424 
1425     _igvn.replace_input_of(rc_bol, 1, new_rc_cmp);
1426   }
1427 }
1428 
1429 Node* PhaseIdealLoop::clamp(Node* R, Node* L, Node* H) {
1430   Node* min = MaxNode::signed_min(R, H, TypeLong::LONG, _igvn);
1431   set_subtree_ctrl(min, true);
1432   Node* max = MaxNode::signed_max(L, min, TypeLong::LONG, _igvn);
1433   set_subtree_ctrl(max, true);
1434   return max;
1435 }
1436 
// Replace the inner loop head and its exit test with fresh LoopNode/IfNode
// clones (e.g. to demote a counted-loop head back to a plain LoopNode).
// The clones take over the originals' inputs, loop-body membership, and
// dominator-tree positions; lazy_replace() redirects all uses of the old
// nodes to the new ones. Returns the new inner loop head.
LoopNode* PhaseIdealLoop::create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head,
                                            IfNode* exit_test) {
  // Clone with the same entry/backedge controls and the same test inputs,
  // probability and frequency as the originals.
  LoopNode* new_inner_head = new LoopNode(head->in(1), head->in(2));
  IfNode* new_inner_exit = new IfNode(exit_test->in(0), exit_test->in(1), exit_test->_prob, exit_test->_fcnt);
  _igvn.register_new_node_with_optimizer(new_inner_head);
  _igvn.register_new_node_with_optimizer(new_inner_exit);
  // Swap the clones for the originals in the loop body list.
  loop->_body.push(new_inner_head);
  loop->_body.push(new_inner_exit);
  loop->_body.yank(head);
  loop->_body.yank(exit_test);
  set_loop(new_inner_head, loop);
  set_loop(new_inner_exit, loop);
  // The clones occupy the originals' positions in the dominator tree.
  set_idom(new_inner_head, idom(head), dom_depth(head));
  set_idom(new_inner_exit, idom(exit_test), dom_depth(exit_test));
  // Redirect all uses of the old nodes to the clones.
  lazy_replace(head, new_inner_head);
  lazy_replace(exit_test, new_inner_exit);
  loop->_head = new_inner_head;
  return new_inner_head;
}
1456 
#ifdef ASSERT
// Debug-only sanity check: verify that loop 'x' has the canonical shape of a
// counted loop over type 'bt' — an exit compare on the post-incremented iv,
// a constant stride, an iv phi whose backedge input is the increment, and no
// safepoint on the backedge.
void PhaseIdealLoop::check_counted_loop_shape(IdealLoopTree* loop, Node* x, BasicType bt) {
  Node* exit_ctrl = loop_exit_control(x, loop);
  assert(exit_ctrl != nullptr, "no back control");

  BoolTest::mask exit_mask = BoolTest::illegal;
  float exit_prob = 0;
  Node* iv_incr = nullptr;
  Node* iv_limit = nullptr;

  Node* exit_cmp = loop_exit_test(exit_ctrl, loop, iv_incr, iv_limit, exit_mask, exit_prob);
  assert(exit_cmp != nullptr && exit_cmp->Opcode() == Op_Cmp(bt), "no exit test");

  Node* incr_before_phi = nullptr;
  iv_incr = loop_iv_incr(iv_incr, x, loop, incr_before_phi);
  assert(iv_incr != nullptr && iv_incr->Opcode() == Op_Add(bt), "no incr");

  Node* phi_candidate = nullptr;
  Node* iv_stride = loop_iv_stride(iv_incr, loop, phi_candidate);

  assert(iv_stride != nullptr, "no stride");

  PhiNode* iv_phi = loop_iv_phi(phi_candidate, incr_before_phi, x, loop);

  assert(iv_phi != nullptr && iv_phi->in(LoopNode::LoopBackControl) == iv_incr, "No phi");

  jlong stride_value = iv_stride->get_integer_as_long(bt);

  assert(condition_stride_ok(exit_mask, stride_value), "illegal condition");

  assert(exit_mask != BoolTest::ne, "unexpected condition");
  // Exit test must use the post-incremented iv, not the phi.
  assert(incr_before_phi == nullptr, "bad loop shape");
  assert(exit_cmp->in(1) == iv_incr, "bad exit test shape");

  // Safepoint on backedge not supported
  assert(x->in(LoopNode::LoopBackControl)->Opcode() != Op_SafePoint, "no safepoint on backedge");
}
#endif
1495 
#ifdef ASSERT
// convert an int counted loop to a long counted to stress handling of
// long counted loops
// Collects the int iv computation (the Phi/AddI/CmpI cycle reachable from
// cmp inside the loop) and clones it into equivalent long nodes
// (PhiNode(LONG)/AddL/CmpL), inserting ConvI2L for inputs coming from
// outside the cycle and ConvL2I for int uses outside it. Returns false
// (after discarding any clones) if an unsupported node (CastII) is found.
bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop) {
  Unique_Node_List iv_nodes;
  Node_List old_new;
  iv_nodes.push(cmp);
  bool failed = false;

  // Pass 1: worklist walk of the iv subgraph inside the loop, mapping each
  // supported node to a (not yet wired) long clone.
  for (uint i = 0; i < iv_nodes.size() && !failed; i++) {
    Node* n = iv_nodes.at(i);
    switch(n->Opcode()) {
      case Op_Phi: {
        Node* clone = new PhiNode(n->in(0), TypeLong::LONG);
        old_new.map(n->_idx, clone);
        break;
      }
      case Op_CmpI: {
        // Inputs are wired up in the second pass below.
        Node* clone = new CmpLNode(nullptr, nullptr);
        old_new.map(n->_idx, clone);
        break;
      }
      case Op_AddI: {
        Node* clone = new AddLNode(nullptr, nullptr);
        old_new.map(n->_idx, clone);
        break;
      }
      case Op_CastII: {
        // No long counterpart handled here: bail out.
        failed = true;
        break;
      }
      default:
        DEBUG_ONLY(n->dump());
        fatal("unexpected");
    }

    // Queue loop-internal inputs for cloning as well (inputs from outside
    // the loop are handled via ConvI2L in pass 2).
    for (uint i = 1; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in == nullptr) {
        continue;
      }
      if (loop->is_member(get_loop(get_ctrl(in)))) {
        iv_nodes.push(in);
      }
    }
  }

  if (failed) {
    // Discard the partially built clones.
    for (uint i = 0; i < iv_nodes.size(); i++) {
      Node* n = iv_nodes.at(i);
      Node* clone = old_new[n->_idx];
      if (clone != nullptr) {
        _igvn.remove_dead_node(clone);
      }
    }
    return false;
  }

  // Pass 2: wire up the clones' inputs; int inputs from outside the cloned
  // subgraph are widened with a ConvI2L.
  for (uint i = 0; i < iv_nodes.size(); i++) {
    Node* n = iv_nodes.at(i);
    Node* clone = old_new[n->_idx];
    for (uint i = 1; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in == nullptr) {
        continue;
      }
      Node* in_clone = old_new[in->_idx];
      if (in_clone == nullptr) {
        assert(_igvn.type(in)->isa_int(), "");
        in_clone = new ConvI2LNode(in);
        _igvn.register_new_node_with_optimizer(in_clone);
        set_subtree_ctrl(in_clone, false);
      }
      if (in_clone->in(0) == nullptr) {
        // NOTE(review): temporarily plug a control input while adding the
        // edge, then restore it to nullptr — presumably to keep the node
        // from looking dead/hashable during the update; confirm intent.
        in_clone->set_req(0, C->top());
        clone->set_req(i, in_clone);
        in_clone->set_req(0, nullptr);
      } else {
        clone->set_req(i, in_clone);
      }
    }
    _igvn.register_new_node_with_optimizer(clone);
  }
  set_ctrl(old_new[phi->_idx], phi->in(0));

  // Pass 3: rewire all uses outside the cloned subgraph to the long clones.
  // The CmpL clone replaces the CmpI directly; every other use gets a
  // narrowing ConvL2I (created lazily, once per node).
  for (uint i = 0; i < iv_nodes.size(); i++) {
    Node* n = iv_nodes.at(i);
    Node* clone = old_new[n->_idx];
    set_subtree_ctrl(clone, false);
    Node* m = n->Opcode() == Op_CmpI ? clone : nullptr;
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u = n->fast_out(i);
      if (iv_nodes.member(u)) {
        continue;
      }
      if (m == nullptr) {
        m = new ConvL2INode(clone);
        _igvn.register_new_node_with_optimizer(m);
        set_subtree_ctrl(m, false);
      }
      _igvn.rehash_node_delayed(u);
      int nb = u->replace_edge(n, m, &_igvn);
      // replace_edge removed nb out-edges of n: compensate the iterator.
      --i, imax -= nb;
    }
  }
  return true;
}
#endif
1604 
1605 //------------------------------is_counted_loop--------------------------------
1606 bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_bt) {
1607   PhaseGVN *gvn = &_igvn;
1608 
1609   Node* back_control = loop_exit_control(x, loop);
1610   if (back_control == nullptr) {
1611     return false;
1612   }
1613 
1614   BoolTest::mask bt = BoolTest::illegal;
1615   float cl_prob = 0;
1616   Node* incr = nullptr;
1617   Node* limit = nullptr;
1618   Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
1619   if (cmp == nullptr || cmp->Opcode() != Op_Cmp(iv_bt)) {
1620     return false; // Avoid pointer & float & 64-bit compares
1621   }
1622 
1623   // Trip-counter increment must be commutative & associative.
1624   if (incr->Opcode() == Op_Cast(iv_bt)) {
1625     incr = incr->in(1);
1626   }
1627 
1628   Node* phi_incr = nullptr;
1629   incr = loop_iv_incr(incr, x, loop, phi_incr);
1630   if (incr == nullptr) {
1631     return false;
1632   }
1633 
1634   Node* trunc1 = nullptr;
1635   Node* trunc2 = nullptr;
1636   const TypeInteger* iv_trunc_t = nullptr;
1637   Node* orig_incr = incr;
1638   if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t, iv_bt))) {
1639     return false; // Funny increment opcode
1640   }
1641   assert(incr->Opcode() == Op_Add(iv_bt), "wrong increment code");
1642 
1643   Node* xphi = nullptr;
1644   Node* stride = loop_iv_stride(incr, loop, xphi);
1645 
1646   if (stride == nullptr) {
1647     return false;
1648   }
1649 
1650   if (xphi->Opcode() == Op_Cast(iv_bt)) {
1651     xphi = xphi->in(1);
1652   }
1653 
1654   // Stride must be constant
1655   jlong stride_con = stride->get_integer_as_long(iv_bt);
1656   assert(stride_con != 0, "missed some peephole opt");
1657 
1658   PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop);
1659 
1660   if (phi == nullptr ||
1661       (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
1662       (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
1663     return false;
1664   }
1665 
1666   Node* iftrue = back_control;
1667   uint iftrue_op = iftrue->Opcode();
1668   Node* iff = iftrue->in(0);
1669   BoolNode* test = iff->in(1)->as_Bool();
1670 
1671   const TypeInteger* limit_t = gvn->type(limit)->is_integer(iv_bt);
1672   if (trunc1 != nullptr) {
1673     // When there is a truncation, we must be sure that after the truncation
1674     // the trip counter will end up higher than the limit, otherwise we are looking
1675     // at an endless loop. Can happen with range checks.
1676 
1677     // Example:
1678     // int i = 0;
    // while (true) {
    //    sum += array[i];
    //    i++;
    //    i = i & 0x7fff;
1683     //  }
1684     //
1685     // If the array is shorter than 0x8000 this exits through a AIOOB
1686     //  - Counted loop transformation is ok
1687     // If the array is longer then this is an endless loop
1688     //  - No transformation can be done.
1689 
1690     const TypeInteger* incr_t = gvn->type(orig_incr)->is_integer(iv_bt);
1691     if (limit_t->hi_as_long() > incr_t->hi_as_long()) {
1692       // if the limit can have a higher value than the increment (before the phi)
1693       return false;
1694     }
1695   }
1696 
1697   Node *init_trip = phi->in(LoopNode::EntryControl);
1698 
1699   // If iv trunc type is smaller than int, check for possible wrap.
1700   if (!TypeInteger::bottom(iv_bt)->higher_equal(iv_trunc_t)) {
1701     assert(trunc1 != nullptr, "must have found some truncation");
1702 
1703     // Get a better type for the phi (filtered thru if's)
1704     const TypeInteger* phi_ft = filtered_type(phi);
1705 
1706     // Can iv take on a value that will wrap?
1707     //
1708     // Ensure iv's limit is not within "stride" of the wrap value.
1709     //
1710     // Example for "short" type
1711     //    Truncation ensures value is in the range -32768..32767 (iv_trunc_t)
1712     //    If the stride is +10, then the last value of the induction
1713     //    variable before the increment (phi_ft->_hi) must be
1714     //    <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to
1715     //    ensure no truncation occurs after the increment.
1716 
1717     if (stride_con > 0) {
1718       if (iv_trunc_t->hi_as_long() - phi_ft->hi_as_long() < stride_con ||
1719           iv_trunc_t->lo_as_long() > phi_ft->lo_as_long()) {
1720         return false;  // truncation may occur
1721       }
1722     } else if (stride_con < 0) {
1723       if (iv_trunc_t->lo_as_long() - phi_ft->lo_as_long() > stride_con ||
1724           iv_trunc_t->hi_as_long() < phi_ft->hi_as_long()) {
1725         return false;  // truncation may occur
1726       }
1727     }
1728     // No possibility of wrap so truncation can be discarded
1729     // Promote iv type to Int
1730   } else {
1731     assert(trunc1 == nullptr && trunc2 == nullptr, "no truncation for int");
1732   }
1733 
1734   if (!condition_stride_ok(bt, stride_con)) {
1735     return false;
1736   }
1737 
1738   const TypeInteger* init_t = gvn->type(init_trip)->is_integer(iv_bt);
1739 
1740   if (stride_con > 0) {
1741     if (init_t->lo_as_long() > max_signed_integer(iv_bt) - stride_con) {
1742       return false; // cyclic loop
1743     }
1744   } else {
1745     if (init_t->hi_as_long() < min_signed_integer(iv_bt) - stride_con) {
1746       return false; // cyclic loop
1747     }
1748   }
1749 
1750   if (phi_incr != nullptr && bt != BoolTest::ne) {
1751     // check if there is a possibility of IV overflowing after the first increment
1752     if (stride_con > 0) {
1753       if (init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) {
1754         return false;
1755       }
1756     } else {
1757       if (init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con) {
1758         return false;
1759       }
1760     }
1761   }
1762 
1763   // =================================================
1764   // ---- SUCCESS!   Found A Trip-Counted Loop!  -----
1765   //
1766 
1767   if (x->Opcode() == Op_Region) {
1768     // x has not yet been transformed to Loop or LongCountedLoop.
1769     // This should only happen if we are inside an infinite loop.
1770     // It happens like this:
1771     //   build_loop_tree -> do not attach infinite loop and nested loops
1772     //   beautify_loops  -> does not transform the infinite and nested loops to LoopNode, because not attached yet
1773     //   build_loop_tree -> find and attach infinite and nested loops
1774     //   counted_loop    -> nested Regions are not yet transformed to LoopNodes, we land here
1775     assert(x->as_Region()->is_in_infinite_subgraph(),
1776            "x can only be a Region and not Loop if inside infinite loop");
1777     // Come back later when Region is transformed to LoopNode
1778     return false;
1779   }
1780 
1781   assert(x->Opcode() == Op_Loop || x->Opcode() == Op_LongCountedLoop, "regular loops only");
1782   C->print_method(PHASE_BEFORE_CLOOPS, 3, x);
1783 
1784   // ===================================================
1785   // We can only convert this loop to a counted loop if we can guarantee that the iv phi will never overflow at runtime.
1786   // This is an implicit assumption taken by some loop optimizations. We therefore must ensure this property at all cost.
1787   // At this point, we've already excluded some trivial cases where an overflow could have been proven statically.
1788   // But even though we cannot prove that an overflow will *not* happen, we still want to speculatively convert this loop
1789   // to a counted loop. This can be achieved by adding additional iv phi overflow checks before the loop. If they fail,
1790   // we trap and resume execution before the loop without having executed any iteration of the loop, yet.
1791   //
1792   // These additional iv phi overflow checks can be inserted as Loop Limit Check Predicates above the Loop Limit Check
1793   // Parse Predicate which captures a JVM state just before the entry of the loop. If there is no such Parse Predicate,
1794   // we cannot generate a Loop Limit Check Predicate and thus cannot speculatively convert the loop to a counted loop.
1795   //
1796   // In the following, we only focus on int loops with stride > 0 to keep things simple. The argumentation and proof
  // for stride < 0 is analogous. For long loops, we would replace max_int with max_long.
1798   //
1799   //
1800   // The loop to be converted does not always need to have the often used shape:
1801   //
1802   //                                                 i = init
1803   //     i = init                                loop:
1804   //     do {                                        ...
1805   //         // ...               equivalent         i+=stride
1806   //         i+=stride               <==>            if (i < limit)
1807   //     } while (i < limit);                          goto loop
1808   //                                             exit:
1809   //                                                 ...
1810   //
1811   // where the loop exit check uses the post-incremented iv phi and a '<'-operator.
1812   //
1813   // We could also have '<='-operator (or '>='-operator for negative strides) or use the pre-incremented iv phi value
1814   // in the loop exit check:
1815   //
1816   //         i = init
1817   //     loop:
1818   //         ...
1819   //         if (i <= limit)
1820   //             i+=stride
1821   //             goto loop
1822   //     exit:
1823   //         ...
1824   //
1825   // Let's define the following terms:
1826   // - iv_pre_i: The pre-incremented iv phi before the i-th iteration.
1827   // - iv_post_i: The post-incremented iv phi after the i-th iteration.
1828   //
1829   // The iv_pre_i and iv_post_i have the following relation:
1830   //      iv_pre_i + stride = iv_post_i
1831   //
1832   // When converting a loop to a counted loop, we want to have a canonicalized loop exit check of the form:
1833   //     iv_post_i < adjusted_limit
1834   //
1835   // If that is not the case, we need to canonicalize the loop exit check by using different values for adjusted_limit:
1836   // (LE1) iv_post_i < limit: Already canonicalized. We can directly use limit as adjusted_limit.
1837   //           -> adjusted_limit = limit.
1838   // (LE2) iv_post_i <= limit:
1839   //           iv_post_i < limit + 1
1840   //           -> adjusted limit = limit + 1
1841   // (LE3) iv_pre_i < limit:
1842   //           iv_pre_i + stride < limit + stride
1843   //           iv_post_i < limit + stride
1844   //           -> adjusted_limit = limit + stride
1845   // (LE4) iv_pre_i <= limit:
1846   //           iv_pre_i < limit + 1
1847   //           iv_pre_i + stride < limit + stride + 1
1848   //           iv_post_i < limit + stride + 1
1849   //           -> adjusted_limit = limit + stride + 1
1850   //
1851   // Note that:
1852   //     (AL) limit <= adjusted_limit.
1853   //
1854   // The following loop invariant has to hold for counted loops with n iterations (i.e. loop exit check true after n-th
1855   // loop iteration) and a canonicalized loop exit check to guarantee that no iv_post_i over- or underflows:
1856   // (INV) For i = 1..n, min_int <= iv_post_i <= max_int
1857   //
1858   // To prove (INV), we require the following two conditions/assumptions:
1859   // (i): adjusted_limit - 1 + stride <= max_int
1860   // (ii): init < limit
1861   //
1862   // If we can prove (INV), we know that there can be no over- or underflow of any iv phi value. We prove (INV) by
1863   // induction by assuming (i) and (ii).
1864   //
1865   // Proof by Induction
1866   // ------------------
1867   // > Base case (i = 1): We show that (INV) holds after the first iteration:
1868   //     min_int <= iv_post_1 = init + stride <= max_int
1869   // Proof:
1870   //     First, we note that (ii) implies
1871   //         (iii) init <= limit - 1
1872   //     max_int >= adjusted_limit - 1 + stride   [using (i)]
1873   //             >= limit - 1 + stride            [using (AL)]
1874   //             >= init + stride                 [using (iii)]
1875   //             >= min_int                       [using stride > 0, no underflow]
1876   // Thus, no overflow happens after the first iteration and (INV) holds for i = 1.
1877   //
1878   // Note that to prove the base case we need (i) and (ii).
1879   //
1880   // > Induction Hypothesis (i = j, j > 1): Assume that (INV) holds after the j-th iteration:
1881   //     min_int <= iv_post_j <= max_int
1882   // > Step case (i = j + 1): We show that (INV) also holds after the j+1-th iteration:
1883   //     min_int <= iv_post_{j+1} = iv_post_j + stride <= max_int
1884   // Proof:
1885   // If iv_post_j >= adjusted_limit:
1886   //     We exit the loop after the j-th iteration, and we don't execute the j+1-th iteration anymore. Thus, there is
1887   //     also no iv_{j+1}. Since (INV) holds for iv_j, there is nothing left to prove.
1888   // If iv_post_j < adjusted_limit:
1889   //     First, we note that:
1890   //         (iv) iv_post_j <= adjusted_limit - 1
1891   //     max_int >= adjusted_limit - 1 + stride    [using (i)]
1892   //             >= iv_post_j + stride             [using (iv)]
1893   //             >= min_int                        [using stride > 0, no underflow]
1894   //
1895   // Note that to prove the step case we only need (i).
1896   //
1897   // Thus, by assuming (i) and (ii), we proved (INV).
1898   //
1899   //
1900   // It is therefore enough to add the following two Loop Limit Check Predicates to check assumptions (i) and (ii):
1901   //
1902   // (1) Loop Limit Check Predicate for (i):
1903   //     Using (i): adjusted_limit - 1 + stride <= max_int
1904   //
1905   //     This condition is now restated to use limit instead of adjusted_limit:
1906   //
1907   //     To prevent an overflow of adjusted_limit -1 + stride itself, we rewrite this check to
1908   //         max_int - stride + 1 >= adjusted_limit
1909   //     We can merge the two constants into
1910   //         canonicalized_correction = stride - 1
1911   //     which gives us
1912   //        max_int - canonicalized_correction >= adjusted_limit
1913   //
1914   //     To directly use limit instead of adjusted_limit in the predicate condition, we split adjusted_limit into:
1915   //         adjusted_limit = limit + limit_correction
1916   //     Since stride > 0 and limit_correction <= stride + 1, we can restate this with no over- or underflow into:
1917   //         max_int - canonicalized_correction - limit_correction >= limit
1918   //     Since canonicalized_correction and limit_correction are both constants, we can replace them with a new constant:
1919   //         (v) final_correction = canonicalized_correction + limit_correction
1920   //
1921   //     which gives us:
1922   //
1923   //     Final predicate condition:
1924   //         max_int - final_correction >= limit
1925   //
1926   //     However, we need to be careful that (v) does not over- or underflow.
1927   //     We know that:
1928   //         canonicalized_correction = stride - 1
1929   //     and
1930   //         limit_correction <= stride + 1
1931   //     and thus
1932   //         canonicalized_correction + limit_correction <= 2 * stride
1933   //     To prevent an over- or underflow of (v), we must ensure that
1934   //         2 * stride <= max_int
1935   //     which can safely be checked without over- or underflow with
1936   //         (vi) stride != min_int AND abs(stride) <= max_int / 2
1937   //
1938   //     We could try to further optimize the cases where (vi) does not hold but given that such large strides are
1939   //     very uncommon and the loop would only run for a very few iterations anyway, we simply bail out if (vi) fails.
1940   //
1941   // (2) Loop Limit Check Predicate for (ii):
1942   //     Using (ii): init < limit
1943   //
1944   //     This Loop Limit Check Predicate is not required if we can prove at compile time that either:
1945   //        (2.1) type(init) < type(limit)
1946   //             In this case, we know:
1947   //                 all possible values of init < all possible values of limit
1948   //             and we can skip the predicate.
1949   //
1950   //        (2.2) init < limit is already checked before (i.e. found as a dominating check)
1951   //            In this case, we do not need to re-check the condition and can skip the predicate.
1952   //            This is often found for while- and for-loops which have the following shape:
1953   //
1954   //                if (init < limit) { // Dominating test. Do not need the Loop Limit Check Predicate below.
1955   //                    i = init;
1956   //                    if (init >= limit) { trap(); } // Here we would insert the Loop Limit Check Predicate
1957   //                    do {
1958   //                        i += stride;
1959   //                    } while (i < limit);
1960   //                }
1961   //
1962   //        (2.3) init + stride <= max_int
1963   //            In this case, there is no overflow of the iv phi after the first loop iteration.
1964   //            In the proof of the base case above we showed that init + stride <= max_int by using assumption (ii):
1965   //                init < limit
1966   //            In the proof of the step case above, we did not need (ii) anymore. Therefore, if we already know at
1967   //            compile time that init + stride <= max_int then we have trivially proven the base case and that
1968   //            there is no overflow of the iv phi after the first iteration. In this case, we don't need to check (ii)
1969   //            again and can skip the predicate.
1970 
1971   // Check (vi) and bail out if the stride is too big.
1972   if (stride_con == min_signed_integer(iv_bt) || (ABS(stride_con) > max_signed_integer(iv_bt) / 2)) {
1973     return false;
1974   }
1975 
1976   // Accounting for (LE3) and (LE4) where we use pre-incremented phis in the loop exit check.
1977   const jlong limit_correction_for_pre_iv_exit_check = (phi_incr != nullptr) ? stride_con : 0;
1978 
1979   // Accounting for (LE2) and (LE4) where we use <= or >= in the loop exit check.
1980   const bool includes_limit = (bt == BoolTest::le || bt == BoolTest::ge);
1981   const jlong limit_correction_for_le_ge_exit_check = (includes_limit ? (stride_con > 0 ? 1 : -1) : 0);
1982 
1983   const jlong limit_correction = limit_correction_for_pre_iv_exit_check + limit_correction_for_le_ge_exit_check;
1984   const jlong canonicalized_correction = stride_con + (stride_con > 0 ? -1 : 1);
1985   const jlong final_correction = canonicalized_correction + limit_correction;
1986 
1987   int sov = check_stride_overflow(final_correction, limit_t, iv_bt);
1988   Node* init_control = x->in(LoopNode::EntryControl);
1989 
1990   // If sov==0, limit's type always satisfies the condition, for
1991   // example, when it is an array length.
1992   if (sov != 0) {
1993     if (sov < 0) {
1994       return false;  // Bailout: integer overflow is certain.
1995     }
1996     // (1) Loop Limit Check Predicate is required because we could not statically prove that
1997     //     limit + final_correction = adjusted_limit - 1 + stride <= max_int
1998     assert(!x->as_Loop()->is_loop_nest_inner_loop(), "loop was transformed");
1999     const Predicates predicates(init_control);
2000     const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
2001     if (!loop_limit_check_predicate_block->has_parse_predicate()) {
2002       // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
2003 #ifdef ASSERT
2004       if (TraceLoopLimitCheck) {
2005         tty->print("Missing Loop Limit Check Parse Predicate:");
2006         loop->dump_head();
2007         x->dump(1);
2008       }
2009 #endif
2010       return false;
2011     }
2012 
2013     ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate();
2014     if (!is_dominator(get_ctrl(limit), loop_limit_check_parse_predicate->in(0))) {
2015       return false;
2016     }
2017 
2018     Node* cmp_limit;
2019     Node* bol;
2020 
2021     if (stride_con > 0) {
2022       cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
2023       bol = new BoolNode(cmp_limit, BoolTest::le);
2024     } else {
2025       cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
2026       bol = new BoolNode(cmp_limit, BoolTest::ge);
2027     }
2028 
2029     insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
2030   }
2031 
2032   // (2.3)
2033   const bool init_plus_stride_could_overflow =
2034           (stride_con > 0 && init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) ||
2035           (stride_con < 0 && init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con);
2036   // (2.1)
2037   const bool init_gte_limit = (stride_con > 0 && init_t->hi_as_long() >= limit_t->lo_as_long()) ||
2038                               (stride_con < 0 && init_t->lo_as_long() <= limit_t->hi_as_long());
2039 
2040   if (init_gte_limit && // (2.1)
2041      ((bt == BoolTest::ne || init_plus_stride_could_overflow) && // (2.3)
2042       !has_dominating_loop_limit_check(init_trip, limit, stride_con, iv_bt, init_control))) { // (2.2)
2043     // (2) Iteration Loop Limit Check Predicate is required because neither (2.1), (2.2), nor (2.3) holds.
2044     // We use the following condition:
2045     // - stride > 0: init < limit
2046     // - stride < 0: init > limit
2047     //
2048     // This predicate is always required if we have a non-equal-operator in the loop exit check (where stride = 1 is
2049     // a requirement). We transform the loop exit check by using a less-than-operator. By doing so, we must always
2050     // check that init < limit. Otherwise, we could have a different number of iterations at runtime.
2051 
2052     const Predicates predicates(init_control);
2053     const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
2054     if (!loop_limit_check_predicate_block->has_parse_predicate()) {
2055       // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
2056 #ifdef ASSERT
2057       if (TraceLoopLimitCheck) {
2058         tty->print("Missing Loop Limit Check Parse Predicate:");
2059         loop->dump_head();
2060         x->dump(1);
2061       }
2062 #endif
2063       return false;
2064     }
2065 
2066     ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate();
2067     Node* parse_predicate_entry = loop_limit_check_parse_predicate->in(0);
2068     if (!is_dominator(get_ctrl(limit), parse_predicate_entry) ||
2069         !is_dominator(get_ctrl(init_trip), parse_predicate_entry)) {
2070       return false;
2071     }
2072 
2073     Node* cmp_limit;
2074     Node* bol;
2075 
2076     if (stride_con > 0) {
2077       cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
2078       bol = new BoolNode(cmp_limit, BoolTest::lt);
2079     } else {
2080       cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
2081       bol = new BoolNode(cmp_limit, BoolTest::gt);
2082     }
2083 
2084     insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
2085   }
2086 
2087   if (bt == BoolTest::ne) {
2088     // Now we need to canonicalize the loop condition if it is 'ne'.
2089     assert(stride_con == 1 || stride_con == -1, "simple increment only - checked before");
2090     if (stride_con > 0) {
2091       // 'ne' can be replaced with 'lt' only when init < limit. This is ensured by the inserted predicate above.
2092       bt = BoolTest::lt;
2093     } else {
2094       assert(stride_con < 0, "must be");
2095       // 'ne' can be replaced with 'gt' only when init > limit. This is ensured by the inserted predicate above.
2096       bt = BoolTest::gt;
2097     }
2098   }
2099 
2100   Node* sfpt = nullptr;
2101   if (loop->_child == nullptr) {
2102     sfpt = find_safepoint(back_control, x, loop);
2103   } else {
2104     sfpt = iff->in(0);
2105     if (sfpt->Opcode() != Op_SafePoint) {
2106       sfpt = nullptr;
2107     }
2108   }
2109 
2110   if (x->in(LoopNode::LoopBackControl)->Opcode() == Op_SafePoint) {
2111     Node* backedge_sfpt = x->in(LoopNode::LoopBackControl);
2112     if (((iv_bt == T_INT && LoopStripMiningIter != 0) ||
2113          iv_bt == T_LONG) &&
2114         sfpt == nullptr) {
2115       // Leaving the safepoint on the backedge and creating a
2116       // CountedLoop will confuse optimizations. We can't move the
2117       // safepoint around because its jvm state wouldn't match a new
2118       // location. Give up on that loop.
2119       return false;
2120     }
2121     if (is_deleteable_safept(backedge_sfpt)) {
2122       lazy_replace(backedge_sfpt, iftrue);
2123       if (loop->_safepts != nullptr) {
2124         loop->_safepts->yank(backedge_sfpt);
2125       }
2126       loop->_tail = iftrue;
2127     }
2128   }
2129 
2130 
2131 #ifdef ASSERT
2132   if (iv_bt == T_INT &&
2133       !x->as_Loop()->is_loop_nest_inner_loop() &&
2134       StressLongCountedLoop > 0 &&
2135       trunc1 == nullptr &&
2136       convert_to_long_loop(cmp, phi, loop)) {
2137     return false;
2138   }
2139 #endif
2140 
2141   Node* adjusted_limit = limit;
2142   if (phi_incr != nullptr) {
2143     // If compare points directly to the phi we need to adjust
2144     // the compare so that it points to the incr. Limit have
2145     // to be adjusted to keep trip count the same and we
2146     // should avoid int overflow.
2147     //
2148     //   i = init; do {} while(i++ < limit);
2149     // is converted to
2150     //   i = init; do {} while(++i < limit+1);
2151     //
2152     adjusted_limit = gvn->transform(AddNode::make(limit, stride, iv_bt));
2153   }
2154 
2155   if (includes_limit) {
2156     // The limit check guaranties that 'limit <= (max_jint - stride)' so
2157     // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
2158     //
2159     Node* one = (stride_con > 0) ? gvn->integercon( 1, iv_bt) : gvn->integercon(-1, iv_bt);
2160     adjusted_limit = gvn->transform(AddNode::make(adjusted_limit, one, iv_bt));
2161     if (bt == BoolTest::le)
2162       bt = BoolTest::lt;
2163     else if (bt == BoolTest::ge)
2164       bt = BoolTest::gt;
2165     else
2166       ShouldNotReachHere();
2167   }
2168   set_subtree_ctrl(adjusted_limit, false);
2169 
2170   // Build a canonical trip test.
2171   // Clone code, as old values may be in use.
2172   incr = incr->clone();
2173   incr->set_req(1,phi);
2174   incr->set_req(2,stride);
2175   incr = _igvn.register_new_node_with_optimizer(incr);
2176   set_early_ctrl(incr, false);
2177   _igvn.rehash_node_delayed(phi);
2178   phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
2179 
2180   // If phi type is more restrictive than Int, raise to
2181   // Int to prevent (almost) infinite recursion in igvn
2182   // which can only handle integer types for constants or minint..maxint.
2183   if (!TypeInteger::bottom(iv_bt)->higher_equal(phi->bottom_type())) {
2184     Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInteger::bottom(iv_bt));
2185     nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
2186     nphi = _igvn.register_new_node_with_optimizer(nphi);
2187     set_ctrl(nphi, get_ctrl(phi));
2188     _igvn.replace_node(phi, nphi);
2189     phi = nphi->as_Phi();
2190   }
2191   cmp = cmp->clone();
2192   cmp->set_req(1,incr);
2193   cmp->set_req(2, adjusted_limit);
2194   cmp = _igvn.register_new_node_with_optimizer(cmp);
2195   set_ctrl(cmp, iff->in(0));
2196 
2197   test = test->clone()->as_Bool();
2198   (*(BoolTest*)&test->_test)._test = bt;
2199   test->set_req(1,cmp);
2200   _igvn.register_new_node_with_optimizer(test);
2201   set_ctrl(test, iff->in(0));
2202 
2203   // Replace the old IfNode with a new LoopEndNode
2204   Node *lex = _igvn.register_new_node_with_optimizer(BaseCountedLoopEndNode::make(iff->in(0), test, cl_prob, iff->as_If()->_fcnt, iv_bt));
2205   IfNode *le = lex->as_If();
2206   uint dd = dom_depth(iff);
2207   set_idom(le, le->in(0), dd); // Update dominance for loop exit
2208   set_loop(le, loop);
2209 
2210   // Get the loop-exit control
2211   Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));
2212 
2213   // Need to swap loop-exit and loop-back control?
2214   if (iftrue_op == Op_IfFalse) {
2215     Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le));
2216     Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le));
2217 
2218     loop->_tail = back_control = ift2;
2219     set_loop(ift2, loop);
2220     set_loop(iff2, get_loop(iffalse));
2221 
2222     // Lazy update of 'get_ctrl' mechanism.
2223     lazy_replace(iffalse, iff2);
2224     lazy_replace(iftrue,  ift2);
2225 
2226     // Swap names
2227     iffalse = iff2;
2228     iftrue  = ift2;
2229   } else {
2230     _igvn.rehash_node_delayed(iffalse);
2231     _igvn.rehash_node_delayed(iftrue);
2232     iffalse->set_req_X( 0, le, &_igvn );
2233     iftrue ->set_req_X( 0, le, &_igvn );
2234   }
2235 
2236   set_idom(iftrue,  le, dd+1);
2237   set_idom(iffalse, le, dd+1);
2238   assert(iff->outcnt() == 0, "should be dead now");
2239   lazy_replace( iff, le ); // fix 'get_ctrl'
2240 
2241   Node* entry_control = init_control;
2242   bool strip_mine_loop = iv_bt == T_INT &&
2243                          loop->_child == nullptr &&
2244                          sfpt != nullptr &&
2245                          !loop->_has_call &&
2246                          is_deleteable_safept(sfpt);
2247   IdealLoopTree* outer_ilt = nullptr;
2248   if (strip_mine_loop) {
2249     outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop,
2250                                               cl_prob, le->_fcnt, entry_control,
2251                                               iffalse);
2252   }
2253 
2254   // Now setup a new CountedLoopNode to replace the existing LoopNode
2255   BaseCountedLoopNode *l = BaseCountedLoopNode::make(entry_control, back_control, iv_bt);
2256   l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
2257   // The following assert is approximately true, and defines the intention
2258   // of can_be_counted_loop.  It fails, however, because phase->type
2259   // is not yet initialized for this loop and its parts.
2260   //assert(l->can_be_counted_loop(this), "sanity");
2261   _igvn.register_new_node_with_optimizer(l);
2262   set_loop(l, loop);
2263   loop->_head = l;
2264   // Fix all data nodes placed at the old loop head.
2265   // Uses the lazy-update mechanism of 'get_ctrl'.
2266   lazy_replace( x, l );
2267   set_idom(l, entry_control, dom_depth(entry_control) + 1);
2268 
2269   if (iv_bt == T_INT && (LoopStripMiningIter == 0 || strip_mine_loop)) {
2270     // Check for immediately preceding SafePoint and remove
2271     if (sfpt != nullptr && (strip_mine_loop || is_deleteable_safept(sfpt))) {
2272       if (strip_mine_loop) {
2273         Node* outer_le = outer_ilt->_tail->in(0);
2274         Node* sfpt_clone = sfpt->clone();
2275         sfpt_clone->set_req(0, iffalse);
2276         outer_le->set_req(0, sfpt_clone);
2277 
2278         Node* polladdr = sfpt_clone->in(TypeFunc::Parms);
2279         if (polladdr != nullptr && polladdr->is_Load()) {
2280           // Polling load should be pinned outside inner loop.
2281           Node* new_polladdr = polladdr->clone();
2282           new_polladdr->set_req(0, iffalse);
2283           _igvn.register_new_node_with_optimizer(new_polladdr, polladdr);
2284           set_ctrl(new_polladdr, iffalse);
2285           sfpt_clone->set_req(TypeFunc::Parms, new_polladdr);
2286         }
2287         // When this code runs, loop bodies have not yet been populated.
2288         const bool body_populated = false;
2289         register_control(sfpt_clone, outer_ilt, iffalse, body_populated);
2290         set_idom(outer_le, sfpt_clone, dom_depth(sfpt_clone));
2291       }
2292       lazy_replace(sfpt, sfpt->in(TypeFunc::Control));
2293       if (loop->_safepts != nullptr) {
2294         loop->_safepts->yank(sfpt);
2295       }
2296     }
2297   }
2298 
2299 #ifdef ASSERT
2300   assert(l->is_valid_counted_loop(iv_bt), "counted loop shape is messed up");
2301   assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" );
2302 #endif
2303 #ifndef PRODUCT
2304   if (TraceLoopOpts) {
2305     tty->print("Counted      ");
2306     loop->dump_head();
2307   }
2308 #endif
2309 
2310   C->print_method(PHASE_AFTER_CLOOPS, 3, l);
2311 
2312   // Capture bounds of the loop in the induction variable Phi before
2313   // subsequent transformation (iteration splitting) obscures the
2314   // bounds
2315   l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn));
2316 
2317   if (strip_mine_loop) {
2318     l->mark_strip_mined();
2319     l->verify_strip_mined(1);
2320     outer_ilt->_head->as_Loop()->verify_strip_mined(1);
2321     loop = outer_ilt;
2322   }
2323 
2324 #ifndef PRODUCT
2325   if (x->as_Loop()->is_loop_nest_inner_loop() && iv_bt == T_LONG) {
2326     Atomic::inc(&_long_loop_counted_loops);
2327   }
2328 #endif
2329   if (iv_bt == T_LONG && x->as_Loop()->is_loop_nest_outer_loop()) {
2330     l->mark_loop_nest_outer_loop();
2331   }
2332 
2333   return true;
2334 }
2335 
2336 // Check if there is a dominating loop limit check of the form 'init < limit' starting at the loop entry.
2337 // If there is one, then we do not need to create an additional Loop Limit Check Predicate.
2338 bool PhaseIdealLoop::has_dominating_loop_limit_check(Node* init_trip, Node* limit, const jlong stride_con,
2339                                                      const BasicType iv_bt, Node* loop_entry) {
2340   // Eagerly call transform() on the Cmp and Bool node to common them up if possible. This is required in order to
2341   // successfully find a dominated test with the If node below.
2342   Node* cmp_limit;
2343   Node* bol;
2344   if (stride_con > 0) {
2345     cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
2346     bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::lt));
2347   } else {
2348     cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
2349     bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::gt));
2350   }
2351 
2352   // Check if there is already a dominating init < limit check. If so, we do not need a Loop Limit Check Predicate.
2353   IfNode* iff = new IfNode(loop_entry, bol, PROB_MIN, COUNT_UNKNOWN);
2354   // Also add fake IfProj nodes in order to call transform() on the newly created IfNode.
2355   IfFalseNode* if_false = new IfFalseNode(iff);
2356   IfTrueNode* if_true = new IfTrueNode(iff);
2357   Node* dominated_iff = _igvn.transform(iff);
2358   // ConI node? Found dominating test (IfNode::dominated_by() returns a ConI node).
2359   const bool found_dominating_test = dominated_iff != nullptr && dominated_iff->is_ConI();
2360 
2361   // Kill the If with its projections again in the next IGVN round by cutting it off from the graph.
2362   _igvn.replace_input_of(iff, 0, C->top());
2363   _igvn.replace_input_of(iff, 1, C->top());
2364   return found_dominating_test;
2365 }
2366 
//----------------------exact_limit-------------------------------------------
// Return the exact final value of the induction variable for this int
// counted loop. With |stride| == 1 (or an already-present LoopLimit node)
// the loop's limit is used as-is; with constant bounds the final value is
// computed as a constant; otherwise a LoopLimitNode is emitted to compute
// it at runtime.
Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
  assert(loop->_head->is_CountedLoop(), "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->is_valid_counted_loop(T_INT), "");

  if (cl->stride_con() == 1 ||
      cl->stride_con() == -1 ||
      cl->limit()->Opcode() == Op_LoopLimit) {
    // Old code has exact limit (it could be incorrect in case of int overflow).
    // Loop limit is exact with stride == 1. And loop may already have exact limit.
    return cl->limit();
  }
  Node *limit = nullptr;
#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
#endif
  if (cl->has_exact_trip_count()) {
    // Simple case: loop has constant boundaries.
    // Use jlongs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong  init_con = cl->init_trip()->get_int();
    // NOTE(review): limit_con appears to be unused below — candidate for removal.
    jlong limit_con = cl->limit()->get_int();
    julong trip_cnt = cl->trip_count();
    // final = init + trip_count * stride, evaluated in 64 bits.
    jlong final_con = init_con + trip_cnt*stride_con;
    int final_int = (int)final_con;
    // The final value should be in integer range since the loop
    // is counted and the limit was checked for overflow.
    assert(final_con == (jlong)final_int, "final value should be integer");
    limit = _igvn.intcon(final_int);
  } else {
    // Create new LoopLimit node to get exact limit (final iv value).
    limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
    register_new_node(limit, cl->in(LoopNode::EntryControl));
  }
  assert(limit != nullptr, "sanity");
  return limit;
}
2406 
2407 //------------------------------Ideal------------------------------------------
2408 // Return a node which is more "ideal" than the current node.
2409 // Attempt to convert into a counted-loop.
2410 Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2411   if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) {
2412     phase->C->set_major_progress();
2413   }
2414   return RegionNode::Ideal(phase, can_reshape);
2415 }
2416 
#ifdef ASSERT
// Verify the invariants of a strip-mined loop nest: an inner CountedLoop
// marked strip-mined wrapped by an OuterStripMinedLoop, with the expected
// control backbone (inner exit -> safepoint -> outer loop end -> outer
// backedge) and only permitted nodes (phis, sunk stores) on the outer loop.
// expect_skeleton: 1 = expect a skeleton outer loop end (constant-0 test),
// 0 = expect none, -1 = don't care.
void LoopNode::verify_strip_mined(int expect_skeleton) const {
  const OuterStripMinedLoopNode* outer = nullptr;
  const CountedLoopNode* inner = nullptr;
  if (is_strip_mined()) {
    if (!is_valid_counted_loop(T_INT)) {
      return; // Skip malformed counted loop
    }
    assert(is_CountedLoop(), "no Loop should be marked strip mined");
    inner = as_CountedLoop();
    outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop();
  } else if (is_OuterStripMinedLoop()) {
    outer = this->as_OuterStripMinedLoop();
    inner = outer->unique_ctrl_out()->as_CountedLoop();
    assert(inner->is_valid_counted_loop(T_INT) && inner->is_strip_mined(), "OuterStripMinedLoop should have been removed");
    assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined");
  }
  if (inner != nullptr || outer != nullptr) {
    assert(inner != nullptr && outer != nullptr, "missing loop in strip mined nest");
    // Walk the expected control backbone of the outer loop.
    Node* outer_tail = outer->in(LoopNode::LoopBackControl);
    Node* outer_le = outer_tail->in(0);
    assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If");
    Node* sfpt = outer_le->in(0);
    assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?");
    Node* inner_out = sfpt->in(0);
    CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
    assert(cle == inner->loopexit_or_null(), "mismatch");
    // The outer loop end is a "skeleton" if its condition is the constant 0.
    bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
    if (has_skeleton) {
      assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
      assert(outer->outcnt() == 2, "only control nodes");
    } else {
      assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?");
      uint phis = 0;
      uint be_loads = 0;
      Node* be = inner->in(LoopNode::LoopBackControl);
      // Count inner-loop phis, and how many of them merge a load pinned on
      // the inner backedge (control input or precedence edge on 'be').
      for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) {
        Node* u = inner->fast_out(i);
        if (u->is_Phi()) {
          phis++;
          for (DUIterator_Fast jmax, j = be->fast_outs(jmax); j < jmax; j++) {
            Node* n = be->fast_out(j);
            if (n->is_Load()) {
              assert(n->in(0) == be || n->find_prec_edge(be) > 0, "should be on the backedge");
              // Follow the first use chain until the consuming Phi is found.
              do {
                n = n->raw_out(0);
              } while (!n->is_Phi());
              if (n == u) {
                be_loads++;
                break;
              }
            }
          }
        }
      }
      assert(be_loads <= phis, "wrong number phis that depends on a pinned load");
      // Only the outer loop itself, the inner loop and phis may use the
      // outer loop head.
      for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) {
        Node* u = outer->fast_out(i);
        assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop");
      }
      uint stores = 0;
      // Stores hanging off the inner loop exit (e.g. sunk out of the loop).
      for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
        Node* u = inner_out->fast_out(i);
        if (u->is_Store()) {
          stores++;
        }
      }
      // Late optimization of loads on backedge can cause Phi of outer loop to be eliminated but Phi of inner loop is
      // not guaranteed to be optimized out.
      assert(outer->outcnt() >= phis + 2 - be_loads && outer->outcnt() <= phis + 2 + stores + 1, "only phis");
    }
    assert(sfpt->outcnt() == 1, "no data node");
    assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node");
  }
}
#endif
2493 
2494 //=============================================================================
2495 //------------------------------Ideal------------------------------------------
2496 // Return a node which is more "ideal" than the current node.
2497 // Attempt to convert into a counted-loop.
2498 Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2499   return RegionNode::Ideal(phase, can_reshape);
2500 }
2501 
2502 //------------------------------dump_spec--------------------------------------
2503 // Dump special per-node info
2504 #ifndef PRODUCT
2505 void CountedLoopNode::dump_spec(outputStream *st) const {
2506   LoopNode::dump_spec(st);
2507   if (stride_is_con()) {
2508     st->print("stride: %d ",stride_con());
2509   }
2510   if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
2511   if (is_main_loop()) st->print("main of N%d", _idx);
2512   if (is_post_loop()) st->print("post of N%d", _main_idx);
2513   if (is_strip_mined()) st->print(" strip mined");
2514 }
2515 #endif
2516 
2517 //=============================================================================
2518 jlong BaseCountedLoopEndNode::stride_con() const {
2519   return stride()->bottom_type()->is_integer(bt())->get_con_as_long(bt());
2520 }
2521 
2522 
2523 BaseCountedLoopEndNode* BaseCountedLoopEndNode::make(Node* control, Node* test, float prob, float cnt, BasicType bt) {
2524   if (bt == T_INT) {
2525     return new CountedLoopEndNode(control, test, prob, cnt);
2526   }
2527   assert(bt == T_LONG, "unsupported");
2528   return new LongCountedLoopEndNode(control, test, prob, cnt);
2529 }
2530 
2531 //=============================================================================
2532 //------------------------------Value-----------------------------------------
2533 const Type* LoopLimitNode::Value(PhaseGVN* phase) const {
2534   const Type* init_t   = phase->type(in(Init));
2535   const Type* limit_t  = phase->type(in(Limit));
2536   const Type* stride_t = phase->type(in(Stride));
2537   // Either input is TOP ==> the result is TOP
2538   if (init_t   == Type::TOP) return Type::TOP;
2539   if (limit_t  == Type::TOP) return Type::TOP;
2540   if (stride_t == Type::TOP) return Type::TOP;
2541 
2542   int stride_con = stride_t->is_int()->get_con();
2543   if (stride_con == 1)
2544     return bottom_type();  // Identity
2545 
2546   if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
2547     // Use jlongs to avoid integer overflow.
2548     jlong init_con   =  init_t->is_int()->get_con();
2549     jlong limit_con  = limit_t->is_int()->get_con();
2550     int  stride_m   = stride_con - (stride_con > 0 ? 1 : -1);
2551     jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
2552     jlong final_con  = init_con + stride_con*trip_count;
2553     int final_int = (int)final_con;
2554     // The final value should be in integer range in almost all cases,
2555     // since the loop is counted and the limit was checked for overflow.
2556     // There some exceptions, for example:
2557     // - During CCP, there might be a temporary overflow from PhiNodes, see JDK-8309266.
2558     // - During PhaseIdealLoop::split_thru_phi, the LoopLimitNode floats possibly far above
2559     //   the loop and its predicates, and we might get constants on one side of the phi that
2560     //   would lead to overflows. Such a code path would never lead us to enter the loop
2561     //   because of the loop limit overflow check that happens after the LoopLimitNode
2562     //   computation with overflow, but before we enter the loop, see JDK-8335747.
2563     if (final_con == (jlong)final_int) {
2564       return TypeInt::make(final_int);
2565     } else {
2566       return bottom_type();
2567     }
2568   }
2569 
2570   return bottom_type(); // TypeInt::INT
2571 }
2572 
2573 //------------------------------Ideal------------------------------------------
2574 // Return a node which is more "ideal" than the current node.
2575 Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2576   if (phase->type(in(Init))   == Type::TOP ||
2577       phase->type(in(Limit))  == Type::TOP ||
2578       phase->type(in(Stride)) == Type::TOP)
2579     return nullptr;  // Dead
2580 
2581   int stride_con = phase->type(in(Stride))->is_int()->get_con();
2582   if (stride_con == 1)
2583     return nullptr;  // Identity
2584 
2585   // Delay following optimizations until all loop optimizations
2586   // done to keep Ideal graph simple.
2587   if (!can_reshape || !phase->C->post_loop_opts_phase()) {
2588     phase->C->record_for_post_loop_opts_igvn(this);
2589     return nullptr;
2590   }
2591 
2592   const TypeInt* init_t  = phase->type(in(Init) )->is_int();
2593   const TypeInt* limit_t = phase->type(in(Limit))->is_int();
2594   jlong stride_p;
2595   jlong lim, ini;
2596   julong max;
2597   if (stride_con > 0) {
2598     stride_p = stride_con;
2599     lim = limit_t->_hi;
2600     ini = init_t->_lo;
2601     max = (julong)max_jint;
2602   } else {
2603     stride_p = -(jlong)stride_con;
2604     lim = init_t->_hi;
2605     ini = limit_t->_lo;
2606     max = (julong)(juint)min_jint; // double cast to get 0x0000000080000000, not 0xffffffff80000000
2607   }
2608   julong range = lim - ini + stride_p;
2609   if (range <= max) {
2610     // Convert to integer expression if it is not overflow.
2611     Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
2612     Node *range = phase->transform(new SubINode(in(Limit), in(Init)));
2613     Node *bias  = phase->transform(new AddINode(range, stride_m));
2614     Node *trip  = phase->transform(new DivINode(nullptr, bias, in(Stride)));
2615     Node *span  = phase->transform(new MulINode(trip, in(Stride)));
2616     return new AddINode(span, in(Init)); // exact limit
2617   }
2618 
2619   if (is_power_of_2(stride_p) ||                // divisor is 2^n
2620       !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
2621     // Convert to long expression to avoid integer overflow
2622     // and let igvn optimizer convert this division.
2623     //
2624     Node*   init   = phase->transform( new ConvI2LNode(in(Init)));
2625     Node*  limit   = phase->transform( new ConvI2LNode(in(Limit)));
2626     Node* stride   = phase->longcon(stride_con);
2627     Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));
2628 
2629     Node *range = phase->transform(new SubLNode(limit, init));
2630     Node *bias  = phase->transform(new AddLNode(range, stride_m));
2631     Node *span;
2632     if (stride_con > 0 && is_power_of_2(stride_p)) {
2633       // bias >= 0 if stride >0, so if stride is 2^n we can use &(-stride)
2634       // and avoid generating rounding for division. Zero trip guard should
2635       // guarantee that init < limit but sometimes the guard is missing and
2636       // we can get situation when init > limit. Note, for the empty loop
2637       // optimization zero trip guard is generated explicitly which leaves
2638       // only RCE predicate where exact limit is used and the predicate
2639       // will simply fail forcing recompilation.
2640       Node* neg_stride   = phase->longcon(-stride_con);
2641       span = phase->transform(new AndLNode(bias, neg_stride));
2642     } else {
2643       Node *trip  = phase->transform(new DivLNode(nullptr, bias, stride));
2644       span = phase->transform(new MulLNode(trip, stride));
2645     }
2646     // Convert back to int
2647     Node *span_int = phase->transform(new ConvL2INode(span));
2648     return new AddINode(span_int, in(Init)); // exact limit
2649   }
2650 
2651   return nullptr;    // No progress
2652 }
2653 
2654 //------------------------------Identity---------------------------------------
2655 // If stride == 1 return limit node.
2656 Node* LoopLimitNode::Identity(PhaseGVN* phase) {
2657   int stride_con = phase->type(in(Stride))->is_int()->get_con();
2658   if (stride_con == 1 || stride_con == -1)
2659     return in(Limit);
2660   return this;
2661 }
2662 
2663 //=============================================================================
2664 //----------------------match_incr_with_optional_truncation--------------------
2665 // Match increment with optional truncation:
2666 // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
2667 // Return null for failure. Success returns the increment node.
2668 Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2,
2669                                                            const TypeInteger** trunc_type,
2670                                                            BasicType bt) {
2671   // Quick cutouts:
2672   if (expr == nullptr || expr->req() != 3)  return nullptr;
2673 
2674   Node *t1 = nullptr;
2675   Node *t2 = nullptr;
2676   Node* n1 = expr;
2677   int   n1op = n1->Opcode();
2678   const TypeInteger* trunc_t = TypeInteger::bottom(bt);
2679 
2680   if (bt == T_INT) {
2681     // Try to strip (n1 & M) or (n1 << N >> N) from n1.
2682     if (n1op == Op_AndI &&
2683         n1->in(2)->is_Con() &&
2684         n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) {
2685       // %%% This check should match any mask of 2**K-1.
2686       t1 = n1;
2687       n1 = t1->in(1);
2688       n1op = n1->Opcode();
2689       trunc_t = TypeInt::CHAR;
2690     } else if (n1op == Op_RShiftI &&
2691                n1->in(1) != nullptr &&
2692                n1->in(1)->Opcode() == Op_LShiftI &&
2693                n1->in(2) == n1->in(1)->in(2) &&
2694                n1->in(2)->is_Con()) {
2695       jint shift = n1->in(2)->bottom_type()->is_int()->get_con();
2696       // %%% This check should match any shift in [1..31].
2697       if (shift == 16 || shift == 8) {
2698         t1 = n1;
2699         t2 = t1->in(1);
2700         n1 = t2->in(1);
2701         n1op = n1->Opcode();
2702         if (shift == 16) {
2703           trunc_t = TypeInt::SHORT;
2704         } else if (shift == 8) {
2705           trunc_t = TypeInt::BYTE;
2706         }
2707       }
2708     }
2709   }
2710 
2711   // If (maybe after stripping) it is an AddI, we won:
2712   if (n1op == Op_Add(bt)) {
2713     *trunc1 = t1;
2714     *trunc2 = t2;
2715     *trunc_type = trunc_t;
2716     return n1;
2717   }
2718 
2719   // failed
2720   return nullptr;
2721 }
2722 
2723 LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) {
2724   if (is_strip_mined() && in(EntryControl) != nullptr && in(EntryControl)->is_OuterStripMinedLoop()) {
2725     verify_strip_mined(expect_skeleton);
2726     return in(EntryControl)->as_Loop();
2727   }
2728   return this;
2729 }
2730 
2731 OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const {
2732   assert(is_strip_mined(), "not a strip mined loop");
2733   Node* c = in(EntryControl);
2734   if (c == nullptr || c->is_top() || !c->is_OuterStripMinedLoop()) {
2735     return nullptr;
2736   }
2737   return c->as_OuterStripMinedLoop();
2738 }
2739 
2740 IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const {
2741   Node* c = in(LoopBackControl);
2742   if (c == nullptr || c->is_top()) {
2743     return nullptr;
2744   }
2745   return c->as_IfTrue();
2746 }
2747 
2748 IfTrueNode* CountedLoopNode::outer_loop_tail() const {
2749   LoopNode* l = outer_loop();
2750   if (l == nullptr) {
2751     return nullptr;
2752   }
2753   return l->outer_loop_tail();
2754 }
2755 
2756 OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const {
2757   IfTrueNode* proj = outer_loop_tail();
2758   if (proj == nullptr) {
2759     return nullptr;
2760   }
2761   Node* c = proj->in(0);
2762   if (c == nullptr || c->is_top() || c->outcnt() != 2) {
2763     return nullptr;
2764   }
2765   return c->as_OuterStripMinedLoopEnd();
2766 }
2767 
2768 OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const {
2769   LoopNode* l = outer_loop();
2770   if (l == nullptr) {
2771     return nullptr;
2772   }
2773   return l->outer_loop_end();
2774 }
2775 
2776 IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const {
2777   IfNode* le = outer_loop_end();
2778   if (le == nullptr) {
2779     return nullptr;
2780   }
2781   Node* c = le->proj_out_or_null(false);
2782   if (c == nullptr) {
2783     return nullptr;
2784   }
2785   return c->as_IfFalse();
2786 }
2787 
2788 IfFalseNode* CountedLoopNode::outer_loop_exit() const {
2789   LoopNode* l = outer_loop();
2790   if (l == nullptr) {
2791     return nullptr;
2792   }
2793   return l->outer_loop_exit();
2794 }
2795 
2796 SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const {
2797   IfNode* le = outer_loop_end();
2798   if (le == nullptr) {
2799     return nullptr;
2800   }
2801   Node* c = le->in(0);
2802   if (c == nullptr || c->is_top()) {
2803     return nullptr;
2804   }
2805   assert(c->Opcode() == Op_SafePoint, "broken outer loop");
2806   return c->as_SafePoint();
2807 }
2808 
2809 SafePointNode* CountedLoopNode::outer_safepoint() const {
2810   LoopNode* l = outer_loop();
2811   if (l == nullptr) {
2812     return nullptr;
2813   }
2814   return l->outer_safepoint();
2815 }
2816 
2817 Node* CountedLoopNode::skip_assertion_predicates_with_halt() {
2818   Node* ctrl = in(LoopNode::EntryControl);
2819   if (ctrl == nullptr) {
2820     // Dying loop.
2821     return nullptr;
2822   }
2823   if (is_main_loop()) {
2824     ctrl = skip_strip_mined()->in(LoopNode::EntryControl);
2825   }
2826   if (is_main_loop() || is_post_loop()) {
2827     AssertionPredicates assertion_predicates(ctrl);
2828     return assertion_predicates.entry();
2829   }
2830   return ctrl;
2831 }
2832 
2833 
2834 int CountedLoopNode::stride_con() const {
2835   CountedLoopEndNode* cle = loopexit_or_null();
2836   return cle != nullptr ? cle->stride_con() : 0;
2837 }
2838 
2839 BaseCountedLoopNode* BaseCountedLoopNode::make(Node* entry, Node* backedge, BasicType bt) {
2840   if (bt == T_INT) {
2841     return new CountedLoopNode(entry, backedge);
2842   }
2843   assert(bt == T_LONG, "unsupported");
2844   return new LongCountedLoopNode(entry, backedge);
2845 }
2846 
// Fix the memory graph for chains of stores that were sunk out of the inner
// (strip mined) loop: such stores are control dependent on the inner counted
// loop end's exit projection and so sit in the outer loop. Each chain must be
// threaded through a memory Phi on the inner loop head for its alias slice:
// create a new Phi if the entire chain was sunk, or rewire the existing Phi's
// backedge otherwise.
void OuterStripMinedLoopNode::fix_sunk_stores(CountedLoopEndNode* inner_cle, LoopNode* inner_cl, PhaseIterGVN* igvn,
                                              PhaseIdealLoop* iloop) {
  Node* cle_out = inner_cle->proj_out(false);
  Node* cle_tail = inner_cle->proj_out(true);
  if (cle_out->outcnt() > 1) {
    // Look for chains of stores that were sunk
    // out of the inner loop and are in the outer loop
    for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) {
      Node* u = cle_out->fast_out(i);
      if (u->is_Store()) {
        int alias_idx = igvn->C->get_alias_index(u->adr_type());
        // Walk the memory inputs up to the first store of the sunk chain
        // (all stores of one chain share the same alias index).
        Node* first = u;
        for (;;) {
          Node* next = first->in(MemNode::Memory);
          if (!next->is_Store() || next->in(0) != cle_out) {
            break;
          }
          assert(igvn->C->get_alias_index(next->adr_type()) == alias_idx, "");
          first = next;
        }
        // Walk the memory uses down to the last store of the sunk chain.
        Node* last = u;
        for (;;) {
          Node* next = nullptr;
          for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) {
            Node* uu = last->fast_out(j);
            if (uu->is_Store() && uu->in(0) == cle_out) {
              assert(next == nullptr, "only one in the outer loop");
              next = uu;
              assert(igvn->C->get_alias_index(next->adr_type()) == alias_idx, "");
            }
          }
          if (next == nullptr) {
            break;
          }
          last = next;
        }
        // Find the memory Phi on the inner loop head whose backedge connects
        // to this chain of sunk stores, if one exists.
        Node* phi = nullptr;
        for (DUIterator_Fast jmax, j = inner_cl->fast_outs(jmax); j < jmax; j++) {
          Node* uu = inner_cl->fast_out(j);
          if (uu->is_Phi()) {
            Node* be = uu->in(LoopNode::LoopBackControl);
            if (be->is_Store() && be->in(0) == inner_cl->in(LoopNode::LoopBackControl)) {
              assert(igvn->C->get_alias_index(uu->adr_type()) != alias_idx && igvn->C->get_alias_index(uu->adr_type()) != Compile::AliasIdxBot, "unexpected store");
            }
            if (be == last || be == first->in(MemNode::Memory)) {
              assert(igvn->C->get_alias_index(uu->adr_type()) == alias_idx || igvn->C->get_alias_index(uu->adr_type()) == Compile::AliasIdxBot, "unexpected alias");
              assert(phi == nullptr, "only one phi");
              phi = uu;
            }
          }
        }
#ifdef ASSERT
        // Sanity: the only memory Phi that may cover this chain's alias slice
        // is the one found above; for the bottom-memory Phi, walking its
        // backedge memory chain must lead back to the Phi itself.
        for (DUIterator_Fast jmax, j = inner_cl->fast_outs(jmax); j < jmax; j++) {
          Node* uu = inner_cl->fast_out(j);
          if (uu->is_memory_phi()) {
            if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) {
              assert(phi == uu, "what's that phi?");
            } else if (uu->adr_type() == TypePtr::BOTTOM) {
              Node* n = uu->in(LoopNode::LoopBackControl);
              uint limit = igvn->C->live_nodes();
              uint i = 0;
              while (n != uu) {
                i++;
                assert(i < limit, "infinite loop");
                if (n->is_Proj()) {
                  n = n->in(0);
                } else if (n->is_SafePoint() || n->is_MemBar()) {
                  n = n->in(TypeFunc::Memory);
                } else if (n->is_Phi()) {
                  n = n->in(1);
                } else if (n->is_MergeMem()) {
                  n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type()));
                } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) {
                  n = n->in(MemNode::Memory);
                } else {
                  n->dump();
                  ShouldNotReachHere();
                }
              }
            }
          }
        }
#endif
        if (phi == nullptr) {
          // If an entire chains was sunk, the
          // inner loop has no phi for that memory
          // slice, create one for the outer loop
          phi = PhiNode::make(inner_cl, first->in(MemNode::Memory), Type::MEMORY,
                              igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type())));
          phi->set_req(LoopNode::LoopBackControl, last);
          phi = register_new_node(phi, inner_cl, igvn, iloop);
          igvn->replace_input_of(first, MemNode::Memory, phi);
        } else {
          // Or fix the outer loop fix to include
          // that chain of stores.
          Node* be = phi->in(LoopNode::LoopBackControl);
          assert(!(be->is_Store() && be->in(0) == inner_cl->in(LoopNode::LoopBackControl)), "store on the backedge + sunk stores: unsupported");
          if (be == first->in(MemNode::Memory)) {
            // The Phi's backedge feeds the chain's first store: the Phi must
            // now see the chain's last store on its backedge.
            if (be == phi->in(LoopNode::LoopBackControl)) {
              igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
            } else {
              igvn->replace_input_of(be, MemNode::Memory, last);
            }
          } else {
#ifdef ASSERT
            if (be == phi->in(LoopNode::LoopBackControl)) {
              assert(phi->in(LoopNode::LoopBackControl) == last, "");
            } else {
              assert(be->in(MemNode::Memory) == last, "");
            }
#endif
          }
        }
      }
    }
  }
}
2964 
void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
  // Look for the outer & inner strip mined loop, reduce number of
  // iterations of the inner loop, set exit condition of outer loop,
  // construct required phi nodes for outer loop.
  CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
  assert(inner_cl->is_strip_mined(), "inner loop should be strip mined");
  if (LoopStripMiningIter == 0) {
    // Strip mining disabled: drop both the outer loop and the safepoint.
    remove_outer_loop_and_safepoint(igvn);
    return;
  }
  if (LoopStripMiningIter == 1) {
    // A single iteration per strip: fold back into a regular counted loop
    // that keeps a safepoint on its backedge.
    transform_to_counted_loop(igvn, nullptr);
    return;
  }
  Node* inner_iv_phi = inner_cl->phi();
  if (inner_iv_phi == nullptr) {
    // No induction variable phi: cannot compute a per-strip inner limit.
    // Demote the outer loop end to a plain If and give up on strip mining.
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }
  CountedLoopEndNode* inner_cle = inner_cl->loopexit();

  int stride = inner_cl->stride_con();
  // For a min int stride, LoopStripMiningIter * stride overflows the int range for all values of LoopStripMiningIter
  // except 0 or 1. Those values are handled early on in this method and causes the method to return. So for a min int
  // stride, the method is guaranteed to return at the next check below.
  jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS((jlong)stride);
  int scaled_iters = (int)scaled_iters_long;
  if ((jlong)scaled_iters != scaled_iters_long) {
    // The strip's iv range doesn't fit in an int (min int stride case):
    // remove outer loop and safepoint, give up on strip mining.
    remove_outer_loop_and_safepoint(igvn);
    return;
  }
  jlong short_scaled_iters = LoopStripMiningIterShortLoop * ABS(stride);
  const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int();
  jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo;
  assert(iter_estimate > 0, "broken");
  if (iter_estimate <= short_scaled_iters) {
    // Remove outer loop and safepoint: loop executes less than LoopStripMiningIterShortLoop
    remove_outer_loop_and_safepoint(igvn);
    return;
  }
  if (iter_estimate <= scaled_iters_long) {
    // We would only go through one iteration of
    // the outer loop: drop the outer loop but
    // keep the safepoint so we don't run for
    // too long without a safepoint
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }

  Node* cle_tail = inner_cle->proj_out(true);
  ResourceMark rm;
  Node_List old_new;
  if (cle_tail->outcnt() > 1) {
    // Look for nodes on backedge of inner loop and clone them
    Unique_Node_List backedge_nodes;
    for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) {
      Node* u = cle_tail->fast_out(i);
      if (u != inner_cl) {
        assert(!u->is_CFG(), "control flow on the backedge?");
        backedge_nodes.push(u);
      }
    }
    uint last = igvn->C->unique();
    // Transitively collect data nodes hanging off the backedge (stop at phis
    // of the inner loop head, and skip nodes created by this pass itself).
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node* n = backedge_nodes.at(next);
      old_new.map(n->_idx, n->clone());
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        assert(!u->is_CFG(), "broken");
        if (u->_idx >= last) {
          continue;
        }
        if (!u->is_Phi()) {
          backedge_nodes.push(u);
        } else {
          assert(u->in(0) == inner_cl, "strange phi on the backedge");
        }
      }
    }
    // Put the clones on the outer loop backedge
    Node* le_tail = outer_loop_tail();
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node *n = old_new[backedge_nodes.at(next)->_idx];
      // Rewire clone inputs to fellow clones and move control from the
      // inner backedge to the outer one.
      for (uint i = 1; i < n->req(); i++) {
        if (n->in(i) != nullptr && old_new[n->in(i)->_idx] != nullptr) {
          n->set_req(i, old_new[n->in(i)->_idx]);
        }
      }
      if (n->in(0) != nullptr && n->in(0) == cle_tail) {
        n->set_req(0, le_tail);
      }
      igvn->register_new_node_with_optimizer(n);
    }
  }

  Node* iv_phi = nullptr;
  // Make a clone of each phi in the inner loop
  // for the outer loop
  for (uint i = 0; i < inner_cl->outcnt(); i++) {
    Node* u = inner_cl->raw_out(i);
    if (u->is_Phi()) {
      assert(u->in(0) == inner_cl, "inconsistent");
      Node* phi = u->clone();
      phi->set_req(0, this);
      Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx];
      if (be != nullptr) {
        phi->set_req(LoopNode::LoopBackControl, be);
      }
      phi = igvn->transform(phi);
      igvn->replace_input_of(u, LoopNode::EntryControl, phi);
      if (u == inner_iv_phi) {
        iv_phi = phi;
      }
    }
  }

  if (iv_phi != nullptr) {
    // Now adjust the inner loop's exit condition
    Node* limit = inner_cl->limit();
    // If limit < init for stride > 0 (or limit > init for stride < 0),
    // the loop body is run only once. Given limit - init (init - limit resp.)
    // would be negative, the unsigned comparison below would cause
    // the loop body to be run for LoopStripMiningIter.
    Node* max = nullptr;
    if (stride > 0) {
      max = MaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
    } else {
      max = MaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
    }
    // sub is positive and can be larger than the max signed int
    // value. Use an unsigned min.
    Node* const_iters = igvn->intcon(scaled_iters);
    Node* min = MaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
    // min is the number of iterations for the next inner loop execution:
    // unsigned_min(max(limit - iv_phi, 0), scaled_iters) if stride > 0
    // unsigned_min(max(iv_phi - limit, 0), scaled_iters) if stride < 0

    Node* new_limit = nullptr;
    if (stride > 0) {
      new_limit = igvn->transform(new AddINode(min, iv_phi));
    } else {
      new_limit = igvn->transform(new SubINode(iv_phi, min));
    }
    Node* inner_cmp = inner_cle->cmp_node();
    Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue);
    Node* outer_bol = inner_bol;
    // cmp node for inner loop may be shared
    inner_cmp = inner_cmp->clone();
    inner_cmp->set_req(2, new_limit);
    inner_bol = inner_bol->clone();
    inner_bol->set_req(1, igvn->transform(inner_cmp));
    igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol));
    // Set the outer loop's exit condition too: it reuses the original
    // (full-limit) test while the inner loop now tests against new_limit.
    igvn->replace_input_of(outer_loop_end(), 1, outer_bol);
  } else {
    assert(false, "should be able to adjust outer loop");
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
  }
}
3134 
// Collapse the outer strip mined loop into the inner loop, turning the whole
// construct back into a regular counted loop that keeps a safepoint on its
// backedge: the inner exit test is moved to a new CountedLoopEnd placed where
// the outer loop end was, and the outer loop head is disconnected.
// 'iloop' is null when called from IGVN; when non-null, loop tree and
// dominator info are updated as well.
void OuterStripMinedLoopNode::transform_to_counted_loop(PhaseIterGVN* igvn, PhaseIdealLoop* iloop) {
  CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
  CountedLoopEndNode* cle = inner_cl->loopexit();
  Node* inner_test = cle->in(1);
  IfNode* outer_le = outer_loop_end();
  CountedLoopEndNode* inner_cle = inner_cl->loopexit();
  Node* safepoint = outer_safepoint();

  fix_sunk_stores(inner_cle, inner_cl, igvn, iloop);

  // make counted loop exit test always fail
  ConINode* zero = igvn->intcon(0);
  if (iloop != nullptr) {
    iloop->set_root_as_ctrl(zero);
  }
  igvn->replace_input_of(cle, 1, zero);
  // replace outer loop end with CountedLoopEndNode carrying the former
  // inner CLE's exit test
  Node* new_end = new CountedLoopEndNode(outer_le->in(0), inner_test, cle->_prob, cle->_fcnt);
  register_control(new_end, inner_cl, outer_le->in(0), igvn, iloop);
  if (iloop == nullptr) {
    igvn->replace_node(outer_le, new_end);
  } else {
    iloop->lazy_replace(outer_le, new_end);
  }
  // the backedge of the inner loop must be rewired to the new loop end
  Node* backedge = cle->proj_out(true);
  igvn->replace_input_of(backedge, 0, new_end);
  if (iloop != nullptr) {
    iloop->set_idom(backedge, new_end, iloop->dom_depth(new_end) + 1);
  }
  // make the outer loop go away
  igvn->replace_input_of(in(LoopBackControl), 0, igvn->C->top());
  igvn->replace_input_of(this, LoopBackControl, igvn->C->top());
  inner_cl->clear_strip_mined();
  if (iloop != nullptr) {
    // Data nodes that were in the (now dead) outer loop must be moved into
    // the inner loop's body: walk the safepoint's transitive inputs.
    Unique_Node_List wq;
    wq.push(safepoint);

    IdealLoopTree* outer_loop_ilt = iloop->get_loop(this);
    IdealLoopTree* loop = iloop->get_loop(inner_cl);

    for (uint i = 0; i < wq.size(); i++) {
      Node* n = wq.at(i);
      for (uint j = 0; j < n->req(); ++j) {
        Node* in = n->in(j);
        if (in == nullptr || in->is_CFG()) {
          continue;
        }
        if (iloop->get_loop(iloop->get_ctrl(in)) != outer_loop_ilt) {
          continue;
        }
        assert(!loop->_body.contains(in), "");
        loop->_body.push(in);
        wq.push(in);
      }
    }
    // The safepoint and its control now belong to the inner loop.
    iloop->set_loop(safepoint, loop);
    loop->_body.push(safepoint);
    iloop->set_loop(safepoint->in(0), loop);
    loop->_body.push(safepoint->in(0));
    outer_loop_ilt->_tail = igvn->C->top();
  }
}
3198 
// Strip mining is not profitable here: remove the outer loop and its
// safepoint. Uses of the outer loop exit are rerouted to the control above
// the safepoint, then the safepoint is killed by wiring its control to top.
// The inner loop becomes a plain counted loop.
void OuterStripMinedLoopNode::remove_outer_loop_and_safepoint(PhaseIterGVN* igvn) const {
  CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
  Node* outer_sfpt = outer_safepoint();
  Node* outer_out = outer_loop_exit();
  // Note: outer_sfpt->in(0) must be read before the safepoint is disconnected.
  igvn->replace_node(outer_out, outer_sfpt->in(0));
  igvn->replace_input_of(outer_sfpt, 0, igvn->C->top());
  inner_cl->clear_strip_mined();
}
3207 
3208 Node* OuterStripMinedLoopNode::register_new_node(Node* node, LoopNode* ctrl, PhaseIterGVN* igvn, PhaseIdealLoop* iloop) {
3209   if (iloop == nullptr) {
3210     return igvn->transform(node);
3211   }
3212   iloop->register_new_node(node, ctrl);
3213   return node;
3214 }
3215 
3216 Node* OuterStripMinedLoopNode::register_control(Node* node, Node* loop, Node* idom, PhaseIterGVN* igvn,
3217                                                 PhaseIdealLoop* iloop) {
3218   if (iloop == nullptr) {
3219     return igvn->transform(node);
3220   }
3221   iloop->register_control(node, iloop->get_loop(loop), idom);
3222   return node;
3223 }
3224 
3225 const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
3226   if (!in(0)) return Type::TOP;
3227   if (phase->type(in(0)) == Type::TOP)
3228     return Type::TOP;
3229 
3230   // Until expansion, the loop end condition is not set so this should not constant fold.
3231   if (is_expanded(phase)) {
3232     return IfNode::Value(phase);
3233   }
3234 
3235   return TypeTuple::IFBOTH;
3236 }
3237 
3238 bool OuterStripMinedLoopEndNode::is_expanded(PhaseGVN *phase) const {
3239   // The outer strip mined loop head only has Phi uses after expansion
3240   if (phase->is_IterGVN()) {
3241     Node* backedge = proj_out_or_null(true);
3242     if (backedge != nullptr) {
3243       Node* head = backedge->unique_ctrl_out_or_null();
3244       if (head != nullptr && head->is_OuterStripMinedLoop()) {
3245         if (head->find_out_with(Op_Phi) != nullptr) {
3246           return true;
3247         }
3248       }
3249     }
3250   }
3251   return false;
3252 }
3253 
3254 Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3255   if (remove_dead_region(phase, can_reshape))  return this;
3256 
3257   return nullptr;
3258 }
3259 
3260 //------------------------------filtered_type--------------------------------
3261 // Return a type based on condition control flow
3262 // A successful return will be a type that is restricted due
3263 // to a series of dominating if-tests, such as:
3264 //    if (i < 10) {
3265 //       if (i > 0) {
3266 //          here: "i" type is [1..10)
3267 //       }
3268 //    }
3269 // or a control flow merge
3270 //    if (i < 10) {
3271 //       do {
3272 //          phi( , ) -- at top of loop type is [min_int..10)
3273 //         i = ?
3274 //       } while ( i < 10)
3275 //
const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) {
  assert(n && n->bottom_type()->is_int(), "must be int");
  const TypeInt* filtered_t = nullptr;
  if (!n->is_Phi()) {
    // NOTE(review): the second clause of this assert is dead (nullptr can
    // never equal C->top()); it reduces to n_ctrl != nullptr — confirm intent.
    assert(n_ctrl != nullptr || n_ctrl == C->top(), "valid control");
    filtered_t = filtered_type_from_dominators(n, n_ctrl);

  } else {
    // For a Phi: filter each input along its own incoming control path and
    // meet (union) the resulting types.
    Node* phi    = n->as_Phi();
    Node* region = phi->in(0);
    assert(n_ctrl == nullptr || n_ctrl == region, "ctrl parameter must be region");
    if (region && region != C->top()) {
      for (uint i = 1; i < phi->req(); i++) {
        Node* val   = phi->in(i);
        Node* use_c = region->in(i);
        const TypeInt* val_t = filtered_type_from_dominators(val, use_c);
        if (val_t != nullptr) {
          if (filtered_t == nullptr) {
            filtered_t = val_t;
          } else {
            filtered_t = filtered_t->meet(val_t)->is_int();
          }
        }
      }
    }
  }
  // Narrow (join) the node's known type with the filtered type, if any.
  const TypeInt* n_t = _igvn.type(n)->is_int();
  if (filtered_t != nullptr) {
    n_t = n_t->join(filtered_t)->is_int();
  }
  return n_t;
}
3308 
3309 
3310 //------------------------------filtered_type_from_dominators--------------------------------
3311 // Return a possibly more restrictive type for val based on condition control flow of dominators
3312 const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) {
3313   if (val->is_Con()) {
3314      return val->bottom_type()->is_int();
3315   }
3316   uint if_limit = 10; // Max number of dominating if's visited
3317   const TypeInt* rtn_t = nullptr;
3318 
3319   if (use_ctrl && use_ctrl != C->top()) {
3320     Node* val_ctrl = get_ctrl(val);
3321     uint val_dom_depth = dom_depth(val_ctrl);
3322     Node* pred = use_ctrl;
3323     uint if_cnt = 0;
3324     while (if_cnt < if_limit) {
3325       if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) {
3326         if_cnt++;
3327         const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred);
3328         if (if_t != nullptr) {
3329           if (rtn_t == nullptr) {
3330             rtn_t = if_t;
3331           } else {
3332             rtn_t = rtn_t->join(if_t)->is_int();
3333           }
3334         }
3335       }
3336       pred = idom(pred);
3337       if (pred == nullptr || pred == C->top()) {
3338         break;
3339       }
3340       // Stop if going beyond definition block of val
3341       if (dom_depth(pred) < val_dom_depth) {
3342         break;
3343       }
3344     }
3345   }
3346   return rtn_t;
3347 }
3348 
3349 
3350 //------------------------------dump_spec--------------------------------------
3351 // Dump special per-node info
3352 #ifndef PRODUCT
3353 void CountedLoopEndNode::dump_spec(outputStream *st) const {
3354   if( in(TestValue) != nullptr && in(TestValue)->is_Bool() ) {
3355     BoolTest bt( test_trip()); // Added this for g++.
3356 
3357     st->print("[");
3358     bt.dump_on(st);
3359     st->print("]");
3360   }
3361   st->print(" ");
3362   IfNode::dump_spec(st);
3363 }
3364 #endif
3365 
3366 //=============================================================================
3367 //------------------------------is_member--------------------------------------
3368 // Is 'l' a member of 'this'?
3369 bool IdealLoopTree::is_member(const IdealLoopTree *l) const {
3370   while( l->_nest > _nest ) l = l->_parent;
3371   return l == this;
3372 }
3373 
3374 //------------------------------set_nest---------------------------------------
3375 // Set loop tree nesting depth.  Accumulate _has_call bits.
3376 int IdealLoopTree::set_nest( uint depth ) {
3377   assert(depth <= SHRT_MAX, "sanity");
3378   _nest = depth;
3379   int bits = _has_call;
3380   if( _child ) bits |= _child->set_nest(depth+1);
3381   if( bits ) _has_call = 1;
3382   if( _next  ) bits |= _next ->set_nest(depth  );
3383   return bits;
3384 }
3385 
3386 //------------------------------split_fall_in----------------------------------
3387 // Split out multiple fall-in edges from the loop header.  Move them to a
3388 // private RegionNode before the loop.  This becomes the loop landing pad.
// Split out multiple fall-in edges from the loop header.  Move them to a
// private RegionNode before the loop.  This becomes the loop landing pad.
// All non-loop-member inputs of _head (and the matching Phi inputs) are
// peeled off onto the landing pad, which is then added as _head's single
// fall-in edge.
void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) {
  PhaseIterGVN &igvn = phase->_igvn;
  uint i;

  // Make a new RegionNode to be the landing pad.
  RegionNode* landing_pad = new RegionNode(fall_in_cnt + 1);
  phase->set_loop(landing_pad,_parent);
  // If _head was irreducible loop entry, landing_pad may now be too
  landing_pad->set_loop_status(_head->as_Region()->loop_status());
  // Gather all the fall-in control paths into the landing pad
  uint icnt = fall_in_cnt;
  uint oreq = _head->req();
  // Iterate backwards so edge indices stay valid while filling the pad.
  for( i = oreq-1; i>0; i-- )
    if( !phase->is_member( this, _head->in(i) ) )
      landing_pad->set_req(icnt--,_head->in(i));

  // Peel off PhiNode edges as well
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *oj = _head->fast_out(j);
    if( oj->is_Phi() ) {
      PhiNode* old_phi = oj->as_Phi();
      assert( old_phi->region() == _head, "" );
      igvn.hash_delete(old_phi);   // Yank from hash before hacking edges
      // New phi on the landing pad collects the fall-in values; the old
      // phi keeps only the loop-member inputs.
      Node *p = PhiNode::make_blank(landing_pad, old_phi);
      uint icnt = fall_in_cnt;
      for( i = oreq-1; i>0; i-- ) {
        if( !phase->is_member( this, _head->in(i) ) ) {
          p->init_req(icnt--, old_phi->in(i));
          // Go ahead and clean out old edges from old phi
          old_phi->del_req(i);
        }
      }
      // Search for CSE's here, because ZKM.jar does a lot of
      // loop hackery and we need to be a little incremental
      // with the CSE to avoid O(N^2) node blow-up.
      Node *p2 = igvn.hash_find_insert(p); // Look for a CSE
      if( p2 ) {                // Found CSE
        p->destruct(&igvn);     // Recover useless new node
        p = p2;                 // Use old node
      } else {
        igvn.register_new_node_with_optimizer(p, old_phi);
      }
      // Make old Phi refer to new Phi.
      old_phi->add_req(p);
      // Check for the special case of making the old phi useless and
      // disappear it.  In JavaGrande I have a case where this useless
      // Phi is the loop limit and prevents recognizing a CountedLoop
      // which in turn prevents removing an empty loop.
      Node *id_old_phi = old_phi->Identity(&igvn);
      if( id_old_phi != old_phi ) { // Found a simple identity?
        // Note that I cannot call 'replace_node' here, because
        // that will yank the edge from old_phi to the Region and
        // I'm mid-iteration over the Region's uses.
        for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
          Node* use = old_phi->last_out(i);
          igvn.rehash_node_delayed(use);
          uint uses_found = 0;
          for (uint j = 0; j < use->len(); j++) {
            if (use->in(j) == old_phi) {
              if (j < use->req()) use->set_req (j, id_old_phi);
              else                use->set_prec(j, id_old_phi);
              uses_found++;
            }
          }
          i -= uses_found;    // we deleted 1 or more copies of this edge
        }
      }
      igvn._worklist.push(old_phi);
    }
  }
  // Finally clean out the fall-in edges from the RegionNode
  for( i = oreq-1; i>0; i-- ) {
    if( !phase->is_member( this, _head->in(i) ) ) {
      _head->del_req(i);
    }
  }
  igvn.rehash_node_delayed(_head);
  // Transform landing pad
  igvn.register_new_node_with_optimizer(landing_pad, _head);
  // Insert landing pad into the header
  _head->add_req(landing_pad);
}
3471 
3472 //------------------------------split_outer_loop-------------------------------
3473 // Split out the outermost loop from this shared header.
// Split out the outermost loop from this shared header: make a new LoopNode
// whose backedge is this loop's tail, move the matching Phi inputs onto new
// phis on that LoopNode, and make the outer loop the new head of this tree.
void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
  PhaseIterGVN &igvn = phase->_igvn;

  // Find index of outermost loop; it should also be my tail.
  uint outer_idx = 1;
  while( _head->in(outer_idx) != _tail ) outer_idx++;

  // Make a LoopNode for the outermost loop.
  Node *ctl = _head->in(LoopNode::EntryControl);
  Node *outer = new LoopNode( ctl, _head->in(outer_idx) );
  outer = igvn.register_new_node_with_optimizer(outer, _head);
  phase->set_created_loop_node();

  // Outermost loop falls into '_head' loop
  _head->set_req(LoopNode::EntryControl, outer);
  _head->del_req(outer_idx);
  // Split all the Phis up between '_head' loop and 'outer' loop.
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *out = _head->fast_out(j);
    if( out->is_Phi() ) {
      PhiNode *old_phi = out->as_Phi();
      assert( old_phi->region() == _head, "" );
      // New phi on 'outer' merges the entry value with the outer backedge
      // value; the old phi's entry then comes from the new phi.
      Node *phi = PhiNode::make_blank(outer, old_phi);
      phi->init_req(LoopNode::EntryControl,    old_phi->in(LoopNode::EntryControl));
      phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
      phi = igvn.register_new_node_with_optimizer(phi, old_phi);
      // Make old Phi point to new Phi on the fall-in path
      igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
      old_phi->del_req(outer_idx);
    }
  }

  // Use the new loop head instead of the old shared one
  _head = outer;
  phase->set_loop(_head, this);
}
3510 
3511 //------------------------------fix_parent-------------------------------------
3512 static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) {
3513   loop->_parent = parent;
3514   if( loop->_child ) fix_parent( loop->_child, loop   );
3515   if( loop->_next  ) fix_parent( loop->_next , parent );
3516 }
3517 
3518 //------------------------------estimate_path_freq-----------------------------
// Estimate the execution frequency of the control path ending at 'n' by
// walking up the control chain (through safepoints and never-taken
// branches) looking for an If with a valid count or a profiled call site.
// Returns 0.0f when no estimate is available.
static float estimate_path_freq( Node *n ) {
  // Try to extract some path frequency info
  IfNode *iff;
  for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests
    uint nop = n->Opcode();
    if( nop == Op_SafePoint ) {   // Skip any safepoint
      n = n->in(0);
      continue;
    }
    if( nop == Op_CatchProj ) {   // Get count from a prior call
      // Assume call does not always throw exceptions: means the call-site
      // count is also the frequency of the fall-through path.
      assert( n->is_CatchProj(), "" );
      if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index )
        return 0.0f;            // Assume call exception path is rare
      Node *call = n->in(0)->in(0)->in(0);
      assert( call->is_Call(), "expect a call here" );
      const JVMState *jvms = ((CallNode*)call)->jvms();
      ciMethodData* methodData = jvms->method()->method_data();
      if (!methodData->is_mature())  return 0.0f; // No call-site data
      ciProfileData* data = methodData->bci_to_data(jvms->bci());
      if ((data == nullptr) || !data->is_CounterData()) {
        // no call profile available, try call's control input
        n = n->in(0);
        continue;
      }
      // Scale the raw invocation counter into a frequency.
      return data->as_CounterData()->count()/FreqCountInvocations;
    }
    // See if there's a gating IF test
    Node *n_c = n->in(0);
    if( !n_c->is_If() ) break;       // No estimate available
    iff = n_c->as_If();
    if( iff->_fcnt != COUNT_UNKNOWN )   // Have a valid count?
      // Compute how much count comes on this path
      return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt;
    // Have no count info.  Skip dull uncommon-trap like branches.
    if( (nop == Op_IfTrue  && iff->_prob < PROB_LIKELY_MAG(5)) ||
        (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) )
      break;
    // Skip through never-taken branch; look for a real loop exit.
    n = iff->in(0);
  }
  return 0.0f;                  // No estimate available
}
3563 
3564 //------------------------------merge_many_backedges---------------------------
3565 // Merge all the backedges from the shared header into a private Region.
3566 // Feed that region as the one backedge to this loop.
3567 void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) {
3568   uint i;
3569 
3570   // Scan for the top 2 hottest backedges
3571   float hotcnt = 0.0f;
3572   float warmcnt = 0.0f;
3573   uint hot_idx = 0;
3574   // Loop starts at 2 because slot 1 is the fall-in path
3575   for( i = 2; i < _head->req(); i++ ) {
3576     float cnt = estimate_path_freq(_head->in(i));
3577     if( cnt > hotcnt ) {       // Grab hottest path
3578       warmcnt = hotcnt;
3579       hotcnt = cnt;
3580       hot_idx = i;
3581     } else if( cnt > warmcnt ) { // And 2nd hottest path
3582       warmcnt = cnt;
3583     }
3584   }
3585 
3586   // See if the hottest backedge is worthy of being an inner loop
3587   // by being much hotter than the next hottest backedge.
3588   if( hotcnt <= 0.0001 ||
3589       hotcnt < 2.0*warmcnt ) hot_idx = 0;// No hot backedge
3590 
3591   // Peel out the backedges into a private merge point; peel
3592   // them all except optionally hot_idx.
3593   PhaseIterGVN &igvn = phase->_igvn;
3594 
3595   Node *hot_tail = nullptr;
3596   // Make a Region for the merge point
3597   Node *r = new RegionNode(1);
3598   for( i = 2; i < _head->req(); i++ ) {
3599     if( i != hot_idx )
3600       r->add_req( _head->in(i) );
3601     else hot_tail = _head->in(i);
3602   }
3603   igvn.register_new_node_with_optimizer(r, _head);
3604   // Plug region into end of loop _head, followed by hot_tail
3605   while( _head->req() > 3 ) _head->del_req( _head->req()-1 );
3606   igvn.replace_input_of(_head, 2, r);
3607   if( hot_idx ) _head->add_req(hot_tail);
3608 
3609   // Split all the Phis up between '_head' loop and the Region 'r'
3610   for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
3611     Node *out = _head->fast_out(j);
3612     if( out->is_Phi() ) {
3613       PhiNode* n = out->as_Phi();
3614       igvn.hash_delete(n);      // Delete from hash before hacking edges
3615       Node *hot_phi = nullptr;
3616       Node *phi = new PhiNode(r, n->type(), n->adr_type());
3617       // Check all inputs for the ones to peel out
3618       uint j = 1;
3619       for( uint i = 2; i < n->req(); i++ ) {
3620         if( i != hot_idx )
3621           phi->set_req( j++, n->in(i) );
3622         else hot_phi = n->in(i);
3623       }
3624       // Register the phi but do not transform until whole place transforms
3625       igvn.register_new_node_with_optimizer(phi, n);
3626       // Add the merge phi to the old Phi
3627       while( n->req() > 3 ) n->del_req( n->req()-1 );
3628       igvn.replace_input_of(n, 2, phi);
3629       if( hot_idx ) n->add_req(hot_phi);
3630     }
3631   }
3632 
3633 
3634   // Insert a new IdealLoopTree inserted below me.  Turn it into a clone
3635   // of self loop tree.  Turn self into a loop headed by _head and with
3636   // tail being the new merge point.
3637   IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail );
3638   phase->set_loop(_tail,ilt);   // Adjust tail
3639   _tail = r;                    // Self's tail is new merge point
3640   phase->set_loop(r,this);
3641   ilt->_child = _child;         // New guy has my children
3642   _child = ilt;                 // Self has new guy as only child
3643   ilt->_parent = this;          // new guy has self for parent
3644   ilt->_nest = _nest;           // Same nesting depth (for now)
3645 
3646   // Starting with 'ilt', look for child loop trees using the same shared
3647   // header.  Flatten these out; they will no longer be loops in the end.
3648   IdealLoopTree **pilt = &_child;
3649   while( ilt ) {
3650     if( ilt->_head == _head ) {
3651       uint i;
3652       for( i = 2; i < _head->req(); i++ )
3653         if( _head->in(i) == ilt->_tail )
3654           break;                // Still a loop
3655       if( i == _head->req() ) { // No longer a loop
3656         // Flatten ilt.  Hang ilt's "_next" list from the end of
3657         // ilt's '_child' list.  Move the ilt's _child up to replace ilt.
3658         IdealLoopTree **cp = &ilt->_child;
3659         while( *cp ) cp = &(*cp)->_next;   // Find end of child list
3660         *cp = ilt->_next;       // Hang next list at end of child list
3661         *pilt = ilt->_child;    // Move child up to replace ilt
3662         ilt->_head = nullptr;   // Flag as a loop UNIONED into parent
3663         ilt = ilt->_child;      // Repeat using new ilt
3664         continue;               // do not advance over ilt->_child
3665       }
3666       assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" );
3667       phase->set_loop(_head,ilt);
3668     }
3669     pilt = &ilt->_child;        // Advance to next
3670     ilt = *pilt;
3671   }
3672 
3673   if( _child ) fix_parent( _child, this );
3674 }
3675 
3676 //------------------------------beautify_loops---------------------------------
3677 // Split shared headers and insert loop landing pads.
3678 // Insert a LoopNode to replace the RegionNode.
3679 // Return TRUE if loop tree is structurally changed.
3680 bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) {
3681   bool result = false;
3682   // Cache parts in locals for easy
3683   PhaseIterGVN &igvn = phase->_igvn;
3684 
3685   igvn.hash_delete(_head);      // Yank from hash before hacking edges
3686 
3687   // Check for multiple fall-in paths.  Peel off a landing pad if need be.
3688   int fall_in_cnt = 0;
3689   for( uint i = 1; i < _head->req(); i++ )
3690     if( !phase->is_member( this, _head->in(i) ) )
3691       fall_in_cnt++;
3692   assert( fall_in_cnt, "at least 1 fall-in path" );
3693   if( fall_in_cnt > 1 )         // Need a loop landing pad to merge fall-ins
3694     split_fall_in( phase, fall_in_cnt );
3695 
3696   // Swap inputs to the _head and all Phis to move the fall-in edge to
3697   // the left.
3698   fall_in_cnt = 1;
3699   while( phase->is_member( this, _head->in(fall_in_cnt) ) )
3700     fall_in_cnt++;
3701   if( fall_in_cnt > 1 ) {
3702     // Since I am just swapping inputs I do not need to update def-use info
3703     Node *tmp = _head->in(1);
3704     igvn.rehash_node_delayed(_head);
3705     _head->set_req( 1, _head->in(fall_in_cnt) );
3706     _head->set_req( fall_in_cnt, tmp );
3707     // Swap also all Phis
3708     for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
3709       Node* phi = _head->fast_out(i);
3710       if( phi->is_Phi() ) {
3711         igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges
3712         tmp = phi->in(1);
3713         phi->set_req( 1, phi->in(fall_in_cnt) );
3714         phi->set_req( fall_in_cnt, tmp );
3715       }
3716     }
3717   }
3718   assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" );
3719   assert(  phase->is_member( this, _head->in(2) ), "right edge is loop" );
3720 
3721   // If I am a shared header (multiple backedges), peel off the many
3722   // backedges into a private merge point and use the merge point as
3723   // the one true backedge.
3724   if (_head->req() > 3) {
3725     // Merge the many backedges into a single backedge but leave
3726     // the hottest backedge as separate edge for the following peel.
3727     if (!_irreducible) {
3728       merge_many_backedges( phase );
3729     }
3730 
3731     // When recursively beautify my children, split_fall_in can change
3732     // loop tree structure when I am an irreducible loop. Then the head
3733     // of my children has a req() not bigger than 3. Here we need to set
3734     // result to true to catch that case in order to tell the caller to
3735     // rebuild loop tree. See issue JDK-8244407 for details.
3736     result = true;
3737   }
3738 
3739   // If I have one hot backedge, peel off myself loop.
3740   // I better be the outermost loop.
3741   if (_head->req() > 3 && !_irreducible) {
3742     split_outer_loop( phase );
3743     result = true;
3744 
3745   } else if (!_head->is_Loop() && !_irreducible) {
3746     // Make a new LoopNode to replace the old loop head
3747     Node *l = new LoopNode( _head->in(1), _head->in(2) );
3748     l = igvn.register_new_node_with_optimizer(l, _head);
3749     phase->set_created_loop_node();
3750     // Go ahead and replace _head
3751     phase->_igvn.replace_node( _head, l );
3752     _head = l;
3753     phase->set_loop(_head, this);
3754   }
3755 
3756   // Now recursively beautify nested loops
3757   if( _child ) result |= _child->beautify_loops( phase );
3758   if( _next  ) result |= _next ->beautify_loops( phase );
3759   return result;
3760 }
3761 
3762 //------------------------------allpaths_check_safepts----------------------------
3763 // Allpaths backwards scan. Starting at the head, traversing all backedges, and the body. Terminating each path at first
3764 // safepoint encountered.  Helper for check_safepts.
3765 void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) {
3766   assert(stack.size() == 0, "empty stack");
3767   stack.push(_head);
3768   visited.clear();
3769   visited.set(_head->_idx);
3770   while (stack.size() > 0) {
3771     Node* n = stack.pop();
3772     if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
3773       // Terminate this path
3774     } else if (n->Opcode() == Op_SafePoint) {
3775       if (_phase->get_loop(n) != this) {
3776         if (_required_safept == nullptr) _required_safept = new Node_List();
3777         // save the first we run into on that path: closest to the tail if the head has a single backedge
3778         _required_safept->push(n);
3779       }
3780       // Terminate this path
3781     } else {
3782       uint start = n->is_Region() ? 1 : 0;
3783       uint end   = n->is_Region() && (!n->is_Loop() || n == _head) ? n->req() : start + 1;
3784       for (uint i = start; i < end; i++) {
3785         Node* in = n->in(i);
3786         assert(in->is_CFG(), "must be");
3787         if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) {
3788           stack.push(in);
3789         }
3790       }
3791     }
3792   }
3793 }
3794 
3795 //------------------------------check_safepts----------------------------
3796 // Given dominators, try to find loops with calls that must always be
3797 // executed (call dominates loop tail).  These loops do not need non-call
3798 // safepoints (ncsfpt).
3799 //
3800 // A complication is that a safepoint in a inner loop may be needed
3801 // by an outer loop. In the following, the inner loop sees it has a
3802 // call (block 3) on every path from the head (block 2) to the
3803 // backedge (arc 3->2).  So it deletes the ncsfpt (non-call safepoint)
3804 // in block 2, _but_ this leaves the outer loop without a safepoint.
3805 //
3806 //          entry  0
3807 //                 |
3808 //                 v
3809 // outer 1,2    +->1
3810 //              |  |
3811 //              |  v
3812 //              |  2<---+  ncsfpt in 2
3813 //              |_/|\   |
3814 //                 | v  |
3815 // inner 2,3      /  3  |  call in 3
3816 //               /   |  |
3817 //              v    +--+
3818 //        exit  4
3819 //
3820 //
3821 // This method creates a list (_required_safept) of ncsfpt nodes that must
3822 // be protected is created for each loop. When a ncsfpt maybe deleted, it
3823 // is first looked for in the lists for the outer loops of the current loop.
3824 //
3825 // The insights into the problem:
3826 //  A) counted loops are okay
3827 //  B) innermost loops are okay (only an inner loop can delete
3828 //     a ncsfpt needed by an outer loop)
3829 //  C) a loop is immune from an inner loop deleting a safepoint
3830 //     if the loop has a call on the idom-path
3831 //  D) a loop is also immune if it has a ncsfpt (non-call safepoint) on the
3832 //     idom-path that is not in a nested loop
3833 //  E) otherwise, an ncsfpt on the idom-path that is nested in an inner
3834 //     loop needs to be prevented from deletion by an inner loop
3835 //
3836 // There are two analyses:
3837 //  1) The first, and cheaper one, scans the loop body from
3838 //     tail to head following the idom (immediate dominator)
3839 //     chain, looking for the cases (C,D,E) above.
3840 //     Since inner loops are scanned before outer loops, there is summary
3841 //     information about inner loops.  Inner loops can be skipped over
3842 //     when the tail of an inner loop is encountered.
3843 //
3844 //  2) The second, invoked if the first fails to find a call or ncsfpt on
3845 //     the idom path (which is rare), scans all predecessor control paths
3846 //     from the tail to the head, terminating a path when a call or sfpt
3847 //     is encountered, to find the ncsfpt's that are closest to the tail.
3848 //
3849 void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) {
3850   // Bottom up traversal
3851   IdealLoopTree* ch = _child;
3852   if (_child) _child->check_safepts(visited, stack);
3853   if (_next)  _next ->check_safepts(visited, stack);
3854 
3855   if (!_head->is_CountedLoop() && !_has_sfpt && _parent != nullptr) {
3856     bool  has_call         = false;    // call on dom-path
3857     bool  has_local_ncsfpt = false;    // ncsfpt on dom-path at this loop depth
3858     Node* nonlocal_ncsfpt  = nullptr;  // ncsfpt on dom-path at a deeper depth
3859     if (!_irreducible) {
3860       // Scan the dom-path nodes from tail to head
3861       for (Node* n = tail(); n != _head; n = _phase->idom(n)) {
3862         if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
3863           has_call = true;
3864           _has_sfpt = 1;          // Then no need for a safept!
3865           break;
3866         } else if (n->Opcode() == Op_SafePoint) {
3867           if (_phase->get_loop(n) == this) {
3868             has_local_ncsfpt = true;
3869             break;
3870           }
3871           if (nonlocal_ncsfpt == nullptr) {
3872             nonlocal_ncsfpt = n; // save the one closest to the tail
3873           }
3874         } else {
3875           IdealLoopTree* nlpt = _phase->get_loop(n);
3876           if (this != nlpt) {
3877             // If at an inner loop tail, see if the inner loop has already
3878             // recorded seeing a call on the dom-path (and stop.)  If not,
3879             // jump to the head of the inner loop.
3880             assert(is_member(nlpt), "nested loop");
3881             Node* tail = nlpt->_tail;
3882             if (tail->in(0)->is_If()) tail = tail->in(0);
3883             if (n == tail) {
3884               // If inner loop has call on dom-path, so does outer loop
3885               if (nlpt->_has_sfpt) {
3886                 has_call = true;
3887                 _has_sfpt = 1;
3888                 break;
3889               }
3890               // Skip to head of inner loop
3891               assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head");
3892               n = nlpt->_head;
3893               if (_head == n) {
3894                 // this and nlpt (inner loop) have the same loop head. This should not happen because
3895                 // during beautify_loops we call merge_many_backedges. However, infinite loops may not
3896                 // have been attached to the loop-tree during build_loop_tree before beautify_loops,
3897                 // but then attached in the build_loop_tree afterwards, and so still have unmerged
3898                 // backedges. Check if we are indeed in an infinite subgraph, and terminate the scan,
3899                 // since we have reached the loop head of this.
3900                 assert(_head->as_Region()->is_in_infinite_subgraph(),
3901                        "only expect unmerged backedges in infinite loops");
3902                 break;
3903               }
3904             }
3905           }
3906         }
3907       }
3908     }
3909     // Record safept's that this loop needs preserved when an
3910     // inner loop attempts to delete it's safepoints.
3911     if (_child != nullptr && !has_call && !has_local_ncsfpt) {
3912       if (nonlocal_ncsfpt != nullptr) {
3913         if (_required_safept == nullptr) _required_safept = new Node_List();
3914         _required_safept->push(nonlocal_ncsfpt);
3915       } else {
3916         // Failed to find a suitable safept on the dom-path.  Now use
3917         // an all paths walk from tail to head, looking for safepoints to preserve.
3918         allpaths_check_safepts(visited, stack);
3919       }
3920     }
3921   }
3922 }
3923 
3924 //---------------------------is_deleteable_safept----------------------------
3925 // Is safept not required by an outer loop?
3926 bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) {
3927   assert(sfpt->Opcode() == Op_SafePoint, "");
3928   IdealLoopTree* lp = get_loop(sfpt)->_parent;
3929   while (lp != nullptr) {
3930     Node_List* sfpts = lp->_required_safept;
3931     if (sfpts != nullptr) {
3932       for (uint i = 0; i < sfpts->size(); i++) {
3933         if (sfpt == sfpts->at(i))
3934           return false;
3935       }
3936     }
3937     lp = lp->_parent;
3938   }
3939   return true;
3940 }
3941 
3942 //---------------------------replace_parallel_iv-------------------------------
3943 // Replace parallel induction variable (parallel to trip counter)
3944 // This optimization looks for patterns similar to:
3945 //
3946 //    int a = init2;
3947 //    for (int iv = init; iv < limit; iv += stride_con) {
3948 //      a += stride_con2;
3949 //    }
3950 //
3951 // and transforms it to:
3952 //
3953 //    int iv2 = init2
3954 //    int iv = init
3955 //    loop:
3956 //      if (iv >= limit) goto exit
3957 //      iv += stride_con
3958 //      iv2 = init2 + (iv - init) * (stride_con2 / stride_con)
3959 //      goto loop
3960 //    exit:
3961 //    ...
3962 //
3963 // Such transformation introduces more optimization opportunities. In this
3964 // particular example, the loop can be eliminated entirely given that
3965 // `stride_con2 / stride_con` is exact  (i.e., no remainder). Checks are in
3966 // place to only perform this optimization if such a division is exact. This
3967 // example will be transformed into its semantic equivalence:
3968 //
3969 //     int iv2 = (iv * stride_con2 / stride_con) + (init2 - (init * stride_con2 / stride_con))
3970 //
3971 // which corresponds to the structure of transformed subgraph.
3972 //
3973 // However, if there is a mismatch between types of the loop and the parallel
3974 // induction variable (e.g., a long-typed IV in an int-typed loop), type
3975 // conversions are required:
3976 //
3977 //     long iv2 = ((long) iv * stride_con2 / stride_con) + (init2 - ((long) init * stride_con2 / stride_con))
3978 //
3979 void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
3980   assert(loop->_head->is_CountedLoop(), "");
3981   CountedLoopNode *cl = loop->_head->as_CountedLoop();
3982   if (!cl->is_valid_counted_loop(T_INT)) {
3983     return;         // skip malformed counted loop
3984   }
3985   Node *incr = cl->incr();
3986   if (incr == nullptr) {
3987     return;         // Dead loop?
3988   }
3989   Node *init = cl->init_trip();
3990   Node *phi  = cl->phi();
3991   jlong stride_con = cl->stride_con();
3992 
3993   // Visit all children, looking for Phis
3994   for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
3995     Node *out = cl->out(i);
3996     // Look for other phis (secondary IVs). Skip dead ones
3997     if (!out->is_Phi() || out == phi || !has_node(out)) {
3998       continue;
3999     }
4000 
4001     PhiNode* phi2 = out->as_Phi();
4002     Node* incr2 = phi2->in(LoopNode::LoopBackControl);
4003     // Look for induction variables of the form:  X += constant
4004     if (phi2->region() != loop->_head ||
4005         incr2->req() != 3 ||
4006         incr2->in(1)->uncast() != phi2 ||
4007         incr2 == incr ||
4008         (incr2->Opcode() != Op_AddI && incr2->Opcode() != Op_AddL) ||
4009         !incr2->in(2)->is_Con()) {
4010       continue;
4011     }
4012 
4013     if (incr2->in(1)->is_ConstraintCast() &&
4014         !(incr2->in(1)->in(0)->is_IfProj() && incr2->in(1)->in(0)->in(0)->is_RangeCheck())) {
4015       // Skip AddI->CastII->Phi case if CastII is not controlled by local RangeCheck
4016       continue;
4017     }
4018     // Check for parallel induction variable (parallel to trip counter)
4019     // via an affine function.  In particular, count-down loops with
4020     // count-up array indices are common. We only RCE references off
4021     // the trip-counter, so we need to convert all these to trip-counter
4022     // expressions.
4023     Node* init2 = phi2->in(LoopNode::EntryControl);
4024 
4025     // Determine the basic type of the stride constant (and the iv being incremented).
4026     BasicType stride_con2_bt = incr2->Opcode() == Op_AddI ? T_INT : T_LONG;
4027     jlong stride_con2 = incr2->in(2)->get_integer_as_long(stride_con2_bt);
4028 
4029     // The ratio of the two strides cannot be represented as an int
4030     // if stride_con2 is min_jint (or min_jlong, respectively) and
4031     // stride_con is -1.
4032     if (stride_con2 == min_signed_integer(stride_con2_bt) && stride_con == -1) {
4033       continue;
4034     }
4035 
4036     // The general case here gets a little tricky.  We want to find the
4037     // GCD of all possible parallel IV's and make a new IV using this
4038     // GCD for the loop.  Then all possible IVs are simple multiples of
4039     // the GCD.  In practice, this will cover very few extra loops.
4040     // Instead we require 'stride_con2' to be a multiple of 'stride_con',
4041     // where +/-1 is the common case, but other integer multiples are
4042     // also easy to handle.
4043     jlong ratio_con = stride_con2 / stride_con;
4044 
4045     if ((ratio_con * stride_con) != stride_con2) { // Check for exact (no remainder)
4046         continue;
4047     }
4048 
4049 #ifndef PRODUCT
4050     if (TraceLoopOpts) {
4051       tty->print("Parallel IV: %d ", phi2->_idx);
4052       loop->dump_head();
4053     }
4054 #endif
4055 
4056     // Convert to using the trip counter.  The parallel induction
4057     // variable differs from the trip counter by a loop-invariant
4058     // amount, the difference between their respective initial values.
4059     // It is scaled by the 'ratio_con'.
4060     Node* ratio = integercon(ratio_con, stride_con2_bt);
4061 
4062     Node* init_converted = insert_convert_node_if_needed(stride_con2_bt, init);
4063     Node* phi_converted = insert_convert_node_if_needed(stride_con2_bt, phi);
4064 
4065     Node* ratio_init = MulNode::make(init_converted, ratio, stride_con2_bt);
4066     _igvn.register_new_node_with_optimizer(ratio_init, init_converted);
4067     set_early_ctrl(ratio_init, false);
4068 
4069     Node* diff = SubNode::make(init2, ratio_init, stride_con2_bt);
4070     _igvn.register_new_node_with_optimizer(diff, init2);
4071     set_early_ctrl(diff, false);
4072 
4073     Node* ratio_idx = MulNode::make(phi_converted, ratio, stride_con2_bt);
4074     _igvn.register_new_node_with_optimizer(ratio_idx, phi_converted);
4075     set_ctrl(ratio_idx, cl);
4076 
4077     Node* add = AddNode::make(ratio_idx, diff, stride_con2_bt);
4078     _igvn.register_new_node_with_optimizer(add);
4079     set_ctrl(add, cl);
4080 
4081     _igvn.replace_node( phi2, add );
4082     // Sometimes an induction variable is unused
4083     if (add->outcnt() == 0) {
4084       _igvn.remove_dead_node(add);
4085     }
4086     --i; // deleted this phi; rescan starting with next position
4087   }
4088 }
4089 
4090 Node* PhaseIdealLoop::insert_convert_node_if_needed(BasicType target, Node* input) {
4091   BasicType source = _igvn.type(input)->basic_type();
4092   if (source == target) {
4093     return input;
4094   }
4095 
4096   Node* converted = ConvertNode::create_convert(source, target, input);
4097   _igvn.register_new_node_with_optimizer(converted, input);
4098   set_early_ctrl(converted, false);
4099 
4100   return converted;
4101 }
4102 
4103 void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) {
4104   Node* keep = nullptr;
4105   if (keep_one) {
4106     // Look for a safepoint on the idom-path.
4107     for (Node* i = tail(); i != _head; i = phase->idom(i)) {
4108       if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) {
4109         keep = i;
4110         break; // Found one
4111       }
4112     }
4113   }
4114 
4115   // Don't remove any safepoints if it is requested to keep a single safepoint and
4116   // no safepoint was found on idom-path. It is not safe to remove any safepoint
4117   // in this case since there's no safepoint dominating all paths in the loop body.
4118   bool prune = !keep_one || keep != nullptr;
4119 
4120   // Delete other safepoints in this loop.
4121   Node_List* sfpts = _safepts;
4122   if (prune && sfpts != nullptr) {
4123     assert(keep == nullptr || keep->Opcode() == Op_SafePoint, "not safepoint");
4124     for (uint i = 0; i < sfpts->size(); i++) {
4125       Node* n = sfpts->at(i);
4126       assert(phase->get_loop(n) == this, "");
4127       if (n != keep && phase->is_deleteable_safept(n)) {
4128         phase->lazy_replace(n, n->in(TypeFunc::Control));
4129       }
4130     }
4131   }
4132 }
4133 
4134 //------------------------------counted_loop-----------------------------------
4135 // Convert to counted loops where possible
4136 void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
4137 
4138   // For grins, set the inner-loop flag here
4139   if (!_child) {
4140     if (_head->is_Loop()) _head->as_Loop()->set_inner_loop();
4141   }
4142 
4143   IdealLoopTree* loop = this;
4144   if (_head->is_CountedLoop() ||
4145       phase->is_counted_loop(_head, loop, T_INT)) {
4146 
4147     if (LoopStripMiningIter == 0 || _head->as_CountedLoop()->is_strip_mined()) {
4148       // Indicate we do not need a safepoint here
4149       _has_sfpt = 1;
4150     }
4151 
4152     // Remove safepoints
4153     bool keep_one_sfpt = !(_has_call || _has_sfpt);
4154     remove_safepoints(phase, keep_one_sfpt);
4155 
4156     // Look for induction variables
4157     phase->replace_parallel_iv(this);
4158   } else if (_head->is_LongCountedLoop() ||
4159              phase->is_counted_loop(_head, loop, T_LONG)) {
4160     remove_safepoints(phase, true);
4161   } else {
4162     assert(!_head->is_Loop() || !_head->as_Loop()->is_loop_nest_inner_loop(), "transformation to counted loop should not fail");
4163     if (_parent != nullptr && !_irreducible) {
4164       // Not a counted loop. Keep one safepoint.
4165       bool keep_one_sfpt = true;
4166       remove_safepoints(phase, keep_one_sfpt);
4167     }
4168   }
4169 
4170   // Recursively
4171   assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?");
4172   assert(loop->_child != this || (loop->_child->_child == nullptr && loop->_child->_next == nullptr), "would miss some loops");
4173   if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase);
4174   if (loop->_next)  loop->_next ->counted_loop(phase);
4175 }
4176 
4177 
4178 // The Estimated Loop Clone Size:
4179 //   CloneFactor * (~112% * BodySize + BC) + CC + FanOutTerm,
4180 // where  BC and  CC are  totally ad-hoc/magic  "body" and "clone" constants,
4181 // respectively, used to ensure that the node usage estimates made are on the
4182 // safe side, for the most part. The FanOutTerm is an attempt to estimate the
4183 // possible additional/excessive nodes generated due to data and control flow
4184 // merging, for edges reaching outside the loop.
4185 uint IdealLoopTree::est_loop_clone_sz(uint factor) const {
4186 
4187   precond(0 < factor && factor < 16);
4188 
4189   uint const bc = 13;
4190   uint const cc = 17;
4191   uint const sz = _body.size() + (_body.size() + 7) / 2;
4192   uint estimate = factor * (sz + bc) + cc;
4193 
4194   assert((estimate - cc) / factor == sz + bc, "overflow");
4195 
4196   return estimate + est_loop_flow_merge_sz();
4197 }
4198 
4199 // The Estimated Loop (full-) Unroll Size:
4200 //   UnrollFactor * (~106% * BodySize) + CC + FanOutTerm,
4201 // where CC is a (totally) ad-hoc/magic "clone" constant, used to ensure that
4202 // node usage estimates made are on the safe side, for the most part. This is
4203 // a "light" version of the loop clone size calculation (above), based on the
4204 // assumption that most of the loop-construct overhead will be unraveled when
4205 // (fully) unrolled. Defined for unroll factors larger or equal to one (>=1),
4206 // including an overflow check and returning UINT_MAX in case of an overflow.
4207 uint IdealLoopTree::est_loop_unroll_sz(uint factor) const {
4208 
4209   precond(factor > 0);
4210 
4211   // Take into account that after unroll conjoined heads and tails will fold.
4212   uint const b0 = _body.size() - EMPTY_LOOP_SIZE;
4213   uint const cc = 7;
4214   uint const sz = b0 + (b0 + 15) / 16;
4215   uint estimate = factor * sz + cc;
4216 
4217   if ((estimate - cc) / factor != sz) {
4218     return UINT_MAX;
4219   }
4220 
4221   return estimate + est_loop_flow_merge_sz();
4222 }
4223 
4224 // Estimate the growth effect (in nodes) of merging control and data flow when
4225 // cloning a loop body, based on the amount of  control and data flow reaching
4226 // outside of the (current) loop body.
4227 uint IdealLoopTree::est_loop_flow_merge_sz() const {
4228 
4229   uint ctrl_edge_out_cnt = 0;
4230   uint data_edge_out_cnt = 0;
4231 
4232   for (uint i = 0; i < _body.size(); i++) {
4233     Node* node = _body.at(i);
4234     uint outcnt = node->outcnt();
4235 
4236     for (uint k = 0; k < outcnt; k++) {
4237       Node* out = node->raw_out(k);
4238       if (out == nullptr) continue;
4239       if (out->is_CFG()) {
4240         if (!is_member(_phase->get_loop(out))) {
4241           ctrl_edge_out_cnt++;
4242         }
4243       } else if (_phase->has_ctrl(out)) {
4244         Node* ctrl = _phase->get_ctrl(out);
4245         assert(ctrl != nullptr, "must be");
4246         assert(ctrl->is_CFG(), "must be");
4247         if (!is_member(_phase->get_loop(ctrl))) {
4248           data_edge_out_cnt++;
4249         }
4250       }
4251     }
4252   }
4253   // Use data and control count (x2.0) in estimate iff both are > 0. This is
4254   // a rather pessimistic estimate for the most part, in particular for some
4255   // complex loops, but still not enough to capture all loops.
4256   if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) {
4257     return 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
4258   }
4259   return 0;
4260 }
4261 
4262 #ifndef PRODUCT
//------------------------------dump_head--------------------------------------
// Dump 1 liner for loop header info: nesting-indented head/tail indices,
// predicate flags, counted-loop trip range, and various loop attributes.
void IdealLoopTree::dump_head() {
  tty->sp(2 * _nest);
  tty->print("Loop: N%d/N%d ", _head->_idx, _tail->_idx);
  if (_irreducible) tty->print(" IRREDUCIBLE");
  // The predicate chain hangs above the outer strip-mined loop (if any), so
  // skip to its entry control before examining predicates.
  Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl)
                                 : _head->in(LoopNode::EntryControl);
  const Predicates predicates(entry);
  if (predicates.loop_limit_check_predicate_block()->is_non_empty()) {
    tty->print(" limit_check");
  }
  if (UseProfiledLoopPredicate && predicates.profiled_loop_predicate_block()->is_non_empty()) {
    tty->print(" profile_predicated");
  }
  if (UseLoopPredicate && predicates.loop_predicate_block()->is_non_empty()) {
    tty->print(" predicated");
  }
  if (_head->is_CountedLoop()) {
    CountedLoopNode *cl = _head->as_CountedLoop();
    tty->print(" counted");

    // Trip range prints as "[init,limit),stride"; non-constant bounds print
    // as "int".
    Node* init_n = cl->init_trip();
    if (init_n  != nullptr &&  init_n->is_Con())
      tty->print(" [%d,", cl->init_trip()->get_int());
    else
      tty->print(" [int,");
    Node* limit_n = cl->limit();
    if (limit_n  != nullptr &&  limit_n->is_Con())
      tty->print("%d),", cl->limit()->get_int());
    else
      tty->print("int),");
    int stride_con  = cl->stride_con();
    if (stride_con > 0) tty->print("+");
    tty->print("%d", stride_con);

    tty->print(" (%0.f iters) ", cl->profile_trip_cnt());

    if (cl->is_pre_loop ()) tty->print(" pre" );
    if (cl->is_main_loop()) tty->print(" main");
    if (cl->is_post_loop()) tty->print(" post");
    if (cl->is_vectorized_loop()) tty->print(" vector");
    if (range_checks_present()) tty->print(" rc ");
  }
  if (_has_call) tty->print(" has_call");
  if (_has_sfpt) tty->print(" has_sfpt");
  if (_rce_candidate) tty->print(" rce");
  if (_safepts != nullptr && _safepts->size() > 0) {
    tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }");
  }
  if (_required_safept != nullptr && _required_safept->size() > 0) {
    tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }");
  }
  if (Verbose) {
    tty->print(" body={"); _body.dump_simple(); tty->print(" }");
  }
  if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) {
    tty->print(" strip_mined");
  }
  tty->cr();
}
4324 
4325 //------------------------------dump-------------------------------------------
4326 // Dump loops by loop tree
4327 void IdealLoopTree::dump() {
4328   dump_head();
4329   if (_child) _child->dump();
4330   if (_next)  _next ->dump();
4331 }
4332 
4333 #endif
4334 
4335 static void log_loop_tree_helper(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) {
4336   if (loop == root) {
4337     if (loop->_child != nullptr) {
4338       log->begin_head("loop_tree");
4339       log->end_head();
4340       log_loop_tree_helper(root, loop->_child, log);
4341       log->tail("loop_tree");
4342       assert(loop->_next == nullptr, "what?");
4343     }
4344   } else if (loop != nullptr) {
4345     Node* head = loop->_head;
4346     log->begin_head("loop idx='%d'", head->_idx);
4347     if (loop->_irreducible) log->print(" irreducible='1'");
4348     if (head->is_Loop()) {
4349       if (head->as_Loop()->is_inner_loop())        log->print(" inner_loop='1'");
4350       if (head->as_Loop()->is_partial_peel_loop()) log->print(" partial_peel_loop='1'");
4351     } else if (head->is_CountedLoop()) {
4352       CountedLoopNode* cl = head->as_CountedLoop();
4353       if (cl->is_pre_loop())  log->print(" pre_loop='%d'",  cl->main_idx());
4354       if (cl->is_main_loop()) log->print(" main_loop='%d'", cl->_idx);
4355       if (cl->is_post_loop()) log->print(" post_loop='%d'", cl->main_idx());
4356     }
4357     log->end_head();
4358     log_loop_tree_helper(root, loop->_child, log);
4359     log->tail("loop");
4360     log_loop_tree_helper(root, loop->_next, log);
4361   }
4362 }
4363 
4364 void PhaseIdealLoop::log_loop_tree() {
4365   if (C->log() != nullptr) {
4366     log_loop_tree_helper(_ltree_root, _ltree_root, C->log());
4367   }
4368 }
4369 
4370 // Eliminate all Parse and Template Assertion Predicates that are not associated with a loop anymore. The eliminated
4371 // predicates will be removed during the next round of IGVN.
4372 void PhaseIdealLoop::eliminate_useless_predicates() {
4373   if (C->parse_predicate_count() == 0 && C->template_assertion_predicate_count() == 0) {
4374     return; // No predicates left.
4375   }
4376 
4377   eliminate_useless_parse_predicates();
4378   eliminate_useless_template_assertion_predicates();
4379 }
4380 
// Eliminate all Parse Predicates that do not belong to a loop anymore by marking them useless. These will be removed
// during the next round of IGVN.
void PhaseIdealLoop::eliminate_useless_parse_predicates() {
  // Mark everything useless first, then re-mark those still reachable from a
  // loop entry as useful; whatever remains useless goes to the IGVN worklist.
  mark_all_parse_predicates_useless();
  if (C->has_loops()) {
    mark_loop_associated_parse_predicates_useful();
  }
  add_useless_parse_predicates_to_igvn_worklist();
}
4390 
4391 void PhaseIdealLoop::mark_all_parse_predicates_useless() const {
4392   for (int i = 0; i < C->parse_predicate_count(); i++) {
4393     C->parse_predicate(i)->mark_useless();
4394   }
4395 }
4396 
4397 void PhaseIdealLoop::mark_loop_associated_parse_predicates_useful() {
4398   for (LoopTreeIterator iterator(_ltree_root); !iterator.done(); iterator.next()) {
4399     IdealLoopTree* loop = iterator.current();
4400     if (loop->can_apply_loop_predication()) {
4401       mark_useful_parse_predicates_for_loop(loop);
4402     }
4403   }
4404 }
4405 
// This visitor marks all visited Parse Predicates useful.
class ParsePredicateUsefulMarker : public PredicateVisitor {
 public:
  using PredicateVisitor::visit;

  // Mark the ParsePredicateNode heading the visited predicate as useful.
  void visit(const ParsePredicate& parse_predicate) override {
    parse_predicate.head()->mark_useful();
  }
};
4415 
4416 void PhaseIdealLoop::mark_useful_parse_predicates_for_loop(IdealLoopTree* loop) {
4417   Node* entry = loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
4418   const PredicateIterator predicate_iterator(entry);
4419   ParsePredicateUsefulMarker useful_marker;
4420   predicate_iterator.for_each(useful_marker);
4421 }
4422 
4423 void PhaseIdealLoop::add_useless_parse_predicates_to_igvn_worklist() {
4424   for (int i = 0; i < C->parse_predicate_count(); i++) {
4425     ParsePredicateNode* parse_predicate_node = C->parse_predicate(i);
4426     if (parse_predicate_node->is_useless()) {
4427       _igvn._worklist.push(parse_predicate_node);
4428     }
4429   }
4430 }
4431 
4432 
// Eliminate all Template Assertion Predicates that do not belong to their originally associated loop anymore by
// replacing the OpaqueTemplateAssertionPredicate node of the If node with true. These nodes will be removed during the
// next round of IGVN.
void PhaseIdealLoop::eliminate_useless_template_assertion_predicates() {
  // Collect the still-useful opaque nodes (only possible if loops remain),
  // then eliminate everything that was not collected.
  Unique_Node_List useful_predicates;
  if (C->has_loops()) {
    collect_useful_template_assertion_predicates(useful_predicates);
  }
  eliminate_useless_template_assertion_predicates(useful_predicates);
}
4443 
4444 void PhaseIdealLoop::collect_useful_template_assertion_predicates(Unique_Node_List& useful_predicates) {
4445   for (LoopTreeIterator iterator(_ltree_root); !iterator.done(); iterator.next()) {
4446     IdealLoopTree* loop = iterator.current();
4447     if (loop->can_apply_loop_predication()) {
4448       collect_useful_template_assertion_predicates_for_loop(loop, useful_predicates);
4449     }
4450   }
4451 }
4452 
4453 void PhaseIdealLoop::collect_useful_template_assertion_predicates_for_loop(IdealLoopTree* loop,
4454                                                                            Unique_Node_List &useful_predicates) {
4455   Node* entry = loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
4456   const Predicates predicates(entry);
4457   if (UseProfiledLoopPredicate) {
4458     const PredicateBlock* profiled_loop_predicate_block = predicates.profiled_loop_predicate_block();
4459     if (profiled_loop_predicate_block->has_parse_predicate()) {
4460       ParsePredicateSuccessProj* parse_predicate_proj = profiled_loop_predicate_block->parse_predicate_success_proj();
4461       get_opaque_template_assertion_predicate_nodes(parse_predicate_proj, useful_predicates);
4462     }
4463   }
4464 
4465   if (UseLoopPredicate) {
4466     const PredicateBlock* loop_predicate_block = predicates.loop_predicate_block();
4467     if (loop_predicate_block->has_parse_predicate()) {
4468       ParsePredicateSuccessProj* parse_predicate_proj = loop_predicate_block->parse_predicate_success_proj();
4469       get_opaque_template_assertion_predicate_nodes(parse_predicate_proj, useful_predicates);
4470     }
4471   }
4472 }
4473 
4474 void PhaseIdealLoop::eliminate_useless_template_assertion_predicates(Unique_Node_List& useful_predicates) {
4475   for (int i = C->template_assertion_predicate_count(); i > 0; i--) {
4476     OpaqueTemplateAssertionPredicateNode* opaque_node =
4477         C->template_assertion_predicate_opaq_node(i - 1)->as_OpaqueTemplateAssertionPredicate();
4478     if (!useful_predicates.member(opaque_node)) { // not in the useful list
4479       ConINode* one = intcon(1);
4480       _igvn.replace_node(opaque_node, one);
4481     }
4482   }
4483 }
4484 
// If a post or main loop is removed due to an assert predicate, the opaque that guards the loop is not needed anymore
void PhaseIdealLoop::eliminate_useless_zero_trip_guard() {
  if (_zero_trip_guard_opaque_nodes.size() == 0) {
    return;
  }
  // First pass: collect the zero-trip-guard opaques that still guard an
  // existing innermost counted loop. Those stay useful.
  Unique_Node_List useful_zero_trip_guard_opaques_nodes;
  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();
    if (lpt->_child == nullptr && lpt->is_counted()) {
      CountedLoopNode* head = lpt->_head->as_CountedLoop();
      Node* opaque = head->is_canonical_loop_entry();
      if (opaque != nullptr) {
        useful_zero_trip_guard_opaques_nodes.push(opaque);
      }
    }
  }
  // Second pass: any recorded opaque not found above guards a loop that no
  // longer exists, so strip the opaque to expose the guard condition to IGVN.
  for (uint i = 0; i < _zero_trip_guard_opaque_nodes.size(); ++i) {
    OpaqueZeroTripGuardNode* opaque = ((OpaqueZeroTripGuardNode*)_zero_trip_guard_opaque_nodes.at(i));
    DEBUG_ONLY(CountedLoopNode* guarded_loop = opaque->guarded_loop());
    if (!useful_zero_trip_guard_opaques_nodes.member(opaque)) {
      IfNode* iff = opaque->if_node();
      IdealLoopTree* loop = get_loop(iff);
      // Walk up the loop tree to see whether the guard's loop is still
      // reachable from _ltree_root.
      while (loop != _ltree_root && loop != nullptr) {
        loop = loop->_parent;
      }
      if (loop == nullptr) {
        // unreachable from _ltree_root: zero trip guard is in a newly discovered infinite loop.
        // We can't tell if the opaque node is useful or not
        assert(guarded_loop == nullptr || guarded_loop->is_in_infinite_subgraph(), "");
      } else {
        assert(guarded_loop == nullptr, "");
        // Replace the opaque with its input (the raw guard condition).
        this->_igvn.replace_node(opaque, opaque->in(1));
      }
    } else {
      assert(guarded_loop != nullptr, "");
    }
  }
}
4523 
//------------------------process_expensive_nodes-----------------------------
// Expensive nodes have their control input set to prevent the GVN
// from commoning them and as a result forcing the resulting node to
// be in a more frequent path. Use CFG information here, to change the
// control inputs so that some expensive nodes can be commoned while
// not executed more frequently.
bool PhaseIdealLoop::process_expensive_nodes() {
  assert(OptimizeExpensiveOps, "optimization off?");

  // Sort nodes to bring similar nodes together
  C->sort_expensive_nodes();

  bool progress = false;

  // Walk runs of similar expensive nodes: [start, end) is one run.
  for (int i = 0; i < C->expensive_count(); ) {
    Node* n = C->expensive_node(i);
    int start = i;
    // Find nodes similar to n
    i++;
    for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++);
    int end = i;
    // And compare them two by two
    for (int j = start; j < end; j++) {
      Node* n1 = C->expensive_node(j);
      if (is_node_unreachable(n1)) {
        continue;
      }
      for (int k = j+1; k < end; k++) {
        Node* n2 = C->expensive_node(k);
        if (is_node_unreachable(n2)) {
          continue;
        }

        assert(n1 != n2, "should be pair of nodes");

        Node* c1 = n1->in(0);
        Node* c2 = n2->in(0);

        Node* parent_c1 = c1;
        Node* parent_c2 = c2;

        // The call to get_early_ctrl_for_expensive() moves the
        // expensive nodes up but stops at loops that are in a if
        // branch. See whether we can exit the loop and move above the
        // If.
        if (c1->is_Loop()) {
          parent_c1 = c1->in(1);
        }
        if (c2->is_Loop()) {
          parent_c2 = c2->in(1);
        }

        // Same (parent) control already: nothing to move, but enqueue the
        // pair so IGVN gets a chance to common them.
        if (parent_c1 == parent_c2) {
          _igvn._worklist.push(n1);
          _igvn._worklist.push(n2);
          continue;
        }

        // Look for identical expensive node up the dominator chain.
        if (is_dominator(c1, c2)) {
          c2 = c1;
        } else if (is_dominator(c2, c1)) {
          c1 = c2;
        } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() &&
                   parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) {
          // Both branches have the same expensive node so move it up
          // before the if.
          c1 = c2 = idom(parent_c1->in(0));
        }
        // Do the actual moves
        if (n1->in(0) != c1) {
          _igvn.replace_input_of(n1, 0, c1);
          progress = true;
        }
        if (n2->in(0) != c2) {
          _igvn.replace_input_of(n2, 0, c2);
          progress = true;
        }
      }
    }
  }

  return progress;
}
4608 
4609 //=============================================================================
4610 //----------------------------build_and_optimize-------------------------------
4611 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
4612 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
4613 void PhaseIdealLoop::build_and_optimize() {
4614   assert(!C->post_loop_opts_phase(), "no loop opts allowed");
4615 
4616   bool do_split_ifs = (_mode == LoopOptsDefault);
4617   bool skip_loop_opts = (_mode == LoopOptsNone);
4618   bool do_max_unroll = (_mode == LoopOptsMaxUnroll);
4619 
4620 
4621   int old_progress = C->major_progress();
4622   uint orig_worklist_size = _igvn._worklist.size();
4623 
4624   // Reset major-progress flag for the driver's heuristics
4625   C->clear_major_progress();
4626 
4627 #ifndef PRODUCT
4628   // Capture for later assert
4629   uint unique = C->unique();
4630   _loop_invokes++;
4631   _loop_work += unique;
4632 #endif
4633 
4634   // True if the method has at least 1 irreducible loop
4635   _has_irreducible_loops = false;
4636 
4637   _created_loop_node = false;
4638 
4639   VectorSet visited;
4640   // Pre-grow the mapping from Nodes to IdealLoopTrees.
4641   _loop_or_ctrl.map(C->unique(), nullptr);
4642   memset(_loop_or_ctrl.adr(), 0, wordSize * C->unique());
4643 
4644   // Pre-build the top-level outermost loop tree entry
4645   _ltree_root = new IdealLoopTree( this, C->root(), C->root() );
4646   // Do not need a safepoint at the top level
4647   _ltree_root->_has_sfpt = 1;
4648 
4649   // Initialize Dominators.
4650   // Checked in clone_loop_predicate() during beautify_loops().
4651   _idom_size = 0;
4652   _idom      = nullptr;
4653   _dom_depth = nullptr;
4654   _dom_stk   = nullptr;
4655 
4656   // Empty pre-order array
4657   allocate_preorders();
4658 
4659   // Build a loop tree on the fly.  Build a mapping from CFG nodes to
4660   // IdealLoopTree entries.  Data nodes are NOT walked.
4661   build_loop_tree();
4662   // Check for bailout, and return
4663   if (C->failing()) {
4664     return;
4665   }
4666 
4667   // Verify that the has_loops() flag set at parse time is consistent with the just built loop tree. When the back edge
4668   // is an exception edge, parsing doesn't set has_loops().
4669   assert(_ltree_root->_child == nullptr || C->has_loops() || C->has_exception_backedge(), "parsing found no loops but there are some");
4670   // No loops after all
4671   if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
4672 
4673   // There should always be an outer loop containing the Root and Return nodes.
4674   // If not, we have a degenerate empty program.  Bail out in this case.
4675   if (!has_node(C->root())) {
4676     if (!_verify_only) {
4677       C->clear_major_progress();
4678       assert(false, "empty program detected during loop optimization");
4679       C->record_method_not_compilable("empty program detected during loop optimization");
4680     }
4681     return;
4682   }
4683 
4684   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4685   // Nothing to do, so get out
4686   bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !do_max_unroll && !_verify_me &&
4687           !_verify_only && !bs->is_gc_specific_loop_opts_pass(_mode);
4688   bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
4689   bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(_mode);
4690   if (stop_early && !do_expensive_nodes) {
4691     return;
4692   }
4693 
4694   // Set loop nesting depth
4695   _ltree_root->set_nest( 0 );
4696 
4697   // Split shared headers and insert loop landing pads.
4698   // Do not bother doing this on the Root loop of course.
4699   if( !_verify_me && !_verify_only && _ltree_root->_child ) {
4700     C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
4701     if( _ltree_root->_child->beautify_loops( this ) ) {
4702       // Re-build loop tree!
4703       _ltree_root->_child = nullptr;
4704       _loop_or_ctrl.clear();
4705       reallocate_preorders();
4706       build_loop_tree();
4707       // Check for bailout, and return
4708       if (C->failing()) {
4709         return;
4710       }
4711       // Reset loop nesting depth
4712       _ltree_root->set_nest( 0 );
4713 
4714       C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
4715     }
4716   }
4717 
4718   // Build Dominators for elision of null checks & loop finding.
4719   // Since nodes do not have a slot for immediate dominator, make
4720   // a persistent side array for that info indexed on node->_idx.
4721   _idom_size = C->unique();
4722   _idom      = NEW_RESOURCE_ARRAY( Node*, _idom_size );
4723   _dom_depth = NEW_RESOURCE_ARRAY( uint,  _idom_size );
4724   _dom_stk   = nullptr; // Allocated on demand in recompute_dom_depth
4725   memset( _dom_depth, 0, _idom_size * sizeof(uint) );
4726 
4727   Dominators();
4728 
4729   if (!_verify_only) {
4730     // As a side effect, Dominators removed any unreachable CFG paths
4731     // into RegionNodes.  It doesn't do this test against Root, so
4732     // we do it here.
4733     for( uint i = 1; i < C->root()->req(); i++ ) {
4734       if (!_loop_or_ctrl[C->root()->in(i)->_idx]) { // Dead path into Root?
4735         _igvn.delete_input_of(C->root(), i);
4736         i--;                      // Rerun same iteration on compressed edges
4737       }
4738     }
4739 
4740     // Given dominators, try to find inner loops with calls that must
4741     // always be executed (call dominates loop tail).  These loops do
4742     // not need a separate safepoint.
4743     Node_List cisstack;
4744     _ltree_root->check_safepts(visited, cisstack);
4745   }
4746 
4747   // Walk the DATA nodes and place into loops.  Find earliest control
4748   // node.  For CFG nodes, the _loop_or_ctrl array starts out and remains
4749   // holding the associated IdealLoopTree pointer.  For DATA nodes, the
4750   // _loop_or_ctrl array holds the earliest legal controlling CFG node.
4751 
4752   // Allocate stack with enough space to avoid frequent realloc
4753   int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
4754   Node_Stack nstack(stack_size);
4755 
4756   visited.clear();
4757   Node_List worklist;
4758   // Don't need C->root() on worklist since
4759   // it will be processed among C->top() inputs
4760   worklist.push(C->top());
4761   visited.set(C->top()->_idx); // Set C->top() as visited now
4762   build_loop_early( visited, worklist, nstack );
4763 
4764   // Given early legal placement, try finding counted loops.  This placement
4765   // is good enough to discover most loop invariants.
4766   if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) {
4767     _ltree_root->counted_loop( this );
4768   }
4769 
4770   // Find latest loop placement.  Find ideal loop placement.
4771   visited.clear();
4772   init_dom_lca_tags();
4773   // Need C->root() on worklist when processing outs
4774   worklist.push(C->root());
4775   NOT_PRODUCT( C->verify_graph_edges(); )
4776   worklist.push(C->top());
4777   build_loop_late( visited, worklist, nstack );
4778   if (C->failing()) { return; }
4779 
4780   if (_verify_only) {
4781     C->restore_major_progress(old_progress);
4782     assert(C->unique() == unique, "verification _mode made Nodes? ? ?");
4783     assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
4784     return;
4785   }
4786 
4787   // clear out the dead code after build_loop_late
4788   while (_deadlist.size()) {
4789     _igvn.remove_globally_dead_node(_deadlist.pop());
4790   }
4791 
4792   eliminate_useless_zero_trip_guard();
4793 
4794   if (stop_early) {
4795     assert(do_expensive_nodes, "why are we here?");
4796     if (process_expensive_nodes()) {
4797       // If we made some progress when processing expensive nodes then
4798       // the IGVN may modify the graph in a way that will allow us to
4799       // make some more progress: we need to try processing expensive
4800       // nodes again.
4801       C->set_major_progress();
4802     }
4803     return;
4804   }
4805 
4806   // Some parser-inserted loop predicates could never be used by loop
4807   // predication or they were moved away from loop during some optimizations.
4808   // For example, peeling. Eliminate them before next loop optimizations.
4809   eliminate_useless_predicates();
4810 
4811 #ifndef PRODUCT
4812   C->verify_graph_edges();
4813   if (_verify_me) {             // Nested verify pass?
4814     // Check to see if the verify _mode is broken
4815     assert(C->unique() == unique, "non-optimize _mode made Nodes? ? ?");
4816     return;
4817   }
4818   DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
4819   if (TraceLoopOpts && C->has_loops()) {
4820     _ltree_root->dump();
4821   }
4822 #endif
4823 
4824   if (skip_loop_opts) {
4825     C->restore_major_progress(old_progress);
4826     return;
4827   }
4828 
4829   if (do_max_unroll) {
4830     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4831       IdealLoopTree* lpt = iter.current();
4832       if (lpt->is_innermost() && lpt->_allow_optimizations && !lpt->_has_call && lpt->is_counted()) {
4833         lpt->compute_trip_count(this);
4834         if (!lpt->do_one_iteration_loop(this) &&
4835             !lpt->do_remove_empty_loop(this)) {
4836           AutoNodeBudget node_budget(this);
4837           if (lpt->_head->as_CountedLoop()->is_normal_loop() &&
4838               lpt->policy_maximally_unroll(this)) {
4839             memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) );
4840             do_maximally_unroll(lpt, worklist);
4841           }
4842         }
4843       }
4844     }
4845 
4846     C->restore_major_progress(old_progress);
4847     return;
4848   }
4849 
4850   if (bs->optimize_loops(this, _mode, visited, nstack, worklist)) {
4851     return;
4852   }
4853 
4854   if (ReassociateInvariants && !C->major_progress()) {
4855     // Reassociate invariants and prep for split_thru_phi
4856     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4857       IdealLoopTree* lpt = iter.current();
4858       if (!lpt->is_loop()) {
4859         continue;
4860       }
4861       Node* head = lpt->_head;
4862       if (!head->is_BaseCountedLoop() || !lpt->is_innermost()) continue;
4863 
4864       // check for vectorized loops, any reassociation of invariants was already done
4865       if (head->is_CountedLoop()) {
4866         if (head->as_CountedLoop()->is_unroll_only()) {
4867           continue;
4868         } else {
4869           AutoNodeBudget node_budget(this);
4870           lpt->reassociate_invariants(this);
4871         }
4872       }
4873       // Because RCE opportunities can be masked by split_thru_phi,
4874       // look for RCE candidates and inhibit split_thru_phi
4875       // on just their loop-phi's for this pass of loop opts
4876       if (SplitIfBlocks && do_split_ifs &&
4877           head->as_BaseCountedLoop()->is_valid_counted_loop(head->as_BaseCountedLoop()->bt()) &&
4878           (lpt->policy_range_check(this, true, T_LONG) ||
4879            (head->is_CountedLoop() && lpt->policy_range_check(this, true, T_INT)))) {
4880         lpt->_rce_candidate = 1; // = true
4881       }
4882     }
4883   }
4884 
4885   // Check for aggressive application of split-if and other transforms
4886   // that require basic-block info (like cloning through Phi's)
4887   if (!C->major_progress() && SplitIfBlocks && do_split_ifs) {
4888     visited.clear();
4889     split_if_with_blocks(visited, nstack);
4890     if (C->failing()) {
4891       return;
4892     }
4893     DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
4894   }
4895 
4896   if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
4897     C->set_major_progress();
4898   }
4899 
4900   // Perform loop predication before iteration splitting
4901   if (UseLoopPredicate && C->has_loops() && !C->major_progress() && (C->parse_predicate_count() > 0)) {
4902     _ltree_root->_child->loop_predication(this);
4903   }
4904 
4905   if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) {
4906     if (do_intrinsify_fill()) {
4907       C->set_major_progress();
4908     }
4909   }
4910 
4911   // Perform iteration-splitting on inner loops.  Split iterations to avoid
4912   // range checks or one-shot null checks.
4913 
4914   // If split-if's didn't hack the graph too bad (no CFG changes)
4915   // then do loop opts.
4916   if (C->has_loops() && !C->major_progress()) {
4917     memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) );
4918     _ltree_root->_child->iteration_split( this, worklist );
4919     // No verify after peeling!  GCM has hoisted code out of the loop.
4920     // After peeling, the hoisted code could sink inside the peeled area.
4921     // The peeling code does not try to recompute the best location for
4922     // all the code before the peeled area, so the verify pass will always
4923     // complain about it.
4924   }
4925 
4926   // Check for bailout, and return
4927   if (C->failing()) {
4928     return;
4929   }
4930 
4931   // Do verify graph edges in any case
4932   NOT_PRODUCT( C->verify_graph_edges(); );
4933 
4934   if (!do_split_ifs) {
4935     // We saw major progress in Split-If to get here.  We forced a
4936     // pass with unrolling and not split-if, however more split-if's
4937     // might make progress.  If the unrolling didn't make progress
4938     // then the major-progress flag got cleared and we won't try
4939     // another round of Split-If.  In particular the ever-common
4940     // instance-of/check-cast pattern requires at least 2 rounds of
4941     // Split-If to clear out.
4942     C->set_major_progress();
4943   }
4944 
4945   // Repeat loop optimizations if new loops were seen
4946   if (created_loop_node()) {
4947     C->set_major_progress();
4948   }
4949 
4950   // Keep loop predicates and perform optimizations with them
4951   // until no more loop optimizations could be done.
4952   // After that switch predicates off and do more loop optimizations.
4953   if (!C->major_progress() && (C->parse_predicate_count() > 0)) {
4954     C->mark_parse_predicate_nodes_useless(_igvn);
4955     assert(C->parse_predicate_count() == 0, "should be zero now");
4956      if (TraceLoopOpts) {
4957        tty->print_cr("PredicatesOff");
4958      }
4959      C->set_major_progress();
4960   }
4961 
4962   // Auto-vectorize main-loop
4963   if (C->do_superword() && C->has_loops() && !C->major_progress()) {
4964     Compile::TracePhase tp(_t_autoVectorize);
4965 
4966     // Shared data structures for all AutoVectorizations, to reduce allocations
4967     // of large arrays.
4968     VSharedData vshared;
4969     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4970       IdealLoopTree* lpt = iter.current();
4971       AutoVectorizeStatus status = auto_vectorize(lpt, vshared);
4972 
4973       if (status == AutoVectorizeStatus::TriedAndFailed) {
4974         // We tried vectorization, but failed. From now on only unroll the loop.
4975         CountedLoopNode* cl = lpt->_head->as_CountedLoop();
4976         if (cl->has_passed_slp()) {
4977           C->set_major_progress();
4978           cl->set_notpassed_slp();
4979           cl->mark_do_unroll_only();
4980         }
4981       }
4982     }
4983   }
4984 
4985   // Move UnorderedReduction out of counted loop. Can be introduced by AutoVectorization.
4986   if (C->has_loops() && !C->major_progress()) {
4987     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4988       IdealLoopTree* lpt = iter.current();
4989       if (lpt->is_counted() && lpt->is_innermost()) {
4990         move_unordered_reduction_out_of_loop(lpt);
4991       }
4992     }
4993   }
4994 }
4995 
4996 #ifndef PRODUCT
//------------------------------print_statistics-------------------------------
// Counters accumulated across all PhaseIdealLoop invocations (not-product builds only).
int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes
int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique
volatile int PhaseIdealLoop::_long_loop_candidates=0; // Number of long loops seen
volatile int PhaseIdealLoop::_long_loop_nests=0; // Number of long loops successfully transformed to a nest
volatile int PhaseIdealLoop::_long_loop_counted_loops=0; // Number of long loops successfully transformed to a counted loop
// Print the accumulated counters above.
void PhaseIdealLoop::print_statistics() {
  tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d, long loops=%d/%d/%d", _loop_invokes, _loop_work, _long_loop_counted_loops, _long_loop_nests, _long_loop_candidates);
}
5006 #endif
5007 
5008 #ifdef ASSERT
// Build a verify-only PhaseIdealLoop, and see that it agrees with "this".
void PhaseIdealLoop::verify() const {
  ResourceMark rm;
  int old_progress = C->major_progress();
  bool success = true;

  // Construct a fresh verify-only phase over the same graph; it must reach the
  // same ctrl/idom/loop-tree answers this phase already holds.
  PhaseIdealLoop phase_verify(_igvn, this);
  if (C->failing_internal()) {
    return;
  }

  // Verify ctrl and idom of every node.
  success &= verify_idom_and_nodes(C->root(), &phase_verify);

  // Verify loop-tree.
  success &= _ltree_root->verify_tree(phase_verify._ltree_root);

  assert(success, "VerifyLoopOptimizations failed");

  // Major progress was cleared by creating a verify version of PhaseIdealLoop.
  C->restore_major_progress(old_progress);
}
5031 
5032 // Perform a BFS starting at n, through all inputs.
5033 // Call verify_idom and verify_node on all nodes of BFS traversal.
5034 bool PhaseIdealLoop::verify_idom_and_nodes(Node* root, const PhaseIdealLoop* phase_verify) const {
5035   Unique_Node_List worklist;
5036   worklist.push(root);
5037   bool success = true;
5038   for (uint i = 0; i < worklist.size(); i++) {
5039     Node* n = worklist.at(i);
5040     // process node
5041     success &= verify_idom(n, phase_verify);
5042     success &= verify_loop_ctrl(n, phase_verify);
5043     // visit inputs
5044     for (uint j = 0; j < n->req(); j++) {
5045       if (n->in(j) != nullptr) {
5046         worklist.push(n->in(j));
5047       }
5048     }
5049   }
5050   return success;
5051 }
5052 
5053 // Verify dominator structure (IDOM).
5054 bool PhaseIdealLoop::verify_idom(Node* n, const PhaseIdealLoop* phase_verify) const {
5055   // Verify IDOM for all CFG nodes (except root).
5056   if (!n->is_CFG() || n->is_Root()) {
5057     return true; // pass
5058   }
5059 
5060   if (n->_idx >= _idom_size) {
5061     tty->print("CFG Node with no idom: ");
5062     n->dump();
5063     return false; // fail
5064   }
5065 
5066   Node* id = idom_no_update(n);
5067   Node* id_verify = phase_verify->idom_no_update(n);
5068   if (id != id_verify) {
5069     tty->print("Mismatching idom for node: ");
5070     n->dump();
5071     tty->print("  We have idom: ");
5072     id->dump();
5073     tty->print("  Verify has idom: ");
5074     id_verify->dump();
5075     tty->cr();
5076     return false; // fail
5077   }
5078   return true; // pass
5079 }
5080 
5081 // Verify "_loop_or_ctrl": control and loop membership.
5082 //  (0) _loop_or_ctrl[i] == nullptr -> node not reachable.
5083 //  (1) has_ctrl -> check lowest bit. 1 -> data node. 0 -> ctrl node.
5084 //  (2) has_ctrl true: get_ctrl_no_update returns ctrl of data node.
5085 //  (3) has_ctrl false: get_loop_idx returns IdealLoopTree for ctrl node.
bool PhaseIdealLoop::verify_loop_ctrl(Node* n, const PhaseIdealLoop* phase_verify) const {
  const uint i = n->_idx;
  // The loop-tree was built from def to use (top-down).
  // The verification happens from use to def (bottom-up).
  // We may thus find nodes during verification that are not in the loop-tree.
  if (_loop_or_ctrl[i] == nullptr || phase_verify->_loop_or_ctrl[i] == nullptr) {
    // Reachability must agree: a node known to only one phase is an error.
    if (_loop_or_ctrl[i] != nullptr || phase_verify->_loop_or_ctrl[i] != nullptr) {
      tty->print_cr("Was reachable in only one. this %d, verify %d.",
                 _loop_or_ctrl[i] != nullptr, phase_verify->_loop_or_ctrl[i] != nullptr);
      n->dump();
      return false; // fail
    }
    // Not reachable for both.
    return true; // pass
  }

  // Invariant (1): exactly one of is_CFG (ctrl node) and has_ctrl (data node) holds.
  if (n->is_CFG() == has_ctrl(n)) {
    tty->print_cr("Exactly one should be true: %d for is_CFG, %d for has_ctrl.", n->is_CFG(), has_ctrl(n));
    n->dump();
    return false; // fail
  }

  // Both phases must classify the node the same way (ctrl vs data).
  if (has_ctrl(n) != phase_verify->has_ctrl(n)) {
    tty->print_cr("Mismatch has_ctrl: %d for this, %d for verify.", has_ctrl(n), phase_verify->has_ctrl(n));
    n->dump();
    return false; // fail
  } else if (has_ctrl(n)) {
    assert(phase_verify->has_ctrl(n), "sanity");
    // n is a data node.
    // Verify that its ctrl is the same.

    // Broken part of VerifyLoopOptimizations (A)
    // Reason:
    //   BUG, wrong control set for example in
    //   PhaseIdealLoop::split_if_with_blocks
    //   at "set_ctrl(x, new_ctrl);"
    /*
    if( _loop_or_ctrl[i] != loop_verify->_loop_or_ctrl[i] &&
        get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) {
      tty->print("Mismatched control setting for: ");
      n->dump();
      if( fail++ > 10 ) return;
      Node *c = get_ctrl_no_update(n);
      tty->print("We have it as: ");
      if( c->in(0) ) c->dump();
        else tty->print_cr("N%d",c->_idx);
      tty->print("Verify thinks: ");
      if( loop_verify->has_ctrl(n) )
        loop_verify->get_ctrl_no_update(n)->dump();
      else
        loop_verify->get_loop_idx(n)->dump();
      tty->cr();
    }
    */
    return true; // pass
  } else {
    assert(!phase_verify->has_ctrl(n), "sanity");
    // n is a ctrl node.
    // Verify that not has_ctrl, and that get_loop_idx is the same.

    // Broken part of VerifyLoopOptimizations (B)
    // Reason:
    //   NeverBranch node for example is added to loop outside its scope.
    //   Once we run build_loop_tree again, it is added to the correct loop.
    /*
    if (!C->major_progress()) {
      // Loop selection can be messed up if we did a major progress
      // operation, like split-if.  Do not verify in that case.
      IdealLoopTree *us = get_loop_idx(n);
      IdealLoopTree *them = loop_verify->get_loop_idx(n);
      if( us->_head != them->_head ||  us->_tail != them->_tail ) {
        tty->print("Unequals loops for: ");
        n->dump();
        if( fail++ > 10 ) return;
        tty->print("We have it as: ");
        us->dump();
        tty->print("Verify thinks: ");
        them->dump();
        tty->cr();
      }
    }
    */
    return true; // pass
  }
}
5171 
5172 static int compare_tree(IdealLoopTree* const& a, IdealLoopTree* const& b) {
5173   assert(a != nullptr && b != nullptr, "must be");
5174   return a->_head->_idx - b->_head->_idx;
5175 }
5176 
5177 GrowableArray<IdealLoopTree*> IdealLoopTree::collect_sorted_children() const {
5178   GrowableArray<IdealLoopTree*> children;
5179   IdealLoopTree* child = _child;
5180   while (child != nullptr) {
5181     assert(child->_parent == this, "all must be children of this");
5182     children.insert_sorted<compare_tree>(child);
5183     child = child->_next;
5184   }
5185   return children;
5186 }
5187 
5188 // Verify that tree structures match. Because the CFG can change, siblings
5189 // within the loop tree can be reordered. We attempt to deal with that by
5190 // reordering the verify's loop tree if possible.
bool IdealLoopTree::verify_tree(IdealLoopTree* loop_verify) const {
  assert(_head == loop_verify->_head, "mismatched loop head");
  assert(this->_parent != nullptr || this->_next == nullptr, "is_root_loop implies has_no_sibling");

  // Collect the children
  GrowableArray<IdealLoopTree*> children = collect_sorted_children();
  GrowableArray<IdealLoopTree*> children_verify = loop_verify->collect_sorted_children();

  bool success = true;

  // Compare the two children lists.
  // Both lists are sorted by head node index, so this is a sorted-merge walk:
  // equal heads are recursed into, a head present on only one side is reported
  // (unless an accepted exception applies).
  for (int i = 0, j = 0; i < children.length() || j < children_verify.length(); ) {
    IdealLoopTree* child        = nullptr;
    IdealLoopTree* child_verify = nullptr;
    // Read from both lists, if possible.
    if (i < children.length()) {
      child = children.at(i);
    }
    if (j < children_verify.length()) {
      child_verify = children_verify.at(j);
    }
    assert(child != nullptr || child_verify != nullptr, "must find at least one");
    if (child != nullptr && child_verify != nullptr && child->_head != child_verify->_head) {
      // We found two non-equal children. Select the smaller one.
      if (child->_head->_idx < child_verify->_head->_idx) {
        child_verify = nullptr;
      } else {
        child = nullptr;
      }
    }
    // Process the two children, or potentially log the failure if we only found one.
    if (child_verify == nullptr) {
      if (child->_irreducible && Compile::current()->major_progress()) {
        // Irreducible loops can pick a different header (one of its entries).
      } else {
        tty->print_cr("We have a loop that verify does not have");
        child->dump();
        success = false;
      }
      i++; // step for this
    } else if (child == nullptr) {
      if (child_verify->_irreducible && Compile::current()->major_progress()) {
        // Irreducible loops can pick a different header (one of its entries).
      } else if (child_verify->_head->as_Region()->is_in_infinite_subgraph()) {
        // Infinite loops do not get attached to the loop-tree on their first visit.
        // "this" runs before "loop_verify". It is thus possible that we find the
        // infinite loop only for "child_verify". Only finding it with "child" would
        // mean that we lost it, which is not ok.
      } else {
        tty->print_cr("Verify has a loop that we do not have");
        child_verify->dump();
        success = false;
      }
      j++; // step for verify
    } else {
      assert(child->_head == child_verify->_head, "We have both and they are equal");
      success &= child->verify_tree(child_verify); // Recursion
      i++; // step for this
      j++; // step for verify
    }
  }

  // Broken part of VerifyLoopOptimizations (D)
  // Reason:
  //   split_if has to update the _tail, if it is modified. But that is done by
  //   checking to what loop the iff belongs to. That info can be wrong, and then
  //   we do not update the _tail correctly.
  /*
  Node *tail = _tail;           // Inline a non-updating version of
  while( !tail->in(0) )         // the 'tail()' call.
    tail = tail->in(1);
  assert( tail == loop->_tail, "mismatched loop tail" );
  */

  // Counted loops must have sane entry control and backedge; loopexit()
  // itself asserts the canonical exit structure.
  if (_head->is_CountedLoop()) {
    CountedLoopNode *cl = _head->as_CountedLoop();

    Node* ctrl     = cl->init_control();
    Node* back     = cl->back_control();
    assert(ctrl != nullptr && ctrl->is_CFG(), "sane loop in-ctrl");
    assert(back != nullptr && back->is_CFG(), "sane loop backedge");
    cl->loopexit(); // assert implied
  }

  // Broken part of VerifyLoopOptimizations (E)
  // Reason:
  //   PhaseIdealLoop::split_thru_region creates new nodes for loop that are not added
  //   to the loop body. Or maybe they are not added to the correct loop.
  //   at "Node* x = n->clone();"
  /*
  // Innermost loops need to verify loop bodies,
  // but only if no 'major_progress'
  int fail = 0;
  if (!Compile::current()->major_progress() && _child == nullptr) {
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body.at(i);
      if (n->outcnt() == 0)  continue; // Ignore dead
      uint j;
      for( j = 0; j < loop->_body.size(); j++ )
        if( loop->_body.at(j) == n )
          break;
      if( j == loop->_body.size() ) { // Not found in loop body
        // Last ditch effort to avoid assertion: Its possible that we
        // have some users (so outcnt not zero) but are still dead.
        // Try to find from root.
        if (Compile::current()->root()->find(n->_idx)) {
          fail++;
          tty->print("We have that verify does not: ");
          n->dump();
        }
      }
    }
    for( uint i2 = 0; i2 < loop->_body.size(); i2++ ) {
      Node *n = loop->_body.at(i2);
      if (n->outcnt() == 0)  continue; // Ignore dead
      uint j;
      for( j = 0; j < _body.size(); j++ )
        if( _body.at(j) == n )
          break;
      if( j == _body.size() ) { // Not found in loop body
        // Last ditch effort to avoid assertion: Its possible that we
        // have some users (so outcnt not zero) but are still dead.
        // Try to find from root.
        if (Compile::current()->root()->find(n->_idx)) {
          fail++;
          tty->print("Verify has that we do not: ");
          n->dump();
        }
      }
    }
    assert( !fail, "loop body mismatch" );
  }
  */
  return success;
}
5326 #endif
5327 
5328 //------------------------------set_idom---------------------------------------
5329 void PhaseIdealLoop::set_idom(Node* d, Node* n, uint dom_depth) {
5330   _nesting.check(); // Check if a potential reallocation in the resource arena is safe
5331   uint idx = d->_idx;
5332   if (idx >= _idom_size) {
5333     uint newsize = next_power_of_2(idx);
5334     _idom      = REALLOC_RESOURCE_ARRAY( Node*,     _idom,_idom_size,newsize);
5335     _dom_depth = REALLOC_RESOURCE_ARRAY( uint, _dom_depth,_idom_size,newsize);
5336     memset( _dom_depth + _idom_size, 0, (newsize - _idom_size) * sizeof(uint) );
5337     _idom_size = newsize;
5338   }
5339   _idom[idx] = n;
5340   _dom_depth[idx] = dom_depth;
5341 }
5342 
5343 //------------------------------recompute_dom_depth---------------------------------------
5344 // The dominator tree is constructed with only parent pointers.
5345 // This recomputes the depth in the tree by first tagging all
5346 // nodes as "no depth yet" marker.  The next pass then runs up
5347 // the dom tree from each node marked "no depth yet", and computes
5348 // the depth on the way back down.
5349 void PhaseIdealLoop::recompute_dom_depth() {
5350   uint no_depth_marker = C->unique();
5351   uint i;
5352   // Initialize depth to "no depth yet" and realize all lazy updates
5353   for (i = 0; i < _idom_size; i++) {
5354     // Only indices with a _dom_depth has a Node* or null (otherwise uninitialized).
5355     if (_dom_depth[i] > 0 && _idom[i] != nullptr) {
5356       _dom_depth[i] = no_depth_marker;
5357 
5358       // heal _idom if it has a fwd mapping in _loop_or_ctrl
5359       if (_idom[i]->in(0) == nullptr) {
5360         idom(i);
5361       }
5362     }
5363   }
5364   if (_dom_stk == nullptr) {
5365     uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
5366     if (init_size < 10) init_size = 10;
5367     _dom_stk = new GrowableArray<uint>(init_size);
5368   }
5369   // Compute new depth for each node.
5370   for (i = 0; i < _idom_size; i++) {
5371     uint j = i;
5372     // Run up the dom tree to find a node with a depth
5373     while (_dom_depth[j] == no_depth_marker) {
5374       _dom_stk->push(j);
5375       j = _idom[j]->_idx;
5376     }
5377     // Compute the depth on the way back down this tree branch
5378     uint dd = _dom_depth[j] + 1;
5379     while (_dom_stk->length() > 0) {
5380       uint j = _dom_stk->pop();
5381       _dom_depth[j] = dd;
5382       dd++;
5383     }
5384   }
5385 }
5386 
5387 //------------------------------sort-------------------------------------------
5388 // Insert 'loop' into the existing loop tree.  'innermost' is a leaf of the
5389 // loop tree, not the root.
IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermost ) {
  if( !innermost ) return loop; // New innermost loop

  // The "list" walked below is the chain of _parent pointers starting at
  // 'innermost'; loops on it are kept sorted by decreasing header pre-order
  // (inner loops, walked later, have larger pre-order numbers).
  int loop_preorder = get_preorder(loop->_head); // Cache pre-order number
  assert( loop_preorder, "not yet post-walked loop" );
  IdealLoopTree **pp = &innermost;      // Pointer to previous next-pointer
  IdealLoopTree *l = *pp;               // Do I go before or after 'l'?

  // Insert at start of list
  while( l ) {                  // Insertion sort based on pre-order
    if( l == loop ) return innermost; // Already on list!
    int l_preorder = get_preorder(l->_head); // Cache pre-order number
    assert( l_preorder, "not yet post-walked l" );
    // Check header pre-order number to figure proper nesting
    if( loop_preorder > l_preorder )
      break;                    // End of insertion
    // If headers tie (e.g., shared headers) check tail pre-order numbers.
    // Since I split shared headers, you'd think this could not happen.
    // BUT: I must first do the preorder numbering before I can discover I
    // have shared headers, so the split headers all get the same preorder
    // number as the RegionNode they split from.
    if( loop_preorder == l_preorder &&
        get_preorder(loop->_tail) < get_preorder(l->_tail) )
      break;                    // Also check for shared headers (same pre#)
    pp = &l->_parent;           // Chain up list
    l = *pp;
  }
  // Link into list
  // Point predecessor to me
  *pp = loop;
  // Point me to successor
  IdealLoopTree *p = loop->_parent;
  loop->_parent = l;            // Point me to successor
  if( p ) sort( p, innermost ); // Insert my parents into list as well
  return innermost;
}
5426 
5427 //------------------------------build_loop_tree--------------------------------
5428 // I use a modified Vick/Tarjan algorithm.  I need pre- and a post- visit
5429 // bits.  The _loop_or_ctrl[] array is mapped by Node index and holds a null for
5430 // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the
5431 // tightest enclosing IdealLoopTree for post-walked.
5432 //
5433 // During my forward walk I do a short 1-layer lookahead to see if I can find
5434 // a loop backedge with that doesn't have any work on the backedge.  This
5435 // helps me construct nested loops with shared headers better.
5436 //
5437 // Once I've done the forward recursion, I do the post-work.  For each child
5438 // I check to see if there is a backedge.  Backedges define a loop!  I
5439 // insert an IdealLoopTree at the target of the backedge.
5440 //
5441 // During the post-work I also check to see if I have several children
5442 // belonging to different loops.  If so, then this Node is a decision point
5443 // where control flow can choose to change loop nests.  It is at this
5444 // decision point where I can figure out how loops are nested.  At this
5445 // time I can properly order the different loop nests from my children.
5446 // Note that there may not be any backedges at the decision point!
5447 //
5448 // Since the decision point can be far removed from the backedges, I can't
5449 // order my loops at the time I discover them.  Thus at the decision point
5450 // I need to inspect loop header pre-order numbers to properly nest my
5451 // loops.  This means I need to sort my childrens' loops by pre-order.
5452 // The sort is of size number-of-control-children, which generally limits
5453 // it to size 2 (i.e., I just choose between my 2 target loops).
void PhaseIdealLoop::build_loop_tree() {
  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
  GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
  Node *n = C->root();
  bltstack.push(n);
  int pre_order = 1;
  int stack_size;

  // Iterative DFS: a node stays on the stack until all of its CFG children
  // have been post-visited; only then is it post-processed itself.
  while ( ( stack_size = bltstack.length() ) != 0 ) {
    n = bltstack.top(); // Leave node on stack
    if ( !is_visited(n) ) {
      // ---- Pre-pass Work ----
      // Pre-walked but not post-walked nodes need a pre_order number.

      set_preorder_visited( n, pre_order ); // set as visited

      // ---- Scan over children ----
      // Scan first over control projections that lead to loop headers.
      // This helps us find inner-to-outer loops with shared headers better.

      // Scan children's children for loop headers.
      for ( int i = n->outcnt() - 1; i >= 0; --i ) {
        Node* m = n->raw_out(i);       // Child
        if( m->is_CFG() && !is_visited(m) ) { // Only for CFG children
          // Scan over children's children to find loop
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
            Node* l = m->fast_out(j);
            if( is_visited(l) &&       // Been visited?
                !is_postvisited(l) &&  // But not post-visited
                get_preorder(l) < pre_order ) { // And smaller pre-order
              // Found!  Scan the DFS down this path before doing other paths
              bltstack.push(m);
              break;
            }
          }
        }
      }
      pre_order++;
    }
    else if ( !is_postvisited(n) ) {
      // Note: build_loop_tree_impl() adds out edges on rare occasions,
      // such as com.sun.rsasign.am::a.
      // For non-recursive version, first, process current children.
      // On next iteration, check if additional children were added.
      for ( int k = n->outcnt() - 1; k >= 0; --k ) {
        Node* u = n->raw_out(k);
        if ( u->is_CFG() && !is_visited(u) ) {
          bltstack.push(u);
        }
      }
      if ( bltstack.length() == stack_size ) {
        // There were no additional children, post visit node now
        (void)bltstack.pop(); // Remove node from stack
        pre_order = build_loop_tree_impl(n, pre_order);
        // Check for bailout
        if (C->failing()) {
          return;
        }
        // Check to grow _preorders[] array for the case when
        // build_loop_tree_impl() adds new nodes.
        check_grow_preorders();
      }
    }
    else {
      (void)bltstack.pop(); // Remove post-visited node from stack
    }
  }
  DEBUG_ONLY(verify_regions_in_irreducible_loops();)
}
5523 
5524 //------------------------------build_loop_tree_impl---------------------------
// Post-visit 'n': create IdealLoopTrees for backedges that target n's CFG
// successors, attach infinite loops to the root via a never-taken exit,
// detect irreducible loops, and record per-loop flags (calls, scalar-
// replaceable allocations, safepoints). Returns the (possibly updated)
// running pre-order number.
int PhaseIdealLoop::build_loop_tree_impl(Node* n, int pre_order) {
  // ---- Post-pass Work ----
  // Pre-walked but not post-walked nodes need a pre_order number.

  // Tightest enclosing loop for this Node
  IdealLoopTree *innermost = nullptr;

  // For all children, see if any edge is a backedge.  If so, make a loop
  // for it.  Then find the tightest enclosing loop for the self Node.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* m = n->fast_out(i);   // Child
    if (n == m) continue;      // Ignore control self-cycles
    if (!m->is_CFG()) continue;// Ignore non-CFG edges

    IdealLoopTree *l;           // Child's loop
    if (!is_postvisited(m)) {  // Child visited but not post-visited?
      // Found a backedge
      assert(get_preorder(m) < pre_order, "should be backedge");
      // Check for the RootNode, which is already a LoopNode and is allowed
      // to have multiple "backedges".
      if (m == C->root()) {     // Found the root?
        l = _ltree_root;        // Root is the outermost LoopNode
      } else {                  // Else found a nested loop
        // Insert a LoopNode to mark this loop.
        l = new IdealLoopTree(this, m, n);
      } // End of Else found a nested loop
      if (!has_loop(m)) {        // If 'm' does not already have a loop set
        set_loop(m, l);         // Set loop header to loop now
      }
    } else {                    // Else not a nested loop
      if (!_loop_or_ctrl[m->_idx]) continue; // Dead code has no loop
      IdealLoopTree* m_loop = get_loop(m);
      l = m_loop;          // Get previously determined loop
      // If successor is header of a loop (nest), move up-loop till it
      // is a member of some outer enclosing loop.  Since there are no
      // shared headers (I've split them already) I only need to go up
      // at most 1 level.
      while (l && l->_head == m) { // Successor heads loop?
        l = l->_parent;         // Move up 1 for me
      }
      // If this loop is not properly parented, then this loop
      // has no exit path out, i.e. its an infinite loop.
      if (!l) {
        // Make loop "reachable" from root so the CFG is reachable.  Basically
        // insert a bogus loop exit that is never taken.  'm', the loop head,
        // points to 'n', one (of possibly many) fall-in paths.  There may be
        // many backedges as well.

        if (!_verify_only) {
          // Insert the NeverBranch between 'm' and it's control user.
          NeverBranchNode *iff = new NeverBranchNode( m );
          _igvn.register_new_node_with_optimizer(iff);
          set_loop(iff, m_loop);
          Node *if_t = new CProjNode( iff, 0 );
          _igvn.register_new_node_with_optimizer(if_t);
          set_loop(if_t, m_loop);

          Node* cfg = nullptr;       // Find the One True Control User of m
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
            Node* x = m->fast_out(j);
            if (x->is_CFG() && x != m && x != iff)
              { cfg = x; break; }
          }
          assert(cfg != nullptr, "must find the control user of m");
          uint k = 0;             // Probably cfg->in(0)
          while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
          _igvn.replace_input_of(cfg, k, if_t); // Now point to NeverBranch

          // Now create the never-taken loop exit
          Node *if_f = new CProjNode( iff, 1 );
          _igvn.register_new_node_with_optimizer(if_f);
          set_loop(if_f, _ltree_root);
          // Find frame ptr for Halt.  Relies on the optimizer
          // V-N'ing.  Easier and quicker than searching through
          // the program structure.
          Node *frame = new ParmNode( C->start(), TypeFunc::FramePtr );
          _igvn.register_new_node_with_optimizer(frame);
          // Halt & Catch Fire
          Node* halt = new HaltNode(if_f, frame, "never-taken loop exit reached");
          _igvn.register_new_node_with_optimizer(halt);
          set_loop(halt, _ltree_root);
          _igvn.add_input_to(C->root(), halt);
        }
        set_loop(C->root(), _ltree_root);
        // move to outer most loop with same header
        l = m_loop;
        while (true) {
          IdealLoopTree* next = l->_parent;
          if (next == nullptr || next->_head != m) {
            break;
          }
          l = next;
        }
        // properly insert infinite loop in loop tree
        sort(_ltree_root, l);
        // fix child link from parent
        IdealLoopTree* p = l->_parent;
        l->_next = p->_child;
        p->_child = l;
        // code below needs enclosing loop
        l = l->_parent;
      }
    }
    if (is_postvisited(l->_head)) {
      // We are currently visiting l, but its head has already been post-visited.
      // l is irreducible: we just found a second entry m.
      _has_irreducible_loops = true;
      RegionNode* secondary_entry = m->as_Region();

      if (!secondary_entry->can_be_irreducible_entry()) {
        assert(!VerifyNoNewIrreducibleLoops, "A new irreducible loop was created after parsing.");
        C->record_method_not_compilable("A new irreducible loop was created after parsing.");
        return pre_order;
      }

      // Walk up the loop-tree, mark all loops that are already post-visited as irreducible
      // Since m is a secondary entry to them all.
      while( is_postvisited(l->_head) ) {
        l->_irreducible = 1; // = true
        RegionNode* head = l->_head->as_Region();
        if (!head->can_be_irreducible_entry()) {
          assert(!VerifyNoNewIrreducibleLoops, "A new irreducible loop was created after parsing.");
          C->record_method_not_compilable("A new irreducible loop was created after parsing.");
          return pre_order;
        }
        l = l->_parent;
        // Check for bad CFG here to prevent crash, and bailout of compile
        if (l == nullptr) {
#ifndef PRODUCT
          if (TraceLoopOpts) {
            tty->print_cr("bailout: unhandled CFG: infinite irreducible loop");
            m->dump();
          }
#endif
          // This is a rare case that we do not want to handle in C2.
          C->record_method_not_compilable("unhandled CFG detected during loop optimization");
          return pre_order;
        }
      }
    }
    if (!_verify_only) {
      C->set_has_irreducible_loop(_has_irreducible_loops);
    }

    // This Node might be a decision point for loops.  It is only if
    // it's children belong to several different loops.  The sort call
    // does a trivial amount of work if there is only 1 child or all
    // children belong to the same loop.  If however, the children
    // belong to different loops, the sort call will properly set the
    // _parent pointers to show how the loops nest.
    //
    // In any case, it returns the tightest enclosing loop.
    innermost = sort( l, innermost );
  }

  // Def-use info will have some dead stuff; dead stuff will have no
  // loop decided on.

  // Am I a loop header?  If so fix up my parent's child and next ptrs.
  if( innermost && innermost->_head == n ) {
    assert( get_loop(n) == innermost, "" );
    IdealLoopTree *p = innermost->_parent;
    IdealLoopTree *l = innermost;
    while (p && l->_head == n) {
      l->_next = p->_child;     // Put self on parents 'next child'
      p->_child = l;            // Make self as first child of parent
      l = p;                    // Now walk up the parent chain
      p = l->_parent;
    }
  } else {
    // Note that it is possible for a LoopNode to reach here, if the
    // backedge has been made unreachable (hence the LoopNode no longer
    // denotes a Loop, and will eventually be removed).

    // Record tightest enclosing loop for self.  Mark as post-visited.
    set_loop(n, innermost);
    // Also record has_call flag early on
    if (innermost) {
      if( n->is_Call() && !n->is_CallLeaf() && !n->is_macro() ) {
        // Do not count uncommon calls
        if( !n->is_CallStaticJava() || !n->as_CallStaticJava()->_name ) {
          Node *iff = n->in(0)->in(0);
          // No any calls for vectorized loops.
          if (C->do_superword() ||
              !iff->is_If() ||
              (n->in(0)->Opcode() == Op_IfFalse && (1.0 - iff->as_If()->_prob) >= 0.01) ||
              iff->as_If()->_prob >= 0.01) {
            innermost->_has_call = 1;
          }
        }
      } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) {
        // Disable loop optimizations if the loop has a scalar replaceable
        // allocation. This disabling may cause a potential performance lost
        // if the allocation is not eliminated for some reason.
        innermost->_allow_optimizations = false;
        innermost->_has_call = 1; // = true
      } else if (n->Opcode() == Op_SafePoint) {
        // Record all safepoints in this loop.
        if (innermost->_safepts == nullptr) innermost->_safepts = new Node_List();
        innermost->_safepts->push(n);
      }
    }
  }

  // Flag as post-visited now
  set_postvisited(n);
  return pre_order;
}
5733 
5734 #ifdef ASSERT
5735 //--------------------------verify_regions_in_irreducible_loops----------------
5736 // Iterate down from Root through CFG, verify for every region:
5737 // if it is in an irreducible loop it must be marked as such
5738 void PhaseIdealLoop::verify_regions_in_irreducible_loops() {
5739   ResourceMark rm;
5740   if (!_has_irreducible_loops) {
5741     // last build_loop_tree has not found any irreducible loops
5742     // hence no region has to be marked is_in_irreduible_loop
5743     return;
5744   }
5745 
5746   RootNode* root = C->root();
5747   Unique_Node_List worklist; // visit all nodes once
5748   worklist.push(root);
5749   bool failure = false;
5750   for (uint i = 0; i < worklist.size(); i++) {
5751     Node* n = worklist.at(i);
5752     if (n->is_Region()) {
5753       RegionNode* region = n->as_Region();
5754       if (is_in_irreducible_loop(region) &&
5755           region->loop_status() == RegionNode::LoopStatus::Reducible) {
5756         failure = true;
5757         tty->print("irreducible! ");
5758         region->dump();
5759       }
5760     }
5761     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
5762       Node* use = n->fast_out(j);
5763       if (use->is_CFG()) {
5764         worklist.push(use); // push if was not pushed before
5765       }
5766     }
5767   }
5768   assert(!failure, "region in irreducible loop was marked as reducible");
5769 }
5770 
5771 //---------------------------is_in_irreducible_loop-------------------------
5772 // Analogous to ciTypeFlow::Block::is_in_irreducible_loop
5773 bool PhaseIdealLoop::is_in_irreducible_loop(RegionNode* region) {
5774   if (!_has_irreducible_loops) {
5775     return false; // no irreducible loop in graph
5776   }
5777   IdealLoopTree* l = get_loop(region); // l: innermost loop that contains region
5778   do {
5779     if (l->_irreducible) {
5780       return true; // found it
5781     }
5782     if (l == _ltree_root) {
5783       return false; // reached root, terimnate
5784     }
5785     l = l->_parent;
5786   } while (l != nullptr);
5787   assert(region->is_in_infinite_subgraph(), "must be in infinite subgraph");
5788   // We have "l->_parent == nullptr", which happens only for infinite loops,
5789   // where no parent is attached to the loop. We did not find any irreducible
5790   // loop from this block out to lp. Thus lp only has one entry, and no exit
5791   // (it is infinite and reducible). We can always rewrite an infinite loop
5792   // that is nested inside other loops:
5793   // while(condition) { infinite_loop; }
5794   // with an equivalent program where the infinite loop is an outermost loop
5795   // that is not nested in any loop:
5796   // while(condition) { break; } infinite_loop;
5797   // Thus, we can understand lp as an outermost loop, and can terminate and
5798   // conclude: this block is in no irreducible loop.
5799   return false;
5800 }
5801 #endif
5802 
5803 //------------------------------build_loop_early-------------------------------
5804 // Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
5805 // First pass computes the earliest controlling node possible.  This is the
5806 // controlling input with the deepest dominating depth.
5807 void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
5808   while (worklist.size() != 0) {
5809     // Use local variables nstack_top_n & nstack_top_i to cache values
5810     // on nstack's top.
5811     Node *nstack_top_n = worklist.pop();
5812     uint  nstack_top_i = 0;
5813 //while_nstack_nonempty:
5814     while (true) {
5815       // Get parent node and next input's index from stack's top.
5816       Node  *n = nstack_top_n;
5817       uint   i = nstack_top_i;
5818       uint cnt = n->req(); // Count of inputs
5819       if (i == 0) {        // Pre-process the node.
5820         if( has_node(n) &&            // Have either loop or control already?
5821             !has_ctrl(n) ) {          // Have loop picked out already?
5822           // During "merge_many_backedges" we fold up several nested loops
5823           // into a single loop.  This makes the members of the original
5824           // loop bodies pointing to dead loops; they need to move up
5825           // to the new UNION'd larger loop.  I set the _head field of these
5826           // dead loops to null and the _parent field points to the owning
5827           // loop.  Shades of UNION-FIND algorithm.
5828           IdealLoopTree *ilt;
5829           while( !(ilt = get_loop(n))->_head ) {
5830             // Normally I would use a set_loop here.  But in this one special
5831             // case, it is legal (and expected) to change what loop a Node
5832             // belongs to.
5833             _loop_or_ctrl.map(n->_idx, (Node*)(ilt->_parent));
5834           }
5835           // Remove safepoints ONLY if I've already seen I don't need one.
5836           // (the old code here would yank a 2nd safepoint after seeing a
5837           // first one, even though the 1st did not dominate in the loop body
5838           // and thus could be avoided indefinitely)
5839           if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
5840               is_deleteable_safept(n)) {
5841             Node *in = n->in(TypeFunc::Control);
5842             lazy_replace(n,in);       // Pull safepoint now
5843             if (ilt->_safepts != nullptr) {
5844               ilt->_safepts->yank(n);
5845             }
5846             // Carry on with the recursion "as if" we are walking
5847             // only the control input
5848             if( !visited.test_set( in->_idx ) ) {
5849               worklist.push(in);      // Visit this guy later, using worklist
5850             }
5851             // Get next node from nstack:
5852             // - skip n's inputs processing by setting i > cnt;
5853             // - we also will not call set_early_ctrl(n) since
5854             //   has_node(n) == true (see the condition above).
5855             i = cnt + 1;
5856           }
5857         }
5858       } // if (i == 0)
5859 
5860       // Visit all inputs
5861       bool done = true;       // Assume all n's inputs will be processed
5862       while (i < cnt) {
5863         Node *in = n->in(i);
5864         ++i;
5865         if (in == nullptr) continue;
5866         if (in->pinned() && !in->is_CFG())
5867           set_ctrl(in, in->in(0));
5868         int is_visited = visited.test_set( in->_idx );
5869         if (!has_node(in)) {  // No controlling input yet?
5870           assert( !in->is_CFG(), "CFG Node with no controlling input?" );
5871           assert( !is_visited, "visit only once" );
5872           nstack.push(n, i);  // Save parent node and next input's index.
5873           nstack_top_n = in;  // Process current input now.
5874           nstack_top_i = 0;
5875           done = false;       // Not all n's inputs processed.
5876           break; // continue while_nstack_nonempty;
5877         } else if (!is_visited) {
5878           // This guy has a location picked out for him, but has not yet
5879           // been visited.  Happens to all CFG nodes, for instance.
5880           // Visit him using the worklist instead of recursion, to break
5881           // cycles.  Since he has a location already we do not need to
5882           // find his location before proceeding with the current Node.
5883           worklist.push(in);  // Visit this guy later, using worklist
5884         }
5885       }
5886       if (done) {
5887         // All of n's inputs have been processed, complete post-processing.
5888 
5889         // Compute earliest point this Node can go.
5890         // CFG, Phi, pinned nodes already know their controlling input.
5891         if (!has_node(n)) {
5892           // Record earliest legal location
5893           set_early_ctrl(n, false);
5894         }
5895         if (nstack.is_empty()) {
5896           // Finished all nodes on stack.
5897           // Process next node on the worklist.
5898           break;
5899         }
5900         // Get saved parent node and next input's index.
5901         nstack_top_n = nstack.node();
5902         nstack_top_i = nstack.index();
5903         nstack.pop();
5904       }
5905     } // while (true)
5906   }
5907 }
5908 
5909 //------------------------------dom_lca_internal--------------------------------
5910 // Pair-wise LCA
5911 Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const {
5912   if( !n1 ) return n2;          // Handle null original LCA
5913   assert( n1->is_CFG(), "" );
5914   assert( n2->is_CFG(), "" );
5915   // find LCA of all uses
5916   uint d1 = dom_depth(n1);
5917   uint d2 = dom_depth(n2);
5918   while (n1 != n2) {
5919     if (d1 > d2) {
5920       n1 =      idom(n1);
5921       d1 = dom_depth(n1);
5922     } else if (d1 < d2) {
5923       n2 =      idom(n2);
5924       d2 = dom_depth(n2);
5925     } else {
5926       // Here d1 == d2.  Due to edits of the dominator-tree, sections
5927       // of the tree might have the same depth.  These sections have
5928       // to be searched more carefully.
5929 
5930       // Scan up all the n1's with equal depth, looking for n2.
5931       Node *t1 = idom(n1);
5932       while (dom_depth(t1) == d1) {
5933         if (t1 == n2)  return n2;
5934         t1 = idom(t1);
5935       }
5936       // Scan up all the n2's with equal depth, looking for n1.
5937       Node *t2 = idom(n2);
5938       while (dom_depth(t2) == d2) {
5939         if (t2 == n1)  return n1;
5940         t2 = idom(t2);
5941       }
5942       // Move up to a new dominator-depth value as well as up the dom-tree.
5943       n1 = t1;
5944       n2 = t2;
5945       d1 = dom_depth(n1);
5946       d2 = dom_depth(n2);
5947     }
5948   }
5949   return n1;
5950 }
5951 
5952 //------------------------------compute_idom-----------------------------------
5953 // Locally compute IDOM using dom_lca call.  Correct only if the incoming
5954 // IDOMs are correct.
5955 Node *PhaseIdealLoop::compute_idom( Node *region ) const {
5956   assert( region->is_Region(), "" );
5957   Node *LCA = nullptr;
5958   for( uint i = 1; i < region->req(); i++ ) {
5959     if( region->in(i) != C->top() )
5960       LCA = dom_lca( LCA, region->in(i) );
5961   }
5962   return LCA;
5963 }
5964 
5965 bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) {
5966   bool had_error = false;
5967 #ifdef ASSERT
5968   if (early != C->root()) {
5969     // Make sure that there's a dominance path from LCA to early
5970     Node* d = LCA;
5971     while (d != early) {
5972       if (d == C->root()) {
5973         dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA);
5974         tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx);
5975         had_error = true;
5976         break;
5977       }
5978       d = idom(d);
5979     }
5980   }
5981 #endif
5982   return had_error;
5983 }
5984 
5985 
5986 Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) {
5987   // Compute LCA over list of uses
5988   bool had_error = false;
5989   Node *LCA = nullptr;
5990   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) {
5991     Node* c = n->fast_out(i);
5992     if (_loop_or_ctrl[c->_idx] == nullptr)
5993       continue;                 // Skip the occasional dead node
5994     if( c->is_Phi() ) {         // For Phis, we must land above on the path
5995       for( uint j=1; j<c->req(); j++ ) {// For all inputs
5996         if( c->in(j) == n ) {   // Found matching input?
5997           Node *use = c->in(0)->in(j);
5998           if (_verify_only && use->is_top()) continue;
5999           LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
6000           if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
6001         }
6002       }
6003     } else {
6004       // For CFG data-users, use is in the block just prior
6005       Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0);
6006       LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
6007       if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
6008     }
6009   }
6010   assert(!had_error, "bad dominance");
6011   return LCA;
6012 }
6013 
6014 // Check the shape of the graph at the loop entry. In some cases,
6015 // the shape of the graph does not match the shape outlined below.
6016 // That is caused by the Opaque1 node "protecting" the shape of
6017 // the graph being removed by, for example, the IGVN performed
6018 // in PhaseIdealLoop::build_and_optimize().
6019 //
6020 // After the Opaque1 node has been removed, optimizations (e.g., split-if,
6021 // loop unswitching, and IGVN, or a combination of them) can freely change
6022 // the graph's shape. As a result, the graph shape outlined below cannot
6023 // be guaranteed anymore.
6024 Node* CountedLoopNode::is_canonical_loop_entry() {
6025   if (!is_main_loop() && !is_post_loop()) {
6026     return nullptr;
6027   }
6028   Node* ctrl = skip_assertion_predicates_with_halt();
6029 
6030   if (ctrl == nullptr || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) {
6031     return nullptr;
6032   }
6033   Node* iffm = ctrl->in(0);
6034   if (iffm == nullptr || iffm->Opcode() != Op_If) {
6035     return nullptr;
6036   }
6037   Node* bolzm = iffm->in(1);
6038   if (bolzm == nullptr || !bolzm->is_Bool()) {
6039     return nullptr;
6040   }
6041   Node* cmpzm = bolzm->in(1);
6042   if (cmpzm == nullptr || !cmpzm->is_Cmp()) {
6043     return nullptr;
6044   }
6045 
6046   uint input = is_main_loop() ? 2 : 1;
6047   if (input >= cmpzm->req() || cmpzm->in(input) == nullptr) {
6048     return nullptr;
6049   }
6050   bool res = cmpzm->in(input)->Opcode() == Op_OpaqueZeroTripGuard;
6051 #ifdef ASSERT
6052   bool found_opaque = false;
6053   for (uint i = 1; i < cmpzm->req(); i++) {
6054     Node* opnd = cmpzm->in(i);
6055     if (opnd && opnd->is_Opaque1()) {
6056       found_opaque = true;
6057       break;
6058     }
6059   }
6060   assert(found_opaque == res, "wrong pattern");
6061 #endif
6062   return res ? cmpzm->in(input) : nullptr;
6063 }
6064 
6065 // Find pre loop end from main loop. Returns nullptr if none.
6066 CountedLoopEndNode* CountedLoopNode::find_pre_loop_end() {
6067   assert(is_main_loop(), "Can only find pre-loop from main-loop");
6068   // The loop cannot be optimized if the graph shape at the loop entry is
6069   // inappropriate.
6070   if (is_canonical_loop_entry() == nullptr) {
6071     return nullptr;
6072   }
6073 
6074   Node* p_f = skip_assertion_predicates_with_halt()->in(0)->in(0);
6075   if (!p_f->is_IfFalse() || !p_f->in(0)->is_CountedLoopEnd()) {
6076     return nullptr;
6077   }
6078   CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd();
6079   CountedLoopNode* loop_node = pre_end->loopnode();
6080   if (loop_node == nullptr || !loop_node->is_pre_loop()) {
6081     return nullptr;
6082   }
6083   return pre_end;
6084 }
6085 
6086 //------------------------------get_late_ctrl----------------------------------
6087 // Compute latest legal control.
6088 Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
6089   assert(early != nullptr, "early control should not be null");
6090 
6091   Node* LCA = compute_lca_of_uses(n, early);
6092 #ifdef ASSERT
6093   if (LCA == C->root() && LCA != early) {
6094     // def doesn't dominate uses so print some useful debugging output
6095     compute_lca_of_uses(n, early, true);
6096   }
6097 #endif
6098 
6099   if (n->is_Load() && LCA != early) {
6100     LCA = get_late_ctrl_with_anti_dep(n->as_Load(), early, LCA);
6101   }
6102 
6103   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
6104   return LCA;
6105 }
6106 
// if this is a load, check for anti-dependent stores
// We use a conservative algorithm to identify potential interfering
// instructions and for rescheduling the load.  The users of the memory
// input of this load are examined.  Any use which is not a load and is
// dominated by early is considered a potentially interfering store.
// This can produce false positives.
Node* PhaseIdealLoop::get_late_ctrl_with_anti_dep(LoadNode* n, Node* early, Node* LCA) {
  int load_alias_idx = C->get_alias_index(n->adr_type());
  // Only rewritable memory can have interfering stores at all.
  if (C->alias_type(load_alias_idx)->is_rewritable()) {
    Unique_Node_List worklist;

    // Seed the worklist with every user of the load's memory input.
    Node* mem = n->in(MemNode::Memory);
    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
      Node* s = mem->fast_out(i);
      worklist.push(s);
    }
    // Process the worklist. It may grow while iterating (MergeMems and memory
    // projections are expanded below). Stop early once LCA has been raised
    // all the way up to 'early', since it can't get any higher.
    for (uint i = 0; i < worklist.size() && LCA != early; i++) {
      Node* s = worklist.at(i);
      // Loads, safepoints, uncommon traps and Phis can't modify memory and
      // are ignored here (Phis are handled separately in the loop below).
      if (s->is_Load() || s->Opcode() == Op_SafePoint ||
          (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0) ||
          s->is_Phi()) {
        continue;
      } else if (s->is_MergeMem()) {
        // MergeMem is not a real memory operation: look through it at its users.
        for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
          Node* s1 = s->fast_out(i);
          worklist.push(s1);
        }
      } else {
        Node* sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
        assert(sctrl != nullptr || !s->is_reachable_from_root(), "must have control");
        // Only a potential store that sits below 'early' can interfere.
        if (sctrl != nullptr && !sctrl->is_top() && is_dominator(early, sctrl)) {
          const TypePtr* adr_type = s->adr_type();
          if (s->is_ArrayCopy()) {
            // Copy to known instance needs destination type to test for aliasing
            const TypePtr* dest_type = s->as_ArrayCopy()->_dest_type;
            if (dest_type != TypeOopPtr::BOTTOM) {
              adr_type = dest_type;
            }
          }
          if (C->can_alias(adr_type, load_alias_idx)) {
            // Potentially interfering store: raise LCA so the load stays above it.
            LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
          } else if (s->is_CFG() && s->is_Multi()) {
            // Look for the memory use of s (that is the use of its memory projection)
            for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
              Node* s1 = s->fast_out(i);
              assert(s1->is_Proj(), "projection expected");
              if (_igvn.type(s1) == Type::MEMORY) {
                for (DUIterator_Fast jmax, j = s1->fast_outs(jmax); j < jmax; j++) {
                  Node* s2 = s1->fast_out(j);
                  worklist.push(s2);
                }
              }
            }
          }
        }
      }
    }
    // For Phis only consider Region's inputs that were reached by following the memory edges
    if (LCA != early) {
      for (uint i = 0; i < worklist.size(); i++) {
        Node* s = worklist.at(i);
        if (s->is_Phi() && C->can_alias(s->adr_type(), load_alias_idx)) {
          Node* r = s->in(0);
          for (uint j = 1; j < s->req(); j++) {
            Node* in = s->in(j);
            Node* r_in = r->in(j);
            // We can't reach any node from a Phi because we don't enqueue Phi's uses above
            if (((worklist.member(in) && !in->is_Phi()) || in == mem) && is_dominator(early, r_in)) {
              LCA = dom_lca_for_get_late_ctrl(LCA, r_in, n);
            }
          }
        }
      }
    }
  }
  return LCA;
}
6184 
6185 // Is CFG node 'dominator' dominating node 'n'?
6186 bool PhaseIdealLoop::is_dominator(Node* dominator, Node* n) {
6187   if (dominator == n) {
6188     return true;
6189   }
6190   assert(dominator->is_CFG() && n->is_CFG(), "must have CFG nodes");
6191   uint dd = dom_depth(dominator);
6192   while (dom_depth(n) >= dd) {
6193     if (n == dominator) {
6194       return true;
6195     }
6196     n = idom(n);
6197   }
6198   return false;
6199 }
6200 
6201 // Is CFG node 'dominator' strictly dominating node 'n'?
6202 bool PhaseIdealLoop::is_strict_dominator(Node* dominator, Node* n) {
6203   return dominator != n && is_dominator(dominator, n);
6204 }
6205 
6206 //------------------------------dom_lca_for_get_late_ctrl_internal-------------
6207 // Pair-wise LCA with tags.
6208 // Tag each index with the node 'tag' currently being processed
6209 // before advancing up the dominator chain using idom().
6210 // Later calls that find a match to 'tag' know that this path has already
6211 // been considered in the current LCA (which is input 'n1' by convention).
6212 // Since get_late_ctrl() is only called once for each node, the tag array
6213 // does not need to be cleared between calls to get_late_ctrl().
6214 // Algorithm trades a larger constant factor for better asymptotic behavior
6215 //
6216 Node *PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal(Node *n1, Node *n2, Node *tag_node) {
6217   uint d1 = dom_depth(n1);
6218   uint d2 = dom_depth(n2);
6219   jlong tag = tag_node->_idx | (((jlong)_dom_lca_tags_round) << 32);
6220 
6221   do {
6222     if (d1 > d2) {
6223       // current lca is deeper than n2
6224       _dom_lca_tags.at_put_grow(n1->_idx, tag);
6225       n1 =      idom(n1);
6226       d1 = dom_depth(n1);
6227     } else if (d1 < d2) {
6228       // n2 is deeper than current lca
6229       jlong memo = _dom_lca_tags.at_grow(n2->_idx, 0);
6230       if (memo == tag) {
6231         return n1;    // Return the current LCA
6232       }
6233       _dom_lca_tags.at_put_grow(n2->_idx, tag);
6234       n2 =      idom(n2);
6235       d2 = dom_depth(n2);
6236     } else {
6237       // Here d1 == d2.  Due to edits of the dominator-tree, sections
6238       // of the tree might have the same depth.  These sections have
6239       // to be searched more carefully.
6240 
6241       // Scan up all the n1's with equal depth, looking for n2.
6242       _dom_lca_tags.at_put_grow(n1->_idx, tag);
6243       Node *t1 = idom(n1);
6244       while (dom_depth(t1) == d1) {
6245         if (t1 == n2)  return n2;
6246         _dom_lca_tags.at_put_grow(t1->_idx, tag);
6247         t1 = idom(t1);
6248       }
6249       // Scan up all the n2's with equal depth, looking for n1.
6250       _dom_lca_tags.at_put_grow(n2->_idx, tag);
6251       Node *t2 = idom(n2);
6252       while (dom_depth(t2) == d2) {
6253         if (t2 == n1)  return n1;
6254         _dom_lca_tags.at_put_grow(t2->_idx, tag);
6255         t2 = idom(t2);
6256       }
6257       // Move up to a new dominator-depth value as well as up the dom-tree.
6258       n1 = t1;
6259       n2 = t2;
6260       d1 = dom_depth(n1);
6261       d2 = dom_depth(n2);
6262     }
6263   } while (n1 != n2);
6264   return n1;
6265 }
6266 
6267 //------------------------------init_dom_lca_tags------------------------------
6268 // Tag could be a node's integer index, 32bits instead of 64bits in some cases
6269 // Intended use does not involve any growth for the array, so it could
6270 // be of fixed size.
6271 void PhaseIdealLoop::init_dom_lca_tags() {
6272   uint limit = C->unique() + 1;
6273   _dom_lca_tags.at_grow(limit, 0);
6274   _dom_lca_tags_round = 0;
6275 #ifdef ASSERT
6276   for (uint i = 0; i < limit; ++i) {
6277     assert(_dom_lca_tags.at(i) == 0, "Must be distinct from each node pointer");
6278   }
6279 #endif // ASSERT
6280 }
6281 
6282 //------------------------------build_loop_late--------------------------------
6283 // Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
6284 // Second pass finds latest legal placement, and ideal loop placement.
6285 void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
6286   while (worklist.size() != 0) {
6287     Node *n = worklist.pop();
6288     // Only visit once
6289     if (visited.test_set(n->_idx)) continue;
6290     uint cnt = n->outcnt();
6291     uint   i = 0;
6292     while (true) {
6293       assert(_loop_or_ctrl[n->_idx], "no dead nodes");
6294       // Visit all children
6295       if (i < cnt) {
6296         Node* use = n->raw_out(i);
6297         ++i;
6298         // Check for dead uses.  Aggressively prune such junk.  It might be
6299         // dead in the global sense, but still have local uses so I cannot
6300         // easily call 'remove_dead_node'.
6301         if (_loop_or_ctrl[use->_idx] != nullptr || use->is_top()) { // Not dead?
6302           // Due to cycles, we might not hit the same fixed point in the verify
6303           // pass as we do in the regular pass.  Instead, visit such phis as
6304           // simple uses of the loop head.
6305           if( use->in(0) && (use->is_CFG() || use->is_Phi()) ) {
6306             if( !visited.test(use->_idx) )
6307               worklist.push(use);
6308           } else if( !visited.test_set(use->_idx) ) {
6309             nstack.push(n, i); // Save parent and next use's index.
6310             n   = use;         // Process all children of current use.
6311             cnt = use->outcnt();
6312             i   = 0;
6313           }
6314         } else {
6315           // Do not visit around the backedge of loops via data edges.
6316           // push dead code onto a worklist
6317           _deadlist.push(use);
6318         }
6319       } else {
6320         // All of n's children have been processed, complete post-processing.
6321         build_loop_late_post(n);
6322         if (C->failing()) { return; }
6323         if (nstack.is_empty()) {
6324           // Finished all nodes on stack.
6325           // Process next node on the worklist.
6326           break;
6327         }
6328         // Get saved parent node and next use's index. Visit the rest of uses.
6329         n   = nstack.node();
6330         cnt = n->outcnt();
6331         i   = nstack.index();
6332         nstack.pop();
6333       }
6334     }
6335   }
6336 }
6337 
6338 // Verify that no data node is scheduled in the outer loop of a strip
6339 // mined loop.
6340 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
6341 #ifdef ASSERT
6342   if (get_loop(least)->_nest == 0) {
6343     return;
6344   }
6345   IdealLoopTree* loop = get_loop(least);
6346   Node* head = loop->_head;
6347   if (head->is_OuterStripMinedLoop() &&
6348       // Verification can't be applied to fully built strip mined loops
6349       head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) {
6350     Node* sfpt = head->as_Loop()->outer_safepoint();
6351     ResourceMark rm;
6352     Unique_Node_List wq;
6353     wq.push(sfpt);
6354     for (uint i = 0; i < wq.size(); i++) {
6355       Node *m = wq.at(i);
6356       for (uint i = 1; i < m->req(); i++) {
6357         Node* nn = m->in(i);
6358         if (nn == n) {
6359           return;
6360         }
6361         if (nn != nullptr && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
6362           wq.push(nn);
6363         }
6364       }
6365     }
6366     ShouldNotReachHere();
6367   }
6368 #endif
6369 }
6370 
6371 
6372 //------------------------------build_loop_late_post---------------------------
6373 // Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
6374 // Second pass finds latest legal placement, and ideal loop placement.
6375 void PhaseIdealLoop::build_loop_late_post(Node *n) {
6376   build_loop_late_post_work(n, true);
6377 }
6378 
6379 // Class to visit all predicates in a predicate chain to find out which are dominated by a given node. Keeps track of
6380 // the entry to the earliest predicate that is still dominated by the given dominator. This class is used when trying to
6381 // legally skip all predicates when figuring out the latest placement such that a node does not interfere with Loop
6382 // Predication or creating a Loop Limit Check Predicate later.
6383 class DominatedPredicates : public UnifiedPredicateVisitor {
6384   Node* const _dominator;
6385   Node* _earliest_dominated_predicate_entry;
6386   bool _should_continue;
6387   PhaseIdealLoop* const _phase;
6388 
6389  public:
6390   DominatedPredicates(Node* dominator, Node* start_node, PhaseIdealLoop* phase)
6391       : _dominator(dominator),
6392         _earliest_dominated_predicate_entry(start_node),
6393         _should_continue(true),
6394         _phase(phase) {}
6395   NONCOPYABLE(DominatedPredicates);
6396 
6397   bool should_continue() const override {
6398     return _should_continue;
6399   }
6400 
6401   // Returns the entry to the earliest predicate that is still dominated by the given dominator (all could be dominated).
6402   Node* earliest_dominated_predicate_entry() const {
6403     return _earliest_dominated_predicate_entry;
6404   }
6405 
6406   void visit_predicate(const Predicate& predicate) override {
6407     Node* entry = predicate.entry();
6408     if (_phase->is_strict_dominator(entry, _dominator)) {
6409       _should_continue = false;
6410     } else {
6411       _earliest_dominated_predicate_entry = entry;
6412     }
6413   }
6414 };
6415 
6416 void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
6417 
6418   if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
6419     _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
6420   }
6421 
6422 #ifdef ASSERT
6423   if (_verify_only && !n->is_CFG()) {
6424     // Check def-use domination.
6425     // We would like to expose this check in product but it appears to be expensive.
6426     compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
6427   }
6428 #endif
6429 
6430   // CFG and pinned nodes already handled
6431   if( n->in(0) ) {
6432     if( n->in(0)->is_top() ) return; // Dead?
6433 
6434     // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads
6435     // _must_ be pinned (they have to observe their control edge of course).
6436     // Unlike Stores (which modify an unallocable resource, the memory
6437     // state), Mods/Loads can float around.  So free them up.
6438     switch( n->Opcode() ) {
6439     case Op_DivI:
6440     case Op_DivF:
6441     case Op_DivD:
6442     case Op_ModI:
6443     case Op_LoadB:              // Same with Loads; they can sink
6444     case Op_LoadUB:             // during loop optimizations.
6445     case Op_LoadUS:
6446     case Op_LoadD:
6447     case Op_LoadF:
6448     case Op_LoadI:
6449     case Op_LoadKlass:
6450     case Op_LoadNKlass:
6451     case Op_LoadL:
6452     case Op_LoadS:
6453     case Op_LoadP:
6454     case Op_LoadN:
6455     case Op_LoadRange:
6456     case Op_LoadD_unaligned:
6457     case Op_LoadL_unaligned:
6458     case Op_StrComp:            // Does a bunch of load-like effects
6459     case Op_StrEquals:
6460     case Op_StrIndexOf:
6461     case Op_StrIndexOfChar:
6462     case Op_AryEq:
6463     case Op_VectorizedHashCode:
6464     case Op_CountPositives:
6465       pinned = false;
6466     }
6467     if (n->is_CMove() || n->is_ConstraintCast()) {
6468       pinned = false;
6469     }
6470     if( pinned ) {
6471       IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
6472       if( !chosen_loop->_child )       // Inner loop?
6473         chosen_loop->_body.push(n); // Collect inner loops
6474       return;
6475     }
6476   } else {                      // No slot zero
6477     if( n->is_CFG() ) {         // CFG with no slot 0 is dead
6478       _loop_or_ctrl.map(n->_idx,nullptr); // No block setting, it's globally dead
6479       return;
6480     }
6481     assert(!n->is_CFG() || n->outcnt() == 0, "");
6482   }
6483 
6484   // Do I have a "safe range" I can select over?
6485   Node *early = get_ctrl(n);// Early location already computed
6486 
6487   // Compute latest point this Node can go
6488   Node *LCA = get_late_ctrl( n, early );
6489   // LCA is null due to uses being dead
6490   if( LCA == nullptr ) {
6491 #ifdef ASSERT
6492     for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) {
6493       assert(_loop_or_ctrl[n->out(i1)->_idx] == nullptr, "all uses must also be dead");
6494     }
6495 #endif
6496     _loop_or_ctrl.map(n->_idx, nullptr); // This node is useless
6497     _deadlist.push(n);
6498     return;
6499   }
6500   assert(LCA != nullptr && !LCA->is_top(), "no dead nodes");
6501 
6502   Node *legal = LCA;            // Walk 'legal' up the IDOM chain
6503   Node *least = legal;          // Best legal position so far
6504   while( early != legal ) {     // While not at earliest legal
6505     if (legal->is_Start() && !early->is_Root()) {
6506 #ifdef ASSERT
6507       // Bad graph. Print idom path and fail.
6508       dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA);
6509       assert(false, "Bad graph detected in build_loop_late");
6510 #endif
6511       C->record_method_not_compilable("Bad graph detected in build_loop_late");
6512       return;
6513     }
6514     // Find least loop nesting depth
6515     legal = idom(legal);        // Bump up the IDOM tree
6516     // Check for lower nesting depth
6517     if( get_loop(legal)->_nest < get_loop(least)->_nest )
6518       least = legal;
6519   }
6520   assert(early == legal || legal != C->root(), "bad dominance of inputs");
6521 
6522   if (least != early) {
6523     // Move the node above predicates as far up as possible so a
6524     // following pass of Loop Predication doesn't hoist a predicate
6525     // that depends on it above that node.
6526     const PredicateIterator predicate_iterator(least);
6527     DominatedPredicates dominated_predicates(early, least, this);
6528     predicate_iterator.for_each(dominated_predicates);
6529     least = dominated_predicates.earliest_dominated_predicate_entry();
6530   }
6531   // Try not to place code on a loop entry projection
6532   // which can inhibit range check elimination.
6533   if (least != early && !BarrierSet::barrier_set()->barrier_set_c2()->is_gc_specific_loop_opts_pass(_mode)) {
6534     Node* ctrl_out = least->unique_ctrl_out_or_null();
6535     if (ctrl_out != nullptr && ctrl_out->is_Loop() &&
6536         least == ctrl_out->in(LoopNode::EntryControl) &&
6537         (ctrl_out->is_CountedLoop() || ctrl_out->is_OuterStripMinedLoop())) {
6538       Node* least_dom = idom(least);
6539       if (get_loop(least_dom)->is_member(get_loop(least))) {
6540         least = least_dom;
6541       }
6542     }
6543   }
6544   // Don't extend live ranges of raw oops
6545   if (least != early && n->is_ConstraintCast() && n->in(1)->bottom_type()->isa_rawptr() &&
6546       !n->bottom_type()->isa_rawptr()) {
6547     least = early;
6548   }
6549 
6550 #ifdef ASSERT
6551   // Broken part of VerifyLoopOptimizations (F)
6552   // Reason:
6553   //   _verify_me->get_ctrl_no_update(n) seems to return wrong result
6554   /*
6555   // If verifying, verify that 'verify_me' has a legal location
6556   // and choose it as our location.
6557   if( _verify_me ) {
6558     Node *v_ctrl = _verify_me->get_ctrl_no_update(n);
6559     Node *legal = LCA;
6560     while( early != legal ) {   // While not at earliest legal
6561       if( legal == v_ctrl ) break;  // Check for prior good location
6562       legal = idom(legal)      ;// Bump up the IDOM tree
6563     }
6564     // Check for prior good location
6565     if( legal == v_ctrl ) least = legal; // Keep prior if found
6566   }
6567   */
6568 #endif
6569 
6570   // Assign discovered "here or above" point
6571   least = find_non_split_ctrl(least);
6572   verify_strip_mined_scheduling(n, least);
6573   set_ctrl(n, least);
6574 
6575   // Collect inner loop bodies
6576   IdealLoopTree *chosen_loop = get_loop(least);
6577   if( !chosen_loop->_child )   // Inner loop?
6578     chosen_loop->_body.push(n);// Collect inner loops
6579 
6580   if (!_verify_only && n->Opcode() == Op_OpaqueZeroTripGuard) {
6581     _zero_trip_guard_opaque_nodes.push(n);
6582   }
6583 
6584 }
6585 
6586 #ifdef ASSERT
6587 void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) {
6588   tty->print_cr("%s", msg);
6589   tty->print("n: "); n->dump();
6590   tty->print("early(n): "); early->dump();
6591   if (n->in(0) != nullptr  && !n->in(0)->is_top() &&
6592       n->in(0) != early && !n->in(0)->is_Root()) {
6593     tty->print("n->in(0): "); n->in(0)->dump();
6594   }
6595   for (uint i = 1; i < n->req(); i++) {
6596     Node* in1 = n->in(i);
6597     if (in1 != nullptr && in1 != n && !in1->is_top()) {
6598       tty->print("n->in(%d): ", i); in1->dump();
6599       Node* in1_early = get_ctrl(in1);
6600       tty->print("early(n->in(%d)): ", i); in1_early->dump();
6601       if (in1->in(0) != nullptr     && !in1->in(0)->is_top() &&
6602           in1->in(0) != in1_early && !in1->in(0)->is_Root()) {
6603         tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump();
6604       }
6605       for (uint j = 1; j < in1->req(); j++) {
6606         Node* in2 = in1->in(j);
6607         if (in2 != nullptr && in2 != n && in2 != in1 && !in2->is_top()) {
6608           tty->print("n->in(%d)->in(%d): ", i, j); in2->dump();
6609           Node* in2_early = get_ctrl(in2);
6610           tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump();
6611           if (in2->in(0) != nullptr     && !in2->in(0)->is_top() &&
6612               in2->in(0) != in2_early && !in2->in(0)->is_Root()) {
6613             tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump();
6614           }
6615         }
6616       }
6617     }
6618   }
6619   tty->cr();
6620   tty->print("LCA(n): "); LCA->dump();
6621   for (uint i = 0; i < n->outcnt(); i++) {
6622     Node* u1 = n->raw_out(i);
6623     if (u1 == n)
6624       continue;
6625     tty->print("n->out(%d): ", i); u1->dump();
6626     if (u1->is_CFG()) {
6627       for (uint j = 0; j < u1->outcnt(); j++) {
6628         Node* u2 = u1->raw_out(j);
6629         if (u2 != u1 && u2 != n && u2->is_CFG()) {
6630           tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
6631         }
6632       }
6633     } else {
6634       Node* u1_later = get_ctrl(u1);
6635       tty->print("later(n->out(%d)): ", i); u1_later->dump();
6636       if (u1->in(0) != nullptr     && !u1->in(0)->is_top() &&
6637           u1->in(0) != u1_later && !u1->in(0)->is_Root()) {
6638         tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump();
6639       }
6640       for (uint j = 0; j < u1->outcnt(); j++) {
6641         Node* u2 = u1->raw_out(j);
6642         if (u2 == n || u2 == u1)
6643           continue;
6644         tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
6645         if (!u2->is_CFG()) {
6646           Node* u2_later = get_ctrl(u2);
6647           tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump();
6648           if (u2->in(0) != nullptr     && !u2->in(0)->is_top() &&
6649               u2->in(0) != u2_later && !u2->in(0)->is_Root()) {
6650             tty->print("n->out(%d)->in(0): ", i); u2->in(0)->dump();
6651           }
6652         }
6653       }
6654     }
6655   }
6656   dump_idoms(early, LCA);
6657   tty->cr();
6658 }
6659 
6660 // Class to compute the real LCA given an early node and a wrong LCA in a bad graph.
class RealLCA {
  const PhaseIdealLoop* _phase;
  Node* _early;
  Node* _wrong_lca;
  uint _early_index;    // Index into early's idom chain one below the divergence point (set by find_real_lca()).
  int _wrong_lca_index; // Same for the wrong LCA's chain; -1 when its whole chain matched early's.

  // Given idom chains of early and wrong LCA: Walk through idoms starting at StartNode and find the first node which
  // is different: Return the previously visited node which must be the real LCA.
  // The node lists also contain _early and _wrong_lca, respectively.
  Node* find_real_lca(Unique_Node_List& early_with_idoms, Unique_Node_List& wrong_lca_with_idoms) {
    // Both lists are ordered from the node itself towards StartNode, so walk
    // them backwards (StartNode end first) in lock-step.
    int early_index = early_with_idoms.size() - 1;
    int wrong_lca_index = wrong_lca_with_idoms.size() - 1;
    bool found_difference = false;
    do {
      if (early_with_idoms[early_index] != wrong_lca_with_idoms[wrong_lca_index]) {
        // First time early and wrong LCA idoms differ. Real LCA must be at the previous index.
        found_difference = true;
        break;
      }
      early_index--;
      wrong_lca_index--;
    } while (wrong_lca_index >= 0);
    // If the loop fell through, wrong_lca_index is -1: the entire wrong LCA
    // chain was a suffix of early's chain.

    assert(early_index >= 0, "must always find an LCA - cannot be early");
    _early_index = early_index;
    _wrong_lca_index = wrong_lca_index;
    Node* real_lca = early_with_idoms[_early_index + 1]; // Plus one to skip _early.
    assert(found_difference || real_lca == _wrong_lca, "wrong LCA dominates early and is therefore the real LCA");
    return real_lca;
  }

  // Print the idom chains of early and wrong LCA up to (and including) the
  // real LCA, then the real LCA itself.
  void dump(Node* real_lca) {
    tty->cr();
    tty->print_cr("idoms of early \"%d %s\":", _early->_idx, _early->Name());
    _phase->dump_idom(_early, _early_index + 1);

    tty->cr();
    tty->print_cr("idoms of (wrong) LCA \"%d %s\":", _wrong_lca->_idx, _wrong_lca->Name());
    _phase->dump_idom(_wrong_lca, _wrong_lca_index + 1);

    tty->cr();
    tty->print("Real LCA of early \"%d %s\" (idom[%d]) and wrong LCA \"%d %s\"",
               _early->_idx, _early->Name(), _early_index, _wrong_lca->_idx, _wrong_lca->Name());
    if (_wrong_lca_index >= 0) {
      // Only meaningful when the chains actually diverged.
      tty->print(" (idom[%d])", _wrong_lca_index);
    }
    tty->print_cr(":");
    real_lca->dump();
  }

 public:
  RealLCA(const PhaseIdealLoop* phase, Node* early, Node* wrong_lca)
      : _phase(phase), _early(early), _wrong_lca(wrong_lca), _early_index(0), _wrong_lca_index(0) {
    assert(!wrong_lca->is_Start(), "StartNode is always a common dominator");
  }

  // Build both idom chains (capped at 10000 entries each), compute the real
  // LCA and dump everything.
  void compute_and_dump() {
    ResourceMark rm;
    Unique_Node_List early_with_idoms;
    Unique_Node_List wrong_lca_with_idoms;
    early_with_idoms.push(_early);
    wrong_lca_with_idoms.push(_wrong_lca);
    _phase->get_idoms(_early, 10000, early_with_idoms);
    _phase->get_idoms(_wrong_lca, 10000, wrong_lca_with_idoms);
    Node* real_lca = find_real_lca(early_with_idoms, wrong_lca_with_idoms);
    dump(real_lca);
  }
};
6730 
6731 // Dump the idom chain of early, of the wrong LCA and dump the real LCA of early and wrong LCA.
6732 void PhaseIdealLoop::dump_idoms(Node* early, Node* wrong_lca) {
6733   assert(!is_dominator(early, wrong_lca), "sanity check that early does not dominate wrong lca");
6734   assert(!has_ctrl(early) && !has_ctrl(wrong_lca), "sanity check, no data nodes");
6735 
6736   RealLCA real_lca(this, early, wrong_lca);
6737   real_lca.compute_and_dump();
6738 }
6739 #endif // ASSERT
6740 
6741 #ifndef PRODUCT
6742 //------------------------------dump-------------------------------------------
6743 void PhaseIdealLoop::dump() const {
6744   ResourceMark rm;
6745   Node_Stack stack(C->live_nodes() >> 2);
6746   Node_List rpo_list;
6747   VectorSet visited;
6748   visited.set(C->top()->_idx);
6749   rpo(C->root(), stack, visited, rpo_list);
6750   // Dump root loop indexed by last element in PO order
6751   dump(_ltree_root, rpo_list.size(), rpo_list);
6752 }
6753 
// Print one loop of the loop tree: its head, then every CFG node of this loop
// in RPO (scanning rpo_list backwards from index 'idx'), recursing into
// directly nested loops. For each CFG node, also print the data nodes it
// controls, and cross-check the cached idom/ctrl info against recomputed
// values.
void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List &rpo_list) const {
  loop->dump_head();

  // Now scan for CFG nodes in the same loop
  for (uint j = idx; j > 0; j--) {
    Node* n = rpo_list[j-1];
    if (!_loop_or_ctrl[n->_idx])      // Skip dead nodes
      continue;

    if (get_loop(n) != loop) { // Wrong loop nest
      if (get_loop(n)->_head == n &&    // Found nested loop?
          get_loop(n)->_parent == loop)
        // Rescan the whole list for the nested loop; nodes belonging to
        // other loops are skipped by the check above.
        dump(get_loop(n), rpo_list.size(), rpo_list);     // Print it nested-ly
      continue;
    }

    // Dump controlling node
    tty->sp(2 * loop->_nest);
    tty->print("C");
    if (n == C->root()) {
      n->dump();
    } else {
      Node* cached_idom   = idom_no_update(n);
      Node* computed_idom = n->in(0);
      if (n->is_Region()) {
        computed_idom = compute_idom(n);
        // computed_idom() will return n->in(0) when idom(n) is an IfNode (or
        // any MultiBranch ctrl node), so apply a similar transform to
        // the cached idom returned from idom_no_update.
        cached_idom = find_non_split_ctrl(cached_idom);
      }
      tty->print(" ID:%d", computed_idom->_idx);
      n->dump();
      if (cached_idom != computed_idom) {
        tty->print_cr("*** BROKEN IDOM!  Computed as: %d, cached as: %d",
                      computed_idom->_idx, cached_idom->_idx);
      }
    }
    // Dump nodes it controls
    for (uint k = 0; k < _loop_or_ctrl.max(); k++) {
      // (k < C->unique() && get_ctrl(find(k)) == n)
      // Data-node entries in _loop_or_ctrl carry the control node with the
      // low bit set ((Node*)(ctrl+1)); compare against the tagged form of n
      // directly. The BROKEN CTRL check below verifies this against the
      // has_ctrl/get_ctrl_no_update accessors.
      if (k < C->unique() && _loop_or_ctrl[k] == (Node*)((intptr_t)n + 1)) {
        Node* m = C->root()->find(k);
        if (m && m->outcnt() > 0) {
          if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
            tty->print_cr("*** BROKEN CTRL ACCESSOR!  _loop_or_ctrl[k] is %p, ctrl is %p",
                          _loop_or_ctrl[k], has_ctrl(m) ? get_ctrl_no_update(m) : nullptr);
          }
          tty->sp(2 * loop->_nest + 1);
          m->dump();
        }
      }
    }
  }
}
6809 
6810 void PhaseIdealLoop::dump_idom(Node* n, const uint count) const {
6811   if (has_ctrl(n)) {
6812     tty->print_cr("No idom for data nodes");
6813   } else {
6814     ResourceMark rm;
6815     Unique_Node_List idoms;
6816     get_idoms(n, count, idoms);
6817     dump_idoms_in_reverse(n, idoms);
6818   }
6819 }
6820 
6821 void PhaseIdealLoop::get_idoms(Node* n, const uint count, Unique_Node_List& idoms) const {
6822   Node* next = n;
6823   for (uint i = 0; !next->is_Start() && i < count; i++) {
6824     next = idom(next);
6825     assert(!idoms.member(next), "duplicated idom is not possible");
6826     idoms.push(next);
6827   }
6828 }
6829 
6830 void PhaseIdealLoop::dump_idoms_in_reverse(const Node* n, const Node_List& idom_list) const {
6831   Node* next;
6832   uint padding = 3;
6833   uint node_index_padding_width = static_cast<int>(log10(static_cast<double>(C->unique()))) + 1;
6834   for (int i = idom_list.size() - 1; i >= 0; i--) {
6835     if (i == 9 || i == 99) {
6836       padding++;
6837     }
6838     next = idom_list[i];
6839     tty->print_cr("idom[%d]:%*c%*d  %s", i, padding, ' ', node_index_padding_width, next->_idx, next->Name());
6840   }
6841   tty->print_cr("n:      %*c%*d  %s", padding, ' ', node_index_padding_width, n->_idx, n->Name());
6842 }
6843 #endif // NOT PRODUCT
6844 
6845 // Collect a R-P-O for the whole CFG.
6846 // Result list is in post-order (scan backwards for RPO)
6847 void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const {
6848   stk.push(start, 0);
6849   visited.set(start->_idx);
6850 
6851   while (stk.is_nonempty()) {
6852     Node* m   = stk.node();
6853     uint  idx = stk.index();
6854     if (idx < m->outcnt()) {
6855       stk.set_index(idx + 1);
6856       Node* n = m->raw_out(idx);
6857       if (n->is_CFG() && !visited.test_set(n->_idx)) {
6858         stk.push(n, 0);
6859       }
6860     } else {
6861       rpo_list.push(m);
6862       stk.pop();
6863     }
6864   }
6865 }
6866 
6867 ConINode* PhaseIdealLoop::intcon(jint i) {
6868   ConINode* node = _igvn.intcon(i);
6869   set_root_as_ctrl(node);
6870   return node;
6871 }
6872 
6873 ConLNode* PhaseIdealLoop::longcon(jlong i) {
6874   ConLNode* node = _igvn.longcon(i);
6875   set_root_as_ctrl(node);
6876   return node;
6877 }
6878 
6879 ConNode* PhaseIdealLoop::makecon(const Type* t) {
6880   ConNode* node = _igvn.makecon(t);
6881   set_root_as_ctrl(node);
6882   return node;
6883 }
6884 
6885 ConNode* PhaseIdealLoop::integercon(jlong l, BasicType bt) {
6886   ConNode* node = _igvn.integercon(l, bt);
6887   set_root_as_ctrl(node);
6888   return node;
6889 }
6890 
6891 ConNode* PhaseIdealLoop::zerocon(BasicType bt) {
6892   ConNode* node = _igvn.zerocon(bt);
6893   set_root_as_ctrl(node);
6894   return node;
6895 }
6896 
6897 
6898 //=============================================================================
6899 //------------------------------LoopTreeIterator-------------------------------
6900 
6901 // Advance to next loop tree using a preorder, left-to-right traversal.
6902 void LoopTreeIterator::next() {
6903   assert(!done(), "must not be done.");
6904   if (_curnt->_child != nullptr) {
6905     _curnt = _curnt->_child;
6906   } else if (_curnt->_next != nullptr) {
6907     _curnt = _curnt->_next;
6908   } else {
6909     while (_curnt != _root && _curnt->_next == nullptr) {
6910       _curnt = _curnt->_parent;
6911     }
6912     if (_curnt == _root) {
6913       _curnt = nullptr;
6914       assert(done(), "must be done.");
6915     } else {
6916       assert(_curnt->_next != nullptr, "must be more to do");
6917       _curnt = _curnt->_next;
6918     }
6919   }
6920 }