1 /*
   2  * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "libadt/vectset.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/castnode.hpp"
  37 #include "opto/connode.hpp"
  38 #include "opto/convertnode.hpp"
  39 #include "opto/divnode.hpp"
  40 #include "opto/idealGraphPrinter.hpp"
  41 #include "opto/loopnode.hpp"
  42 #include "opto/movenode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/opaquenode.hpp"
  45 #include "opto/predicates.hpp"
  46 #include "opto/rootnode.hpp"
  47 #include "opto/runtime.hpp"
  48 #include "opto/superword.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "utilities/checkedCast.hpp"
  51 #include "utilities/powerOfTwo.hpp"
  52 
  53 //=============================================================================
  54 //--------------------------is_cloop_ind_var-----------------------------------
  55 // Determine if a node is a counted loop induction variable.
  56 // NOTE: The method is declared in "node.hpp".
  57 bool Node::is_cloop_ind_var() const {
  58   return (is_Phi() &&
  59           as_Phi()->region()->is_CountedLoop() &&
  60           as_Phi()->region()->as_CountedLoop()->phi() == this);
  61 }
  62 
  63 //=============================================================================
  64 //------------------------------dump_spec--------------------------------------
  65 // Dump special per-node info
  66 #ifndef PRODUCT
  67 void LoopNode::dump_spec(outputStream *st) const {
  68   RegionNode::dump_spec(st);
  69   if (is_inner_loop()) st->print( "inner " );
  70   if (is_partial_peel_loop()) st->print( "partial_peel " );
  71   if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
  72 }
  73 #endif
  74 
  75 //------------------------------is_valid_counted_loop-------------------------
  76 bool LoopNode::is_valid_counted_loop(BasicType bt) const {
  77   if (is_BaseCountedLoop() && as_BaseCountedLoop()->bt() == bt) {
  78     BaseCountedLoopNode*    l  = as_BaseCountedLoop();
  79     BaseCountedLoopEndNode* le = l->loopexit_or_null();
  80     if (le != nullptr &&
  81         le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
  82       Node* phi  = l->phi();
  83       Node* exit = le->proj_out_or_null(0 /* false */);
  84       if (exit != nullptr && exit->Opcode() == Op_IfFalse &&
  85           phi != nullptr && phi->is_Phi() &&
  86           phi->in(LoopNode::LoopBackControl) == l->incr() &&
  87           le->loopnode() == l && le->stride_is_con()) {
  88         return true;
  89       }
  90     }
  91   }
  92   return false;
  93 }
  94 
  95 //------------------------------get_early_ctrl---------------------------------
  96 // Compute earliest legal control
  97 Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
  98   assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
  99   uint i;
 100   Node *early;
 101   if (n->in(0) && !n->is_expensive()) {
 102     early = n->in(0);
 103     if (!early->is_CFG()) // Might be a non-CFG multi-def
 104       early = get_ctrl(early);        // So treat input as a straight data input
 105     i = 1;
 106   } else {
 107     early = get_ctrl(n->in(1));
 108     i = 2;
 109   }
 110   uint e_d = dom_depth(early);
 111   assert( early, "" );
 112   for (; i < n->req(); i++) {
 113     Node *cin = get_ctrl(n->in(i));
 114     assert( cin, "" );
 115     // Keep deepest dominator depth
 116     uint c_d = dom_depth(cin);
 117     if (c_d > e_d) {           // Deeper guy?
 118       early = cin;              // Keep deepest found so far
 119       e_d = c_d;
 120     } else if (c_d == e_d &&    // Same depth?
 121                early != cin) { // If not equal, must use slower algorithm
 122       // If same depth but not equal, one _must_ dominate the other
 123       // and we want the deeper (i.e., dominated) guy.
 124       Node *n1 = early;
 125       Node *n2 = cin;
 126       while (1) {
 127         n1 = idom(n1);          // Walk up until break cycle
 128         n2 = idom(n2);
 129         if (n1 == cin ||        // Walked early up to cin
 130             dom_depth(n2) < c_d)
 131           break;                // early is deeper; keep him
 132         if (n2 == early ||      // Walked cin up to early
 133             dom_depth(n1) < c_d) {
 134           early = cin;          // cin is deeper; keep him
 135           break;
 136         }
 137       }
 138       e_d = dom_depth(early);   // Reset depth register cache
 139     }
 140   }
 141 
 142   // Return earliest legal location
 143   assert(early == find_non_split_ctrl(early), "unexpected early control");
 144 
 145   if (n->is_expensive() && !_verify_only && !_verify_me) {
 146     assert(n->in(0), "should have control input");
 147     early = get_early_ctrl_for_expensive(n, early);
 148   }
 149 
 150   return early;
 151 }
 152 
 153 //------------------------------get_early_ctrl_for_expensive---------------------------------
 154 // Move node up the dominator tree as high as legal while still beneficial
 155 Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
 156   assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
 157   assert(OptimizeExpensiveOps, "optimization off?");
 158 
 159   Node* ctl = n->in(0);
 160   assert(ctl->is_CFG(), "expensive input 0 must be cfg");
 161   uint min_dom_depth = dom_depth(earliest);
 162 #ifdef ASSERT
 163   if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
 164     dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
 165     assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
 166   }
 167 #endif
 168   if (dom_depth(ctl) < min_dom_depth) {
 169     return earliest;
 170   }
 171 
 172   while (1) {
 173     Node *next = ctl;
 174     // Moving the node out of a loop on the projection of a If
 175     // confuses loop predication. So once we hit a Loop in a If branch
 176     // that doesn't branch to an UNC, we stop. The code that process
 177     // expensive nodes will notice the loop and skip over it to try to
 178     // move the node further up.
 179     if (ctl->is_CountedLoop() && ctl->in(1) != nullptr && ctl->in(1)->in(0) != nullptr && ctl->in(1)->in(0)->is_If()) {
 180       if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern()) {
 181         break;
 182       }
 183       next = idom(ctl->in(1)->in(0));
 184     } else if (ctl->is_Proj()) {
 185       // We only move it up along a projection if the projection is
 186       // the single control projection for its parent: same code path,
 187       // if it's a If with UNC or fallthrough of a call.
 188       Node* parent_ctl = ctl->in(0);
 189       if (parent_ctl == nullptr) {
 190         break;
 191       } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != nullptr) {
 192         next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
 193       } else if (parent_ctl->is_If()) {
 194         if (!ctl->as_Proj()->is_uncommon_trap_if_pattern()) {
 195           break;
 196         }
 197         assert(idom(ctl) == parent_ctl, "strange");
 198         next = idom(parent_ctl);
 199       } else if (ctl->is_CatchProj()) {
 200         if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
 201           break;
 202         }
 203         assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
 204         next = parent_ctl->in(0)->in(0)->in(0);
 205       } else {
 206         // Check if parent control has a single projection (this
 207         // control is the only possible successor of the parent
 208         // control). If so, we can try to move the node above the
 209         // parent control.
 210         int nb_ctl_proj = 0;
 211         for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
 212           Node *p = parent_ctl->fast_out(i);
 213           if (p->is_Proj() && p->is_CFG()) {
 214             nb_ctl_proj++;
 215             if (nb_ctl_proj > 1) {
 216               break;
 217             }
 218           }
 219         }
 220 
 221         if (nb_ctl_proj > 1) {
 222           break;
 223         }
 224         assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() ||
 225                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node");
 226         assert(idom(ctl) == parent_ctl, "strange");
 227         next = idom(parent_ctl);
 228       }
 229     } else {
 230       next = idom(ctl);
 231     }
 232     if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
 233       break;
 234     }
 235     ctl = next;
 236   }
 237 
 238   if (ctl != n->in(0)) {
 239     _igvn.replace_input_of(n, 0, ctl);
 240     _igvn.hash_insert(n);
 241   }
 242 
 243   return ctl;
 244 }
 245 
 246 
 247 //------------------------------set_early_ctrl---------------------------------
 248 // Set earliest legal control
 249 void PhaseIdealLoop::set_early_ctrl(Node* n, bool update_body) {
 250   Node *early = get_early_ctrl(n);
 251 
 252   // Record earliest legal location
 253   set_ctrl(n, early);
 254   IdealLoopTree *loop = get_loop(early);
 255   if (update_body && loop->_child == nullptr) {
 256     loop->_body.push(n);
 257   }
 258 }
 259 
 260 //------------------------------set_subtree_ctrl-------------------------------
 261 // set missing _ctrl entries on new nodes
 262 void PhaseIdealLoop::set_subtree_ctrl(Node* n, bool update_body) {
 263   // Already set?  Get out.
 264   if (_loop_or_ctrl[n->_idx]) return;
 265   // Recursively set _loop_or_ctrl array to indicate where the Node goes
 266   uint i;
 267   for (i = 0; i < n->req(); ++i) {
 268     Node *m = n->in(i);
 269     if (m && m != C->root()) {
 270       set_subtree_ctrl(m, update_body);
 271     }
 272   }
 273 
 274   // Fixup self
 275   set_early_ctrl(n, update_body);
 276 }
 277 
 278 IdealLoopTree* PhaseIdealLoop::insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_l, Node* outer_ift) {
 279   IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift);
 280   IdealLoopTree* parent = loop->_parent;
 281   IdealLoopTree* sibling = parent->_child;
 282   if (sibling == loop) {
 283     parent->_child = outer_ilt;
 284   } else {
 285     while (sibling->_next != loop) {
 286       sibling = sibling->_next;
 287     }
 288     sibling->_next = outer_ilt;
 289   }
 290   outer_ilt->_next = loop->_next;
 291   outer_ilt->_parent = parent;
 292   outer_ilt->_child = loop;
 293   outer_ilt->_nest = loop->_nest;
 294   loop->_parent = outer_ilt;
 295   loop->_next = nullptr;
 296   loop->_nest++;
 297   assert(loop->_nest <= SHRT_MAX, "sanity");
 298   return outer_ilt;
 299 }
 300 
 301 // Create a skeleton strip mined outer loop: a Loop head before the
 302 // inner strip mined loop, a safepoint and an exit condition guarded
 303 // by an opaque node after the inner strip mined loop with a backedge
 304 // to the loop head. The inner strip mined loop is left as it is. Only
 305 // once loop optimizations are over, do we adjust the inner loop exit
 306 // condition to limit its number of iterations, set the outer loop
 307 // exit condition and add Phis to the outer loop head. Some loop
 308 // optimizations that operate on the inner strip mined loop need to be
 309 // aware of the outer strip mined loop: loop unswitching needs to
 310 // clone the outer loop as well as the inner, unrolling needs to only
 311 // clone the inner loop etc. No optimizations need to change the outer
 312 // strip mined loop as it is only a skeleton.
 313 IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
 314                                                              IdealLoopTree* loop, float cl_prob, float le_fcnt,
 315                                                              Node*& entry_control, Node*& iffalse) {
 316   Node* outer_test = _igvn.intcon(0);
 317   set_ctrl(outer_test, C->root());
 318   Node *orig = iffalse;
 319   iffalse = iffalse->clone();
 320   _igvn.register_new_node_with_optimizer(iffalse);
 321   set_idom(iffalse, idom(orig), dom_depth(orig));
 322 
 323   IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt);
 324   Node *outer_ift = new IfTrueNode (outer_le);
 325   Node* outer_iff = orig;
 326   _igvn.replace_input_of(outer_iff, 0, outer_le);
 327 
 328   LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift);
 329   entry_control = outer_l;
 330 
 331   IdealLoopTree* outer_ilt = insert_outer_loop(loop, outer_l, outer_ift);
 332 
 333   set_loop(iffalse, outer_ilt);
 334   // When this code runs, loop bodies have not yet been populated.
 335   const bool body_populated = false;
 336   register_control(outer_le, outer_ilt, iffalse, body_populated);
 337   register_control(outer_ift, outer_ilt, outer_le, body_populated);
 338   set_idom(outer_iff, outer_le, dom_depth(outer_le));
 339   _igvn.register_new_node_with_optimizer(outer_l);
 340   set_loop(outer_l, outer_ilt);
 341   set_idom(outer_l, init_control, dom_depth(init_control)+1);
 342 
 343   return outer_ilt;
 344 }
 345 
 346 void PhaseIdealLoop::insert_loop_limit_check_predicate(ParsePredicateSuccessProj* loop_limit_check_parse_proj,
 347                                                        Node* cmp_limit, Node* bol) {
 348   assert(loop_limit_check_parse_proj->in(0)->is_ParsePredicate(), "must be parse predicate");
 349   Node* new_predicate_proj = create_new_if_for_predicate(loop_limit_check_parse_proj, nullptr,
 350                                                          Deoptimization::Reason_loop_limit_check,
 351                                                          Op_If);
 352   Node* iff = new_predicate_proj->in(0);
 353   cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
 354   bol = _igvn.register_new_node_with_optimizer(bol);
 355   set_subtree_ctrl(bol, false);
 356   _igvn.replace_input_of(iff, 1, bol);
 357 
 358 #ifndef PRODUCT
 359   // report that the loop predication has been actually performed
 360   // for this loop
 361   if (TraceLoopLimitCheck) {
 362     tty->print_cr("Counted Loop Limit Check generated:");
 363     debug_only( bol->dump(2); )
 364   }
 365 #endif
 366 }
 367 
 368 Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) {
 369   // Counted loop head must be a good RegionNode with only 3 not null
 370   // control input edges: Self, Entry, LoopBack.
 371   if (x->in(LoopNode::Self) == nullptr || x->req() != 3 || loop->_irreducible) {
 372     return nullptr;
 373   }
 374   Node *init_control = x->in(LoopNode::EntryControl);
 375   Node *back_control = x->in(LoopNode::LoopBackControl);
 376   if (init_control == nullptr || back_control == nullptr) {   // Partially dead
 377     return nullptr;
 378   }
 379   // Must also check for TOP when looking for a dead loop
 380   if (init_control->is_top() || back_control->is_top()) {
 381     return nullptr;
 382   }
 383 
 384   // Allow funny placement of Safepoint
 385   if (back_control->Opcode() == Op_SafePoint) {
 386     back_control = back_control->in(TypeFunc::Control);
 387   }
 388 
 389   // Controlling test for loop
 390   Node *iftrue = back_control;
 391   uint iftrue_op = iftrue->Opcode();
 392   if (iftrue_op != Op_IfTrue &&
 393       iftrue_op != Op_IfFalse) {
 394     // I have a weird back-control.  Probably the loop-exit test is in
 395     // the middle of the loop and I am looking at some trailing control-flow
 396     // merge point.  To fix this I would have to partially peel the loop.
 397     return nullptr; // Obscure back-control
 398   }
 399 
 400   // Get boolean guarding loop-back test
 401   Node *iff = iftrue->in(0);
 402   if (get_loop(iff) != loop || !iff->in(1)->is_Bool()) {
 403     return nullptr;
 404   }
 405   return iftrue;
 406 }
 407 
 408 Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob) {
 409   Node* iftrue = back_control;
 410   uint iftrue_op = iftrue->Opcode();
 411   Node* iff = iftrue->in(0);
 412   BoolNode* test = iff->in(1)->as_Bool();
 413   bt = test->_test._test;
 414   cl_prob = iff->as_If()->_prob;
 415   if (iftrue_op == Op_IfFalse) {
 416     bt = BoolTest(bt).negate();
 417     cl_prob = 1.0 - cl_prob;
 418   }
 419   // Get backedge compare
 420   Node* cmp = test->in(1);
 421   if (!cmp->is_Cmp()) {
 422     return nullptr;
 423   }
 424 
 425   // Find the trip-counter increment & limit.  Limit must be loop invariant.
 426   incr  = cmp->in(1);
 427   limit = cmp->in(2);
 428 
 429   // ---------
 430   // need 'loop()' test to tell if limit is loop invariant
 431   // ---------
 432 
 433   if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
 434     Node* tmp = incr;            // Then reverse order into the CmpI
 435     incr = limit;
 436     limit = tmp;
 437     bt = BoolTest(bt).commute(); // And commute the exit test
 438   }
 439   if (is_member(loop, get_ctrl(limit))) { // Limit must be loop-invariant
 440     return nullptr;
 441   }
 442   if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
 443     return nullptr;
 444   }
 445   return cmp;
 446 }
 447 
 448 Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr) {
 449   if (incr->is_Phi()) {
 450     if (incr->as_Phi()->region() != x || incr->req() != 3) {
 451       return nullptr; // Not simple trip counter expression
 452     }
 453     phi_incr = incr;
 454     incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
 455     if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
 456       return nullptr;
 457     }
 458   }
 459   return incr;
 460 }
 461 
 462 Node* PhaseIdealLoop::loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi) {
 463   assert(incr->Opcode() == Op_AddI || incr->Opcode() == Op_AddL, "caller resp.");
 464   // Get merge point
 465   xphi = incr->in(1);
 466   Node *stride = incr->in(2);
 467   if (!stride->is_Con()) {     // Oops, swap these
 468     if (!xphi->is_Con()) {     // Is the other guy a constant?
 469       return nullptr;          // Nope, unknown stride, bail out
 470     }
 471     Node *tmp = xphi;          // 'incr' is commutative, so ok to swap
 472     xphi = stride;
 473     stride = tmp;
 474   }
 475   return stride;
 476 }
 477 
 478 PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop) {
 479   if (!xphi->is_Phi()) {
 480     return nullptr; // Too much math on the trip counter
 481   }
 482   if (phi_incr != nullptr && phi_incr != xphi) {
 483     return nullptr;
 484   }
 485   PhiNode *phi = xphi->as_Phi();
 486 
 487   // Phi must be of loop header; backedge must wrap to increment
 488   if (phi->region() != x) {
 489     return nullptr;
 490   }
 491   return phi;
 492 }
 493 
 494 static int check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt) {
 495   if (final_correction > 0) {
 496     if (limit_t->lo_as_long() > (max_signed_integer(bt) - final_correction)) {
 497       return -1;
 498     }
 499     if (limit_t->hi_as_long() > (max_signed_integer(bt) - final_correction)) {
 500       return 1;
 501     }
 502   } else {
 503     if (limit_t->hi_as_long() < (min_signed_integer(bt) - final_correction)) {
 504       return -1;
 505     }
 506     if (limit_t->lo_as_long() < (min_signed_integer(bt) - final_correction)) {
 507       return 1;
 508     }
 509   }
 510   return 0;
 511 }
 512 
 513 static bool condition_stride_ok(BoolTest::mask bt, jlong stride_con) {
 514   // If the condition is inverted and we will be rolling
 515   // through MININT to MAXINT, then bail out.
 516   if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
 517       // Odd stride
 518       (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) ||
 519       // Count down loop rolls through MAXINT
 520       ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) ||
 521       // Count up loop rolls through MININT
 522       ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) {
 523     return false; // Bail out
 524   }
 525   return true;
 526 }
 527 
 528 Node* PhaseIdealLoop::loop_nest_replace_iv(Node* iv_to_replace, Node* inner_iv, Node* outer_phi, Node* inner_head,
 529                                            BasicType bt) {
 530   Node* iv_as_long;
 531   if (bt == T_LONG) {
 532     iv_as_long = new ConvI2LNode(inner_iv, TypeLong::INT);
 533     register_new_node(iv_as_long, inner_head);
 534   } else {
 535     iv_as_long = inner_iv;
 536   }
 537   Node* iv_replacement = AddNode::make(outer_phi, iv_as_long, bt);
 538   register_new_node(iv_replacement, inner_head);
 539   for (DUIterator_Last imin, i = iv_to_replace->last_outs(imin); i >= imin;) {
 540     Node* u = iv_to_replace->last_out(i);
 541 #ifdef ASSERT
 542     if (!is_dominator(inner_head, ctrl_or_self(u))) {
 543       assert(u->is_Phi(), "should be a Phi");
 544       for (uint j = 1; j < u->req(); j++) {
 545         if (u->in(j) == iv_to_replace) {
 546           assert(is_dominator(inner_head, u->in(0)->in(j)), "iv use above loop?");
 547         }
 548       }
 549     }
 550 #endif
 551     _igvn.rehash_node_delayed(u);
 552     int nb = u->replace_edge(iv_to_replace, iv_replacement, &_igvn);
 553     i -= nb;
 554   }
 555   return iv_replacement;
 556 }
 557 
 558 // Add a Parse Predicate with an uncommon trap on the failing/false path. Normal control will continue on the true path.
 559 void PhaseIdealLoop::add_parse_predicate(Deoptimization::DeoptReason reason, Node* inner_head, IdealLoopTree* loop,
 560                                          SafePointNode* sfpt) {
 561   if (!C->too_many_traps(reason)) {
 562     ParsePredicateNode* parse_predicate = new ParsePredicateNode(inner_head->in(LoopNode::EntryControl), reason, &_igvn);
 563     register_control(parse_predicate, loop, inner_head->in(LoopNode::EntryControl));
 564     Node* if_false = new IfFalseNode(parse_predicate);
 565     register_control(if_false, _ltree_root, parse_predicate);
 566     Node* if_true = new IfTrueNode(parse_predicate);
 567     register_control(if_true, loop, parse_predicate);
 568 
 569     int trap_request = Deoptimization::make_trap_request(reason, Deoptimization::Action_maybe_recompile);
 570     address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
 571     const TypePtr* no_memory_effects = nullptr;
 572     JVMState* jvms = sfpt->jvms();
 573     CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap",
 574                                            no_memory_effects);
 575 
 576     Node* mem = nullptr;
 577     Node* i_o = nullptr;
 578     if (sfpt->is_Call()) {
 579       mem = sfpt->proj_out(TypeFunc::Memory);
 580       i_o = sfpt->proj_out(TypeFunc::I_O);
 581     } else {
 582       mem = sfpt->memory();
 583       i_o = sfpt->i_o();
 584     }
 585 
 586     Node *frame = new ParmNode(C->start(), TypeFunc::FramePtr);
 587     register_new_node(frame, C->start());
 588     Node *ret = new ParmNode(C->start(), TypeFunc::ReturnAdr);
 589     register_new_node(ret, C->start());
 590 
 591     unc->init_req(TypeFunc::Control, if_false);
 592     unc->init_req(TypeFunc::I_O, i_o);
 593     unc->init_req(TypeFunc::Memory, mem); // may gc ptrs
 594     unc->init_req(TypeFunc::FramePtr, frame);
 595     unc->init_req(TypeFunc::ReturnAdr, ret);
 596     unc->init_req(TypeFunc::Parms+0, _igvn.intcon(trap_request));
 597     unc->set_cnt(PROB_UNLIKELY_MAG(4));
 598     unc->copy_call_debug_info(&_igvn, sfpt);
 599 
 600     for (uint i = TypeFunc::Parms; i < unc->req(); i++) {
 601       set_subtree_ctrl(unc->in(i), false);
 602     }
 603     register_control(unc, _ltree_root, if_false);
 604 
 605     Node* ctrl = new ProjNode(unc, TypeFunc::Control);
 606     register_control(ctrl, _ltree_root, unc);
 607     Node* halt = new HaltNode(ctrl, frame, "uncommon trap returned which should never happen" PRODUCT_ONLY(COMMA /*reachable*/false));
 608     register_control(halt, _ltree_root, ctrl);
 609     _igvn.add_input_to(C->root(), halt);
 610 
 611     _igvn.replace_input_of(inner_head, LoopNode::EntryControl, if_true);
 612     set_idom(inner_head, if_true, dom_depth(inner_head));
 613   }
 614 }
 615 
 616 // Find a safepoint node that dominates the back edge. We need a
 617 // SafePointNode so we can use its jvm state to create empty
 618 // predicates.
 619 static bool no_side_effect_since_safepoint(Compile* C, Node* x, Node* mem, MergeMemNode* mm, PhaseIdealLoop* phase) {
 620   SafePointNode* safepoint = nullptr;
 621   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
 622     Node* u = x->fast_out(i);
 623     if (u->is_memory_phi()) {
 624       Node* m = u->in(LoopNode::LoopBackControl);
 625       if (u->adr_type() == TypePtr::BOTTOM) {
 626         if (m->is_MergeMem() && mem->is_MergeMem()) {
 627           if (m != mem DEBUG_ONLY(|| true)) {
 628             // MergeMemStream can modify m, for example to adjust the length to mem.
 629             // This is unfortunate, and probably unnecessary. But as it is, we need
 630             // to add m to the igvn worklist, else we may have a modified node that
 631             // is not on the igvn worklist.
 632             phase->igvn()._worklist.push(m);
 633             for (MergeMemStream mms(m->as_MergeMem(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
 634               if (!mms.is_empty()) {
 635                 if (mms.memory() != mms.memory2()) {
 636                   return false;
 637                 }
 638 #ifdef ASSERT
 639                 if (mms.alias_idx() != Compile::AliasIdxBot) {
 640                   mm->set_memory_at(mms.alias_idx(), mem->as_MergeMem()->base_memory());
 641                 }
 642 #endif
 643               }
 644             }
 645           }
 646         } else if (mem->is_MergeMem()) {
 647           if (m != mem->as_MergeMem()->base_memory()) {
 648             return false;
 649           }
 650         } else {
 651           return false;
 652         }
 653       } else {
 654         if (mem->is_MergeMem()) {
 655           if (m != mem->as_MergeMem()->memory_at(C->get_alias_index(u->adr_type()))) {
 656             return false;
 657           }
 658 #ifdef ASSERT
 659           mm->set_memory_at(C->get_alias_index(u->adr_type()), mem->as_MergeMem()->base_memory());
 660 #endif
 661         } else {
 662           if (m != mem) {
 663             return false;
 664           }
 665         }
 666       }
 667     }
 668   }
 669   return true;
 670 }
 671 
 672 SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop) {
 673   IfNode* exit_test = back_control->in(0)->as_If();
 674   SafePointNode* safepoint = nullptr;
 675   if (exit_test->in(0)->is_SafePoint() && exit_test->in(0)->outcnt() == 1) {
 676     safepoint = exit_test->in(0)->as_SafePoint();
 677   } else {
 678     Node* c = back_control;
 679     while (c != x && c->Opcode() != Op_SafePoint) {
 680       c = idom(c);
 681     }
 682 
 683     if (c->Opcode() == Op_SafePoint) {
 684       safepoint = c->as_SafePoint();
 685     }
 686 
 687     if (safepoint == nullptr) {
 688       return nullptr;
 689     }
 690 
 691     Node* mem = safepoint->in(TypeFunc::Memory);
 692 
 693     // We can only use that safepoint if there's no side effect between the backedge and the safepoint.
 694 
 695     // mm is used for book keeping
 696     MergeMemNode* mm = nullptr;
 697 #ifdef ASSERT
 698     if (mem->is_MergeMem()) {
 699       mm = mem->clone()->as_MergeMem();
 700       _igvn._worklist.push(mm);
 701       for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
 702         if (mms.alias_idx() != Compile::AliasIdxBot && loop != get_loop(ctrl_or_self(mms.memory()))) {
 703           mm->set_memory_at(mms.alias_idx(), mem->as_MergeMem()->base_memory());
 704         }
 705       }
 706     }
 707 #endif
 708     if (!no_side_effect_since_safepoint(C, x, mem, mm, this)) {
 709       safepoint = nullptr;
 710     } else {
 711       assert(mm == nullptr|| _igvn.transform(mm) == mem->as_MergeMem()->base_memory(), "all memory state should have been processed");
 712     }
 713 #ifdef ASSERT
 714     if (mm != nullptr) {
 715       _igvn.remove_dead_node(mm);
 716     }
 717 #endif
 718   }
 719   return safepoint;
 720 }
 721 
 722 // If the loop has the shape of a counted loop but with a long
 723 // induction variable, transform the loop in a loop nest: an inner
 724 // loop that iterates for at most max int iterations with an integer
 725 // induction variable and an outer loop that iterates over the full
 726 // range of long values from the initial loop in (at most) max int
 727 // steps. That is:
 728 //
 729 // x: for (long phi = init; phi < limit; phi += stride) {
 730 //   // phi := Phi(L, init, incr)
 731 //   // incr := AddL(phi, longcon(stride))
 732 //   long incr = phi + stride;
 733 //   ... use phi and incr ...
 734 // }
 735 //
 736 // OR:
 737 //
 738 // x: for (long phi = init; (phi += stride) < limit; ) {
 739 //   // phi := Phi(L, AddL(init, stride), incr)
 740 //   // incr := AddL(phi, longcon(stride))
 741 //   long incr = phi + stride;
 742 //   ... use phi and (phi + stride) ...
 743 // }
 744 //
 745 // ==transform=>
 746 //
 747 // const ulong inner_iters_limit = INT_MAX - stride - 1;  //near 0x7FFFFFF0
 748 // assert(stride <= inner_iters_limit);  // else abort transform
 749 // assert((extralong)limit + stride <= LONG_MAX);  // else deopt
 750 // outer_head: for (long outer_phi = init;;) {
 751 //   // outer_phi := Phi(outer_head, init, AddL(outer_phi, I2L(inner_phi)))
 752 //   ulong inner_iters_max = (ulong) MAX(0, ((extralong)limit + stride - outer_phi));
 753 //   long inner_iters_actual = MIN(inner_iters_limit, inner_iters_max);
 754 //   assert(inner_iters_actual == (int)inner_iters_actual);
 755 //   int inner_phi, inner_incr;
 756 //   x: for (inner_phi = 0;; inner_phi = inner_incr) {
 757 //     // inner_phi := Phi(x, intcon(0), inner_incr)
 758 //     // inner_incr := AddI(inner_phi, intcon(stride))
 759 //     inner_incr = inner_phi + stride;
 760 //     if (inner_incr < inner_iters_actual) {
 761 //       ... use phi=>(outer_phi+inner_phi) ...
 762 //       continue;
 763 //     }
 764 //     else break;
 765 //   }
 766 //   if ((outer_phi+inner_phi) < limit)  //OR (outer_phi+inner_incr) < limit
 767 //     continue;
 768 //   else break;
 769 // }
 770 //
 771 // The same logic is used to transform an int counted loop that contains long range checks into a loop nest of 2 int
 772 // loops with long range checks transformed to int range checks in the inner loop.
 773 bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
 774   Node* x = loop->_head;
 775   // Only for inner loops
 776   if (loop->_child != nullptr || !x->is_BaseCountedLoop() || x->as_Loop()->is_loop_nest_outer_loop()) {
 777     return false;
 778   }
 779 
 780   if (x->is_CountedLoop() && !x->as_CountedLoop()->is_main_loop() && !x->as_CountedLoop()->is_normal_loop()) {
 781     return false;
 782   }
 783 
 784   BaseCountedLoopNode* head = x->as_BaseCountedLoop();
 785   BasicType bt = x->as_BaseCountedLoop()->bt();
 786 
 787   check_counted_loop_shape(loop, x, bt);
 788 
 789 #ifndef PRODUCT
 790   if (bt == T_LONG) {
 791     Atomic::inc(&_long_loop_candidates);
 792   }
 793 #endif
 794 
 795   jlong stride_con = head->stride_con();
 796   assert(stride_con != 0, "missed some peephole opt");
 797   // We can't iterate for more than max int at a time.
 798   if (stride_con != (jint)stride_con) {
 799     assert(bt == T_LONG, "only for long loops");
 800     return false;
 801   }
 802   // The number of iterations for the integer count loop: guarantee no
 803   // overflow: max_jint - stride_con max. -1 so there's no need for a
 804   // loop limit check if the exit test is <= or >=.
 805   int iters_limit = max_jint - ABS(stride_con) - 1;
 806 #ifdef ASSERT
 807   if (bt == T_LONG && StressLongCountedLoop > 0) {
 808     iters_limit = iters_limit / StressLongCountedLoop;
 809   }
 810 #endif
 811   // At least 2 iterations so counted loop construction doesn't fail
 812   if (iters_limit/ABS(stride_con) < 2) {
 813     return false;
 814   }
 815 
 816   PhiNode* phi = head->phi()->as_Phi();
 817   Node* incr = head->incr();
 818 
 819   Node* back_control = head->in(LoopNode::LoopBackControl);
 820 
 821   // data nodes on back branch not supported
 822   if (back_control->outcnt() > 1) {
 823     return false;
 824   }
 825 
 826   Node* limit = head->limit();
 827   // We'll need to use the loop limit before the inner loop is entered
 828   if (!is_dominator(get_ctrl(limit), x)) {
 829     return false;
 830   }
 831 
 832   IfNode* exit_test = head->loopexit();
 833 
 834   assert(back_control->Opcode() == Op_IfTrue, "wrong projection for back edge");
 835 
 836   Node_List range_checks;
 837   iters_limit = extract_long_range_checks(loop, stride_con, iters_limit, phi, range_checks);
 838 
 839   if (bt == T_INT) {
 840     // The only purpose of creating a loop nest is to handle long range checks. If there are none, do not proceed further.
 841     if (range_checks.size() == 0) {
 842       return false;
 843     }
 844   }
 845 
 846   // Take what we know about the number of iterations of the long counted loop into account when computing the limit of
 847   // the inner loop.
 848   const Node* init = head->init_trip();
 849   const TypeInteger* lo = _igvn.type(init)->is_integer(bt);
 850   const TypeInteger* hi = _igvn.type(limit)->is_integer(bt);
 851   if (stride_con < 0) {
 852     swap(lo, hi);
 853   }
 854   if (hi->hi_as_long() <= lo->lo_as_long()) {
 855     // not a loop after all
 856     return false;
 857   }
 858 
 859   if (range_checks.size() > 0) {
 860     // This transformation requires peeling one iteration. Also, if it has range checks and they are eliminated by Loop
 861     // Predication, then 2 Hoisted Check Predicates are added for one range check. Finally, transforming a long range
 862     // check requires extra logic to be executed before the loop is entered and for the outer loop. As a result, the
 863     // transformations can't pay off for a small number of iterations: roughly, if the loop runs for 3 iterations, it's
 864     // going to execute as many range checks once transformed with range checks eliminated (1 peeled iteration with
 865     // range checks + 2 predicates per range checks) as it would have not transformed. It also has to pay for the extra
 866     // logic on loop entry and for the outer loop.
 867     loop->compute_trip_count(this);
 868     if (head->is_CountedLoop() && head->as_CountedLoop()->has_exact_trip_count()) {
 869       if (head->as_CountedLoop()->trip_count() <= 3) {
 870         return false;
 871       }
 872     } else {
 873       loop->compute_profile_trip_cnt(this);
 874       if (!head->is_profile_trip_failed() && head->profile_trip_cnt() <= 3) {
 875         return false;
 876       }
 877     }
 878   }
 879 
 880   julong orig_iters = (julong)hi->hi_as_long() - lo->lo_as_long();
 881   iters_limit = checked_cast<int>(MIN2((julong)iters_limit, orig_iters));
 882 
 883   // We need a safepoint to insert Parse Predicates for the inner loop.
 884   SafePointNode* safepoint;
 885   if (bt == T_INT && head->as_CountedLoop()->is_strip_mined()) {
 886     // Loop is strip mined: use the safepoint of the outer strip mined loop
 887     OuterStripMinedLoopNode* outer_loop = head->as_CountedLoop()->outer_loop();
 888     assert(outer_loop != nullptr, "no outer loop");
 889     safepoint = outer_loop->outer_safepoint();
 890     outer_loop->transform_to_counted_loop(&_igvn, this);
 891     exit_test = head->loopexit();
 892   } else {
 893     safepoint = find_safepoint(back_control, x, loop);
 894   }
 895 
 896   Node* exit_branch = exit_test->proj_out(false);
 897   Node* entry_control = head->in(LoopNode::EntryControl);
 898 
 899   // Clone the control flow of the loop to build an outer loop
 900   Node* outer_back_branch = back_control->clone();
 901   Node* outer_exit_test = new IfNode(exit_test->in(0), exit_test->in(1), exit_test->_prob, exit_test->_fcnt);
 902   Node* inner_exit_branch = exit_branch->clone();
 903 
 904   LoopNode* outer_head = new LoopNode(entry_control, outer_back_branch);
 905   IdealLoopTree* outer_ilt = insert_outer_loop(loop, outer_head, outer_back_branch);
 906 
 907   const bool body_populated = true;
 908   register_control(outer_head, outer_ilt, entry_control, body_populated);
 909 
 910   _igvn.register_new_node_with_optimizer(inner_exit_branch);
 911   set_loop(inner_exit_branch, outer_ilt);
 912   set_idom(inner_exit_branch, exit_test, dom_depth(exit_branch));
 913 
 914   outer_exit_test->set_req(0, inner_exit_branch);
 915   register_control(outer_exit_test, outer_ilt, inner_exit_branch, body_populated);
 916 
 917   _igvn.replace_input_of(exit_branch, 0, outer_exit_test);
 918   set_idom(exit_branch, outer_exit_test, dom_depth(exit_branch));
 919 
 920   outer_back_branch->set_req(0, outer_exit_test);
 921   register_control(outer_back_branch, outer_ilt, outer_exit_test, body_populated);
 922 
 923   _igvn.replace_input_of(x, LoopNode::EntryControl, outer_head);
 924   set_idom(x, outer_head, dom_depth(x));
 925 
 926   // add an iv phi to the outer loop and use it to compute the inner
 927   // loop iteration limit
 928   Node* outer_phi = phi->clone();
 929   outer_phi->set_req(0, outer_head);
 930   register_new_node(outer_phi, outer_head);
 931 
 932   Node* inner_iters_max = nullptr;
 933   if (stride_con > 0) {
 934     inner_iters_max = MaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn);
 935   } else {
 936     inner_iters_max = MaxNode::max_diff_with_zero(outer_phi, limit, TypeInteger::bottom(bt), _igvn);
 937   }
 938 
 939   Node* inner_iters_limit = _igvn.integercon(iters_limit, bt);
 940   // inner_iters_max may not fit in a signed integer (iterating from
 941   // Long.MIN_VALUE to Long.MAX_VALUE for instance). Use an unsigned
 942   // min.
 943   Node* inner_iters_actual = MaxNode::unsigned_min(inner_iters_max, inner_iters_limit, TypeInteger::make(0, iters_limit, Type::WidenMin, bt), _igvn);
 944 
 945   Node* inner_iters_actual_int;
 946   if (bt == T_LONG) {
 947     inner_iters_actual_int = new ConvL2INode(inner_iters_actual);
 948     _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
 949   } else {
 950     inner_iters_actual_int = inner_iters_actual;
 951   }
 952 
 953   Node* int_zero = _igvn.intcon(0);
 954   set_ctrl(int_zero, C->root());
 955   if (stride_con < 0) {
 956     inner_iters_actual_int = new SubINode(int_zero, inner_iters_actual_int);
 957     _igvn.register_new_node_with_optimizer(inner_iters_actual_int);
 958   }
 959 
 960   // Clone the iv data nodes as an integer iv
 961   Node* int_stride = _igvn.intcon(checked_cast<int>(stride_con));
 962   set_ctrl(int_stride, C->root());
 963   Node* inner_phi = new PhiNode(x->in(0), TypeInt::INT);
 964   Node* inner_incr = new AddINode(inner_phi, int_stride);
 965   Node* inner_cmp = nullptr;
 966   inner_cmp = new CmpINode(inner_incr, inner_iters_actual_int);
 967   Node* inner_bol = new BoolNode(inner_cmp, exit_test->in(1)->as_Bool()->_test._test);
 968   inner_phi->set_req(LoopNode::EntryControl, int_zero);
 969   inner_phi->set_req(LoopNode::LoopBackControl, inner_incr);
 970   register_new_node(inner_phi, x);
 971   register_new_node(inner_incr, x);
 972   register_new_node(inner_cmp, x);
 973   register_new_node(inner_bol, x);
 974 
 975   _igvn.replace_input_of(exit_test, 1, inner_bol);
 976 
 977   // Clone inner loop phis to outer loop
 978   for (uint i = 0; i < head->outcnt(); i++) {
 979     Node* u = head->raw_out(i);
 980     if (u->is_Phi() && u != inner_phi && u != phi) {
 981       assert(u->in(0) == head, "inconsistent");
 982       Node* clone = u->clone();
 983       clone->set_req(0, outer_head);
 984       register_new_node(clone, outer_head);
 985       _igvn.replace_input_of(u, LoopNode::EntryControl, clone);
 986     }
 987   }
 988 
 989   // Replace inner loop long iv phi as inner loop int iv phi + outer
 990   // loop iv phi
 991   Node* iv_add = loop_nest_replace_iv(phi, inner_phi, outer_phi, head, bt);
 992 
 993   set_subtree_ctrl(inner_iters_actual_int, body_populated);
 994 
 995   LoopNode* inner_head = create_inner_head(loop, head, exit_test);
 996 
 997   // Summary of steps from initial loop to loop nest:
 998   //
 999   // == old IR nodes =>
1000   //
1001   // entry_control: {...}
1002   // x:
1003   // for (long phi = init;;) {
1004   //   // phi := Phi(x, init, incr)
1005   //   // incr := AddL(phi, longcon(stride))
1006   //   exit_test:
1007   //   if (phi < limit)
1008   //     back_control: fallthrough;
1009   //   else
1010   //     exit_branch: break;
1011   //   long incr = phi + stride;
1012   //   ... use phi and incr ...
1013   //   phi = incr;
1014   // }
1015   //
1016   // == new IR nodes (just before final peel) =>
1017   //
1018   // entry_control: {...}
1019   // long adjusted_limit = limit + stride;  //because phi_incr != nullptr
1020   // assert(!limit_check_required || (extralong)limit + stride == adjusted_limit);  // else deopt
1021   // ulong inner_iters_limit = max_jint - ABS(stride) - 1;  //near 0x7FFFFFF0
1022   // outer_head:
1023   // for (long outer_phi = init;;) {
1024   //   // outer_phi := phi->clone(), in(0):=outer_head, => Phi(outer_head, init, incr)
1025   //   // REPLACE phi  => AddL(outer_phi, I2L(inner_phi))
1026   //   // REPLACE incr => AddL(outer_phi, I2L(inner_incr))
1027   //   // SO THAT outer_phi := Phi(outer_head, init, AddL(outer_phi, I2L(inner_incr)))
1028   //   ulong inner_iters_max = (ulong) MAX(0, ((extralong)adjusted_limit - outer_phi) * SGN(stride));
1029   //   int inner_iters_actual_int = (int) MIN(inner_iters_limit, inner_iters_max) * SGN(stride);
1030   //   inner_head: x: //in(1) := outer_head
1031   //   int inner_phi;
1032   //   for (inner_phi = 0;;) {
1033   //     // inner_phi := Phi(x, intcon(0), inner_phi + stride)
1034   //     int inner_incr = inner_phi + stride;
1035   //     bool inner_bol = (inner_incr < inner_iters_actual_int);
1036   //     exit_test: //exit_test->in(1) := inner_bol;
1037   //     if (inner_bol) // WAS (phi < limit)
1038   //       back_control: fallthrough;
1039   //     else
1040   //       inner_exit_branch: break;  //exit_branch->clone()
1041   //     ... use phi=>(outer_phi+inner_phi) ...
1042   //     inner_phi = inner_phi + stride;  // inner_incr
1043   //   }
1044   //   outer_exit_test:  //exit_test->clone(), in(0):=inner_exit_branch
1045   //   if ((outer_phi+inner_phi) < limit)  // WAS (phi < limit)
1046   //     outer_back_branch: fallthrough;  //back_control->clone(), in(0):=outer_exit_test
1047   //   else
1048   //     exit_branch: break;  //in(0) := outer_exit_test
1049   // }
1050 
1051   if (bt == T_INT) {
1052     outer_phi = new ConvI2LNode(outer_phi);
1053     register_new_node(outer_phi, outer_head);
1054   }
1055 
1056   transform_long_range_checks(checked_cast<int>(stride_con), range_checks, outer_phi, inner_iters_actual_int,
1057                               inner_phi, iv_add, inner_head);
1058   // Peel one iteration of the loop and use the safepoint at the end
1059   // of the peeled iteration to insert Parse Predicates. If no well
1060   // positioned safepoint peel to guarantee a safepoint in the outer
1061   // loop.
1062   if (safepoint != nullptr || !loop->_has_call) {
1063     old_new.clear();
1064     do_peeling(loop, old_new);
1065   } else {
1066     C->set_major_progress();
1067   }
1068 
1069   if (safepoint != nullptr) {
1070     SafePointNode* cloned_sfpt = old_new[safepoint->_idx]->as_SafePoint();
1071 
1072     if (UseLoopPredicate) {
1073       add_parse_predicate(Deoptimization::Reason_predicate, inner_head, outer_ilt, cloned_sfpt);
1074     }
1075     if (UseProfiledLoopPredicate) {
1076       add_parse_predicate(Deoptimization::Reason_profile_predicate, inner_head, outer_ilt, cloned_sfpt);
1077     }
1078     add_parse_predicate(Deoptimization::Reason_loop_limit_check, inner_head, outer_ilt, cloned_sfpt);
1079   }
1080 
1081 #ifndef PRODUCT
1082   if (bt == T_LONG) {
1083     Atomic::inc(&_long_loop_nests);
1084   }
1085 #endif
1086 
1087   inner_head->mark_loop_nest_inner_loop();
1088   outer_head->mark_loop_nest_outer_loop();
1089 
1090   return true;
1091 }
1092 
1093 int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jlong stride_con, int iters_limit, PhiNode* phi,
1094                                               Node_List& range_checks) {
1095   const jlong min_iters = 2;
1096   jlong reduced_iters_limit = iters_limit;
1097   jlong original_iters_limit = iters_limit;
1098   for (uint i = 0; i < loop->_body.size(); i++) {
1099     Node* c = loop->_body.at(i);
1100     if (c->is_IfProj() && c->in(0)->is_RangeCheck()) {
1101       IfProjNode* if_proj = c->as_IfProj();
1102       CallStaticJavaNode* call = if_proj->is_uncommon_trap_if_pattern();
1103       if (call != nullptr) {
1104         Node* range = nullptr;
1105         Node* offset = nullptr;
1106         jlong scale = 0;
1107         if (loop->is_range_check_if(if_proj, this, T_LONG, phi, range, offset, scale) &&
1108             loop->is_invariant(range) && loop->is_invariant(offset) &&
1109             original_iters_limit / ABS(scale * stride_con) >= min_iters) {
1110           reduced_iters_limit = MIN2(reduced_iters_limit, original_iters_limit/ABS(scale));
1111           range_checks.push(c);
1112         }
1113       }
1114     }
1115   }
1116 
1117   return checked_cast<int>(reduced_iters_limit);
1118 }
1119 
1120 // One execution of the inner loop covers a sub-range of the entire iteration range of the loop: [A,Z), aka [A=init,
1121 // Z=limit). If the loop has at least one trip (which is the case here), the iteration variable i always takes A as its
1122 // first value, followed by A+S (S is the stride), next A+2S, etc. The limit is exclusive, so that the final value B of
1123 // i is never Z.  It will be B=Z-1 if S=1, or B=Z+1 if S=-1.
1124 
1125 // If |S|>1 the formula for the last value B would require a floor operation, specifically B=floor((Z-sgn(S)-A)/S)*S+A,
1126 // which is B=Z-sgn(S)U for some U in [1,|S|].  So when S>0, i ranges as i:[A,Z) or i:[A,B=Z-U], or else (in reverse)
1127 // as i:(Z,A] or i:[B=Z+U,A].  It will become important to reason about this inclusive range [A,B] or [B,A].
1128 
1129 // Within the loop there may be many range checks.  Each such range check (R.C.) is of the form 0 <= i*K+L < R, where K
1130 // is a scale factor applied to the loop iteration variable i, and L is some offset; K, L, and R are loop-invariant.
1131 // Because R is never negative (see below), this check can always be simplified to an unsigned check i*K+L <u R.
1132 
1133 // When a long loop over a 64-bit variable i (outer_iv) is decomposed into a series of shorter sub-loops over a 32-bit
1134 // variable j (inner_iv), j ranges over a shorter interval j:[0,B_2] or [0,Z_2) (assuming S > 0), where the limit is
1135 // chosen to prevent various cases of 32-bit overflow (including multiplications j*K below).  In the sub-loop the
1136 // logical value i is offset from j by a 64-bit constant C, so i ranges in i:C+[0,Z_2).
1137 
1138 // For S<0, j ranges (in reverse!) through j:[-|B_2|,0] or (-|Z_2|,0].  For either sign of S, we can say i=j+C and j
1139 // ranges through 32-bit ranges [A_2,B_2] or [B_2,A_2] (A_2=0 of course).
1140 
1141 // The disjoint union of all the C+[A_2,B_2] ranges from the sub-loops must be identical to the whole range [A,B].
1142 // Assuming S>0, the first C must be A itself, and the next C value is the previous C+B_2, plus S.  If |S|=1, the next
1143 // C value is also the previous C+Z_2.  In each sub-loop, j counts from j=A_2=0 and i counts from C+0 and exits at
1144 // j=B_2 (i=C+B_2), just before it gets to i=C+Z_2.  Both i and j count up (from C and 0) if S>0; otherwise they count
1145 // down (from C and 0 again).
1146 
1147 // Returning to range checks, we see that each i*K+L <u R expands to (C+j)*K+L <u R, or j*K+Q <u R, where Q=(C*K+L).
1148 // (Recall that K and L and R are loop-invariant scale, offset and range values for a particular R.C.)  This is still a
1149 // 64-bit comparison, so the range check elimination logic will not apply to it.  (The R.C.E. transforms operate only on
1150 // 32-bit indexes and comparisons, because they use 64-bit temporary values to avoid overflow; see
1151 // PhaseIdealLoop::add_constraint.)
1152 
1153 // We must transform this comparison so that it gets the same answer, but by means of a 32-bit R.C. (using j not i) of
1154 // the form j*K+L_2 <u32 R_2.  Note that L_2 and R_2 must be loop-invariant, but only with respect to the sub-loop.  Thus, the
1155 // problem reduces to computing values for L_2 and R_2 (for each R.C. in the loop) in the loop header for the sub-loop.
1156 // Then the standard R.C.E. transforms can take those as inputs and further compute the necessary minimum and maximum
1157 // values for the 32-bit counter j within which the range checks can be eliminated.
1158 
1159 // So, given j*K+Q <u R, we need to find some j*K+L_2 <u32 R_2, where L_2 and R_2 fit in 32 bits, and the 32-bit operations do
1160 // not overflow. We also need to cover the cases where i*K+L (= j*K+Q) overflows to a 64-bit negative, since that is
1161 // allowed as an input to the R.C., as long as the R.C. as a whole fails.
1162 
1163 // If 32-bit multiplication j*K might overflow, we adjust the sub-loop limit Z_2 closer to zero to reduce j's range.
1164 
1165 // For each R.C. j*K+Q <u32 R, the range of mathematical values of j*K+Q in the sub-loop is [Q_min, Q_max], where
1166 // Q_min=Q and Q_max=B_2*K+Q (if S>0 and K>0), Q_min=A_2*K+Q and Q_max=Q (if S<0 and K>0),
1167 // Q_min=B_2*K+Q and Q_max=Q if (S>0 and K<0), Q_min=Q and Q_max=A_2*K+Q (if S<0 and K<0)
1168 
1169 // Note that the first R.C. value is always Q=(S*K>0 ? Q_min : Q_max).  Also Q_{min,max} = Q + {min,max}(A_2*K,B_2*K).
1170 // If S*K>0 then, as the loop iterations progress, each R.C. value i*K+L = j*K+Q goes up from Q=Q_min towards Q_max.
1171 // If S*K<0 then j*K+Q starts at Q=Q_max and goes down towards Q_min.
1172 
1173 // Case A: Some Negatives (but no overflow).
1174 // Number line:
1175 // |s64_min   .    .    .    0    .    .    .   s64_max|
1176 // |    .  Q_min..Q_max .    0    .    .    .     .    |  s64 negative
1177 // |    .     .    .    .    R=0  R<   R<   R<    R<   |  (against R values)
1178 // |    .     .    .  Q_min..0..Q_max  .    .     .    |  small mixed
1179 // |    .     .    .    .    R    R    R<   R<    R<   |  (against R values)
1180 //
1181 // R values which are out of range (>Q_max+1) are reduced to max(0,Q_max+1).  They are marked on the number line as R<.
1182 //
1183 // So, if Q_min <s64 0, then use this test:
1184 // j*K + s32_trunc(Q_min) <u32 clamp(R, 0, Q_max+1) if S*K>0 (R.C.E. steps upward)
1185 // j*K + s32_trunc(Q_max) <u32 clamp(R, 0, Q_max+1) if S*K<0 (R.C.E. steps downward)
1186 // Both formulas reduce to adding j*K to the 32-bit truncated value of the first R.C. expression value, Q:
1187 // j*K + s32_trunc(Q) <u32 clamp(R, 0, Q_max+1) for all S,K
1188 
1189 // If the 32-bit truncation loses information, no harm is done, since certainly the clamp also will return R_2=zero.
1190 
1191 // Case B: No Negatives.
1192 // Number line:
1193 // |s64_min   .    .    .    0    .    .    .   s64_max|
1194 // |    .     .    .    .    0 Q_min..Q_max .     .    |  small positive
1195 // |    .     .    .    .    R>   R    R    R<    R<   |  (against R values)
1196 // |    .     .    .    .    0    . Q_min..Q_max  .    |  s64 positive
1197 // |    .     .    .    .    R>   R>   R    R     R<   |  (against R values)
1198 //
1199 // R values which are out of range (<Q_min or >Q_max+1) are reduced as marked: R> up to Q_min, R< down to Q_max+1.
1200 // Then the whole comparison is shifted left by Q_min, so it can take place at zero, which is a nice 32-bit value.
1201 //
1202 // So, if both Q_min, Q_max+1 >=s64 0, then use this test:
1203 // j*K + 0         <u32 clamp(R, Q_min, Q_max+1) - Q_min if S*K>0
1204 // More generally:
1205 // j*K + Q - Q_min <u32 clamp(R, Q_min, Q_max+1) - Q_min for all S,K
1206 
1207 // Case C: Overflow in the 64-bit domain
1208 // Number line:
1209 // |..Q_max-2^64   .    .    0    .    .    .   Q_min..|  s64 overflow
1210 // |    .     .    .    .    R>   R>   R>   R>    R    |  (against R values)
1211 //
1212 // In this case, Q_min >s64 Q_max+1, even though the mathematical values of Q_min and Q_max+1 are correctly ordered.
1213 // The formulas from the previous case can be used, except that the bad upper bound Q_max is replaced by max_jlong.
1214 // (In fact, we could use any replacement bound from R to max_jlong inclusive, as the input to the clamp function.)
1215 //
1216 // So if Q_min >=s64 0 but Q_max+1 <s64 0, use this test:
1217 // j*K + 0         <u32 clamp(R, Q_min, max_jlong) - Q_min if S*K>0
1218 // More generally:
1219 // j*K + Q - Q_min <u32 clamp(R, Q_min, max_jlong) - Q_min for all S,K
1220 //
1221 // Dropping the bad bound means only Q_min is used to reduce the range of R:
1222 // j*K + Q - Q_min <u32 max(Q_min, R) - Q_min for all S,K
1223 //
1224 // Here the clamp function is a 64-bit min/max that reduces the dynamic range of its R operand to the required [L,H]:
1225 //     clamp(X, L, H) := max(L, min(X, H))
1226 // When degenerately L > H, it returns L not H.
1227 //
1228 // All of the formulas above can be merged into a single one:
1229 //     L_clamp = Q_min < 0 ? 0 : Q_min        --whether and how far to left-shift
1230 //     H_clamp = Q_max+1 < Q_min ? max_jlong : Q_max+1
1231 //             = Q_max+1 < 0 && Q_min >= 0 ? max_jlong : Q_max+1
1232 //     Q_first = Q = (S*K>0 ? Q_min : Q_max) = (C*K+L)
1233 //     R_clamp = clamp(R, L_clamp, H_clamp)   --reduced dynamic range
1234 //     replacement R.C.:
1235 //       j*K + Q_first - L_clamp <u32 R_clamp - L_clamp
1236 //     or equivalently:
1237 //       j*K + L_2 <u32 R_2
1238 //     where
1239 //       L_2 = Q_first - L_clamp
1240 //       R_2 = R_clamp - L_clamp
1241 //
1242 // Note on why R is never negative:
1243 //
1244 // Various details of this transformation would break badly if R could be negative, so this transformation only
1245 // operates after obtaining hard evidence that R<0 is impossible.  For example, if R comes from a LoadRange node, we
1246 // know R cannot be negative.  For explicit checks (of both int and long) a proof is constructed in
1247 // inline_preconditions_checkIndex, which triggers an uncommon trap if R<0, then wraps R in a ConstraintCastNode with a
1248 // non-negative type.  Later on, when IdealLoopTree::is_range_check_if looks for an optimizable R.C., it checks that
1249 // the type of that R node is non-negative.  Any "wild" R node that could be negative is not treated as an optimizable
1250 // R.C., but R values from a.length and inside checkIndex are good to go.
1251 //
1252 void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List &range_checks, Node* outer_phi,
1253                                                  Node* inner_iters_actual_int, Node* inner_phi,
1254                                                  Node* iv_add, LoopNode* inner_head) {
1255   Node* long_zero = _igvn.longcon(0);
1256   set_ctrl(long_zero, C->root());
1257   Node* int_zero = _igvn.intcon(0);
1258   set_ctrl(int_zero, this->C->root());
1259   Node* long_one = _igvn.longcon(1);
1260   set_ctrl(long_one, this->C->root());
1261   Node* int_stride = _igvn.intcon(checked_cast<int>(stride_con));
1262   set_ctrl(int_stride, this->C->root());
1263 
1264   for (uint i = 0; i < range_checks.size(); i++) {
1265     ProjNode* proj = range_checks.at(i)->as_Proj();
1266     ProjNode* unc_proj = proj->other_if_proj();
1267     RangeCheckNode* rc = proj->in(0)->as_RangeCheck();
1268     jlong scale = 0;
1269     Node* offset = nullptr;
1270     Node* rc_bol = rc->in(1);
1271     Node* rc_cmp = rc_bol->in(1);
1272     if (rc_cmp->Opcode() == Op_CmpU) {
1273       // could be shared and have already been taken care of
1274       continue;
1275     }
1276     bool short_scale = false;
1277     bool ok = is_scaled_iv_plus_offset(rc_cmp->in(1), iv_add, T_LONG, &scale, &offset, &short_scale);
1278     assert(ok, "inconsistent: was tested before");
1279     Node* range = rc_cmp->in(2);
1280     Node* c = rc->in(0);
1281     Node* entry_control = inner_head->in(LoopNode::EntryControl);
1282 
1283     Node* R = range;
1284     Node* K = _igvn.longcon(scale);
1285     set_ctrl(K, this->C->root());
1286 
1287     Node* L = offset;
1288 
1289     if (short_scale) {
1290       // This converts:
1291       // (int)i*K + L <u64 R
1292       // with K an int into:
1293       // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R)
1294       // to protect against an overflow of (int)i*K
1295       //
1296       // Because if (int)i*K overflows, there are K,L where:
1297       // (int)i*K + L <u64 R is false because (int)i*K+L overflows to a negative which becomes a huge u64 value.
1298       // But if i*(long)K + L is >u64 (long)max_jint and still is <u64 R, then
1299       // i*(long)K + L <u64 R is true.
1300       //
1301       // As a consequence simply converting i*K + L <u64 R to i*(long)K + L <u64 R could cause incorrect execution.
1302       //
1303       // It's always true that:
1304       // (int)i*K <u64 (long)max_jint + 1
1305       // which implies (int)i*K + L <u64 (long)max_jint + 1 + L
1306       // As a consequence:
1307       // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R)
1308       // is always false in case of overflow of i*K
1309       //
1310       // Note, there are also K,L where i*K overflows and
1311       // i*K + L <u64 R is true, but
1312       // i*(long)K + L <u64 unsigned_min((long)max_jint + L + 1, R) is false
1313       // So this transformation could cause spurious deoptimizations and failed range check elimination
1314       // (but not incorrect execution) for unlikely corner cases with overflow.
1315       // If this causes problems in practice, we could maybe direct execution to a post-loop, instead of deoptimizing.
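           //
           // A hypothetical concrete case (numbers chosen only for illustration): with K = 2^30, L = 0, R = 2^32
           // and i = 3, (int)i*K truncates to -2^30, so the original check (int)i*K + L <u64 R is false (huge u64 value).
           // Naively widening to i*(long)K + L <u64 R gives 3*2^30 <u64 2^32 which is true, i.e. the transformed loop
           // would run an iteration the original would not.  With the unsigned_min, R becomes
           // unsigned_min((long)max_jint + 0 + 1, 2^32) = 2^31, and 3*2^30 <u64 2^31 is false, matching the original.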
1316       Node* max_jint_plus_one_long = _igvn.longcon((jlong)max_jint + 1);
1317       set_ctrl(max_jint_plus_one_long, C->root());
1318       Node* max_range = new AddLNode(max_jint_plus_one_long, L);
1319       register_new_node(max_range, entry_control);
1320       R = MaxNode::unsigned_min(R, max_range, TypeLong::POS, _igvn);
1321       set_subtree_ctrl(R, true);
1322     }
1323 
1324     Node* C = outer_phi;
1325 
1326     // Start with 64-bit values:
1327     //   i*K + L <u64 R
1328     //   (C+j)*K + L <u64 R
1329     //   j*K + Q <u64 R    where Q = Q_first = C*K+L
1330     Node* Q_first = new MulLNode(C, K);
1331     register_new_node(Q_first, entry_control);
1332     Q_first = new AddLNode(Q_first, L);
1333     register_new_node(Q_first, entry_control);
1334 
1335     // Compute endpoints of the range of values j*K + Q.
1336     //  Q_min = (j=0)*K + Q;  Q_max = (j=B_2)*K + Q
1337     Node* Q_min = Q_first;
1338 
1339     // Compute the exact ending value B_2 (which is really A_2 if S < 0)
1340     Node* B_2 = new LoopLimitNode(this->C, int_zero, inner_iters_actual_int, int_stride);
1341     register_new_node(B_2, entry_control);
1342     B_2 = new SubINode(B_2, int_stride);
1343     register_new_node(B_2, entry_control);
1344     B_2 = new ConvI2LNode(B_2);
1345     register_new_node(B_2, entry_control);
1346 
1347     Node* Q_max = new MulLNode(B_2, K);
1348     register_new_node(Q_max, entry_control);
1349     Q_max = new AddLNode(Q_max, Q_first);
1350     register_new_node(Q_max, entry_control);
1351 
1352     if (scale * stride_con < 0) {
1353       swap(Q_min, Q_max);
1354     }
1355     // Now, mathematically, Q_max > Q_min, and they are close enough so that (Q_max-Q_min) fits in 32 bits.
1356 
1357     // L_clamp = Q_min < 0 ? 0 : Q_min
1358     Node* Q_min_cmp = new CmpLNode(Q_min, long_zero);
1359     register_new_node(Q_min_cmp, entry_control);
1360     Node* Q_min_bool = new BoolNode(Q_min_cmp, BoolTest::lt);
1361     register_new_node(Q_min_bool, entry_control);
1362     Node* L_clamp = new CMoveLNode(Q_min_bool, Q_min, long_zero, TypeLong::LONG);
1363     register_new_node(L_clamp, entry_control);
1364     // (This could also be coded bitwise as L_clamp = Q_min & ~(Q_min>>63).)
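         // (For illustration: if Q_min < 0 then Q_min>>63 == -1 and ~(Q_min>>63) == 0, giving L_clamp = 0;
         //  otherwise Q_min>>63 == 0, the mask is all ones, and L_clamp = Q_min, matching the CMove above.)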
1365 
1366     Node* Q_max_plus_one = new AddLNode(Q_max, long_one);
1367     register_new_node(Q_max_plus_one, entry_control);
1368 
1369     // H_clamp = Q_max+1 < Q_min ? max_jlong : Q_max+1
1370     // (Because Q_min and Q_max are close, the overflow check could also be encoded as Q_max+1 < 0 & Q_min >= 0.)
1371     Node* max_jlong_long = _igvn.longcon(max_jlong);
1372     set_ctrl(max_jlong_long, this->C->root());
1373     Node* Q_max_cmp = new CmpLNode(Q_max_plus_one, Q_min);
1374     register_new_node(Q_max_cmp, entry_control);
1375     Node* Q_max_bool = new BoolNode(Q_max_cmp, BoolTest::lt);
1376     register_new_node(Q_max_bool, entry_control);
1377     Node* H_clamp = new CMoveLNode(Q_max_bool, Q_max_plus_one, max_jlong_long, TypeLong::LONG);
1378     register_new_node(H_clamp, entry_control);
1379     // (This could also be coded bitwise as H_clamp = ((Q_max+1)<<1 | M)>>>1 where M = (Q_max+1)>>63 & ~Q_min>>63.)
1380 
1381     // R_2 = clamp(R, L_clamp, H_clamp) - L_clamp
1382     // that is:  R_2 = clamp(R, L_clamp=0, H_clamp=Q_max)      if Q_min < 0
1383     // or else:  R_2 = clamp(R, L_clamp,   H_clamp) - Q_min    if Q_min >= 0
1384     // and also: R_2 = clamp(R, L_clamp,   Q_max+1) - L_clamp  if Q_min < Q_max+1 (no overflow)
1385     // or else:  R_2 = clamp(R, L_clamp, *no limit*)- L_clamp  if Q_max+1 < Q_min (overflow)
1386     Node* R_2 = clamp(R, L_clamp, H_clamp);
1387     R_2 = new SubLNode(R_2, L_clamp);
1388     register_new_node(R_2, entry_control);
1389     R_2 = new ConvL2INode(R_2, TypeInt::POS);
1390     register_new_node(R_2, entry_control);
1391 
1392     // L_2 = Q_first - L_clamp
1393     // We are subtracting L_clamp from both sides of the <u32 comparison.
1394     // If S*K>0, then Q_first == Q_min and the R.C. expression starts at Q_min-L_clamp and steps upward to Q_max-L_clamp.
1395     // If S*K<0, then Q_first == Q_max and the R.C. expression starts high at Q_max-L_clamp and steps downward to Q_min-L_clamp.
1396     Node* L_2 = new SubLNode(Q_first, L_clamp);
1397     register_new_node(L_2, entry_control);
1398     L_2 = new ConvL2INode(L_2, TypeInt::INT);
1399     register_new_node(L_2, entry_control);
1400 
1401     // Transform the range check using the computed values L_2/R_2
1402     // from:   i*K + L   <u64 R
1403     // to:     j*K + L_2 <u32 R_2
1404     // that is:
1405     //   (j*K + Q_first) - L_clamp <u32 clamp(R, L_clamp, H_clamp) - L_clamp
1406     K = _igvn.intcon(checked_cast<int>(scale));
1407     set_ctrl(K, this->C->root());
1408     Node* scaled_iv = new MulINode(inner_phi, K);
1409     register_new_node(scaled_iv, c);
1410     Node* scaled_iv_plus_offset = new AddINode(scaled_iv, L_2);
1411     register_new_node(scaled_iv_plus_offset, c);
1412 
1413     Node* new_rc_cmp = new CmpUNode(scaled_iv_plus_offset, R_2);
1414     register_new_node(new_rc_cmp, c);
1415 
1416     _igvn.replace_input_of(rc_bol, 1, new_rc_cmp);
1417   }
1418 }
1419 
1420 Node* PhaseIdealLoop::clamp(Node* R, Node* L, Node* H) {
1421   Node* min = MaxNode::signed_min(R, H, TypeLong::LONG, _igvn);
1422   set_subtree_ctrl(min, true);
1423   Node* max = MaxNode::signed_max(L, min, TypeLong::LONG, _igvn);
1424   set_subtree_ctrl(max, true);
1425   return max;
1426 }
1427 
1428 LoopNode* PhaseIdealLoop::create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head,
1429                                             IfNode* exit_test) {
1430   LoopNode* new_inner_head = new LoopNode(head->in(1), head->in(2));
1431   IfNode* new_inner_exit = new IfNode(exit_test->in(0), exit_test->in(1), exit_test->_prob, exit_test->_fcnt);
1432   _igvn.register_new_node_with_optimizer(new_inner_head);
1433   _igvn.register_new_node_with_optimizer(new_inner_exit);
1434   loop->_body.push(new_inner_head);
1435   loop->_body.push(new_inner_exit);
1436   loop->_body.yank(head);
1437   loop->_body.yank(exit_test);
1438   set_loop(new_inner_head, loop);
1439   set_loop(new_inner_exit, loop);
1440   set_idom(new_inner_head, idom(head), dom_depth(head));
1441   set_idom(new_inner_exit, idom(exit_test), dom_depth(exit_test));
1442   lazy_replace(head, new_inner_head);
1443   lazy_replace(exit_test, new_inner_exit);
1444   loop->_head = new_inner_head;
1445   return new_inner_head;
1446 }
1447 
1448 #ifdef ASSERT
1449 void PhaseIdealLoop::check_counted_loop_shape(IdealLoopTree* loop, Node* x, BasicType bt) {
1450   Node* back_control = loop_exit_control(x, loop);
1451   assert(back_control != nullptr, "no back control");
1452 
1453   BoolTest::mask mask = BoolTest::illegal;
1454   float cl_prob = 0;
1455   Node* incr = nullptr;
1456   Node* limit = nullptr;
1457 
1458   Node* cmp = loop_exit_test(back_control, loop, incr, limit, mask, cl_prob);
1459   assert(cmp != nullptr && cmp->Opcode() == Op_Cmp(bt), "no exit test");
1460 
1461   Node* phi_incr = nullptr;
1462   incr = loop_iv_incr(incr, x, loop, phi_incr);
1463   assert(incr != nullptr && incr->Opcode() == Op_Add(bt), "no incr");
1464 
1465   Node* xphi = nullptr;
1466   Node* stride = loop_iv_stride(incr, loop, xphi);
1467 
1468   assert(stride != nullptr, "no stride");
1469 
1470   PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop);
1471 
1472   assert(phi != nullptr && phi->in(LoopNode::LoopBackControl) == incr, "No phi");
1473 
1474   jlong stride_con = stride->get_integer_as_long(bt);
1475 
1476   assert(condition_stride_ok(mask, stride_con), "illegal condition");
1477 
1478   assert(mask != BoolTest::ne, "unexpected condition");
1479   assert(phi_incr == nullptr, "bad loop shape");
1480   assert(cmp->in(1) == incr, "bad exit test shape");
1481 
1482   // Safepoint on backedge not supported
1483   assert(x->in(LoopNode::LoopBackControl)->Opcode() != Op_SafePoint, "no safepoint on backedge");
1484 }
1485 #endif
1486 
1487 #ifdef ASSERT
1488 // Convert an int counted loop to a long counted loop to stress the
1489 // handling of long counted loops.
1490 bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* loop) {
1491   Unique_Node_List iv_nodes;
1492   Node_List old_new;
1493   iv_nodes.push(cmp);
1494   bool failed = false;
1495 
1496   for (uint i = 0; i < iv_nodes.size() && !failed; i++) {
1497     Node* n = iv_nodes.at(i);
1498     switch(n->Opcode()) {
1499       case Op_Phi: {
1500         Node* clone = new PhiNode(n->in(0), TypeLong::LONG);
1501         old_new.map(n->_idx, clone);
1502         break;
1503       }
1504       case Op_CmpI: {
1505         Node* clone = new CmpLNode(nullptr, nullptr);
1506         old_new.map(n->_idx, clone);
1507         break;
1508       }
1509       case Op_AddI: {
1510         Node* clone = new AddLNode(nullptr, nullptr);
1511         old_new.map(n->_idx, clone);
1512         break;
1513       }
1514       case Op_CastII: {
1515         failed = true;
1516         break;
1517       }
1518       default:
1519         DEBUG_ONLY(n->dump());
1520         fatal("unexpected");
1521     }
1522 
1523     for (uint i = 1; i < n->req(); i++) {
1524       Node* in = n->in(i);
1525       if (in == nullptr) {
1526         continue;
1527       }
1528       if (loop->is_member(get_loop(get_ctrl(in)))) {
1529         iv_nodes.push(in);
1530       }
1531     }
1532   }
1533 
1534   if (failed) {
1535     for (uint i = 0; i < iv_nodes.size(); i++) {
1536       Node* n = iv_nodes.at(i);
1537       Node* clone = old_new[n->_idx];
1538       if (clone != nullptr) {
1539         _igvn.remove_dead_node(clone);
1540       }
1541     }
1542     return false;
1543   }
1544 
1545   for (uint i = 0; i < iv_nodes.size(); i++) {
1546     Node* n = iv_nodes.at(i);
1547     Node* clone = old_new[n->_idx];
1548     for (uint i = 1; i < n->req(); i++) {
1549       Node* in = n->in(i);
1550       if (in == nullptr) {
1551         continue;
1552       }
1553       Node* in_clone = old_new[in->_idx];
1554       if (in_clone == nullptr) {
1555         assert(_igvn.type(in)->isa_int(), "");
1556         in_clone = new ConvI2LNode(in);
1557         _igvn.register_new_node_with_optimizer(in_clone);
1558         set_subtree_ctrl(in_clone, false);
1559       }
1560       if (in_clone->in(0) == nullptr) {
1561         in_clone->set_req(0, C->top());
1562         clone->set_req(i, in_clone);
1563         in_clone->set_req(0, nullptr);
1564       } else {
1565         clone->set_req(i, in_clone);
1566       }
1567     }
1568     _igvn.register_new_node_with_optimizer(clone);
1569   }
1570   set_ctrl(old_new[phi->_idx], phi->in(0));
1571 
1572   for (uint i = 0; i < iv_nodes.size(); i++) {
1573     Node* n = iv_nodes.at(i);
1574     Node* clone = old_new[n->_idx];
1575     set_subtree_ctrl(clone, false);
1576     Node* m = n->Opcode() == Op_CmpI ? clone : nullptr;
1577     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1578       Node* u = n->fast_out(i);
1579       if (iv_nodes.member(u)) {
1580         continue;
1581       }
1582       if (m == nullptr) {
1583         m = new ConvL2INode(clone);
1584         _igvn.register_new_node_with_optimizer(m);
1585         set_subtree_ctrl(m, false);
1586       }
1587       _igvn.rehash_node_delayed(u);
1588       int nb = u->replace_edge(n, m, &_igvn);
1589       --i, imax -= nb;
1590     }
1591   }
1592   return true;
1593 }
1594 #endif
1595 
1596 //------------------------------is_counted_loop--------------------------------
1597 bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_bt) {
1598   PhaseGVN *gvn = &_igvn;
1599 
1600   Node* back_control = loop_exit_control(x, loop);
1601   if (back_control == nullptr) {
1602     return false;
1603   }
1604 
1605   BoolTest::mask bt = BoolTest::illegal;
1606   float cl_prob = 0;
1607   Node* incr = nullptr;
1608   Node* limit = nullptr;
1609   Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob);
1610   if (cmp == nullptr || cmp->Opcode() != Op_Cmp(iv_bt)) {
1611     return false; // Avoid pointer & float & 64-bit compares
1612   }
1613 
1614   // Trip-counter increment must be commutative & associative.
1615   if (incr->Opcode() == Op_Cast(iv_bt)) {
1616     incr = incr->in(1);
1617   }
1618 
1619   Node* phi_incr = nullptr;
1620   incr = loop_iv_incr(incr, x, loop, phi_incr);
1621   if (incr == nullptr) {
1622     return false;
1623   }
1624 
1625   Node* trunc1 = nullptr;
1626   Node* trunc2 = nullptr;
1627   const TypeInteger* iv_trunc_t = nullptr;
1628   Node* orig_incr = incr;
1629   if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t, iv_bt))) {
1630     return false; // Funny increment opcode
1631   }
1632   assert(incr->Opcode() == Op_Add(iv_bt), "wrong increment code");
1633 
1634   Node* xphi = nullptr;
1635   Node* stride = loop_iv_stride(incr, loop, xphi);
1636 
1637   if (stride == nullptr) {
1638     return false;
1639   }
1640 
1641   if (xphi->Opcode() == Op_Cast(iv_bt)) {
1642     xphi = xphi->in(1);
1643   }
1644 
1645   // Stride must be constant
1646   jlong stride_con = stride->get_integer_as_long(iv_bt);
1647   assert(stride_con != 0, "missed some peephole opt");
1648 
1649   PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop);
1650 
1651   if (phi == nullptr ||
1652       (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
1653       (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
1654     return false;
1655   }
1656 
1657   Node* iftrue = back_control;
1658   uint iftrue_op = iftrue->Opcode();
1659   Node* iff = iftrue->in(0);
1660   BoolNode* test = iff->in(1)->as_Bool();
1661 
1662   const TypeInteger* limit_t = gvn->type(limit)->is_integer(iv_bt);
1663   if (trunc1 != nullptr) {
1664     // When there is a truncation, we must be sure that after the truncation
1665     // the trip counter will end up higher than the limit; otherwise we are looking
1666     // at an endless loop. This can happen with range checks.
1667 
1668     // Example:
1669     // int i = 0;
1670     // while (true) {
1671     //    sum += array[i];
1672     //    i++;
1673     //    i = i & 0x7fff;
1674     // }
1675     //
1676     // If the array is shorter than 0x8000 this exits through an AIOOB
1677     //  - Counted loop transformation is ok
1678     // If the array is longer, then this is an endless loop
1679     //  - No transformation can be done.
1680 
1681     const TypeInteger* incr_t = gvn->type(orig_incr)->is_integer(iv_bt);
1682     if (limit_t->hi_as_long() > incr_t->hi_as_long()) {
1683       // if the limit can have a higher value than the increment (before the phi)
1684       return false;
1685     }
1686   }
1687 
1688   Node *init_trip = phi->in(LoopNode::EntryControl);
1689 
1690   // If iv trunc type is smaller than int, check for possible wrap.
1691   if (!TypeInteger::bottom(iv_bt)->higher_equal(iv_trunc_t)) {
1692     assert(trunc1 != nullptr, "must have found some truncation");
1693 
1694     // Get a better type for the phi (filtered thru if's)
1695     const TypeInteger* phi_ft = filtered_type(phi);
1696 
1697     // Can iv take on a value that will wrap?
1698     //
1699     // Ensure iv's limit is not within "stride" of the wrap value.
1700     //
1701     // Example for "short" type
1702     //    Truncation ensures value is in the range -32768..32767 (iv_trunc_t)
1703     //    If the stride is +10, then the last value of the induction
1704     //    variable before the increment (phi_ft->_hi) must be
1705     //    <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to
1706     //    ensure no truncation occurs after the increment.
1707 
1708     if (stride_con > 0) {
1709       if (iv_trunc_t->hi_as_long() - phi_ft->hi_as_long() < stride_con ||
1710           iv_trunc_t->lo_as_long() > phi_ft->lo_as_long()) {
1711         return false;  // truncation may occur
1712       }
1713     } else if (stride_con < 0) {
1714       if (iv_trunc_t->lo_as_long() - phi_ft->lo_as_long() > stride_con ||
1715           iv_trunc_t->hi_as_long() < phi_ft->hi_as_long()) {
1716         return false;  // truncation may occur
1717       }
1718     }
1719     // No possibility of wrap so truncation can be discarded
1720     // Promote iv type to Int
1721   } else {
1722     assert(trunc1 == nullptr && trunc2 == nullptr, "no truncation for int");
1723   }
1724 
1725   if (!condition_stride_ok(bt, stride_con)) {
1726     return false;
1727   }
1728 
1729   const TypeInteger* init_t = gvn->type(init_trip)->is_integer(iv_bt);
1730 
1731   if (stride_con > 0) {
1732     if (init_t->lo_as_long() > max_signed_integer(iv_bt) - stride_con) {
1733       return false; // cyclic loop
1734     }
1735   } else {
1736     if (init_t->hi_as_long() < min_signed_integer(iv_bt) - stride_con) {
1737       return false; // cyclic loop
1738     }
1739   }
1740 
1741   if (phi_incr != nullptr && bt != BoolTest::ne) {
1742     // check if there is a possibility of IV overflowing after the first increment
1743     if (stride_con > 0) {
1744       if (init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) {
1745         return false;
1746       }
1747     } else {
1748       if (init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con) {
1749         return false;
1750       }
1751     }
1752   }
1753 
1754   // =================================================
1755   // ---- SUCCESS!   Found A Trip-Counted Loop!  -----
1756   //
1757 
1758   if (x->Opcode() == Op_Region) {
1759     // x has not yet been transformed to Loop or LongCountedLoop.
1760     // This should only happen if we are inside an infinite loop.
1761     // It happens like this:
1762     //   build_loop_tree -> do not attach infinite loop and nested loops
1763     //   beautify_loops  -> does not transform the infinite and nested loops to LoopNode, because not attached yet
1764     //   build_loop_tree -> find and attach infinite and nested loops
1765     //   counted_loop    -> nested Regions are not yet transformed to LoopNodes, we land here
1766     assert(x->as_Region()->is_in_infinite_subgraph(),
1767            "x can only be a Region and not Loop if inside infinite loop");
1768     // Come back later when Region is transformed to LoopNode
1769     return false;
1770   }
1771 
1772   assert(x->Opcode() == Op_Loop || x->Opcode() == Op_LongCountedLoop, "regular loops only");
1773   C->print_method(PHASE_BEFORE_CLOOPS, 3);
1774 
1775   // ===================================================
1776   // We can only convert this loop to a counted loop if we can guarantee that the iv phi will never overflow at runtime.
1777   // This is an implicit assumption taken by some loop optimizations. We therefore must ensure this property at all cost.
1778   // At this point, we've already excluded some trivial cases where an overflow could have been proven statically.
1779   // But even though we cannot prove that an overflow will *not* happen, we still want to speculatively convert this loop
1780   // to a counted loop. This can be achieved by adding additional iv phi overflow checks before the loop. If they fail,
1781   // we trap and resume execution before the loop without having executed any iteration of the loop, yet.
1782   //
1783   // These additional iv phi overflow checks can be inserted as Loop Limit Check Predicates above the Loop Limit Check
1784   // Parse Predicate which captures a JVM state just before the entry of the loop. If there is no such Parse Predicate,
1785   // we cannot generate a Loop Limit Check Predicate and thus cannot speculatively convert the loop to a counted loop.
1786   //
1787   // In the following, we only focus on int loops with stride > 0 to keep things simple. The argumentation and proof
1788   // for stride < 0 are analogous. For long loops, we would replace max_int with max_long.
1789   //
1790   //
1791   // The loop to be converted does not always need to have the often used shape:
1792   //
1793   //                                                 i = init
1794   //     i = init                                loop:
1795   //     do {                                        ...
1796   //         // ...               equivalent         i+=stride
1797   //         i+=stride               <==>            if (i < limit)
1798   //     } while (i < limit);                          goto loop
1799   //                                             exit:
1800   //                                                 ...
1801   //
1802   // where the loop exit check uses the post-incremented iv phi and a '<'-operator.
1803   //
1804   // We could also have '<='-operator (or '>='-operator for negative strides) or use the pre-incremented iv phi value
1805   // in the loop exit check:
1806   //
1807   //         i = init
1808   //     loop:
1809   //         ...
1810   //         if (i <= limit)
1811   //             i+=stride
1812   //             goto loop
1813   //     exit:
1814   //         ...
1815   //
1816   // Let's define the following terms:
1817   // - iv_pre_i: The pre-incremented iv phi before the i-th iteration.
1818   // - iv_post_i: The post-incremented iv phi after the i-th iteration.
1819   //
1820   // The iv_pre_i and iv_post_i have the following relation:
1821   //      iv_pre_i + stride = iv_post_i
1822   //
1823   // When converting a loop to a counted loop, we want to have a canonicalized loop exit check of the form:
1824   //     iv_post_i < adjusted_limit
1825   //
1826   // If that is not the case, we need to canonicalize the loop exit check by using different values for adjusted_limit:
1827   // (LE1) iv_post_i < limit: Already canonicalized. We can directly use limit as adjusted_limit.
1828   //           -> adjusted_limit = limit.
1829   // (LE2) iv_post_i <= limit:
1830   //           iv_post_i < limit + 1
1831   //           -> adjusted limit = limit + 1
1832   // (LE3) iv_pre_i < limit:
1833   //           iv_pre_i + stride < limit + stride
1834   //           iv_post_i < limit + stride
1835   //           -> adjusted_limit = limit + stride
1836   // (LE4) iv_pre_i <= limit:
1837   //           iv_pre_i < limit + 1
1838   //           iv_pre_i + stride < limit + stride + 1
1839   //           iv_post_i < limit + stride + 1
1840   //           -> adjusted_limit = limit + stride + 1
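       //
       //     For illustration (hypothetical values): with stride = 4, a post-incremented "iv_post_i < limit" check (LE1)
       //     keeps adjusted_limit = limit, while a pre-incremented "iv_pre_i <= limit" check (LE4) gives
       //     adjusted_limit = limit + 4 + 1 = limit + 5.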
1841   //
1842   // Note that:
1843   //     (AL) limit <= adjusted_limit.
1844   //
1845   // The following loop invariant has to hold for counted loops with n iterations (i.e. loop exit check true after n-th
1846   // loop iteration) and a canonicalized loop exit check to guarantee that no iv_post_i over- or underflows:
1847   // (INV) For i = 1..n, min_int <= iv_post_i <= max_int
1848   //
1849   // To prove (INV), we require the following two conditions/assumptions:
1850   // (i): adjusted_limit - 1 + stride <= max_int
1851   // (ii): init < limit
1852   //
1853   // If we can prove (INV), we know that there can be no over- or underflow of any iv phi value. We prove (INV) by
1854   // induction by assuming (i) and (ii).
1855   //
1856   // Proof by Induction
1857   // ------------------
1858   // > Base case (i = 1): We show that (INV) holds after the first iteration:
1859   //     min_int <= iv_post_1 = init + stride <= max_int
1860   // Proof:
1861   //     First, we note that (ii) implies
1862   //         (iii) init <= limit - 1
1863   //     max_int >= adjusted_limit - 1 + stride   [using (i)]
1864   //             >= limit - 1 + stride            [using (AL)]
1865   //             >= init + stride                 [using (iii)]
1866   //             >= min_int                       [using stride > 0, no underflow]
1867   // Thus, no overflow happens after the first iteration and (INV) holds for i = 1.
1868   //
1869   // Note that to prove the base case we need (i) and (ii).
1870   //
1871   // > Induction Hypothesis (i = j, j > 1): Assume that (INV) holds after the j-th iteration:
1872   //     min_int <= iv_post_j <= max_int
1873   // > Step case (i = j + 1): We show that (INV) also holds after the j+1-th iteration:
1874   //     min_int <= iv_post_{j+1} = iv_post_j + stride <= max_int
1875   // Proof:
1876   // If iv_post_j >= adjusted_limit:
1877   //     We exit the loop after the j-th iteration, and we don't execute the j+1-th iteration anymore. Thus, there is
1878   //     also no iv_{j+1}. Since (INV) holds for iv_j, there is nothing left to prove.
1879   // If iv_post_j < adjusted_limit:
1880   //     First, we note that:
1881   //         (iv) iv_post_j <= adjusted_limit - 1
1882   //     max_int >= adjusted_limit - 1 + stride    [using (i)]
1883   //             >= iv_post_j + stride             [using (iv)]
1884   //             >= min_int                        [using stride > 0, no underflow]
1885   //
1886   // Note that to prove the step case we only need (i).
1887   //
1888   // Thus, by assuming (i) and (ii), we proved (INV).
1889   //
1890   //
1891   // It is therefore enough to add the following two Loop Limit Check Predicates to check assumptions (i) and (ii):
1892   //
1893   // (1) Loop Limit Check Predicate for (i):
1894   //     Using (i): adjusted_limit - 1 + stride <= max_int
1895   //
1896   //     This condition is now restated to use limit instead of adjusted_limit:
1897   //
1898   //     To prevent an overflow of adjusted_limit - 1 + stride itself, we rewrite this check to
1899   //         max_int - stride + 1 >= adjusted_limit
1900   //     We can merge the two constants into
1901   //         canonicalized_correction = stride - 1
1902   //     which gives us
1903   //        max_int - canonicalized_correction >= adjusted_limit
1904   //
1905   //     To directly use limit instead of adjusted_limit in the predicate condition, we split adjusted_limit into:
1906   //         adjusted_limit = limit + limit_correction
1907   //     Since stride > 0 and limit_correction <= stride + 1, we can restate this with no over- or underflow into:
1908   //         max_int - canonicalized_correction - limit_correction >= limit
1909   //     Since canonicalized_correction and limit_correction are both constants, we can replace them with a new constant:
1910   //         final_correction = canonicalized_correction + limit_correction
1911   //     which gives us:
1912   //
1913   //     Final predicate condition:
1914   //         max_int - final_correction >= limit
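       //
       //     For illustration (hypothetical values): stride = 4 with a post-incremented "<=" exit check (LE2) gives
       //     canonicalized_correction = 3 and limit_correction = 1, so final_correction = 4 and the predicate checks
       //     max_int - 4 >= limit.  If at runtime limit = max_int - 1, the predicate fails and we trap before entering
       //     the loop; without it, the iv could pass the exit check at max_int - 3 and overflow on the next increment.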
1915   //
1916   // (2) Loop Limit Check Predicate for (ii):
1917   //     Using (ii): init < limit
1918   //
1919   //     This Loop Limit Check Predicate is not required if we can prove at compile time that either:
1920   //        (2.1) type(init) < type(limit)
1921   //             In this case, we know:
1922   //                 all possible values of init < all possible values of limit
1923   //             and we can skip the predicate.
1924   //
1925   //        (2.2) init < limit is already checked before (i.e. found as a dominating check)
1926   //            In this case, we do not need to re-check the condition and can skip the predicate.
1927   //            This is often found for while- and for-loops which have the following shape:
1928   //
1929   //                if (init < limit) { // Dominating test. Do not need the Loop Limit Check Predicate below.
1930   //                    i = init;
1931   //                    if (init >= limit) { trap(); } // Here we would insert the Loop Limit Check Predicate
1932   //                    do {
1933   //                        i += stride;
1934   //                    } while (i < limit);
1935   //                }
1936   //
1937   //        (2.3) init + stride <= max_int
1938   //            In this case, there is no overflow of the iv phi after the first loop iteration.
1939   //            In the proof of the base case above we showed that init + stride <= max_int by using assumption (ii):
1940   //                init < limit
1941   //            In the proof of the step case above, we did not need (ii) anymore. Therefore, if we already know at
1942   //            compile time that init + stride <= max_int then we have trivially proven the base case and that
1943   //            there is no overflow of the iv phi after the first iteration. In this case, we don't need to check (ii)
1944   //            again and can skip the predicate.
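       //
       //            For illustration (hypothetical type info): if type analysis bounds init to [0, 100] and stride = 1,
       //            then init + stride <= 101 <= max_int, so (2.3) holds at compile time and the predicate is skipped.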
1945 
1946 
1947   // Accounting for (LE3) and (LE4) where we use pre-incremented phis in the loop exit check.
1948   const jlong limit_correction_for_pre_iv_exit_check = (phi_incr != nullptr) ? stride_con : 0;
1949 
1950   // Accounting for (LE2) and (LE4) where we use <= or >= in the loop exit check.
1951   const bool includes_limit = (bt == BoolTest::le || bt == BoolTest::ge);
1952   const jlong limit_correction_for_le_ge_exit_check = (includes_limit ? (stride_con > 0 ? 1 : -1) : 0);
1953 
1954   const jlong limit_correction = limit_correction_for_pre_iv_exit_check + limit_correction_for_le_ge_exit_check;
1955   const jlong canonicalized_correction = stride_con + (stride_con > 0 ? -1 : 1);
1956   const jlong final_correction = canonicalized_correction + limit_correction;
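       // For illustration (hypothetical values): stride_con = 4 with a pre-incremented "<=" exit check (LE4) gives
       // limit_correction = 4 + 1 = 5, canonicalized_correction = 3, and final_correction = 8, so the Loop Limit
       // Check Predicate below checks limit <= max_int - 8.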
1957 
1958   int sov = check_stride_overflow(final_correction, limit_t, iv_bt);
1959   Node* init_control = x->in(LoopNode::EntryControl);
1960 
1961   // If sov==0, limit's type always satisfies the condition, for
1962   // example, when it is an array length.
1963   if (sov != 0) {
1964     if (sov < 0) {
1965       return false;  // Bailout: integer overflow is certain.
1966     }
1967     // (1) Loop Limit Check Predicate is required because we could not statically prove that
1968     //     limit + final_correction = adjusted_limit - 1 + stride <= max_int
1969     assert(!x->as_Loop()->is_loop_nest_inner_loop(), "loop was transformed");
1970     const Predicates predicates(init_control);
1971     const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
1972     if (!loop_limit_check_predicate_block->has_parse_predicate()) {
1973       // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
1974 #ifdef ASSERT
1975       if (TraceLoopLimitCheck) {
1976         tty->print("Missing Loop Limit Check Parse Predicate:");
1977         loop->dump_head();
1978         x->dump(1);
1979       }
1980 #endif
1981       return false;
1982     }
1983 
1984     ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate();
1985     if (!is_dominator(get_ctrl(limit), loop_limit_check_parse_predicate->in(0))) {
1986       return false;
1987     }
1988 
1989     Node* cmp_limit;
1990     Node* bol;
1991 
1992     if (stride_con > 0) {
1993       cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
1994       bol = new BoolNode(cmp_limit, BoolTest::le);
1995     } else {
1996       cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
1997       bol = new BoolNode(cmp_limit, BoolTest::ge);
1998     }
1999 
2000     insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
2001   }
2002 
2003   // (2.3)
2004   const bool init_plus_stride_could_overflow =
2005           (stride_con > 0 && init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) ||
2006           (stride_con < 0 && init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con);
2007   // (2.1)
2008   const bool init_gte_limit = (stride_con > 0 && init_t->hi_as_long() >= limit_t->lo_as_long()) ||
2009                               (stride_con < 0 && init_t->lo_as_long() <= limit_t->hi_as_long());
2010 
2011   if (init_gte_limit && // (2.1)
2012      ((bt == BoolTest::ne || init_plus_stride_could_overflow) && // (2.3)
2013       !has_dominating_loop_limit_check(init_trip, limit, stride_con, iv_bt, init_control))) { // (2.2)
2014     // (2) Iteration Loop Limit Check Predicate is required because neither (2.1), (2.2), nor (2.3) holds.
2015     // We use the following condition:
2016     // - stride > 0: init < limit
2017     // - stride < 0: init > limit
2018     //
2019     // This predicate is always required if the loop exit check uses a not-equal operator ('ne'), for which |stride| = 1
2020     // is a requirement. We canonicalize such an exit check into a less-than (or greater-than) check. By doing so, we must
2021     // always check that init < limit (or init > limit). Otherwise, we could have a different number of iterations at runtime.
2022 
2023     const Predicates predicates(init_control);
2024     const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
2025     if (!loop_limit_check_predicate_block->has_parse_predicate()) {
2026       // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
2027 #ifdef ASSERT
2028       if (TraceLoopLimitCheck) {
2029         tty->print("Missing Loop Limit Check Parse Predicate:");
2030         loop->dump_head();
2031         x->dump(1);
2032       }
2033 #endif
2034       return false;
2035     }
2036 
2037     ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate();
2038     Node* parse_predicate_entry = loop_limit_check_parse_predicate->in(0);
2039     if (!is_dominator(get_ctrl(limit), parse_predicate_entry) ||
2040         !is_dominator(get_ctrl(init_trip), parse_predicate_entry)) {
2041       return false;
2042     }
2043 
2044     Node* cmp_limit;
2045     Node* bol;
2046 
2047     if (stride_con > 0) {
2048       cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
2049       bol = new BoolNode(cmp_limit, BoolTest::lt);
2050     } else {
2051       cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
2052       bol = new BoolNode(cmp_limit, BoolTest::gt);
2053     }
2054 
2055     insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
2056   }
2057 
2058   if (bt == BoolTest::ne) {
2059     // Now we need to canonicalize the loop condition if it is 'ne'.
2060     assert(stride_con == 1 || stride_con == -1, "simple increment only - checked before");
2061     if (stride_con > 0) {
2062       // 'ne' can be replaced with 'lt' only when init < limit. This is ensured by the inserted predicate above.
2063       bt = BoolTest::lt;
2064     } else {
2065       assert(stride_con < 0, "must be");
2066       // 'ne' can be replaced with 'gt' only when init > limit. This is ensured by the inserted predicate above.
2067       bt = BoolTest::gt;
2068     }
2069   }
2070 
2071   Node* sfpt = nullptr;
2072   if (loop->_child == nullptr) {
2073     sfpt = find_safepoint(back_control, x, loop);
2074   } else {
2075     sfpt = iff->in(0);
2076     if (sfpt->Opcode() != Op_SafePoint) {
2077       sfpt = nullptr;
2078     }
2079   }
2080 
2081   if (x->in(LoopNode::LoopBackControl)->Opcode() == Op_SafePoint) {
2082     Node* backedge_sfpt = x->in(LoopNode::LoopBackControl);
2083     if (((iv_bt == T_INT && LoopStripMiningIter != 0) ||
2084          iv_bt == T_LONG) &&
2085         sfpt == nullptr) {
2086       // Leaving the safepoint on the backedge and creating a
2087       // CountedLoop will confuse optimizations. We can't move the
2088       // safepoint around because its jvm state wouldn't match a new
2089       // location. Give up on that loop.
2090       return false;
2091     }
2092     if (is_deleteable_safept(backedge_sfpt)) {
2093       lazy_replace(backedge_sfpt, iftrue);
2094       if (loop->_safepts != nullptr) {
2095         loop->_safepts->yank(backedge_sfpt);
2096       }
2097       loop->_tail = iftrue;
2098     }
2099   }
2100 
2101 
2102 #ifdef ASSERT
2103   if (iv_bt == T_INT &&
2104       !x->as_Loop()->is_loop_nest_inner_loop() &&
2105       StressLongCountedLoop > 0 &&
2106       trunc1 == nullptr &&
2107       convert_to_long_loop(cmp, phi, loop)) {
2108     return false;
2109   }
2110 #endif
2111 
2112   Node* adjusted_limit = limit;
2113   if (phi_incr != nullptr) {
2114     // If the compare points directly to the phi we need to adjust
2115     // the compare so that it points to the incr. The limit has
2116     // to be adjusted to keep the trip count the same and we
2117     // should avoid int overflow.
2118     //
2119     //   i = init; do {} while(i++ < limit);
2120     // is converted to
2121     //   i = init; do {} while(++i < limit+1);
2122     //
2123     adjusted_limit = gvn->transform(AddNode::make(limit, stride, iv_bt));
2124   }
2125 
2126   if (includes_limit) {
2127     // The limit check guarantees that 'limit <= (max_jint - stride)' so
2128     // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
2129     //
2130     Node* one = (stride_con > 0) ? gvn->integercon( 1, iv_bt) : gvn->integercon(-1, iv_bt);
2131     adjusted_limit = gvn->transform(AddNode::make(adjusted_limit, one, iv_bt));
2132     if (bt == BoolTest::le)
2133       bt = BoolTest::lt;
2134     else if (bt == BoolTest::ge)
2135       bt = BoolTest::gt;
2136     else
2137       ShouldNotReachHere();
2138   }
2139   set_subtree_ctrl(adjusted_limit, false);
2140 
2141   // Build a canonical trip test.
2142   // Clone code, as old values may be in use.
2143   incr = incr->clone();
2144   incr->set_req(1,phi);
2145   incr->set_req(2,stride);
2146   incr = _igvn.register_new_node_with_optimizer(incr);
2147   set_early_ctrl(incr, false);
2148   _igvn.rehash_node_delayed(phi);
2149   phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
2150 
2151   // If phi type is more restrictive than Int, raise to
2152   // Int to prevent (almost) infinite recursion in igvn
2153   // which can only handle integer types for constants or minint..maxint.
2154   if (!TypeInteger::bottom(iv_bt)->higher_equal(phi->bottom_type())) {
2155     Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInteger::bottom(iv_bt));
2156     nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
2157     nphi = _igvn.register_new_node_with_optimizer(nphi);
2158     set_ctrl(nphi, get_ctrl(phi));
2159     _igvn.replace_node(phi, nphi);
2160     phi = nphi->as_Phi();
2161   }
2162   cmp = cmp->clone();
2163   cmp->set_req(1,incr);
2164   cmp->set_req(2, adjusted_limit);
2165   cmp = _igvn.register_new_node_with_optimizer(cmp);
2166   set_ctrl(cmp, iff->in(0));
2167 
2168   test = test->clone()->as_Bool();
2169   (*(BoolTest*)&test->_test)._test = bt;
2170   test->set_req(1,cmp);
2171   _igvn.register_new_node_with_optimizer(test);
2172   set_ctrl(test, iff->in(0));
2173 
2174   // Replace the old IfNode with a new LoopEndNode
2175   Node *lex = _igvn.register_new_node_with_optimizer(BaseCountedLoopEndNode::make(iff->in(0), test, cl_prob, iff->as_If()->_fcnt, iv_bt));
2176   IfNode *le = lex->as_If();
2177   uint dd = dom_depth(iff);
2178   set_idom(le, le->in(0), dd); // Update dominance for loop exit
2179   set_loop(le, loop);
2180 
2181   // Get the loop-exit control
2182   Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));
2183 
2184   // Need to swap loop-exit and loop-back control?
2185   if (iftrue_op == Op_IfFalse) {
2186     Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le));
2187     Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le));
2188 
2189     loop->_tail = back_control = ift2;
2190     set_loop(ift2, loop);
2191     set_loop(iff2, get_loop(iffalse));
2192 
2193     // Lazy update of 'get_ctrl' mechanism.
2194     lazy_replace(iffalse, iff2);
2195     lazy_replace(iftrue,  ift2);
2196 
2197     // Swap names
2198     iffalse = iff2;
2199     iftrue  = ift2;
2200   } else {
2201     _igvn.rehash_node_delayed(iffalse);
2202     _igvn.rehash_node_delayed(iftrue);
2203     iffalse->set_req_X( 0, le, &_igvn );
2204     iftrue ->set_req_X( 0, le, &_igvn );
2205   }
2206 
2207   set_idom(iftrue,  le, dd+1);
2208   set_idom(iffalse, le, dd+1);
2209   assert(iff->outcnt() == 0, "should be dead now");
2210   lazy_replace( iff, le ); // fix 'get_ctrl'
2211 
2212   Node* entry_control = init_control;
2213   bool strip_mine_loop = iv_bt == T_INT &&
2214                          loop->_child == nullptr &&
2215                          sfpt != nullptr &&
2216                          !loop->_has_call &&
2217                          is_deleteable_safept(sfpt);
2218   IdealLoopTree* outer_ilt = nullptr;
2219   if (strip_mine_loop) {
2220     outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop,
2221                                               cl_prob, le->_fcnt, entry_control,
2222                                               iffalse);
2223   }
2224 
2225   // Now setup a new CountedLoopNode to replace the existing LoopNode
2226   BaseCountedLoopNode *l = BaseCountedLoopNode::make(entry_control, back_control, iv_bt);
2227   l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
2228   // The following assert is approximately true, and defines the intention
2229   // of can_be_counted_loop.  It fails, however, because phase->type
2230   // is not yet initialized for this loop and its parts.
2231   //assert(l->can_be_counted_loop(this), "sanity");
2232   _igvn.register_new_node_with_optimizer(l);
2233   set_loop(l, loop);
2234   loop->_head = l;
2235   // Fix all data nodes placed at the old loop head.
2236   // Uses the lazy-update mechanism of 'get_ctrl'.
2237   lazy_replace( x, l );
2238   set_idom(l, entry_control, dom_depth(entry_control) + 1);
2239 
2240   if (iv_bt == T_INT && (LoopStripMiningIter == 0 || strip_mine_loop)) {
2241     // Check for immediately preceding SafePoint and remove
2242     if (sfpt != nullptr && (strip_mine_loop || is_deleteable_safept(sfpt))) {
2243       if (strip_mine_loop) {
2244         Node* outer_le = outer_ilt->_tail->in(0);
2245         Node* sfpt_clone = sfpt->clone();
2246         sfpt_clone->set_req(0, iffalse);
2247         outer_le->set_req(0, sfpt_clone);
2248 
2249         Node* polladdr = sfpt_clone->in(TypeFunc::Parms);
2250         if (polladdr != nullptr && polladdr->is_Load()) {
2251           // Polling load should be pinned outside inner loop.
2252           Node* new_polladdr = polladdr->clone();
2253           new_polladdr->set_req(0, iffalse);
2254           _igvn.register_new_node_with_optimizer(new_polladdr, polladdr);
2255           set_ctrl(new_polladdr, iffalse);
2256           sfpt_clone->set_req(TypeFunc::Parms, new_polladdr);
2257         }
2258         // When this code runs, loop bodies have not yet been populated.
2259         const bool body_populated = false;
2260         register_control(sfpt_clone, outer_ilt, iffalse, body_populated);
2261         set_idom(outer_le, sfpt_clone, dom_depth(sfpt_clone));
2262       }
2263       lazy_replace(sfpt, sfpt->in(TypeFunc::Control));
2264       if (loop->_safepts != nullptr) {
2265         loop->_safepts->yank(sfpt);
2266       }
2267     }
2268   }
2269 
2270 #ifdef ASSERT
2271   assert(l->is_valid_counted_loop(iv_bt), "counted loop shape is messed up");
2272   assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" );
2273 #endif
2274 #ifndef PRODUCT
2275   if (TraceLoopOpts) {
2276     tty->print("Counted      ");
2277     loop->dump_head();
2278   }
2279 #endif
2280 
2281   C->print_method(PHASE_AFTER_CLOOPS, 3);
2282 
2283   // Capture bounds of the loop in the induction variable Phi before
2284   // subsequent transformation (iteration splitting) obscures the
2285   // bounds
2286   l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn));
2287 
2288   if (strip_mine_loop) {
2289     l->mark_strip_mined();
2290     l->verify_strip_mined(1);
2291     outer_ilt->_head->as_Loop()->verify_strip_mined(1);
2292     loop = outer_ilt;
2293   }
2294 
2295 #ifndef PRODUCT
2296   if (x->as_Loop()->is_loop_nest_inner_loop() && iv_bt == T_LONG) {
2297     Atomic::inc(&_long_loop_counted_loops);
2298   }
2299 #endif
2300   if (iv_bt == T_LONG && x->as_Loop()->is_loop_nest_outer_loop()) {
2301     l->mark_loop_nest_outer_loop();
2302   }
2303 
2304   return true;
2305 }
2306 
2307 // Check if there is a dominating loop limit check of the form 'init < limit' starting at the loop entry.
2308 // If there is one, then we do not need to create an additional Loop Limit Check Predicate.
2309 bool PhaseIdealLoop::has_dominating_loop_limit_check(Node* init_trip, Node* limit, const jlong stride_con,
2310                                                      const BasicType iv_bt, Node* loop_entry) {
2311   // Eagerly call transform() on the Cmp and Bool node to common them up if possible. This is required in order to
2312   // successfully find a dominated test with the If node below.
2313   Node* cmp_limit;
2314   Node* bol;
2315   if (stride_con > 0) {
2316     cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
2317     bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::lt));
2318   } else {
2319     cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
2320     bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::gt));
2321   }
2322 
2323   // Check if there is already a dominating init < limit check. If so, we do not need a Loop Limit Check Predicate.
2324   IfNode* iff = new IfNode(loop_entry, bol, PROB_MIN, COUNT_UNKNOWN);
2325   // Also add fake IfProj nodes in order to call transform() on the newly created IfNode.
2326   IfFalseNode* if_false = new IfFalseNode(iff);
2327   IfTrueNode* if_true = new IfTrueNode(iff);
2328   Node* dominated_iff = _igvn.transform(iff);
2329   // ConI node? Found dominating test (IfNode::dominated_by() returns a ConI node).
2330   const bool found_dominating_test = dominated_iff != nullptr && dominated_iff->is_ConI();
2331 
2332   // Kill the If with its projections again in the next IGVN round by cutting it off from the graph.
2333   _igvn.replace_input_of(iff, 0, C->top());
2334   _igvn.replace_input_of(iff, 1, C->top());
2335   return found_dominating_test;
2336 }
2337 
2338 //----------------------exact_limit-------------------------------------------
2339 Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
2340   assert(loop->_head->is_CountedLoop(), "");
2341   CountedLoopNode *cl = loop->_head->as_CountedLoop();
2342   assert(cl->is_valid_counted_loop(T_INT), "");
2343 
2344   if (ABS(cl->stride_con()) == 1 ||
2345       cl->limit()->Opcode() == Op_LoopLimit) {
2346     // Old code has exact limit (it could be incorrect in case of int overflow).
2347     // The loop limit is exact when |stride| == 1, and the loop may already have an exact limit.
2348     return cl->limit();
2349   }
2350   Node *limit = nullptr;
2351 #ifdef ASSERT
2352   BoolTest::mask bt = cl->loopexit()->test_trip();
2353   assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
2354 #endif
2355   if (cl->has_exact_trip_count()) {
2356     // Simple case: loop has constant boundaries.
2357     // Use jlongs to avoid integer overflow.
2358     int stride_con = cl->stride_con();
2359     jlong  init_con = cl->init_trip()->get_int();
2360     jlong limit_con = cl->limit()->get_int();
2361     julong trip_cnt = cl->trip_count();
2362     jlong final_con = init_con + trip_cnt*stride_con;
2363     int final_int = (int)final_con;
2364     // The final value should be in integer range since the loop
2365     // is counted and the limit was checked for overflow.
2366     assert(final_con == (jlong)final_int, "final value should be integer");
2367     limit = _igvn.intcon(final_int);
2368   } else {
2369     // Create new LoopLimit node to get exact limit (final iv value).
2370     limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
2371     register_new_node(limit, cl->in(LoopNode::EntryControl));
2372   }
2373   assert(limit != nullptr, "sanity");
2374   return limit;
2375 }
2376 
2377 //------------------------------Ideal------------------------------------------
2378 // Return a node which is more "ideal" than the current node.
2379 // Attempt to convert into a counted-loop.
2380 Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2381   if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) {
2382     phase->C->set_major_progress();
2383   }
2384   return RegionNode::Ideal(phase, can_reshape);
2385 }
2386 
2387 #ifdef ASSERT
2388 void LoopNode::verify_strip_mined(int expect_skeleton) const {
2389   const OuterStripMinedLoopNode* outer = nullptr;
2390   const CountedLoopNode* inner = nullptr;
2391   if (is_strip_mined()) {
2392     if (!is_valid_counted_loop(T_INT)) {
2393       return; // Skip malformed counted loop
2394     }
2395     assert(is_CountedLoop(), "no Loop should be marked strip mined");
2396     inner = as_CountedLoop();
2397     outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop();
2398   } else if (is_OuterStripMinedLoop()) {
2399     outer = this->as_OuterStripMinedLoop();
2400     inner = outer->unique_ctrl_out()->as_CountedLoop();
2401     assert(inner->is_valid_counted_loop(T_INT) && inner->is_strip_mined(), "OuterStripMinedLoop should have been removed");
2402     assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined");
2403   }
2404   if (inner != nullptr || outer != nullptr) {
2405     assert(inner != nullptr && outer != nullptr, "missing loop in strip mined nest");
2406     Node* outer_tail = outer->in(LoopNode::LoopBackControl);
2407     Node* outer_le = outer_tail->in(0);
2408     assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If");
2409     Node* sfpt = outer_le->in(0);
2410     assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?");
2411     Node* inner_out = sfpt->in(0);
2412     CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
2413     assert(cle == inner->loopexit_or_null(), "mismatch");
2414     bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
2415     if (has_skeleton) {
2416       assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
2417       assert(outer->outcnt() == 2, "only control nodes");
2418     } else {
2419       assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?");
2420       uint phis = 0;
2421       uint be_loads = 0;
2422       Node* be = inner->in(LoopNode::LoopBackControl);
2423       for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) {
2424         Node* u = inner->fast_out(i);
2425         if (u->is_Phi()) {
2426           phis++;
2427           for (DUIterator_Fast jmax, j = be->fast_outs(jmax); j < jmax; j++) {
2428             Node* n = be->fast_out(j);
2429             if (n->is_Load()) {
2430               assert(n->in(0) == be || n->find_prec_edge(be) > 0, "should be on the backedge");
2431               do {
2432                 n = n->raw_out(0);
2433               } while (!n->is_Phi());
2434               if (n == u) {
2435                 be_loads++;
2436                 break;
2437               }
2438             }
2439           }
2440         }
2441       }
2442       assert(be_loads <= phis, "wrong number of phis that depend on a pinned load");
2443       for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) {
2444         Node* u = outer->fast_out(i);
2445         assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop");
2446       }
2447       uint stores = 0;
2448       for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
2449         Node* u = inner_out->fast_out(i);
2450         if (u->is_Store()) {
2451           stores++;
2452         }
2453       }
2454       // Late optimization of loads on backedge can cause Phi of outer loop to be eliminated but Phi of inner loop is
2455       // not guaranteed to be optimized out.
2456       assert(outer->outcnt() >= phis + 2 - be_loads && outer->outcnt() <= phis + 2 + stores + 1, "only phis");
2457     }
2458     assert(sfpt->outcnt() == 1, "no data node");
2459     assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node");
2460   }
2461 }
2462 #endif
2463 
2464 //=============================================================================
2465 //------------------------------Ideal------------------------------------------
2466 // Return a node which is more "ideal" than the current node.
2467 // Attempt to convert into a counted-loop.
2468 Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2469   return RegionNode::Ideal(phase, can_reshape);
2470 }
2471 
2472 //------------------------------dump_spec--------------------------------------
2473 // Dump special per-node info
2474 #ifndef PRODUCT
2475 void CountedLoopNode::dump_spec(outputStream *st) const {
2476   LoopNode::dump_spec(st);
2477   if (stride_is_con()) {
2478     st->print("stride: %d ",stride_con());
2479   }
2480   if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
2481   if (is_main_loop()) st->print("main of N%d", _idx);
2482   if (is_post_loop()) st->print("post of N%d", _main_idx);
2483   if (is_strip_mined()) st->print(" strip mined");
2484 }
2485 #endif
2486 
2487 //=============================================================================
2488 jlong BaseCountedLoopEndNode::stride_con() const {
2489   return stride()->bottom_type()->is_integer(bt())->get_con_as_long(bt());
2490 }
2491 
2492 
2493 BaseCountedLoopEndNode* BaseCountedLoopEndNode::make(Node* control, Node* test, float prob, float cnt, BasicType bt) {
2494   if (bt == T_INT) {
2495     return new CountedLoopEndNode(control, test, prob, cnt);
2496   }
2497   assert(bt == T_LONG, "unsupported");
2498   return new LongCountedLoopEndNode(control, test, prob, cnt);
2499 }
2500 
2501 //=============================================================================
2502 //------------------------------Value-----------------------------------------
2503 const Type* LoopLimitNode::Value(PhaseGVN* phase) const {
2504   const Type* init_t   = phase->type(in(Init));
2505   const Type* limit_t  = phase->type(in(Limit));
2506   const Type* stride_t = phase->type(in(Stride));
2507   // Either input is TOP ==> the result is TOP
2508   if (init_t   == Type::TOP) return Type::TOP;
2509   if (limit_t  == Type::TOP) return Type::TOP;
2510   if (stride_t == Type::TOP) return Type::TOP;
2511 
2512   int stride_con = stride_t->is_int()->get_con();
2513   if (stride_con == 1)
2514     return bottom_type();  // Identity
2515 
2516   if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
2517     // Use jlongs to avoid integer overflow.
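         // For example, with init = 0, limit = 10 and stride = 3: stride_m = 2,
         // trip_count = (10 - 0 + 2) / 3 = 4 and final_con = 0 + 3 * 4 = 12, the
         // value of the induction variable when the loop exits.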
2518     jlong init_con   =  init_t->is_int()->get_con();
2519     jlong limit_con  = limit_t->is_int()->get_con();
2520     int  stride_m   = stride_con - (stride_con > 0 ? 1 : -1);
2521     jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
2522     jlong final_con  = init_con + stride_con*trip_count;
2523     int final_int = (int)final_con;
2524     // The final value should be in integer range since the loop
2525     // is counted and the limit was checked for overflow.
2526     // The assert checks for overflow only if all input nodes are ConINodes, as during CCP
2527     // there might be a temporary overflow from PhiNodes (see JDK-8309266).
2528     assert((in(Init)->is_ConI() && in(Limit)->is_ConI() && in(Stride)->is_ConI()) ? final_con == (jlong)final_int : true, "final value should be integer");
2529     if (final_con == (jlong)final_int) {
2530       return TypeInt::make(final_int);
2531     } else {
2532       return bottom_type();
2533     }
2534   }
2535 
2536   return bottom_type(); // TypeInt::INT
2537 }
2538 
2539 //------------------------------Ideal------------------------------------------
2540 // Return a node which is more "ideal" than the current node.
2541 Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2542   if (phase->type(in(Init))   == Type::TOP ||
2543       phase->type(in(Limit))  == Type::TOP ||
2544       phase->type(in(Stride)) == Type::TOP)
2545     return nullptr;  // Dead
2546 
2547   int stride_con = phase->type(in(Stride))->is_int()->get_con();
2548   if (stride_con == 1)
2549     return nullptr;  // Identity
2550 
2551   if (in(Init)->is_Con() && in(Limit)->is_Con())
2552     return nullptr;  // Value
2553 
2554   // Delay the following optimizations until all loop optimizations are
2555   // done, to keep the Ideal graph simple.
2556   if (!can_reshape || !phase->C->post_loop_opts_phase()) {
2557     return nullptr;
2558   }
2559 
2560   const TypeInt* init_t  = phase->type(in(Init) )->is_int();
2561   const TypeInt* limit_t = phase->type(in(Limit))->is_int();
2562   int stride_p;
2563   jlong lim, ini;
2564   julong max;
2565   if (stride_con > 0) {
2566     stride_p = stride_con;
2567     lim = limit_t->_hi;
2568     ini = init_t->_lo;
2569     max = (julong)max_jint;
2570   } else {
2571     stride_p = -stride_con;
2572     lim = init_t->_hi;
2573     ini = limit_t->_lo;
2574     max = (julong)min_jint;
2575   }
2576   julong range = lim - ini + stride_p;
2577   if (range <= max) {
2578     // Convert to an integer expression if it does not overflow.
2579     Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
2580     Node *range = phase->transform(new SubINode(in(Limit), in(Init)));
2581     Node *bias  = phase->transform(new AddINode(range, stride_m));
2582     Node *trip  = phase->transform(new DivINode(0, bias, in(Stride)));
2583     Node *span  = phase->transform(new MulINode(trip, in(Stride)));
2584     return new AddINode(span, in(Init)); // exact limit
2585   }
2586 
2587   if (is_power_of_2(stride_p) ||                // divisor is 2^n
2588       !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
2589     // Convert to a long expression to avoid integer overflow
2590     // and let the igvn optimizer convert this division.
2591     //
2592     Node*   init   = phase->transform( new ConvI2LNode(in(Init)));
2593     Node*  limit   = phase->transform( new ConvI2LNode(in(Limit)));
2594     Node* stride   = phase->longcon(stride_con);
2595     Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));
2596 
2597     Node *range = phase->transform(new SubLNode(limit, init));
2598     Node *bias  = phase->transform(new AddLNode(range, stride_m));
2599     Node *span;
2600     if (stride_con > 0 && is_power_of_2(stride_p)) {
2601       // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
2602       // and avoid generating rounding for the division. The zero trip guard should
2603       // guarantee that init < limit, but sometimes the guard is missing and
2604       // we can get a situation where init > limit. Note, for the empty loop
2605       // optimization the zero trip guard is generated explicitly, which leaves
2606       // only the RCE predicate where the exact limit is used, and the predicate
2607       // will simply fail, forcing recompilation.
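           // For example, with stride = 4: bias & -4 clears the two low bits and rounds
           // bias down to a multiple of 4, i.e. (bias / 4) * 4 for a non-negative bias.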
2608       Node* neg_stride   = phase->longcon(-stride_con);
2609       span = phase->transform(new AndLNode(bias, neg_stride));
2610     } else {
2611       Node *trip  = phase->transform(new DivLNode(0, bias, stride));
2612       span = phase->transform(new MulLNode(trip, stride));
2613     }
2614     // Convert back to int
2615     Node *span_int = phase->transform(new ConvL2INode(span));
2616     return new AddINode(span_int, in(Init)); // exact limit
2617   }
2618 
2619   return nullptr;    // No progress
2620 }
2621 
2622 //------------------------------Identity---------------------------------------
2623 // If stride == 1 (or -1), return the limit node.
2624 Node* LoopLimitNode::Identity(PhaseGVN* phase) {
2625   int stride_con = phase->type(in(Stride))->is_int()->get_con();
2626   if (stride_con == 1 || stride_con == -1)
2627     return in(Limit);
2628   return this;
2629 }
2630 
2631 //=============================================================================
2632 //----------------------match_incr_with_optional_truncation--------------------
2633 // Match increment with optional truncation:
2634 // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
2635 // Return null for failure. Success returns the increment node.
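     // On success, *trunc1 and *trunc2 are set to the stripped truncation nodes (null
     // if there is no truncation) and *trunc_type to the value range implied by the
     // truncation (the full range of bt if there is none).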
2636 Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2,
2637                                                            const TypeInteger** trunc_type,
2638                                                            BasicType bt) {
2639   // Quick cutouts:
2640   if (expr == nullptr || expr->req() != 3)  return nullptr;
2641 
2642   Node *t1 = nullptr;
2643   Node *t2 = nullptr;
2644   Node* n1 = expr;
2645   int   n1op = n1->Opcode();
2646   const TypeInteger* trunc_t = TypeInteger::bottom(bt);
2647 
2648   if (bt == T_INT) {
2649     // Try to strip (n1 & M) or (n1 << N >> N) from n1.
2650     if (n1op == Op_AndI &&
2651         n1->in(2)->is_Con() &&
2652         n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) {
2653       // %%% This check should match any mask of 2**K-1.
2654       t1 = n1;
2655       n1 = t1->in(1);
2656       n1op = n1->Opcode();
2657       trunc_t = TypeInt::CHAR;
2658     } else if (n1op == Op_RShiftI &&
2659                n1->in(1) != nullptr &&
2660                n1->in(1)->Opcode() == Op_LShiftI &&
2661                n1->in(2) == n1->in(1)->in(2) &&
2662                n1->in(2)->is_Con()) {
2663       jint shift = n1->in(2)->bottom_type()->is_int()->get_con();
2664       // %%% This check should match any shift in [1..31].
2665       if (shift == 16 || shift == 8) {
2666         t1 = n1;
2667         t2 = t1->in(1);
2668         n1 = t2->in(1);
2669         n1op = n1->Opcode();
2670         if (shift == 16) {
2671           trunc_t = TypeInt::SHORT;
2672         } else if (shift == 8) {
2673           trunc_t = TypeInt::BYTE;
2674         }
2675       }
2676     }
2677   }
2678 
2679   // If (maybe after stripping) it is an AddI, we won:
2680   if (n1op == Op_Add(bt)) {
2681     *trunc1 = t1;
2682     *trunc2 = t2;
2683     *trunc_type = trunc_t;
2684     return n1;
2685   }
2686 
2687   // failed
2688   return nullptr;
2689 }
2690 
2691 LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) {
2692   if (is_strip_mined() && in(EntryControl) != nullptr && in(EntryControl)->is_OuterStripMinedLoop()) {
2693     verify_strip_mined(expect_skeleton);
2694     return in(EntryControl)->as_Loop();
2695   }
2696   return this;
2697 }
2698 
2699 OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const {
2700   assert(is_strip_mined(), "not a strip mined loop");
2701   Node* c = in(EntryControl);
2702   if (c == nullptr || c->is_top() || !c->is_OuterStripMinedLoop()) {
2703     return nullptr;
2704   }
2705   return c->as_OuterStripMinedLoop();
2706 }
2707 
2708 IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const {
2709   Node* c = in(LoopBackControl);
2710   if (c == nullptr || c->is_top()) {
2711     return nullptr;
2712   }
2713   return c->as_IfTrue();
2714 }
2715 
2716 IfTrueNode* CountedLoopNode::outer_loop_tail() const {
2717   LoopNode* l = outer_loop();
2718   if (l == nullptr) {
2719     return nullptr;
2720   }
2721   return l->outer_loop_tail();
2722 }
2723 
2724 OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const {
2725   IfTrueNode* proj = outer_loop_tail();
2726   if (proj == nullptr) {
2727     return nullptr;
2728   }
2729   Node* c = proj->in(0);
2730   if (c == nullptr || c->is_top() || c->outcnt() != 2) {
2731     return nullptr;
2732   }
2733   return c->as_OuterStripMinedLoopEnd();
2734 }
2735 
2736 OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const {
2737   LoopNode* l = outer_loop();
2738   if (l == nullptr) {
2739     return nullptr;
2740   }
2741   return l->outer_loop_end();
2742 }
2743 
2744 IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const {
2745   IfNode* le = outer_loop_end();
2746   if (le == nullptr) {
2747     return nullptr;
2748   }
2749   Node* c = le->proj_out_or_null(false);
2750   if (c == nullptr) {
2751     return nullptr;
2752   }
2753   return c->as_IfFalse();
2754 }
2755 
2756 IfFalseNode* CountedLoopNode::outer_loop_exit() const {
2757   LoopNode* l = outer_loop();
2758   if (l == nullptr) {
2759     return nullptr;
2760   }
2761   return l->outer_loop_exit();
2762 }
2763 
2764 SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const {
2765   IfNode* le = outer_loop_end();
2766   if (le == nullptr) {
2767     return nullptr;
2768   }
2769   Node* c = le->in(0);
2770   if (c == nullptr || c->is_top()) {
2771     return nullptr;
2772   }
2773   assert(c->Opcode() == Op_SafePoint, "broken outer loop");
2774   return c->as_SafePoint();
2775 }
2776 
2777 SafePointNode* CountedLoopNode::outer_safepoint() const {
2778   LoopNode* l = outer_loop();
2779   if (l == nullptr) {
2780     return nullptr;
2781   }
2782   return l->outer_safepoint();
2783 }
2784 
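     // For a main or post loop, return the control above the Assertion Predicates (with
     // their Halt projections) placed before the loop, skipping the outer strip-mined
     // loop first for a main loop; for other loops simply return the entry control.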
2785 Node* CountedLoopNode::skip_assertion_predicates_with_halt() {
2786   Node* ctrl = in(LoopNode::EntryControl);
2787   if (is_main_loop()) {
2788     ctrl = skip_strip_mined()->in(LoopNode::EntryControl);
2789   }
2790   if (is_main_loop() || is_post_loop()) {
2791     AssertionPredicatesWithHalt assertion_predicates(ctrl);
2792     return assertion_predicates.entry();
2793   }
2794   return ctrl;
2795 }
2796 
2797 
2798 int CountedLoopNode::stride_con() const {
2799   CountedLoopEndNode* cle = loopexit_or_null();
2800   return cle != nullptr ? cle->stride_con() : 0;
2801 }
2802 
2803 BaseCountedLoopNode* BaseCountedLoopNode::make(Node* entry, Node* backedge, BasicType bt) {
2804   if (bt == T_INT) {
2805     return new CountedLoopNode(entry, backedge);
2806   }
2807   assert(bt == T_LONG, "unsupported");
2808   return new LongCountedLoopNode(entry, backedge);
2809 }
2810 
2811 void OuterStripMinedLoopNode::fix_sunk_stores(CountedLoopEndNode* inner_cle, LoopNode* inner_cl, PhaseIterGVN* igvn,
2812                                               PhaseIdealLoop* iloop) {
2813   Node* cle_out = inner_cle->proj_out(false);
2814   Node* cle_tail = inner_cle->proj_out(true);
2815   if (cle_out->outcnt() > 1) {
2816     // Look for chains of stores that were sunk
2817     // out of the inner loop and are in the outer loop
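         // For each such chain, find its first and last store by walking the memory
         // edges, then look for the memory Phi of that alias slice on the inner loop
         // head; create one if the entire chain was sunk, or otherwise rewire the
         // existing Phi so the chain becomes part of the loop's memory state.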
2818     for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) {
2819       Node* u = cle_out->fast_out(i);
2820       if (u->is_Store()) {
2821         int alias_idx = igvn->C->get_alias_index(u->adr_type());
2822         Node* first = u;
2823         for (;;) {
2824           Node* next = first->in(MemNode::Memory);
2825           if (!next->is_Store() || next->in(0) != cle_out) {
2826             break;
2827           }
2828           assert(igvn->C->get_alias_index(next->adr_type()) == alias_idx, "");
2829           first = next;
2830         }
2831         Node* last = u;
2832         for (;;) {
2833           Node* next = nullptr;
2834           for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) {
2835             Node* uu = last->fast_out(j);
2836             if (uu->is_Store() && uu->in(0) == cle_out) {
2837               assert(next == nullptr, "only one in the outer loop");
2838               next = uu;
2839               assert(igvn->C->get_alias_index(next->adr_type()) == alias_idx, "");
2840             }
2841           }
2842           if (next == nullptr) {
2843             break;
2844           }
2845           last = next;
2846         }
2847         Node* phi = nullptr;
2848         for (DUIterator_Fast jmax, j = inner_cl->fast_outs(jmax); j < jmax; j++) {
2849           Node* uu = inner_cl->fast_out(j);
2850           if (uu->is_Phi()) {
2851             Node* be = uu->in(LoopNode::LoopBackControl);
2852             if (be->is_Store() && be->in(0) == inner_cl->in(LoopNode::LoopBackControl)) {
2853               assert(igvn->C->get_alias_index(uu->adr_type()) != alias_idx && igvn->C->get_alias_index(uu->adr_type()) != Compile::AliasIdxBot, "unexpected store");
2854             }
2855             if (be == last || be == first->in(MemNode::Memory)) {
2856               assert(igvn->C->get_alias_index(uu->adr_type()) == alias_idx || igvn->C->get_alias_index(uu->adr_type()) == Compile::AliasIdxBot, "unexpected alias");
2857               assert(phi == nullptr, "only one phi");
2858               phi = uu;
2859             }
2860           }
2861         }
2862 #ifdef ASSERT
2863         for (DUIterator_Fast jmax, j = inner_cl->fast_outs(jmax); j < jmax; j++) {
2864           Node* uu = inner_cl->fast_out(j);
2865           if (uu->is_memory_phi()) {
2866             if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) {
2867               assert(phi == uu, "what's that phi?");
2868             } else if (uu->adr_type() == TypePtr::BOTTOM) {
2869               Node* n = uu->in(LoopNode::LoopBackControl);
2870               uint limit = igvn->C->live_nodes();
2871               uint i = 0;
2872               while (n != uu) {
2873                 i++;
2874                 assert(i < limit, "infinite loop");
2875                 if (n->is_Proj()) {
2876                   n = n->in(0);
2877                 } else if (n->is_SafePoint() || n->is_MemBar()) {
2878                   n = n->in(TypeFunc::Memory);
2879                 } else if (n->is_Phi()) {
2880                   n = n->in(1);
2881                 } else if (n->is_MergeMem()) {
2882                   n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type()));
2883                 } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) {
2884                   n = n->in(MemNode::Memory);
2885                 } else {
2886                   n->dump();
2887                   ShouldNotReachHere();
2888                 }
2889               }
2890             }
2891           }
2892         }
2893 #endif
2894         if (phi == nullptr) {
2895           // If an entire chain was sunk, the
2896           // inner loop has no phi for that memory
2897           // slice: create one for the outer loop.
2898           phi = PhiNode::make(inner_cl, first->in(MemNode::Memory), Type::MEMORY,
2899                               igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type())));
2900           phi->set_req(LoopNode::LoopBackControl, last);
2901           phi = register_new_node(phi, inner_cl, igvn, iloop);
2902           igvn->replace_input_of(first, MemNode::Memory, phi);
2903         } else {
2904           // Otherwise, fix the outer loop's memory graph to include
2905           // that chain of stores.
2906           Node* be = phi->in(LoopNode::LoopBackControl);
2907           assert(!(be->is_Store() && be->in(0) == inner_cl->in(LoopNode::LoopBackControl)), "store on the backedge + sunk stores: unsupported");
2908           if (be == first->in(MemNode::Memory)) {
2909             if (be == phi->in(LoopNode::LoopBackControl)) {
2910               igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
2911             } else {
2912               igvn->replace_input_of(be, MemNode::Memory, last);
2913             }
2914           } else {
2915 #ifdef ASSERT
2916             if (be == phi->in(LoopNode::LoopBackControl)) {
2917               assert(phi->in(LoopNode::LoopBackControl) == last, "");
2918             } else {
2919               assert(be->in(MemNode::Memory) == last, "");
2920             }
2921 #endif
2922           }
2923         }
2924       }
2925     }
2926   }
2927 }
2928 
2929 void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
2930   // Look for the outer & inner strip mined loops, reduce the number of
2931   // iterations of the inner loop, set the exit condition of the outer loop,
2932   // and construct the required phi nodes for the outer loop.
2933   CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
2934   assert(inner_cl->is_strip_mined(), "inner loop should be strip mined");
2935   if (LoopStripMiningIter == 0) {
2936     remove_outer_loop_and_safepoint(igvn);
2937     return;
2938   }
2939   if (LoopStripMiningIter == 1) {
2940     transform_to_counted_loop(igvn, nullptr);
2941     return;
2942   }
2943   Node* inner_iv_phi = inner_cl->phi();
2944   if (inner_iv_phi == nullptr) {
2945     IfNode* outer_le = outer_loop_end();
2946     Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
2947     igvn->replace_node(outer_le, iff);
2948     inner_cl->clear_strip_mined();
2949     return;
2950   }
2951   CountedLoopEndNode* inner_cle = inner_cl->loopexit();
2952 
2953   int stride = inner_cl->stride_con();
2954   jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS(stride);
2955   int scaled_iters = (int)scaled_iters_long;
2956   int short_scaled_iters = LoopStripMiningIterShortLoop* ABS(stride);
2957   const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int();
2958   jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo;
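       // iter_estimate, the width of the iv phi's value range, bounds the remaining
       // trip count scaled by |stride|; it is compared against the iteration limits
       // scaled the same way below.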
2959   assert(iter_estimate > 0, "broken");
2960   if ((jlong)scaled_iters != scaled_iters_long || iter_estimate <= short_scaled_iters) {
2961     // Remove outer loop and safepoint (too few iterations)
2962     remove_outer_loop_and_safepoint(igvn);
2963     return;
2964   }
2965   if (iter_estimate <= scaled_iters_long) {
2966     // We would only go through one iteration of
2967     // the outer loop: drop the outer loop but
2968     // keep the safepoint so we don't run for
2969     // too long without a safepoint
2970     IfNode* outer_le = outer_loop_end();
2971     Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
2972     igvn->replace_node(outer_le, iff);
2973     inner_cl->clear_strip_mined();
2974     return;
2975   }
2976 
2977   Node* cle_tail = inner_cle->proj_out(true);
2978   ResourceMark rm;
2979   Node_List old_new;
2980   if (cle_tail->outcnt() > 1) {
2981     // Look for nodes on backedge of inner loop and clone them
2982     Unique_Node_List backedge_nodes;
2983     for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) {
2984       Node* u = cle_tail->fast_out(i);
2985       if (u != inner_cl) {
2986         assert(!u->is_CFG(), "control flow on the backedge?");
2987         backedge_nodes.push(u);
2988       }
2989     }
2990     uint last = igvn->C->unique();
2991     for (uint next = 0; next < backedge_nodes.size(); next++) {
2992       Node* n = backedge_nodes.at(next);
2993       old_new.map(n->_idx, n->clone());
2994       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2995         Node* u = n->fast_out(i);
2996         assert(!u->is_CFG(), "broken");
2997         if (u->_idx >= last) {
2998           continue;
2999         }
3000         if (!u->is_Phi()) {
3001           backedge_nodes.push(u);
3002         } else {
3003           assert(u->in(0) == inner_cl, "strange phi on the backedge");
3004         }
3005       }
3006     }
3007     // Put the clones on the outer loop backedge
3008     Node* le_tail = outer_loop_tail();
3009     for (uint next = 0; next < backedge_nodes.size(); next++) {
3010       Node *n = old_new[backedge_nodes.at(next)->_idx];
3011       for (uint i = 1; i < n->req(); i++) {
3012         if (n->in(i) != nullptr && old_new[n->in(i)->_idx] != nullptr) {
3013           n->set_req(i, old_new[n->in(i)->_idx]);
3014         }
3015       }
3016       if (n->in(0) != nullptr && n->in(0) == cle_tail) {
3017         n->set_req(0, le_tail);
3018       }
3019       igvn->register_new_node_with_optimizer(n);
3020     }
3021   }
3022 
3023   Node* iv_phi = nullptr;
3024   // Make a clone of each phi in the inner loop
3025   // for the outer loop
3026   for (uint i = 0; i < inner_cl->outcnt(); i++) {
3027     Node* u = inner_cl->raw_out(i);
3028     if (u->is_Phi()) {
3029       assert(u->in(0) == inner_cl, "inconsistent");
3030       Node* phi = u->clone();
3031       phi->set_req(0, this);
3032       Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx];
3033       if (be != nullptr) {
3034         phi->set_req(LoopNode::LoopBackControl, be);
3035       }
3036       phi = igvn->transform(phi);
3037       igvn->replace_input_of(u, LoopNode::EntryControl, phi);
3038       if (u == inner_iv_phi) {
3039         iv_phi = phi;
3040       }
3041     }
3042   }
3043 
3044   if (iv_phi != nullptr) {
3045     // Now adjust the inner loop's exit condition
3046     Node* limit = inner_cl->limit();
3047     // If limit < init for stride > 0 (or limit > init for stride < 0),
3048     // the loop body is run only once. Since limit - init (resp. init - limit)
3049     // would be negative, using it directly in the unsigned comparison below would
3050     // cause the loop body to be run for LoopStripMiningIter iterations, so clamp it to zero first.
3051     Node* max = nullptr;
3052     if (stride > 0) {
3053       max = MaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn);
3054     } else {
3055       max = MaxNode::max_diff_with_zero(iv_phi, limit, TypeInt::INT, *igvn);
3056     }
3057     // The difference is non-negative and can be larger than the max signed int
3058     // value. Use an unsigned min.
3059     Node* const_iters = igvn->intcon(scaled_iters);
3060     Node* min = MaxNode::unsigned_min(max, const_iters, TypeInt::make(0, scaled_iters, Type::WidenMin), *igvn);
3061     // min is the number of iterations for the next inner loop execution:
3062     // unsigned_min(max(limit - iv_phi, 0), scaled_iters) if stride > 0
3063     // unsigned_min(max(iv_phi - limit, 0), scaled_iters) if stride < 0
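         // For example (illustrative values), with stride = 1 and scaled_iters = 1000:
         // if limit - iv_phi = 250 then min = 250 and the new inner limit is iv_phi + 250;
         // if limit - iv_phi = 5000 then min = 1000 and the new inner limit is iv_phi + 1000.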
3064 
3065     Node* new_limit = nullptr;
3066     if (stride > 0) {
3067       new_limit = igvn->transform(new AddINode(min, iv_phi));
3068     } else {
3069       new_limit = igvn->transform(new SubINode(iv_phi, min));
3070     }
3071     Node* inner_cmp = inner_cle->cmp_node();
3072     Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue);
3073     Node* outer_bol = inner_bol;
3074     // cmp node for inner loop may be shared
3075     inner_cmp = inner_cmp->clone();
3076     inner_cmp->set_req(2, new_limit);
3077     inner_bol = inner_bol->clone();
3078     inner_bol->set_req(1, igvn->transform(inner_cmp));
3079     igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol));
3080     // Set the outer loop's exit condition too
3081     igvn->replace_input_of(outer_loop_end(), 1, outer_bol);
3082   } else {
3083     assert(false, "should be able to adjust outer loop");
3084     IfNode* outer_le = outer_loop_end();
3085     Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
3086     igvn->replace_node(outer_le, iff);
3087     inner_cl->clear_strip_mined();
3088   }
3089 }
3090 
3091 void OuterStripMinedLoopNode::transform_to_counted_loop(PhaseIterGVN* igvn, PhaseIdealLoop* iloop) {
3092   CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
3093   CountedLoopEndNode* cle = inner_cl->loopexit();
3094   Node* inner_test = cle->in(1);
3095   IfNode* outer_le = outer_loop_end();
3096   CountedLoopEndNode* inner_cle = inner_cl->loopexit();
3097   Node* safepoint = outer_safepoint();
3098 
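       // The inner CountedLoop takes over the iterations of the outer strip-mined loop:
       // the old inner exit test is made constant, a new CountedLoopEnd reusing the
       // original exit test is placed where the outer loop end was, the inner loop's
       // backedge is rewired to it, and the outer loop head is then disconnected.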
3099   fix_sunk_stores(inner_cle, inner_cl, igvn, iloop);
3100 
3101   // make counted loop exit test always fail
3102   ConINode* zero = igvn->intcon(0);
3103   if (iloop != nullptr) {
3104     iloop->set_ctrl(zero, igvn->C->root());
3105   }
3106   igvn->replace_input_of(cle, 1, zero);
3107   // replace the outer loop end with a CountedLoopEndNode that uses the former CLE's exit test
3108   Node* new_end = new CountedLoopEndNode(outer_le->in(0), inner_test, cle->_prob, cle->_fcnt);
3109   register_control(new_end, inner_cl, outer_le->in(0), igvn, iloop);
3110   if (iloop == nullptr) {
3111     igvn->replace_node(outer_le, new_end);
3112   } else {
3113     iloop->lazy_replace(outer_le, new_end);
3114   }
3115   // the backedge of the inner loop must be rewired to the new loop end
3116   Node* backedge = cle->proj_out(true);
3117   igvn->replace_input_of(backedge, 0, new_end);
3118   if (iloop != nullptr) {
3119     iloop->set_idom(backedge, new_end, iloop->dom_depth(new_end) + 1);
3120   }
3121   // make the outer loop go away
3122   igvn->replace_input_of(in(LoopBackControl), 0, igvn->C->top());
3123   igvn->replace_input_of(this, LoopBackControl, igvn->C->top());
3124   inner_cl->clear_strip_mined();
3125   if (iloop != nullptr) {
3126     Unique_Node_List wq;
3127     wq.push(safepoint);
3128 
3129     IdealLoopTree* outer_loop_ilt = iloop->get_loop(this);
3130     IdealLoopTree* loop = iloop->get_loop(inner_cl);
3131 
3132     for (uint i = 0; i < wq.size(); i++) {
3133       Node* n = wq.at(i);
3134       for (uint j = 0; j < n->req(); ++j) {
3135         Node* in = n->in(j);
3136         if (in == nullptr || in->is_CFG()) {
3137           continue;
3138         }
3139         if (iloop->get_loop(iloop->get_ctrl(in)) != outer_loop_ilt) {
3140           continue;
3141         }
3142         assert(!loop->_body.contains(in), "");
3143         loop->_body.push(in);
3144         wq.push(in);
3145       }
3146     }
3147     iloop->set_loop(safepoint, loop);
3148     loop->_body.push(safepoint);
3149     iloop->set_loop(safepoint->in(0), loop);
3150     loop->_body.push(safepoint->in(0));
3151     outer_loop_ilt->_tail = igvn->C->top();
3152   }
3153 }
3154 
3155 void OuterStripMinedLoopNode::remove_outer_loop_and_safepoint(PhaseIterGVN* igvn) const {
3156   CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
3157   Node* outer_sfpt = outer_safepoint();
3158   Node* outer_out = outer_loop_exit();
3159   igvn->replace_node(outer_out, outer_sfpt->in(0));
3160   igvn->replace_input_of(outer_sfpt, 0, igvn->C->top());
3161   inner_cl->clear_strip_mined();
3162 }
3163 
3164 Node* OuterStripMinedLoopNode::register_new_node(Node* node, LoopNode* ctrl, PhaseIterGVN* igvn, PhaseIdealLoop* iloop) {
3165   if (iloop == nullptr) {
3166     return igvn->transform(node);
3167   }
3168   iloop->register_new_node(node, ctrl);
3169   return node;
3170 }
3171 
3172 Node* OuterStripMinedLoopNode::register_control(Node* node, Node* loop, Node* idom, PhaseIterGVN* igvn,
3173                                                 PhaseIdealLoop* iloop) {
3174   if (iloop == nullptr) {
3175     return igvn->transform(node);
3176   }
3177   iloop->register_control(node, iloop->get_loop(loop), idom);
3178   return node;
3179 }
3180 
3181 const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
3182   if (!in(0)) return Type::TOP;
3183   if (phase->type(in(0)) == Type::TOP)
3184     return Type::TOP;
3185 
3186   // Until expansion, the loop end condition is not set so this should not constant fold.
3187   if (is_expanded(phase)) {
3188     return IfNode::Value(phase);
3189   }
3190 
3191   return TypeTuple::IFBOTH;
3192 }
3193 
3194 bool OuterStripMinedLoopEndNode::is_expanded(PhaseGVN *phase) const {
3195   // The outer strip mined loop head only has Phi uses after expansion
3196   if (phase->is_IterGVN()) {
3197     Node* backedge = proj_out_or_null(true);
3198     if (backedge != nullptr) {
3199       Node* head = backedge->unique_ctrl_out_or_null();
3200       if (head != nullptr && head->is_OuterStripMinedLoop()) {
3201         if (head->find_out_with(Op_Phi) != nullptr) {
3202           return true;
3203         }
3204       }
3205     }
3206   }
3207   return false;
3208 }
3209 
3210 Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3211   if (remove_dead_region(phase, can_reshape))  return this;
3212 
3213   return nullptr;
3214 }
3215 
3216 //------------------------------filtered_type--------------------------------
3217 // Return a type based on condition control flow
3218 // A successful return will be a type that is restricted due
3219 // to a series of dominating if-tests, such as:
3220 //    if (i < 10) {
3221 //       if (i > 0) {
3222 //          here: "i" type is [1..10)
3223 //       }
3224 //    }
3225 // or a control flow merge
3226 //    if (i < 10) {
3227 //       do {
3228 //          phi( , ) -- at top of loop type is [min_int..10)
3229 //         i = ?
3230 //       } while ( i < 10)
3231 //
3232 const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) {
3233   assert(n && n->bottom_type()->is_int(), "must be int");
3234   const TypeInt* filtered_t = nullptr;
3235   if (!n->is_Phi()) {
3236     assert(n_ctrl != nullptr || n_ctrl == C->top(), "valid control");
3237     filtered_t = filtered_type_from_dominators(n, n_ctrl);
3238 
3239   } else {
3240     Node* phi    = n->as_Phi();
3241     Node* region = phi->in(0);
3242     assert(n_ctrl == nullptr || n_ctrl == region, "ctrl parameter must be region");
3243     if (region && region != C->top()) {
3244       for (uint i = 1; i < phi->req(); i++) {
3245         Node* val   = phi->in(i);
3246         Node* use_c = region->in(i);
3247         const TypeInt* val_t = filtered_type_from_dominators(val, use_c);
3248         if (val_t != nullptr) {
3249           if (filtered_t == nullptr) {
3250             filtered_t = val_t;
3251           } else {
3252             filtered_t = filtered_t->meet(val_t)->is_int();
3253           }
3254         }
3255       }
3256     }
3257   }
3258   const TypeInt* n_t = _igvn.type(n)->is_int();
3259   if (filtered_t != nullptr) {
3260     n_t = n_t->join(filtered_t)->is_int();
3261   }
3262   return n_t;
3263 }
3264 
3265 
3266 //------------------------------filtered_type_from_dominators--------------------------------
3267 // Return a possibly more restrictive type for val based on condition control flow of dominators
3268 const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) {
3269   if (val->is_Con()) {
3270      return val->bottom_type()->is_int();
3271   }
3272   uint if_limit = 10; // Max number of dominating if's visited
3273   const TypeInt* rtn_t = nullptr;
3274 
3275   if (use_ctrl && use_ctrl != C->top()) {
3276     Node* val_ctrl = get_ctrl(val);
3277     uint val_dom_depth = dom_depth(val_ctrl);
3278     Node* pred = use_ctrl;
3279     uint if_cnt = 0;
3280     while (if_cnt < if_limit) {
3281       if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) {
3282         if_cnt++;
3283         const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred);
3284         if (if_t != nullptr) {
3285           if (rtn_t == nullptr) {
3286             rtn_t = if_t;
3287           } else {
3288             rtn_t = rtn_t->join(if_t)->is_int();
3289           }
3290         }
3291       }
3292       pred = idom(pred);
3293       if (pred == nullptr || pred == C->top()) {
3294         break;
3295       }
3296       // Stop if going beyond definition block of val
3297       if (dom_depth(pred) < val_dom_depth) {
3298         break;
3299       }
3300     }
3301   }
3302   return rtn_t;
3303 }
3304 
3305 
3306 //------------------------------dump_spec--------------------------------------
3307 // Dump special per-node info
3308 #ifndef PRODUCT
3309 void CountedLoopEndNode::dump_spec(outputStream *st) const {
3310   if( in(TestValue) != nullptr && in(TestValue)->is_Bool() ) {
3311     BoolTest bt( test_trip()); // Added this for g++.
3312 
3313     st->print("[");
3314     bt.dump_on(st);
3315     st->print("]");
3316   }
3317   st->print(" ");
3318   IfNode::dump_spec(st);
3319 }
3320 #endif
3321 
3322 //=============================================================================
3323 //------------------------------is_member--------------------------------------
3324 // Is 'l' a member of 'this'?
3325 bool IdealLoopTree::is_member(const IdealLoopTree *l) const {
3326   while( l->_nest > _nest ) l = l->_parent;
3327   return l == this;
3328 }
3329 
3330 //------------------------------set_nest---------------------------------------
3331 // Set loop tree nesting depth.  Accumulate _has_call bits.
3332 int IdealLoopTree::set_nest( uint depth ) {
3333   assert(depth <= SHRT_MAX, "sanity");
3334   _nest = depth;
3335   int bits = _has_call;
3336   if( _child ) bits |= _child->set_nest(depth+1);
3337   if( bits ) _has_call = 1;
3338   if( _next  ) bits |= _next ->set_nest(depth  );
3339   return bits;
3340 }
3341 
3342 //------------------------------split_fall_in----------------------------------
3343 // Split out multiple fall-in edges from the loop header.  Move them to a
3344 // private RegionNode before the loop.  This becomes the loop landing pad.
3345 void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) {
3346   PhaseIterGVN &igvn = phase->_igvn;
3347   uint i;
3348 
3349   // Make a new RegionNode to be the landing pad.
3350   RegionNode* landing_pad = new RegionNode(fall_in_cnt + 1);
3351   phase->set_loop(landing_pad,_parent);
3352   // If _head was an irreducible loop entry, landing_pad may now be too
3353   landing_pad->set_loop_status(_head->as_Region()->loop_status());
3354   // Gather all the fall-in control paths into the landing pad
3355   uint icnt = fall_in_cnt;
3356   uint oreq = _head->req();
3357   for( i = oreq-1; i>0; i-- )
3358     if( !phase->is_member( this, _head->in(i) ) )
3359       landing_pad->set_req(icnt--,_head->in(i));
3360 
3361   // Peel off PhiNode edges as well
3362   for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
3363     Node *oj = _head->fast_out(j);
3364     if( oj->is_Phi() ) {
3365       PhiNode* old_phi = oj->as_Phi();
3366       assert( old_phi->region() == _head, "" );
3367       igvn.hash_delete(old_phi);   // Yank from hash before hacking edges
3368       Node *p = PhiNode::make_blank(landing_pad, old_phi);
3369       uint icnt = fall_in_cnt;
3370       for( i = oreq-1; i>0; i-- ) {
3371         if( !phase->is_member( this, _head->in(i) ) ) {
3372           p->init_req(icnt--, old_phi->in(i));
3373           // Go ahead and clean out old edges from old phi
3374           old_phi->del_req(i);
3375         }
3376       }
3377       // Search for CSE's here, because ZKM.jar does a lot of
3378       // loop hackery and we need to be a little incremental
3379       // with the CSE to avoid O(N^2) node blow-up.
3380       Node *p2 = igvn.hash_find_insert(p); // Look for a CSE
3381       if( p2 ) {                // Found CSE
3382         p->destruct(&igvn);     // Recover useless new node
3383         p = p2;                 // Use old node
3384       } else {
3385         igvn.register_new_node_with_optimizer(p, old_phi);
3386       }
3387       // Make old Phi refer to new Phi.
3388       old_phi->add_req(p);
3389       // Check for the special case of making the old phi useless and
3390       // removing it.  In JavaGrande I have a case where this useless
3391       // Phi is the loop limit and prevents recognizing a CountedLoop
3392       // which in turn prevents removing an empty loop.
3393       Node *id_old_phi = old_phi->Identity(&igvn);
3394       if( id_old_phi != old_phi ) { // Found a simple identity?
3395         // Note that I cannot call 'replace_node' here, because
3396         // that will yank the edge from old_phi to the Region and
3397         // I'm mid-iteration over the Region's uses.
3398         for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
3399           Node* use = old_phi->last_out(i);
3400           igvn.rehash_node_delayed(use);
3401           uint uses_found = 0;
3402           for (uint j = 0; j < use->len(); j++) {
3403             if (use->in(j) == old_phi) {
3404               if (j < use->req()) use->set_req (j, id_old_phi);
3405               else                use->set_prec(j, id_old_phi);
3406               uses_found++;
3407             }
3408           }
3409           i -= uses_found;    // we deleted 1 or more copies of this edge
3410         }
3411       }
3412       igvn._worklist.push(old_phi);
3413     }
3414   }
3415   // Finally clean out the fall-in edges from the RegionNode
3416   for( i = oreq-1; i>0; i-- ) {
3417     if( !phase->is_member( this, _head->in(i) ) ) {
3418       _head->del_req(i);
3419     }
3420   }
3421   igvn.rehash_node_delayed(_head);
3422   // Transform landing pad
3423   igvn.register_new_node_with_optimizer(landing_pad, _head);
3424   // Insert landing pad into the header
3425   _head->add_req(landing_pad);
3426 }
3427 
3428 //------------------------------split_outer_loop-------------------------------
3429 // Split out the outermost loop from this shared header.
3430 void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
3431   PhaseIterGVN &igvn = phase->_igvn;
3432 
3433   // Find index of outermost loop; it should also be my tail.
3434   uint outer_idx = 1;
3435   while( _head->in(outer_idx) != _tail ) outer_idx++;
3436 
3437   // Make a LoopNode for the outermost loop.
3438   Node *ctl = _head->in(LoopNode::EntryControl);
3439   Node *outer = new LoopNode( ctl, _head->in(outer_idx) );
3440   outer = igvn.register_new_node_with_optimizer(outer, _head);
3441   phase->set_created_loop_node();
3442 
3443   // Outermost loop falls into '_head' loop
3444   _head->set_req(LoopNode::EntryControl, outer);
3445   _head->del_req(outer_idx);
3446   // Split all the Phis up between '_head' loop and 'outer' loop.
3447   for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
3448     Node *out = _head->fast_out(j);
3449     if( out->is_Phi() ) {
3450       PhiNode *old_phi = out->as_Phi();
3451       assert( old_phi->region() == _head, "" );
3452       Node *phi = PhiNode::make_blank(outer, old_phi);
3453       phi->init_req(LoopNode::EntryControl,    old_phi->in(LoopNode::EntryControl));
3454       phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
3455       phi = igvn.register_new_node_with_optimizer(phi, old_phi);
3456       // Make old Phi point to new Phi on the fall-in path
3457       igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
3458       old_phi->del_req(outer_idx);
3459     }
3460   }
3461 
3462   // Use the new loop head instead of the old shared one
3463   _head = outer;
3464   phase->set_loop(_head, this);
3465 }
3466 
3467 //------------------------------fix_parent-------------------------------------
3468 static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) {
3469   loop->_parent = parent;
3470   if( loop->_child ) fix_parent( loop->_child, loop   );
3471   if( loop->_next  ) fix_parent( loop->_next , parent );
3472 }
3473 
3474 //------------------------------estimate_path_freq-----------------------------
3475 static float estimate_path_freq( Node *n ) {
3476   // Try to extract some path frequency info
3477   IfNode *iff;
3478   for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests
3479     uint nop = n->Opcode();
3480     if( nop == Op_SafePoint ) {   // Skip any safepoint
3481       n = n->in(0);
3482       continue;
3483     }
3484     if( nop == Op_CatchProj ) {   // Get count from a prior call
3485       // Assume call does not always throw exceptions: means the call-site
3486       // count is also the frequency of the fall-through path.
3487       assert( n->is_CatchProj(), "" );
3488       if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index )
3489         return 0.0f;            // Assume call exception path is rare
3490       Node *call = n->in(0)->in(0)->in(0);
3491       assert( call->is_Call(), "expect a call here" );
3492       const JVMState *jvms = ((CallNode*)call)->jvms();
3493       ciMethodData* methodData = jvms->method()->method_data();
3494       if (!methodData->is_mature())  return 0.0f; // No call-site data
3495       ciProfileData* data = methodData->bci_to_data(jvms->bci());
3496       if ((data == nullptr) || !data->is_CounterData()) {
3497         // no call profile available, try call's control input
3498         n = n->in(0);
3499         continue;
3500       }
3501       return data->as_CounterData()->count()/FreqCountInvocations;
3502     }
3503     // See if there's a gating IF test
3504     Node *n_c = n->in(0);
3505     if( !n_c->is_If() ) break;       // No estimate available
3506     iff = n_c->as_If();
3507     if( iff->_fcnt != COUNT_UNKNOWN )   // Have a valid count?
3508       // Compute how much count comes on this path
3509       return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt;
3510     // Have no count info.  Skip dull uncommon-trap like branches.
3511     if( (nop == Op_IfTrue  && iff->_prob < PROB_LIKELY_MAG(5)) ||
3512         (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) )
3513       break;
3514     // Skip through never-taken branch; look for a real loop exit.
3515     n = iff->in(0);
3516   }
3517   return 0.0f;                  // No estimate available
3518 }
3519 
3520 //------------------------------merge_many_backedges---------------------------
3521 // Merge all the backedges from the shared header into a private Region.
3522 // Feed that region as the one backedge to this loop.
3523 void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) {
3524   uint i;
3525 
3526   // Scan for the top 2 hottest backedges
3527   float hotcnt = 0.0f;
3528   float warmcnt = 0.0f;
3529   uint hot_idx = 0;
3530   // Loop starts at 2 because slot 1 is the fall-in path
3531   for( i = 2; i < _head->req(); i++ ) {
3532     float cnt = estimate_path_freq(_head->in(i));
3533     if( cnt > hotcnt ) {       // Grab hottest path
3534       warmcnt = hotcnt;
3535       hotcnt = cnt;
3536       hot_idx = i;
3537     } else if( cnt > warmcnt ) { // And 2nd hottest path
3538       warmcnt = cnt;
3539     }
3540   }
3541 
3542   // See if the hottest backedge is worthy of being an inner loop
3543   // by being much hotter than the next hottest backedge.
3544   if( hotcnt <= 0.0001 ||
3545       hotcnt < 2.0*warmcnt ) hot_idx = 0;// No hot backedge
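       // For example, hotcnt = 100 with warmcnt = 10 keeps the hottest backedge as a
       // separate edge, while hotcnt = 100 with warmcnt = 60 merges it with the rest
       // (since 100 < 2 * 60).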
3546 
3547   // Peel out the backedges into a private merge point; peel
3548   // them all except optionally hot_idx.
3549   PhaseIterGVN &igvn = phase->_igvn;
3550 
3551   Node *hot_tail = nullptr;
3552   // Make a Region for the merge point
3553   Node *r = new RegionNode(1);
3554   for( i = 2; i < _head->req(); i++ ) {
3555     if( i != hot_idx )
3556       r->add_req( _head->in(i) );
3557     else hot_tail = _head->in(i);
3558   }
3559   igvn.register_new_node_with_optimizer(r, _head);
3560   // Plug region into end of loop _head, followed by hot_tail
3561   while( _head->req() > 3 ) _head->del_req( _head->req()-1 );
3562   igvn.replace_input_of(_head, 2, r);
3563   if( hot_idx ) _head->add_req(hot_tail);
3564 
3565   // Split all the Phis up between '_head' loop and the Region 'r'
3566   for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
3567     Node *out = _head->fast_out(j);
3568     if( out->is_Phi() ) {
3569       PhiNode* n = out->as_Phi();
3570       igvn.hash_delete(n);      // Delete from hash before hacking edges
3571       Node *hot_phi = nullptr;
3572       Node *phi = new PhiNode(r, n->type(), n->adr_type());
3573       // Check all inputs for the ones to peel out
3574       uint j = 1;
3575       for( uint i = 2; i < n->req(); i++ ) {
3576         if( i != hot_idx )
3577           phi->set_req( j++, n->in(i) );
3578         else hot_phi = n->in(i);
3579       }
3580       // Register the phi but do not transform until whole place transforms
3581       igvn.register_new_node_with_optimizer(phi, n);
3582       // Add the merge phi to the old Phi
3583       while( n->req() > 3 ) n->del_req( n->req()-1 );
3584       igvn.replace_input_of(n, 2, phi);
3585       if( hot_idx ) n->add_req(hot_phi);
3586     }
3587   }
3588 
3589 
3590   // Insert a new IdealLoopTree inserted below me.  Turn it into a clone
3591   // of self loop tree.  Turn self into a loop headed by _head and with
3592   // tail being the new merge point.
3593   IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail );
3594   phase->set_loop(_tail,ilt);   // Adjust tail
3595   _tail = r;                    // Self's tail is new merge point
3596   phase->set_loop(r,this);
3597   ilt->_child = _child;         // New guy has my children
3598   _child = ilt;                 // Self has new guy as only child
3599   ilt->_parent = this;          // new guy has self for parent
3600   ilt->_nest = _nest;           // Same nesting depth (for now)
3601 
3602   // Starting with 'ilt', look for child loop trees using the same shared
3603   // header.  Flatten these out; they will no longer be loops in the end.
3604   IdealLoopTree **pilt = &_child;
3605   while( ilt ) {
3606     if( ilt->_head == _head ) {
3607       uint i;
3608       for( i = 2; i < _head->req(); i++ )
3609         if( _head->in(i) == ilt->_tail )
3610           break;                // Still a loop
3611       if( i == _head->req() ) { // No longer a loop
3612         // Flatten ilt.  Hang ilt's "_next" list from the end of
3613         // ilt's '_child' list.  Move the ilt's _child up to replace ilt.
3614         IdealLoopTree **cp = &ilt->_child;
3615         while( *cp ) cp = &(*cp)->_next;   // Find end of child list
3616         *cp = ilt->_next;       // Hang next list at end of child list
3617         *pilt = ilt->_child;    // Move child up to replace ilt
3618         ilt->_head = nullptr;   // Flag as a loop UNIONED into parent
3619         ilt = ilt->_child;      // Repeat using new ilt
3620         continue;               // do not advance over ilt->_child
3621       }
3622       assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" );
3623       phase->set_loop(_head,ilt);
3624     }
3625     pilt = &ilt->_child;        // Advance to next
3626     ilt = *pilt;
3627   }
3628 
3629   if( _child ) fix_parent( _child, this );
3630 }
3631 
3632 //------------------------------beautify_loops---------------------------------
3633 // Split shared headers and insert loop landing pads.
3634 // Insert a LoopNode to replace the RegionNode.
3635 // Return TRUE if loop tree is structurally changed.
3636 bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) {
3637   bool result = false;
3638   // Cache parts in locals for easy access
3639   PhaseIterGVN &igvn = phase->_igvn;
3640 
3641   igvn.hash_delete(_head);      // Yank from hash before hacking edges
3642 
3643   // Check for multiple fall-in paths.  Peel off a landing pad if need be.
3644   int fall_in_cnt = 0;
3645   for( uint i = 1; i < _head->req(); i++ )
3646     if( !phase->is_member( this, _head->in(i) ) )
3647       fall_in_cnt++;
3648   assert( fall_in_cnt, "at least 1 fall-in path" );
3649   if( fall_in_cnt > 1 )         // Need a loop landing pad to merge fall-ins
3650     split_fall_in( phase, fall_in_cnt );
3651 
3652   // Swap inputs to the _head and all Phis to move the fall-in edge to
3653   // the left.
3654   fall_in_cnt = 1;
3655   while( phase->is_member( this, _head->in(fall_in_cnt) ) )
3656     fall_in_cnt++;
3657   if( fall_in_cnt > 1 ) {
3658     // Since I am just swapping inputs I do not need to update def-use info
3659     Node *tmp = _head->in(1);
3660     igvn.rehash_node_delayed(_head);
3661     _head->set_req( 1, _head->in(fall_in_cnt) );
3662     _head->set_req( fall_in_cnt, tmp );
3663     // Swap also all Phis
3664     for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
3665       Node* phi = _head->fast_out(i);
3666       if( phi->is_Phi() ) {
3667         igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges
3668         tmp = phi->in(1);
3669         phi->set_req( 1, phi->in(fall_in_cnt) );
3670         phi->set_req( fall_in_cnt, tmp );
3671       }
3672     }
3673   }
3674   assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" );
3675   assert(  phase->is_member( this, _head->in(2) ), "right edge is loop" );
3676 
3677   // If I am a shared header (multiple backedges), peel off the many
3678   // backedges into a private merge point and use the merge point as
3679   // the one true backedge.
3680   if (_head->req() > 3) {
3681     // Merge the many backedges into a single backedge but leave
3682     // the hottest backedge as separate edge for the following peel.
3683     if (!_irreducible) {
3684       merge_many_backedges( phase );
3685     }
3686 
3687     // When recursively beautifying my children, split_fall_in can change the
3688     // loop tree structure when I am an irreducible loop. Then the head of my
3689     // children has a req() not bigger than 3. Here we need to set result to
3690     // true to catch that case in order to tell the caller to rebuild the
3691     // loop tree. See issue JDK-8244407 for details.
3692     result = true;
3693   }
3694 
3695   // If I still have a separate hot backedge, split out the outermost loop.
3696   // I had better be the outermost loop.
3697   if (_head->req() > 3 && !_irreducible) {
3698     split_outer_loop( phase );
3699     result = true;
3700 
3701   } else if (!_head->is_Loop() && !_irreducible) {
3702     // Make a new LoopNode to replace the old loop head
3703     Node *l = new LoopNode( _head->in(1), _head->in(2) );
3704     l = igvn.register_new_node_with_optimizer(l, _head);
3705     phase->set_created_loop_node();
3706     // Go ahead and replace _head
3707     phase->_igvn.replace_node( _head, l );
3708     _head = l;
3709     phase->set_loop(_head, this);
3710   }
3711 
3712   // Now recursively beautify nested loops
3713   if( _child ) result |= _child->beautify_loops( phase );
3714   if( _next  ) result |= _next ->beautify_loops( phase );
3715   return result;
3716 }
3717 
3718 //------------------------------allpaths_check_safepts----------------------------
3719 // All-paths backwards scan: starting at the head, traverse all backedges and the body, terminating each path at the
3720 // first safepoint encountered.  Helper for check_safepts.
3721 void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) {
3722   assert(stack.size() == 0, "empty stack");
3723   stack.push(_head);
3724   visited.clear();
3725   visited.set(_head->_idx);
3726   while (stack.size() > 0) {
3727     Node* n = stack.pop();
3728     if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
3729       // Terminate this path
3730     } else if (n->Opcode() == Op_SafePoint) {
3731       if (_phase->get_loop(n) != this) {
3732         if (_required_safept == nullptr) _required_safept = new Node_List();
3733         // save the first we run into on that path: closest to the tail if the head has a single backedge
3734         _required_safept->push(n);
3735       }
3736       // Terminate this path
3737     } else {
3738       uint start = n->is_Region() ? 1 : 0;
3739       uint end   = n->is_Region() && (!n->is_Loop() || n == _head) ? n->req() : start + 1;
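           // Follow all control inputs of a Region, except that for a nested loop head
           // only the entry control is followed; for _head itself all inputs (including
           // backedges) are followed. For any other node, follow only in(0).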
3740       for (uint i = start; i < end; i++) {
3741         Node* in = n->in(i);
3742         assert(in->is_CFG(), "must be");
3743         if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) {
3744           stack.push(in);
3745         }
3746       }
3747     }
3748   }
3749 }
3750 
3751 //------------------------------check_safepts----------------------------
3752 // Given dominators, try to find loops with calls that must always be
3753 // executed (call dominates loop tail).  These loops do not need non-call
3754 // safepoints (ncsfpt).
3755 //
3756 // A complication is that a safepoint in an inner loop may be needed
3757 // by an outer loop. In the following, the inner loop sees it has a
3758 // call (block 3) on every path from the head (block 2) to the
3759 // backedge (arc 3->2).  So it deletes the ncsfpt (non-call safepoint)
3760 // in block 2, _but_ this leaves the outer loop without a safepoint.
3761 //
3762 //          entry  0
3763 //                 |
3764 //                 v
3765 // outer 1,2    +->1
3766 //              |  |
3767 //              |  v
3768 //              |  2<---+  ncsfpt in 2
3769 //              |_/|\   |
3770 //                 | v  |
3771 // inner 2,3      /  3  |  call in 3
3772 //               /   |  |
3773 //              v    +--+
3774 //        exit  4
3775 //
3776 //
3777 // This method creates, for each loop, a list (_required_safept) of ncsfpt nodes
3778 // that must be protected. When a ncsfpt may be deleted, it is first looked for
3779 // in the lists of the outer loops of the current loop.
3780 //
3781 // The insights into the problem:
3782 //  A) counted loops are okay
3783 //  B) innermost loops are okay (only an inner loop can delete
3784 //     a ncsfpt needed by an outer loop)
3785 //  C) a loop is immune from an inner loop deleting a safepoint
3786 //     if the loop has a call on the idom-path
3787 //  D) a loop is also immune if it has a ncsfpt (non-call safepoint) on the
3788 //     idom-path that is not in a nested loop
3789 //  E) otherwise, an ncsfpt on the idom-path that is nested in an inner
3790 //     loop needs to be prevented from deletion by an inner loop
3791 //
3792 // There are two analyses:
3793 //  1) The first, and cheaper one, scans the loop body from
3794 //     tail to head following the idom (immediate dominator)
3795 //     chain, looking for the cases (C,D,E) above.
3796 //     Since inner loops are scanned before outer loops, there is summary
3797 //     information about inner loops.  Inner loops can be skipped over
3798 //     when the tail of an inner loop is encountered.
3799 //
3800 //  2) The second, invoked if the first fails to find a call or ncsfpt on
3801 //     the idom path (which is rare), scans all predecessor control paths
3802 //     from the tail to the head, terminating a path when a call or sfpt
3803 //     is encountered, to find the ncsfpt's that are closest to the tail.
3804 //
3805 void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) {
3806   // Bottom up traversal
3807   IdealLoopTree* ch = _child;
3808   if (_child) _child->check_safepts(visited, stack);
3809   if (_next)  _next ->check_safepts(visited, stack);
3810 
3811   if (!_head->is_CountedLoop() && !_has_sfpt && _parent != nullptr) {
3812     bool  has_call         = false;    // call on dom-path
3813     bool  has_local_ncsfpt = false;    // ncsfpt on dom-path at this loop depth
3814     Node* nonlocal_ncsfpt  = nullptr;  // ncsfpt on dom-path at a deeper depth
3815     if (!_irreducible) {
3816       // Scan the dom-path nodes from tail to head
3817       for (Node* n = tail(); n != _head; n = _phase->idom(n)) {
3818         if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
3819           has_call = true;
3820           _has_sfpt = 1;          // Then no need for a safept!
3821           break;
3822         } else if (n->Opcode() == Op_SafePoint) {
3823           if (_phase->get_loop(n) == this) {
3824             has_local_ncsfpt = true;
3825             break;
3826           }
3827           if (nonlocal_ncsfpt == nullptr) {
3828             nonlocal_ncsfpt = n; // save the one closest to the tail
3829           }
3830         } else {
3831           IdealLoopTree* nlpt = _phase->get_loop(n);
3832           if (this != nlpt) {
3833             // If at an inner loop tail, see if the inner loop has already
3834             // recorded seeing a call on the dom-path (and stop). If not,
3835             // jump to the head of the inner loop.
3836             assert(is_member(nlpt), "nested loop");
3837             Node* tail = nlpt->_tail;
3838             if (tail->in(0)->is_If()) tail = tail->in(0);
3839             if (n == tail) {
3840               // If inner loop has call on dom-path, so does outer loop
3841               if (nlpt->_has_sfpt) {
3842                 has_call = true;
3843                 _has_sfpt = 1;
3844                 break;
3845               }
3846               // Skip to head of inner loop
3847               assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head");
3848               n = nlpt->_head;
3849               if (_head == n) {
3850                 // this and nlpt (inner loop) have the same loop head. This should not happen because
3851                 // during beautify_loops we call merge_many_backedges. However, infinite loops may not
3852                 // have been attached to the loop-tree during build_loop_tree before beautify_loops,
3853                 // but then attached by the build_loop_tree call afterwards, and so still have unmerged
3854                 // backedges. Check if we are indeed in an infinite subgraph, and terminate the scan,
3855                 // since we have reached the loop head of this.
3856                 assert(_head->as_Region()->is_in_infinite_subgraph(),
3857                        "only expect unmerged backedges in infinite loops");
3858                 break;
3859               }
3860             }
3861           }
3862         }
3863       }
3864     }
3865     // Record safept's that this loop needs preserved when an
3866     // inner loop attempts to delete its safepoints.
3867     if (_child != nullptr && !has_call && !has_local_ncsfpt) {
3868       if (nonlocal_ncsfpt != nullptr) {
3869         if (_required_safept == nullptr) _required_safept = new Node_List();
3870         _required_safept->push(nonlocal_ncsfpt);
3871       } else {
3872         // Failed to find a suitable safept on the dom-path.  Now use
3873         // an all paths walk from tail to head, looking for safepoints to preserve.
3874         allpaths_check_safepts(visited, stack);
3875       }
3876     }
3877   }
3878 }
3879 
3880 //---------------------------is_deleteable_safept----------------------------
3881 // Is the safepoint not required by an outer loop (i.e. safe to delete)?
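// For example (a hypothetical nest): given loops A { B { sfpt } }, if A has
// recorded sfpt in its _required_safept list (see check_safepts above), this
// returns false and the safepoint must be kept; if no enclosing loop lists it,
// the walk reaches the root and the safepoint may be removed.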
3882 bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) {
3883   assert(sfpt->Opcode() == Op_SafePoint, "");
3884   IdealLoopTree* lp = get_loop(sfpt)->_parent;
3885   while (lp != nullptr) {
3886     Node_List* sfpts = lp->_required_safept;
3887     if (sfpts != nullptr) {
3888       for (uint i = 0; i < sfpts->size(); i++) {
3889         if (sfpt == sfpts->at(i))
3890           return false;
3891       }
3892     }
3893     lp = lp->_parent;
3894   }
3895   return true;
3896 }
3897 
3898 //---------------------------replace_parallel_iv-------------------------------
3899 // Replace parallel induction variable (parallel to trip counter)
3900 void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
3901   assert(loop->_head->is_CountedLoop(), "");
3902   CountedLoopNode *cl = loop->_head->as_CountedLoop();
3903   if (!cl->is_valid_counted_loop(T_INT)) {
3904     return;         // skip malformed counted loop
3905   }
3906   Node *incr = cl->incr();
3907   if (incr == nullptr) {
3908     return;         // Dead loop?
3909   }
3910   Node *init = cl->init_trip();
3911   Node *phi  = cl->phi();
3912   int stride_con = cl->stride_con();
3913 
3914   // Visit all children, looking for Phis
3915   for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
3916     Node *out = cl->out(i);
3917     // Look for other phis (secondary IVs). Skip dead ones
3918     if (!out->is_Phi() || out == phi || !has_node(out)) {
3919       continue;
3920     }
3921 
3922     PhiNode* phi2 = out->as_Phi();
3923     Node* incr2 = phi2->in(LoopNode::LoopBackControl);
3924     // Look for induction variables of the form:  X += constant
3925     if (phi2->region() != loop->_head ||
3926         incr2->req() != 3 ||
3927         incr2->in(1)->uncast() != phi2 ||
3928         incr2 == incr ||
3929         incr2->Opcode() != Op_AddI ||
3930         !incr2->in(2)->is_Con()) {
3931       continue;
3932     }
3933 
3934     if (incr2->in(1)->is_ConstraintCast() &&
3935         !(incr2->in(1)->in(0)->is_IfProj() && incr2->in(1)->in(0)->in(0)->is_RangeCheck())) {
3936       // Skip AddI->CastII->Phi case if CastII is not controlled by local RangeCheck
3937       continue;
3938     }
3939     // Check for parallel induction variable (parallel to trip counter)
3940     // via an affine function.  In particular, count-down loops with
3941     // count-up array indices are common. We only RCE references off
3942     // the trip-counter, so we need to convert all these to trip-counter
3943     // expressions.
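    // For example (an illustrative sketch of a count-down loop with a
    // count-up index):
    //   for (int i = n; i > 0; i--) { a[j] = 0; j += 2; }
    // Here stride_con == -1 and stride_con2 == 2, so ratio_con == -2 below and
    // the secondary phi for j is rewritten as
    //   phi * ratio_con + (init2 - init * ratio_con)
    // i.e. an affine function of the trip counter.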
3944     Node* init2 = phi2->in(LoopNode::EntryControl);
3945     int stride_con2 = incr2->in(2)->get_int();
3946 
3947     // The ratio of the two strides cannot be represented as an int
3948     // if stride_con2 is min_int and stride_con is -1.
3949     if (stride_con2 == min_jint && stride_con == -1) {
3950       continue;
3951     }
3952 
3953     // The general case here gets a little tricky.  We want to find the
3954     // GCD of all possible parallel IV's and make a new IV using this
3955     // GCD for the loop.  Then all possible IVs are simple multiples of
3956     // the GCD.  In practice, this will cover very few extra loops.
3957     // Instead we require 'stride_con2' to be a multiple of 'stride_con',
3958     // where +/-1 is the common case, but other integer multiples are
3959     // also easy to handle.
3960     int ratio_con = stride_con2/stride_con;
3961 
3962     if ((ratio_con * stride_con) == stride_con2) { // Check for exact
3963 #ifndef PRODUCT
3964       if (TraceLoopOpts) {
3965         tty->print("Parallel IV: %d ", phi2->_idx);
3966         loop->dump_head();
3967       }
3968 #endif
3969       // Convert to using the trip counter.  The parallel induction
3970       // variable differs from the trip counter by a loop-invariant
3971       // amount, the difference between their respective initial values.
3972       // It is scaled by the 'ratio_con'.
3973       Node* ratio = _igvn.intcon(ratio_con);
3974       set_ctrl(ratio, C->root());
3975       Node* ratio_init = new MulINode(init, ratio);
3976       _igvn.register_new_node_with_optimizer(ratio_init, init);
3977       set_early_ctrl(ratio_init, false);
3978       Node* diff = new SubINode(init2, ratio_init);
3979       _igvn.register_new_node_with_optimizer(diff, init2);
3980       set_early_ctrl(diff, false);
3981       Node* ratio_idx = new MulINode(phi, ratio);
3982       _igvn.register_new_node_with_optimizer(ratio_idx, phi);
3983       set_ctrl(ratio_idx, cl);
3984       Node* add = new AddINode(ratio_idx, diff);
3985       _igvn.register_new_node_with_optimizer(add);
3986       set_ctrl(add, cl);
3987       _igvn.replace_node( phi2, add );
3988       // Sometimes an induction variable is unused
3989       if (add->outcnt() == 0) {
3990         _igvn.remove_dead_node(add);
3991       }
3992       --i; // deleted this phi; rescan starting with next position
3993       continue;
3994     }
3995   }
3996 }
3997 
3998 void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) {
3999   Node* keep = nullptr;
4000   if (keep_one) {
4001     // Look for a safepoint on the idom-path.
4002     for (Node* i = tail(); i != _head; i = phase->idom(i)) {
4003       if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) {
4004         keep = i;
4005         break; // Found one
4006       }
4007     }
4008   }
4009 
4010   // Don't remove any safepoints if it is requested to keep a single safepoint and
4011   // no safepoint was found on idom-path. It is not safe to remove any safepoint
4012   // in this case since there's no safepoint dominating all paths in the loop body.
4013   bool prune = !keep_one || keep != nullptr;
4014 
4015   // Delete other safepoints in this loop.
4016   Node_List* sfpts = _safepts;
4017   if (prune && sfpts != nullptr) {
4018     assert(keep == nullptr || keep->Opcode() == Op_SafePoint, "not safepoint");
4019     for (uint i = 0; i < sfpts->size(); i++) {
4020       Node* n = sfpts->at(i);
4021       assert(phase->get_loop(n) == this, "");
4022       if (n != keep && phase->is_deleteable_safept(n)) {
4023         phase->lazy_replace(n, n->in(TypeFunc::Control));
4024       }
4025     }
4026   }
4027 }
4028 
4029 //------------------------------counted_loop-----------------------------------
4030 // Convert to counted loops where possible
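// Informally, a convertible loop has the canonical shape
//   for (int i = init; i < limit; i += stride) { ... }
// with an int (or, below, long) trip counter; is_counted_loop() then replaces
// the generic LoopNode and its exit test with a CountedLoopNode /
// CountedLoopEndNode pair.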
4031 void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
4032 
4033   // For grins, set the inner-loop flag here
4034   if (!_child) {
4035     if (_head->is_Loop()) _head->as_Loop()->set_inner_loop();
4036   }
4037 
4038   IdealLoopTree* loop = this;
4039   if (_head->is_CountedLoop() ||
4040       phase->is_counted_loop(_head, loop, T_INT)) {
4041 
4042     if (LoopStripMiningIter == 0 || _head->as_CountedLoop()->is_strip_mined()) {
4043       // Indicate we do not need a safepoint here
4044       _has_sfpt = 1;
4045     }
4046 
4047     // Remove safepoints
4048     bool keep_one_sfpt = !(_has_call || _has_sfpt);
4049     remove_safepoints(phase, keep_one_sfpt);
4050 
4051     // Look for induction variables
4052     phase->replace_parallel_iv(this);
4053   } else if (_head->is_LongCountedLoop() ||
4054              phase->is_counted_loop(_head, loop, T_LONG)) {
4055     remove_safepoints(phase, true);
4056   } else {
4057     assert(!_head->is_Loop() || !_head->as_Loop()->is_loop_nest_inner_loop(), "transformation to counted loop should not fail");
4058     if (_parent != nullptr && !_irreducible) {
4059       // Not a counted loop. Keep one safepoint.
4060       bool keep_one_sfpt = true;
4061       remove_safepoints(phase, keep_one_sfpt);
4062     }
4063   }
4064 
4065   // Recursively visit child and sibling loops
4066   assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?");
4067   assert(loop->_child != this || (loop->_child->_child == nullptr && loop->_child->_next == nullptr), "would miss some loops");
4068   if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase);
4069   if (loop->_next)  loop->_next ->counted_loop(phase);
4070 }
4071 
4072 
4073 // The Estimated Loop Clone Size:
4074 //   CloneFactor * (~150% * BodySize + BC) + CC + FanOutTerm,
4075 // where  BC and  CC are  totally ad-hoc/magic  "body" and "clone" constants,
4076 // respectively, used to ensure that the node usage estimates made are on the
4077 // safe side, for the most part. The FanOutTerm is an attempt to estimate the
4078 // possible additional/excessive nodes generated due to data and control flow
4079 // merging, for edges reaching outside the loop.
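// For example (hypothetical numbers, just to show the arithmetic): with
// _body.size() == 100 and factor == 2,
//   sz       = 100 + (100 + 7) / 2  = 153
//   estimate = 2 * (153 + 13) + 17  = 349   (+ est_loop_flow_merge_sz())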
4080 uint IdealLoopTree::est_loop_clone_sz(uint factor) const {
4081 
4082   precond(0 < factor && factor < 16);
4083 
4084   uint const bc = 13;
4085   uint const cc = 17;
4086   uint const sz = _body.size() + (_body.size() + 7) / 2;
4087   uint estimate = factor * (sz + bc) + cc;
4088 
4089   assert((estimate - cc) / factor == sz + bc, "overflow");
4090 
4091   return estimate + est_loop_flow_merge_sz();
4092 }
4093 
4094 // The Estimated Loop (full-) Unroll Size:
4095 //   UnrollFactor * (~106% * BodySize) + CC + FanOutTerm,
4096 // where CC is a (totally) ad-hoc/magic "clone" constant, used to ensure that
4097 // node usage estimates made are on the safe side, for the most part. This is
4098 // a "light" version of the loop clone size calculation (above), based on the
4099 // assumption that most of the loop-construct overhead will be unraveled when
4100 // (fully) unrolled. Defined for unroll factors larger or equal to one (>=1),
4101 // including an overflow check and returning UINT_MAX in case of an overflow.
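// For example (hypothetical numbers): with b0 == 96 and factor == 4,
//   sz       = 96 + (96 + 15) / 16  = 102
//   estimate = 4 * 102 + 7          = 415   (+ est_loop_flow_merge_sz())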
4102 uint IdealLoopTree::est_loop_unroll_sz(uint factor) const {
4103 
4104   precond(factor > 0);
4105 
4106   // Take into account that after unroll conjoined heads and tails will fold.
4107   uint const b0 = _body.size() - EMPTY_LOOP_SIZE;
4108   uint const cc = 7;
4109   uint const sz = b0 + (b0 + 15) / 16;
4110   uint estimate = factor * sz + cc;
4111 
4112   if ((estimate - cc) / factor != sz) {
4113     return UINT_MAX;
4114   }
4115 
4116   return estimate + est_loop_flow_merge_sz();
4117 }
4118 
4119 // Estimate the growth effect (in nodes) of merging control and data flow when
4120 // cloning a loop body, based on the amount of  control and data flow reaching
4121 // outside of the (current) loop body.
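// For example, if 3 control edges and 5 data edges reach out of the loop body,
// the term evaluates to 2 * (3 + 5) = 16; if either count is zero it is 0.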
4122 uint IdealLoopTree::est_loop_flow_merge_sz() const {
4123 
4124   uint ctrl_edge_out_cnt = 0;
4125   uint data_edge_out_cnt = 0;
4126 
4127   for (uint i = 0; i < _body.size(); i++) {
4128     Node* node = _body.at(i);
4129     uint outcnt = node->outcnt();
4130 
4131     for (uint k = 0; k < outcnt; k++) {
4132       Node* out = node->raw_out(k);
4133       if (out == nullptr) continue;
4134       if (out->is_CFG()) {
4135         if (!is_member(_phase->get_loop(out))) {
4136           ctrl_edge_out_cnt++;
4137         }
4138       } else if (_phase->has_ctrl(out)) {
4139         Node* ctrl = _phase->get_ctrl(out);
4140         assert(ctrl != nullptr, "must be");
4141         assert(ctrl->is_CFG(), "must be");
4142         if (!is_member(_phase->get_loop(ctrl))) {
4143           data_edge_out_cnt++;
4144         }
4145       }
4146     }
4147   }
4148   // Use data and control count (x2.0) in estimate iff both are > 0. This is
4149   // a rather pessimistic estimate for the most part, in particular for some
4150   // complex loops, but still not enough to capture all loops.
4151   if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) {
4152     return 2 * (ctrl_edge_out_cnt + data_edge_out_cnt);
4153   }
4154   return 0;
4155 }
4156 
4157 #ifndef PRODUCT
4158 //------------------------------dump_head--------------------------------------
4159 // Dump 1 liner for loop header info
4160 void IdealLoopTree::dump_head() {
4161   tty->sp(2 * _nest);
4162   tty->print("Loop: N%d/N%d ", _head->_idx, _tail->_idx);
4163   if (_irreducible) tty->print(" IRREDUCIBLE");
4164   Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl)
4165                                  : _head->in(LoopNode::EntryControl);
4166   const Predicates predicates(entry);
4167   if (predicates.loop_limit_check_predicate_block()->is_non_empty()) {
4168     tty->print(" limit_check");
4169   }
4170   if (UseProfiledLoopPredicate && predicates.profiled_loop_predicate_block()->is_non_empty()) {
4171     tty->print(" profile_predicated");
4172   }
4173   if (UseLoopPredicate && predicates.loop_predicate_block()->is_non_empty()) {
4174     tty->print(" predicated");
4175   }
4176   if (_head->is_CountedLoop()) {
4177     CountedLoopNode *cl = _head->as_CountedLoop();
4178     tty->print(" counted");
4179 
4180     Node* init_n = cl->init_trip();
4181     if (init_n  != nullptr &&  init_n->is_Con())
4182       tty->print(" [%d,", cl->init_trip()->get_int());
4183     else
4184       tty->print(" [int,");
4185     Node* limit_n = cl->limit();
4186     if (limit_n  != nullptr &&  limit_n->is_Con())
4187       tty->print("%d),", cl->limit()->get_int());
4188     else
4189       tty->print("int),");
4190     int stride_con  = cl->stride_con();
4191     if (stride_con > 0) tty->print("+");
4192     tty->print("%d", stride_con);
4193 
4194     tty->print(" (%0.f iters) ", cl->profile_trip_cnt());
4195 
4196     if (cl->is_pre_loop ()) tty->print(" pre" );
4197     if (cl->is_main_loop()) tty->print(" main");
4198     if (cl->is_post_loop()) tty->print(" post");
4199     if (cl->is_vectorized_loop()) tty->print(" vector");
4200     if (range_checks_present()) tty->print(" rc ");
4201   }
4202   if (_has_call) tty->print(" has_call");
4203   if (_has_sfpt) tty->print(" has_sfpt");
4204   if (_rce_candidate) tty->print(" rce");
4205   if (_safepts != nullptr && _safepts->size() > 0) {
4206     tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }");
4207   }
4208   if (_required_safept != nullptr && _required_safept->size() > 0) {
4209     tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }");
4210   }
4211   if (Verbose) {
4212     tty->print(" body={"); _body.dump_simple(); tty->print(" }");
4213   }
4214   if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) {
4215     tty->print(" strip_mined");
4216   }
4217   tty->cr();
4218 }
4219 
4220 //------------------------------dump-------------------------------------------
4221 // Dump loops by loop tree
4222 void IdealLoopTree::dump() {
4223   dump_head();
4224   if (_child) _child->dump();
4225   if (_next)  _next ->dump();
4226 }
4227 
4228 #endif
4229 
4230 static void log_loop_tree_helper(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) {
4231   if (loop == root) {
4232     if (loop->_child != nullptr) {
4233       log->begin_head("loop_tree");
4234       log->end_head();
4235       log_loop_tree_helper(root, loop->_child, log);
4236       log->tail("loop_tree");
4237       assert(loop->_next == nullptr, "what?");
4238     }
4239   } else if (loop != nullptr) {
4240     Node* head = loop->_head;
4241     log->begin_head("loop");
4242     log->print(" idx='%d' ", head->_idx);
4243     if (loop->_irreducible) log->print("irreducible='1' ");
4244     if (head->is_Loop()) {
4245       if (head->as_Loop()->is_inner_loop())        log->print("inner_loop='1' ");
4246       if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' ");
4247     } else if (head->is_CountedLoop()) {
4248       CountedLoopNode* cl = head->as_CountedLoop();
4249       if (cl->is_pre_loop())  log->print("pre_loop='%d' ",  cl->main_idx());
4250       if (cl->is_main_loop()) log->print("main_loop='%d' ", cl->_idx);
4251       if (cl->is_post_loop()) log->print("post_loop='%d' ", cl->main_idx());
4252     }
4253     log->end_head();
4254     log_loop_tree_helper(root, loop->_child, log);
4255     log->tail("loop");
4256     log_loop_tree_helper(root, loop->_next, log);
4257   }
4258 }
4259 
4260 void PhaseIdealLoop::log_loop_tree() {
4261   if (C->log() != nullptr) {
4262     log_loop_tree_helper(_ltree_root, _ltree_root, C->log());
4263   }
4264 }
4265 
4266 // Eliminate all Parse and Template Assertion Predicates that are not associated with a loop anymore. The eliminated
4267 // predicates will be removed during the next round of IGVN.
4268 void PhaseIdealLoop::eliminate_useless_predicates() {
4269   if (C->parse_predicate_count() == 0 && C->template_assertion_predicate_count() == 0) {
4270     return; // No predicates left.
4271   }
4272 
4273   eliminate_useless_parse_predicates();
4274   eliminate_useless_template_assertion_predicates();
4275 }
4276 
4277 // Eliminate all Parse Predicates that do not belong to a loop anymore by marking them useless. These will be removed
4278 // during the next round of IGVN.
4279 void PhaseIdealLoop::eliminate_useless_parse_predicates() {
4280   mark_all_parse_predicates_useless();
4281   if (C->has_loops()) {
4282     mark_loop_associated_parse_predicates_useful();
4283   }
4284   add_useless_parse_predicates_to_igvn_worklist();
4285 }
4286 
4287 void PhaseIdealLoop::mark_all_parse_predicates_useless() const {
4288   for (int i = 0; i < C->parse_predicate_count(); i++) {
4289     C->parse_predicate(i)->mark_useless();
4290   }
4291 }
4292 
4293 void PhaseIdealLoop::mark_loop_associated_parse_predicates_useful() {
4294   for (LoopTreeIterator iterator(_ltree_root); !iterator.done(); iterator.next()) {
4295     IdealLoopTree* loop = iterator.current();
4296     if (loop->can_apply_loop_predication()) {
4297       mark_useful_parse_predicates_for_loop(loop);
4298     }
4299   }
4300 }
4301 
4302 void PhaseIdealLoop::mark_useful_parse_predicates_for_loop(IdealLoopTree* loop) {
4303   Node* entry = loop->_head->in(LoopNode::EntryControl);
4304   const Predicates predicates(entry);
4305   ParsePredicateIterator iterator(predicates);
4306   while (iterator.has_next()) {
4307     iterator.next()->mark_useful();
4308   }
4309 }
4310 
4311 void PhaseIdealLoop::add_useless_parse_predicates_to_igvn_worklist() {
4312   for (int i = 0; i < C->parse_predicate_count(); i++) {
4313     ParsePredicateNode* parse_predicate_node = C->parse_predicate(i);
4314     if (parse_predicate_node->is_useless()) {
4315       _igvn._worklist.push(parse_predicate_node);
4316     }
4317   }
4318 }
4319 
4320 
4321 // Eliminate all Template Assertion Predicates that do not belong to their originally associated loop anymore by
4322 // replacing the Opaque4 node of the If node with true. These nodes will be removed during the next round of IGVN.
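// Note: the second input of such an Opaque4 node is the constant 'true', so
// replace_node(opaque4, opaque4->in(2)) below lets the guarding If constant
// fold away in the next round of IGVN.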
4323 void PhaseIdealLoop::eliminate_useless_template_assertion_predicates() {
4324   Unique_Node_List useful_predicates;
4325   if (C->has_loops()) {
4326     collect_useful_template_assertion_predicates(useful_predicates);
4327   }
4328   eliminate_useless_template_assertion_predicates(useful_predicates);
4329 }
4330 
4331 void PhaseIdealLoop::collect_useful_template_assertion_predicates(Unique_Node_List& useful_predicates) {
4332   for (LoopTreeIterator iterator(_ltree_root); !iterator.done(); iterator.next()) {
4333     IdealLoopTree* loop = iterator.current();
4334     if (loop->can_apply_loop_predication()) {
4335       collect_useful_template_assertion_predicates_for_loop(loop, useful_predicates);
4336     }
4337   }
4338 }
4339 
4340 void PhaseIdealLoop::collect_useful_template_assertion_predicates_for_loop(IdealLoopTree* loop,
4341                                                                            Unique_Node_List &useful_predicates) {
4342   Node* entry = loop->_head->in(LoopNode::EntryControl);
4343   const Predicates predicates(entry);
4344   if (UseProfiledLoopPredicate) {
4345     const PredicateBlock* profiled_loop_predicate_block = predicates.profiled_loop_predicate_block();
4346     if (profiled_loop_predicate_block->has_parse_predicate()) {
4347       IfProjNode* parse_predicate_proj = profiled_loop_predicate_block->parse_predicate_success_proj();
4348       get_assertion_predicates(parse_predicate_proj, useful_predicates, true);
4349     }
4350   }
4351 
4352   if (UseLoopPredicate) {
4353     const PredicateBlock* loop_predicate_block = predicates.loop_predicate_block();
4354     if (loop_predicate_block->has_parse_predicate()) {
4355       IfProjNode* parse_predicate_proj = loop_predicate_block->parse_predicate_success_proj();
4356       get_assertion_predicates(parse_predicate_proj, useful_predicates, true);
4357     }
4358   }
4359 }
4360 
4361 void PhaseIdealLoop::eliminate_useless_template_assertion_predicates(Unique_Node_List& useful_predicates) {
4362   for (int i = 0; i < C->template_assertion_predicate_count(); i++) {
4363     Node* opaque4 = C->template_assertion_predicate_opaq_node(i);
4364     assert(opaque4->Opcode() == Op_Opaque4, "must be");
4365     if (!useful_predicates.member(opaque4)) { // not in the useful list
4366       _igvn.replace_node(opaque4, opaque4->in(2));
4367     }
4368   }
4369 }
4370 
4371 // If a post or main loop is removed due to an assertion predicate, the opaque node that guards the loop is no longer needed.
4372 void PhaseIdealLoop::eliminate_useless_zero_trip_guard() {
4373   if (_zero_trip_guard_opaque_nodes.size() == 0) {
4374     return;
4375   }
4376   Unique_Node_List useful_zero_trip_guard_opaques_nodes;
4377   for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4378     IdealLoopTree* lpt = iter.current();
4379     if (lpt->_child == nullptr && lpt->is_counted()) {
4380       CountedLoopNode* head = lpt->_head->as_CountedLoop();
4381       Node* opaque = head->is_canonical_loop_entry();
4382       if (opaque != nullptr) {
4383         useful_zero_trip_guard_opaques_nodes.push(opaque);
4384       }
4385     }
4386   }
4387   for (uint i = 0; i < _zero_trip_guard_opaque_nodes.size(); ++i) {
4388     OpaqueZeroTripGuardNode* opaque = ((OpaqueZeroTripGuardNode*)_zero_trip_guard_opaque_nodes.at(i));
4389     DEBUG_ONLY(CountedLoopNode* guarded_loop = opaque->guarded_loop());
4390     if (!useful_zero_trip_guard_opaques_nodes.member(opaque)) {
4391       IfNode* iff = opaque->if_node();
4392       IdealLoopTree* loop = get_loop(iff);
4393       while (loop != _ltree_root && loop != nullptr) {
4394         loop = loop->_parent;
4395       }
4396       if (loop == nullptr) {
4397         // unreachable from _ltree_root: zero trip guard is in a newly discovered infinite loop.
4398         // We can't tell if the opaque node is useful or not
4399         assert(guarded_loop == nullptr || guarded_loop->is_in_infinite_subgraph(), "");
4400       } else {
4401         assert(guarded_loop == nullptr, "");
4402         this->_igvn.replace_node(opaque, opaque->in(1));
4403       }
4404     } else {
4405       assert(guarded_loop != nullptr, "");
4406     }
4407   }
4408 }
4409 
4410 //------------------------process_expensive_nodes-----------------------------
4411 // Expensive nodes have their control input set to prevent the GVN
4412 // from commoning them and as a result forcing the resulting node to
4413 // be in a more frequent path. Use CFG information here, to change the
4414 // control inputs so that some expensive nodes can be commoned while
4415 // not executed more frequently.
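// For example (an illustrative shape): if the two branches of the same If each
// compute an identical expensive node, each copy is initially pinned under its
// own projection. The pass below notices that both controls are projections of
// the same If and re-pins both nodes to idom(If), after which IGVN can common
// them on the dominating path.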
4416 bool PhaseIdealLoop::process_expensive_nodes() {
4417   assert(OptimizeExpensiveOps, "optimization off?");
4418 
4419   // Sort nodes to bring similar nodes together
4420   C->sort_expensive_nodes();
4421 
4422   bool progress = false;
4423 
4424   for (int i = 0; i < C->expensive_count(); ) {
4425     Node* n = C->expensive_node(i);
4426     int start = i;
4427     // Find nodes similar to n
4428     i++;
4429     for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++);
4430     int end = i;
4431     // And compare them two by two
4432     for (int j = start; j < end; j++) {
4433       Node* n1 = C->expensive_node(j);
4434       if (is_node_unreachable(n1)) {
4435         continue;
4436       }
4437       for (int k = j+1; k < end; k++) {
4438         Node* n2 = C->expensive_node(k);
4439         if (is_node_unreachable(n2)) {
4440           continue;
4441         }
4442 
4443         assert(n1 != n2, "should be pair of nodes");
4444 
4445         Node* c1 = n1->in(0);
4446         Node* c2 = n2->in(0);
4447 
4448         Node* parent_c1 = c1;
4449         Node* parent_c2 = c2;
4450 
4451         // The call to get_early_ctrl_for_expensive() moves the
4452         // expensive nodes up but stops at loops that are in an if
4453         // branch. See whether we can exit the loop and move above the
4454         // If.
4455         if (c1->is_Loop()) {
4456           parent_c1 = c1->in(1);
4457         }
4458         if (c2->is_Loop()) {
4459           parent_c2 = c2->in(1);
4460         }
4461 
4462         if (parent_c1 == parent_c2) {
4463           _igvn._worklist.push(n1);
4464           _igvn._worklist.push(n2);
4465           continue;
4466         }
4467 
4468         // Look for identical expensive node up the dominator chain.
4469         if (is_dominator(c1, c2)) {
4470           c2 = c1;
4471         } else if (is_dominator(c2, c1)) {
4472           c1 = c2;
4473         } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() &&
4474                    parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) {
4475           // Both branches have the same expensive node so move it up
4476           // before the if.
4477           c1 = c2 = idom(parent_c1->in(0));
4478         }
4479         // Do the actual moves
4480         if (n1->in(0) != c1) {
4481           _igvn.replace_input_of(n1, 0, c1);
4482           progress = true;
4483         }
4484         if (n2->in(0) != c2) {
4485           _igvn.replace_input_of(n2, 0, c2);
4486           progress = true;
4487         }
4488       }
4489     }
4490   }
4491 
4492   return progress;
4493 }
4494 
4495 #ifdef ASSERT
4496 // Goes over all children of the root of the loop tree. Check if any of them have a path
4497 // down to Root, that does not go via a NeverBranch exit.
4498 bool PhaseIdealLoop::only_has_infinite_loops() {
4499   ResourceMark rm;
4500   Unique_Node_List worklist;
4501   // start traversal at all loop heads of first-level loops
4502   for (IdealLoopTree* l = _ltree_root->_child; l != nullptr; l = l->_next) {
4503     Node* head = l->_head;
4504     assert(head->is_Region(), "");
4505     worklist.push(head);
4506   }
4507   return RegionNode::are_all_nodes_in_infinite_subgraph(worklist);
4508 }
4509 #endif
4510 
4511 
4512 //=============================================================================
4513 //----------------------------build_and_optimize-------------------------------
4514 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
4515 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
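// Roughly, the steps below are: build the loop tree, optionally beautify it
// and rebuild, compute dominators, decide which safepoints must be preserved
// (check_safepts), place data nodes early (build_loop_early), convert counted
// loops, place data nodes late (build_loop_late), and finally run the loop
// transformations selected by the various flags and _mode checks.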
4516 void PhaseIdealLoop::build_and_optimize() {
4517   assert(!C->post_loop_opts_phase(), "no loop opts allowed");
4518 
4519   bool do_split_ifs = (_mode == LoopOptsDefault);
4520   bool skip_loop_opts = (_mode == LoopOptsNone);
4521   bool do_max_unroll = (_mode == LoopOptsMaxUnroll);
4522 
4523 
4524   int old_progress = C->major_progress();
4525   uint orig_worklist_size = _igvn._worklist.size();
4526 
4527   // Reset major-progress flag for the driver's heuristics
4528   C->clear_major_progress();
4529 
4530 #ifndef PRODUCT
4531   // Capture for later assert
4532   uint unique = C->unique();
4533   _loop_invokes++;
4534   _loop_work += unique;
4535 #endif
4536 
4537   // True if the method has at least 1 irreducible loop
4538   _has_irreducible_loops = false;
4539 
4540   _created_loop_node = false;
4541 
4542   VectorSet visited;
4543   // Pre-grow the mapping from Nodes to IdealLoopTrees.
4544   _loop_or_ctrl.map(C->unique(), nullptr);
4545   memset(_loop_or_ctrl.adr(), 0, wordSize * C->unique());
4546 
4547   // Pre-build the top-level outermost loop tree entry
4548   _ltree_root = new IdealLoopTree( this, C->root(), C->root() );
4549   // Do not need a safepoint at the top level
4550   _ltree_root->_has_sfpt = 1;
4551 
4552   // Initialize Dominators.
4553   // Checked in clone_loop_predicate() during beautify_loops().
4554   _idom_size = 0;
4555   _idom      = nullptr;
4556   _dom_depth = nullptr;
4557   _dom_stk   = nullptr;
4558 
4559   // Empty pre-order array
4560   allocate_preorders();
4561 
4562   // Build a loop tree on the fly.  Build a mapping from CFG nodes to
4563   // IdealLoopTree entries.  Data nodes are NOT walked.
4564   build_loop_tree();
4565   // Check for bailout, and return
4566   if (C->failing()) {
4567     return;
4568   }
4569 
4570   // Verify that the has_loops() flag set at parse time is consistent
4571   // with the just built loop tree. With infinite loops, it could be
4572   // that one pass of loop opts only finds infinite loops, clears the
4573   // has_loops() flag but adds NeverBranch nodes so the next loop opts
4574   // verification pass finds a non empty loop tree. When the back edge
4575   // is an exception edge, parsing doesn't set has_loops().
4576   assert(_ltree_root->_child == nullptr || C->has_loops() || only_has_infinite_loops() || C->has_exception_backedge(), "parsing found no loops but there are some");
4577   // No loops after all
4578   if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
4579 
4580   // There should always be an outer loop containing the Root and Return nodes.
4581   // If not, we have a degenerate empty program.  Bail out in this case.
4582   if (!has_node(C->root())) {
4583     if (!_verify_only) {
4584       C->clear_major_progress();
4585       assert(false, "empty program detected during loop optimization");
4586       C->record_method_not_compilable("empty program detected during loop optimization");
4587     }
4588     return;
4589   }
4590 
4591   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4592   // Nothing to do, so get out
4593   bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !do_max_unroll && !_verify_me &&
4594           !_verify_only && !bs->is_gc_specific_loop_opts_pass(_mode);
4595   bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
4596   bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(_mode);
4597   if (stop_early && !do_expensive_nodes) {
4598     return;
4599   }
4600 
4601   // Set loop nesting depth
4602   _ltree_root->set_nest( 0 );
4603 
4604   // Split shared headers and insert loop landing pads.
4605   // Do not bother doing this on the Root loop of course.
4606   if( !_verify_me && !_verify_only && _ltree_root->_child ) {
4607     C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
4608     if( _ltree_root->_child->beautify_loops( this ) ) {
4609       // Re-build loop tree!
4610       _ltree_root->_child = nullptr;
4611       _loop_or_ctrl.clear();
4612       reallocate_preorders();
4613       build_loop_tree();
4614       // Check for bailout, and return
4615       if (C->failing()) {
4616         return;
4617       }
4618       // Reset loop nesting depth
4619       _ltree_root->set_nest( 0 );
4620 
4621       C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
4622     }
4623   }
4624 
4625   // Build Dominators for elision of null checks & loop finding.
4626   // Since nodes do not have a slot for immediate dominator, make
4627   // a persistent side array for that info indexed on node->_idx.
4628   _idom_size = C->unique();
4629   _idom      = NEW_RESOURCE_ARRAY( Node*, _idom_size );
4630   _dom_depth = NEW_RESOURCE_ARRAY( uint,  _idom_size );
4631   _dom_stk   = nullptr; // Allocated on demand in recompute_dom_depth
4632   memset( _dom_depth, 0, _idom_size * sizeof(uint) );
4633 
4634   Dominators();
4635 
4636   if (!_verify_only) {
4637     // As a side effect, Dominators removed any unreachable CFG paths
4638     // into RegionNodes.  It doesn't do this test against Root, so
4639     // we do it here.
4640     for( uint i = 1; i < C->root()->req(); i++ ) {
4641       if (!_loop_or_ctrl[C->root()->in(i)->_idx]) { // Dead path into Root?
4642         _igvn.delete_input_of(C->root(), i);
4643         i--;                      // Rerun same iteration on compressed edges
4644       }
4645     }
4646 
4647     // Given dominators, try to find inner loops with calls that must
4648     // always be executed (call dominates loop tail).  These loops do
4649     // not need a separate safepoint.
4650     Node_List cisstack;
4651     _ltree_root->check_safepts(visited, cisstack);
4652   }
4653 
4654   // Walk the DATA nodes and place into loops.  Find earliest control
4655   // node.  For CFG nodes, the _loop_or_ctrl array starts out and remains
4656   // holding the associated IdealLoopTree pointer.  For DATA nodes, the
4657   // _loop_or_ctrl array holds the earliest legal controlling CFG node.
4658 
4659   // Allocate stack with enough space to avoid frequent realloc
4660   int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
4661   Node_Stack nstack(stack_size);
4662 
4663   visited.clear();
4664   Node_List worklist;
4665   // Don't need C->root() on worklist since
4666   // it will be processed among C->top() inputs
4667   worklist.push(C->top());
4668   visited.set(C->top()->_idx); // Set C->top() as visited now
4669   build_loop_early( visited, worklist, nstack );
4670 
4671   // Given early legal placement, try finding counted loops.  This placement
4672   // is good enough to discover most loop invariants.
4673   if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) {
4674     _ltree_root->counted_loop( this );
4675   }
4676 
4677   // Find latest loop placement.  Find ideal loop placement.
4678   visited.clear();
4679   init_dom_lca_tags();
4680   // Need C->root() on worklist when processing outs
4681   worklist.push(C->root());
4682   NOT_PRODUCT( C->verify_graph_edges(); )
4683   worklist.push(C->top());
4684   build_loop_late( visited, worklist, nstack );
4685   if (C->failing()) { return; }
4686 
4687   if (_verify_only) {
4688     C->restore_major_progress(old_progress);
4689     assert(C->unique() == unique, "verification _mode made Nodes? ? ?");
4690     assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
4691     return;
4692   }
4693 
4694   // clear out the dead code after build_loop_late
4695   while (_deadlist.size()) {
4696     _igvn.remove_globally_dead_node(_deadlist.pop());
4697   }
4698 
4699   eliminate_useless_zero_trip_guard();
4700 
4701   if (stop_early) {
4702     assert(do_expensive_nodes, "why are we here?");
4703     if (process_expensive_nodes()) {
4704       // If we made some progress when processing expensive nodes then
4705       // the IGVN may modify the graph in a way that will allow us to
4706       // make some more progress: we need to try processing expensive
4707       // nodes again.
4708       C->set_major_progress();
4709     }
4710     return;
4711   }
4712 
4713   // Some parser-inserted loop predicates could never be used by loop
4714   // predication or they were moved away from loop during some optimizations.
4715   // For example, peeling. Eliminate them before next loop optimizations.
4716   eliminate_useless_predicates();
4717 
4718 #ifndef PRODUCT
4719   C->verify_graph_edges();
4720   if (_verify_me) {             // Nested verify pass?
4721     // Check to see if the verify _mode is broken
4722     assert(C->unique() == unique, "non-optimize _mode made Nodes? ? ?");
4723     return;
4724   }
4725   DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
4726   if (TraceLoopOpts && C->has_loops()) {
4727     _ltree_root->dump();
4728   }
4729 #endif
4730 
4731   if (skip_loop_opts) {
4732     C->restore_major_progress(old_progress);
4733     return;
4734   }
4735 
4736   if (do_max_unroll) {
4737     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4738       IdealLoopTree* lpt = iter.current();
4739       if (lpt->is_innermost() && lpt->_allow_optimizations && !lpt->_has_call && lpt->is_counted()) {
4740         lpt->compute_trip_count(this);
4741         if (!lpt->do_one_iteration_loop(this) &&
4742             !lpt->do_remove_empty_loop(this)) {
4743           AutoNodeBudget node_budget(this);
4744           if (lpt->_head->as_CountedLoop()->is_normal_loop() &&
4745               lpt->policy_maximally_unroll(this)) {
4746             memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) );
4747             do_maximally_unroll(lpt, worklist);
4748           }
4749         }
4750       }
4751     }
4752 
4753     C->restore_major_progress(old_progress);
4754     return;
4755   }
4756 
4757   if (bs->optimize_loops(this, _mode, visited, nstack, worklist)) {
4758     return;
4759   }
4760 
4761   if (ReassociateInvariants && !C->major_progress()) {
4762     // Reassociate invariants and prep for split_thru_phi
4763     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4764       IdealLoopTree* lpt = iter.current();
4765       if (!lpt->is_loop()) {
4766         continue;
4767       }
4768       Node* head = lpt->_head;
4769       if (!head->is_BaseCountedLoop() || !lpt->is_innermost()) continue;
4770 
4771       // check for vectorized loops, any reassociation of invariants was already done
4772       if (head->is_CountedLoop()) {
4773         if (head->as_CountedLoop()->is_unroll_only()) {
4774           continue;
4775         } else {
4776           AutoNodeBudget node_budget(this);
4777           lpt->reassociate_invariants(this);
4778         }
4779       }
4780       // Because RCE opportunities can be masked by split_thru_phi,
4781       // look for RCE candidates and inhibit split_thru_phi
4782       // on just their loop-phi's for this pass of loop opts
4783       if (SplitIfBlocks && do_split_ifs &&
4784           head->as_BaseCountedLoop()->is_valid_counted_loop(head->as_BaseCountedLoop()->bt()) &&
4785           (lpt->policy_range_check(this, true, T_LONG) ||
4786            (head->is_CountedLoop() && lpt->policy_range_check(this, true, T_INT)))) {
4787         lpt->_rce_candidate = 1; // = true
4788       }
4789     }
4790   }
4791 
4792   // Check for aggressive application of split-if and other transforms
4793   // that require basic-block info (like cloning through Phi's)
4794   if (!C->major_progress() && SplitIfBlocks && do_split_ifs) {
4795     visited.clear();
4796     split_if_with_blocks( visited, nstack);
4797     DEBUG_ONLY( if (VerifyLoopOptimizations) { verify(); } );
4798   }
4799 
4800   if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
4801     C->set_major_progress();
4802   }
4803 
4804   // Perform loop predication before iteration splitting
4805   if (UseLoopPredicate && C->has_loops() && !C->major_progress() && (C->parse_predicate_count() > 0)) {
4806     _ltree_root->_child->loop_predication(this);
4807   }
4808 
4809   if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) {
4810     if (do_intrinsify_fill()) {
4811       C->set_major_progress();
4812     }
4813   }
4814 
4815   // Perform iteration-splitting on inner loops.  Split iterations to avoid
4816   // range checks or one-shot null checks.
4817 
4818   // If split-if's didn't hack the graph too bad (no CFG changes)
4819   // then do loop opts.
4820   if (C->has_loops() && !C->major_progress()) {
4821     memset( worklist.adr(), 0, worklist.max()*sizeof(Node*) );
4822     _ltree_root->_child->iteration_split( this, worklist );
4823     // No verify after peeling!  GCM has hoisted code out of the loop.
4824     // After peeling, the hoisted code could sink inside the peeled area.
4825     // The peeling code does not try to recompute the best location for
4826     // all the code before the peeled area, so the verify pass will always
4827     // complain about it.
4828   }
4829 
4830   // Check for bailout, and return
4831   if (C->failing()) {
4832     return;
4833   }
4834 
4835   // Do verify graph edges in any case
4836   NOT_PRODUCT( C->verify_graph_edges(); );
4837 
4838   if (!do_split_ifs) {
4839     // We saw major progress in Split-If to get here.  We forced a
4840     // pass with unrolling and not split-if, however more split-if's
4841     // might make progress.  If the unrolling didn't make progress
4842     // then the major-progress flag got cleared and we won't try
4843     // another round of Split-If.  In particular the ever-common
4844     // instance-of/check-cast pattern requires at least 2 rounds of
4845     // Split-If to clear out.
4846     C->set_major_progress();
4847   }
4848 
4849   // Repeat loop optimizations if new loops were seen
4850   if (created_loop_node()) {
4851     C->set_major_progress();
4852   }
4853 
4854   // Keep loop predicates and perform optimizations with them
4855   // until no more loop optimizations could be done.
4856   // After that switch predicates off and do more loop optimizations.
4857   if (!C->major_progress() && (C->parse_predicate_count() > 0)) {
4858     C->mark_parse_predicate_nodes_useless(_igvn);
4859     assert(C->parse_predicate_count() == 0, "should be zero now");
4860     if (TraceLoopOpts) {
4861       tty->print_cr("PredicatesOff");
4862     }
4863     C->set_major_progress();
4864   }
4865 
4866   // Convert scalar to superword operations at the end of all loop opts.
4867   if (C->do_superword() && C->has_loops() && !C->major_progress()) {
4868     Compile::TracePhase tp("autoVectorize", &timers[_t_autoVectorize]);
4869     // SuperWord transform
4870     SuperWord sw(this);
4871     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4872       IdealLoopTree* lpt = iter.current();
4873       if (lpt->is_counted()) {
4874         CountedLoopNode *cl = lpt->_head->as_CountedLoop();
4875         if (cl->is_main_loop()) {
4876           if (!sw.transform_loop(lpt, true)) {
4877             // Instigate more unrolling for optimization when vectorization fails.
4878             if (cl->has_passed_slp()) {
4879               C->set_major_progress();
4880               cl->set_notpassed_slp();
4881               cl->mark_do_unroll_only();
4882             }
4883           }
4884         }
4885       }
4886     }
4887   }
4888 
4889   // Move UnorderedReduction out of counted loop. Can be introduced by SuperWord.
4890   if (C->has_loops() && !C->major_progress()) {
4891     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
4892       IdealLoopTree* lpt = iter.current();
4893       if (lpt->is_counted() && lpt->is_innermost()) {
4894         move_unordered_reduction_out_of_loop(lpt);
4895       }
4896     }
4897   }
4898 }
4899 
4900 #ifndef PRODUCT
4901 //------------------------------print_statistics-------------------------------
4902 int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes
4903 int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique
4904 volatile int PhaseIdealLoop::_long_loop_candidates=0; // Number of long loops seen
4905 volatile int PhaseIdealLoop::_long_loop_nests=0; // Number of long loops successfully transformed to a nest
4906 volatile int PhaseIdealLoop::_long_loop_counted_loops=0; // Number of long loops successfully transformed to a counted loop
4907 void PhaseIdealLoop::print_statistics() {
4908   tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d, long loops=%d/%d/%d", _loop_invokes, _loop_work, _long_loop_counted_loops, _long_loop_nests, _long_loop_candidates);
4909 }
4910 #endif
4911 
4912 #ifdef ASSERT
4913 // Build a verify-only PhaseIdealLoop, and see that it agrees with "this".
4914 void PhaseIdealLoop::verify() const {
4915   ResourceMark rm;
4916   int old_progress = C->major_progress();
4917   bool success = true;
4918 
4919   PhaseIdealLoop phase_verify(_igvn, this);
4920   if (C->failing()) return;
4921 
4922   // Verify ctrl and idom of every node.
4923   success &= verify_idom_and_nodes(C->root(), &phase_verify);
4924 
4925   // Verify loop-tree.
4926   success &= _ltree_root->verify_tree(phase_verify._ltree_root);
4927 
4928   assert(success, "VerifyLoopOptimizations failed");
4929 
4930   // Major progress was cleared by creating a verify version of PhaseIdealLoop.
4931   C->restore_major_progress(old_progress);
4932 }
4933 
4934 // Perform a BFS starting at n, through all inputs.
4935 // Call verify_idom and verify_node on all nodes of BFS traversal.
4936 bool PhaseIdealLoop::verify_idom_and_nodes(Node* root, const PhaseIdealLoop* phase_verify) const {
4937   Unique_Node_List worklist;
4938   worklist.push(root);
4939   bool success = true;
4940   for (uint i = 0; i < worklist.size(); i++) {
4941     Node* n = worklist.at(i);
4942     // process node
4943     success &= verify_idom(n, phase_verify);
4944     success &= verify_loop_ctrl(n, phase_verify);
4945     // visit inputs
4946     for (uint j = 0; j < n->req(); j++) {
4947       if (n->in(j) != nullptr) {
4948         worklist.push(n->in(j));
4949       }
4950     }
4951   }
4952   return success;
4953 }
4954 
4955 // Verify dominator structure (IDOM).
4956 bool PhaseIdealLoop::verify_idom(Node* n, const PhaseIdealLoop* phase_verify) const {
4957   // Verify IDOM for all CFG nodes (except root).
4958   if (!n->is_CFG() || n->is_Root()) {
4959     return true; // pass
4960   }
4961 
4962   if (n->_idx >= _idom_size) {
4963     tty->print("CFG Node with no idom: ");
4964     n->dump();
4965     return false; // fail
4966   }
4967 
4968   Node* id = idom_no_update(n);
4969   Node* id_verify = phase_verify->idom_no_update(n);
4970   if (id != id_verify) {
4971     tty->print("Mismatching idom for node: ");
4972     n->dump();
4973     tty->print("  We have idom: ");
4974     id->dump();
4975     tty->print("  Verify has idom: ");
4976     id_verify->dump();
4977     tty->cr();
4978     return false; // fail
4979   }
4980   return true; // pass
4981 }
4982 
4983 // Verify "_loop_or_ctrl": control and loop membership.
4984 //  (0) _loop_or_ctrl[i] == nullptr -> node not reachable.
4985 //  (1) has_ctrl -> check lowest bit. 1 -> data node. 0 -> ctrl node.
4986 //  (2) has_ctrl true: get_ctrl_no_update returns ctrl of data node.
4987 //  (3) has_ctrl false: get_loop_idx returns IdealLoopTree for ctrl node.
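// For example, for a reachable data node the slot holds its controlling CFG
// node with the low bit set as a tag (hence check (1)), while for a CFG node
// the slot holds the enclosing IdealLoopTree.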
4988 bool PhaseIdealLoop::verify_loop_ctrl(Node* n, const PhaseIdealLoop* phase_verify) const {
4989   const uint i = n->_idx;
4990   // The loop-tree was built from def to use (top-down).
4991   // The verification happens from use to def (bottom-up).
4992   // We may thus find nodes during verification that are not in the loop-tree.
4993   if (_loop_or_ctrl[i] == nullptr || phase_verify->_loop_or_ctrl[i] == nullptr) {
4994     if (_loop_or_ctrl[i] != nullptr || phase_verify->_loop_or_ctrl[i] != nullptr) {
4995       tty->print_cr("Was reachable in only one. this %d, verify %d.",
4996                  _loop_or_ctrl[i] != nullptr, phase_verify->_loop_or_ctrl[i] != nullptr);
4997       n->dump();
4998       return false; // fail
4999     }
5000     // Not reachable for both.
5001     return true; // pass
5002   }
5003 
5004   if (n->is_CFG() == has_ctrl(n)) {
5005     tty->print_cr("Exactly one should be true: %d for is_CFG, %d for has_ctrl.", n->is_CFG(), has_ctrl(n));
5006     n->dump();
5007     return false; // fail
5008   }
5009 
5010   if (has_ctrl(n) != phase_verify->has_ctrl(n)) {
5011     tty->print_cr("Mismatch has_ctrl: %d for this, %d for verify.", has_ctrl(n), phase_verify->has_ctrl(n));
5012     n->dump();
5013     return false; // fail
5014   } else if (has_ctrl(n)) {
5015     assert(phase_verify->has_ctrl(n), "sanity");
5016     // n is a data node.
5017     // Verify that its ctrl is the same.
5018 
5019     // Broken part of VerifyLoopOptimizations (A)
5020     // Reason:
5021     //   BUG, wrong control set for example in
5022     //   PhaseIdealLoop::split_if_with_blocks
5023     //   at "set_ctrl(x, new_ctrl);"
5024     /*
5025     if( _loop_or_ctrl[i] != loop_verify->_loop_or_ctrl[i] &&
5026         get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) {
5027       tty->print("Mismatched control setting for: ");
5028       n->dump();
5029       if( fail++ > 10 ) return;
5030       Node *c = get_ctrl_no_update(n);
5031       tty->print("We have it as: ");
5032       if( c->in(0) ) c->dump();
5033         else tty->print_cr("N%d",c->_idx);
5034       tty->print("Verify thinks: ");
5035       if( loop_verify->has_ctrl(n) )
5036         loop_verify->get_ctrl_no_update(n)->dump();
5037       else
5038         loop_verify->get_loop_idx(n)->dump();
5039       tty->cr();
5040     }
5041     */
5042     return true; // pass
5043   } else {
5044     assert(!phase_verify->has_ctrl(n), "sanity");
5045     // n is a ctrl node.
5046     // Verify that not has_ctrl, and that get_loop_idx is the same.
5047 
5048     // Broken part of VerifyLoopOptimizations (B)
5049     // Reason:
5050     //   NeverBranch node for example is added to loop outside its scope.
5051     //   Once we run build_loop_tree again, it is added to the correct loop.
5052     /*
5053     if (!C->major_progress()) {
5054       // Loop selection can be messed up if we did a major progress
5055       // operation, like split-if.  Do not verify in that case.
5056       IdealLoopTree *us = get_loop_idx(n);
5057       IdealLoopTree *them = loop_verify->get_loop_idx(n);
5058       if( us->_head != them->_head ||  us->_tail != them->_tail ) {
5059         tty->print("Unequals loops for: ");
5060         n->dump();
5061         if( fail++ > 10 ) return;
5062         tty->print("We have it as: ");
5063         us->dump();
5064         tty->print("Verify thinks: ");
5065         them->dump();
5066         tty->cr();
5067       }
5068     }
5069     */
5070     return true; // pass
5071   }
5072 }
5073 
5074 int compare_tree(IdealLoopTree* const& a, IdealLoopTree* const& b) {
5075   assert(a != nullptr && b != nullptr, "must be");
5076   return a->_head->_idx - b->_head->_idx;
5077 }
5078 
5079 GrowableArray<IdealLoopTree*> IdealLoopTree::collect_sorted_children() const {
5080   GrowableArray<IdealLoopTree*> children;
5081   IdealLoopTree* child = _child;
5082   while (child != nullptr) {
5083     assert(child->_parent == this, "all must be children of this");
5084     children.insert_sorted<compare_tree>(child);
5085     child = child->_next;
5086   }
5087   return children;
5088 }
5089 
5090 // Verify that tree structures match. Because the CFG can change, siblings
5091 // within the loop tree can be reordered. We attempt to deal with that by
5092 // reordering the verify's loop tree if possible.
5093 bool IdealLoopTree::verify_tree(IdealLoopTree* loop_verify) const {
5094   assert(_head == loop_verify->_head, "mismatched loop head");
5095   assert(this->_parent != nullptr || this->_next == nullptr, "is_root_loop implies has_no_sibling");
5096 
5097   // Collect the children
5098   GrowableArray<IdealLoopTree*> children = collect_sorted_children();
5099   GrowableArray<IdealLoopTree*> children_verify = loop_verify->collect_sorted_children();
5100 
5101   bool success = true;
5102 
5103   // Compare the two children lists
5104   for (int i = 0, j = 0; i < children.length() || j < children_verify.length(); ) {
5105     IdealLoopTree* child        = nullptr;
5106     IdealLoopTree* child_verify = nullptr;
5107     // Read from both lists, if possible.
5108     if (i < children.length()) {
5109       child = children.at(i);
5110     }
5111     if (j < children_verify.length()) {
5112       child_verify = children_verify.at(j);
5113     }
5114     assert(child != nullptr || child_verify != nullptr, "must find at least one");
5115     if (child != nullptr && child_verify != nullptr && child->_head != child_verify->_head) {
5116       // We found two non-equal children. Select the smaller one.
5117       if (child->_head->_idx < child_verify->_head->_idx) {
5118         child_verify = nullptr;
5119       } else {
5120         child = nullptr;
5121       }
5122     }
5123     // Process the two children, or potentially log the failure if we only found one.
5124     if (child_verify == nullptr) {
5125       if (child->_irreducible && Compile::current()->major_progress()) {
5126         // Irreducible loops can pick a different header (one of its entries).
5127       } else {
5128         tty->print_cr("We have a loop that verify does not have");
5129         child->dump();
5130         success = false;
5131       }
5132       i++; // step for this
5133     } else if (child == nullptr) {
5134       if (child_verify->_irreducible && Compile::current()->major_progress()) {
5135         // Irreducible loops can pick a different header (one of its entries).
5136       } else if (child_verify->_head->as_Region()->is_in_infinite_subgraph()) {
5137         // Infinite loops do not get attached to the loop-tree on their first visit.
5138         // "this" runs before "loop_verify". It is thus possible that we find the
5139         // infinite loop only for "child_verify". Only finding it with "child" would
5140         // mean that we lost it, which is not ok.
5141       } else {
5142         tty->print_cr("Verify has a loop that we do not have");
5143         child_verify->dump();
5144         success = false;
5145       }
5146       j++; // step for verify
5147     } else {
5148       assert(child->_head == child_verify->_head, "We have both and they are equal");
5149       success &= child->verify_tree(child_verify); // Recursion
5150       i++; // step for this
5151       j++; // step for verify
5152     }
5153   }
5154 
5155   // Broken part of VerifyLoopOptimizations (D)
5156   // Reason:
  //   split_if has to update the _tail if it is modified. But that is done by
  //   checking which loop the iff belongs to. That info can be wrong, and then
  //   we do not update the _tail correctly.
5160   /*
5161   Node *tail = _tail;           // Inline a non-updating version of
5162   while( !tail->in(0) )         // the 'tail()' call.
5163     tail = tail->in(1);
5164   assert( tail == loop->_tail, "mismatched loop tail" );
5165   */
5166 
5167   if (_head->is_CountedLoop()) {
5168     CountedLoopNode *cl = _head->as_CountedLoop();
5169 
5170     Node* ctrl     = cl->init_control();
5171     Node* back     = cl->back_control();
5172     assert(ctrl != nullptr && ctrl->is_CFG(), "sane loop in-ctrl");
5173     assert(back != nullptr && back->is_CFG(), "sane loop backedge");
5174     cl->loopexit(); // assert implied
5175   }
5176 
5177   // Broken part of VerifyLoopOptimizations (E)
5178   // Reason:
  //   PhaseIdealLoop::split_thru_region creates new nodes for the loop that are not
  //   added to the loop body, or maybe are not added to the correct loop,
  //   at "Node* x = n->clone();".
5182   /*
5183   // Innermost loops need to verify loop bodies,
5184   // but only if no 'major_progress'
5185   int fail = 0;
5186   if (!Compile::current()->major_progress() && _child == nullptr) {
5187     for( uint i = 0; i < _body.size(); i++ ) {
5188       Node *n = _body.at(i);
5189       if (n->outcnt() == 0)  continue; // Ignore dead
5190       uint j;
5191       for( j = 0; j < loop->_body.size(); j++ )
5192         if( loop->_body.at(j) == n )
5193           break;
5194       if( j == loop->_body.size() ) { // Not found in loop body
        // Last ditch effort to avoid assertion: It's possible that we
5196         // have some users (so outcnt not zero) but are still dead.
5197         // Try to find from root.
5198         if (Compile::current()->root()->find(n->_idx)) {
5199           fail++;
5200           tty->print("We have that verify does not: ");
5201           n->dump();
5202         }
5203       }
5204     }
5205     for( uint i2 = 0; i2 < loop->_body.size(); i2++ ) {
5206       Node *n = loop->_body.at(i2);
5207       if (n->outcnt() == 0)  continue; // Ignore dead
5208       uint j;
5209       for( j = 0; j < _body.size(); j++ )
5210         if( _body.at(j) == n )
5211           break;
5212       if( j == _body.size() ) { // Not found in loop body
        // Last ditch effort to avoid assertion: It's possible that we
5214         // have some users (so outcnt not zero) but are still dead.
5215         // Try to find from root.
5216         if (Compile::current()->root()->find(n->_idx)) {
5217           fail++;
5218           tty->print("Verify has that we do not: ");
5219           n->dump();
5220         }
5221       }
5222     }
5223     assert( !fail, "loop body mismatch" );
5224   }
5225   */
5226   return success;
5227 }
5228 #endif
5229 
5230 //------------------------------set_idom---------------------------------------
5231 void PhaseIdealLoop::set_idom(Node* d, Node* n, uint dom_depth) {
5232   uint idx = d->_idx;
5233   if (idx >= _idom_size) {
5234     uint newsize = next_power_of_2(idx);
5235     _idom      = REALLOC_RESOURCE_ARRAY( Node*,     _idom,_idom_size,newsize);
5236     _dom_depth = REALLOC_RESOURCE_ARRAY( uint, _dom_depth,_idom_size,newsize);
5237     memset( _dom_depth + _idom_size, 0, (newsize - _idom_size) * sizeof(uint) );
5238     _idom_size = newsize;
5239   }
5240   _idom[idx] = n;
5241   _dom_depth[idx] = dom_depth;
5242 }
5243 
5244 //------------------------------recompute_dom_depth---------------------------------------
5245 // The dominator tree is constructed with only parent pointers.
// This recomputes the depth in the tree by first tagging all
// nodes with a "no depth yet" marker.  The next pass then runs up
// the dom tree from each node marked "no depth yet" and computes
// the depth on the way back down.
5250 void PhaseIdealLoop::recompute_dom_depth() {
5251   uint no_depth_marker = C->unique();
5252   uint i;
5253   // Initialize depth to "no depth yet" and realize all lazy updates
5254   for (i = 0; i < _idom_size; i++) {
    // Only indices with a _dom_depth have a Node* or null (otherwise uninitialized).
5256     if (_dom_depth[i] > 0 && _idom[i] != nullptr) {
5257       _dom_depth[i] = no_depth_marker;
5258 
5259       // heal _idom if it has a fwd mapping in _loop_or_ctrl
5260       if (_idom[i]->in(0) == nullptr) {
5261         idom(i);
5262       }
5263     }
5264   }
5265   if (_dom_stk == nullptr) {
5266     uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
5267     if (init_size < 10) init_size = 10;
5268     _dom_stk = new GrowableArray<uint>(init_size);
5269   }
5270   // Compute new depth for each node.
5271   for (i = 0; i < _idom_size; i++) {
5272     uint j = i;
5273     // Run up the dom tree to find a node with a depth
5274     while (_dom_depth[j] == no_depth_marker) {
5275       _dom_stk->push(j);
5276       j = _idom[j]->_idx;
5277     }
5278     // Compute the depth on the way back down this tree branch
5279     uint dd = _dom_depth[j] + 1;
5280     while (_dom_stk->length() > 0) {
5281       uint j = _dom_stk->pop();
5282       _dom_depth[j] = dd;
5283       dd++;
5284     }
5285   }
5286 }
5287 
5288 //------------------------------sort-------------------------------------------
5289 // Insert 'loop' into the existing loop tree.  'innermost' is a leaf of the
5290 // loop tree, not the root.
5291 IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermost ) {
5292   if( !innermost ) return loop; // New innermost loop
5293 
5294   int loop_preorder = get_preorder(loop->_head); // Cache pre-order number
5295   assert( loop_preorder, "not yet post-walked loop" );
5296   IdealLoopTree **pp = &innermost;      // Pointer to previous next-pointer
5297   IdealLoopTree *l = *pp;               // Do I go before or after 'l'?
5298 
5299   // Insert at start of list
5300   while( l ) {                  // Insertion sort based on pre-order
5301     if( l == loop ) return innermost; // Already on list!
5302     int l_preorder = get_preorder(l->_head); // Cache pre-order number
5303     assert( l_preorder, "not yet post-walked l" );
5304     // Check header pre-order number to figure proper nesting
5305     if( loop_preorder > l_preorder )
5306       break;                    // End of insertion
5307     // If headers tie (e.g., shared headers) check tail pre-order numbers.
5308     // Since I split shared headers, you'd think this could not happen.
5309     // BUT: I must first do the preorder numbering before I can discover I
5310     // have shared headers, so the split headers all get the same preorder
5311     // number as the RegionNode they split from.
5312     if( loop_preorder == l_preorder &&
5313         get_preorder(loop->_tail) < get_preorder(l->_tail) )
5314       break;                    // Also check for shared headers (same pre#)
5315     pp = &l->_parent;           // Chain up list
5316     l = *pp;
5317   }
5318   // Link into list
5319   // Point predecessor to me
5320   *pp = loop;
5321   // Point me to successor
5322   IdealLoopTree *p = loop->_parent;
5323   loop->_parent = l;            // Point me to successor
5324   if( p ) sort( p, innermost ); // Insert my parents into list as well
5325   return innermost;
5326 }
5327 
5328 //------------------------------build_loop_tree--------------------------------
// I use a modified Vick/Tarjan algorithm.  I need pre- and post-visit
5330 // bits.  The _loop_or_ctrl[] array is mapped by Node index and holds a null for
5331 // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the
5332 // tightest enclosing IdealLoopTree for post-walked.
5333 //
5334 // During my forward walk I do a short 1-layer lookahead to see if I can find
// a loop backedge that doesn't have any work on the backedge.  This
5336 // helps me construct nested loops with shared headers better.
5337 //
5338 // Once I've done the forward recursion, I do the post-work.  For each child
5339 // I check to see if there is a backedge.  Backedges define a loop!  I
5340 // insert an IdealLoopTree at the target of the backedge.
5341 //
5342 // During the post-work I also check to see if I have several children
5343 // belonging to different loops.  If so, then this Node is a decision point
5344 // where control flow can choose to change loop nests.  It is at this
5345 // decision point where I can figure out how loops are nested.  At this
5346 // time I can properly order the different loop nests from my children.
5347 // Note that there may not be any backedges at the decision point!
5348 //
5349 // Since the decision point can be far removed from the backedges, I can't
5350 // order my loops at the time I discover them.  Thus at the decision point
5351 // I need to inspect loop header pre-order numbers to properly nest my
// loops.  This means I need to sort my children's loops by pre-order.
5353 // The sort is of size number-of-control-children, which generally limits
5354 // it to size 2 (i.e., I just choose between my 2 target loops).
5355 void PhaseIdealLoop::build_loop_tree() {
5356   // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
5357   GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
5358   Node *n = C->root();
5359   bltstack.push(n);
5360   int pre_order = 1;
5361   int stack_size;
5362 
5363   while ( ( stack_size = bltstack.length() ) != 0 ) {
5364     n = bltstack.top(); // Leave node on stack
5365     if ( !is_visited(n) ) {
5366       // ---- Pre-pass Work ----
5367       // Pre-walked but not post-walked nodes need a pre_order number.
5368 
5369       set_preorder_visited( n, pre_order ); // set as visited
5370 
5371       // ---- Scan over children ----
5372       // Scan first over control projections that lead to loop headers.
5373       // This helps us find inner-to-outer loops with shared headers better.
5374 
5375       // Scan children's children for loop headers.
5376       for ( int i = n->outcnt() - 1; i >= 0; --i ) {
5377         Node* m = n->raw_out(i);       // Child
5378         if( m->is_CFG() && !is_visited(m) ) { // Only for CFG children
5379           // Scan over children's children to find loop
5380           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
5381             Node* l = m->fast_out(j);
5382             if( is_visited(l) &&       // Been visited?
5383                 !is_postvisited(l) &&  // But not post-visited
5384                 get_preorder(l) < pre_order ) { // And smaller pre-order
5385               // Found!  Scan the DFS down this path before doing other paths
5386               bltstack.push(m);
5387               break;
5388             }
5389           }
5390         }
5391       }
5392       pre_order++;
5393     }
5394     else if ( !is_postvisited(n) ) {
5395       // Note: build_loop_tree_impl() adds out edges on rare occasions,
5396       // such as com.sun.rsasign.am::a.
5397       // For non-recursive version, first, process current children.
5398       // On next iteration, check if additional children were added.
5399       for ( int k = n->outcnt() - 1; k >= 0; --k ) {
5400         Node* u = n->raw_out(k);
5401         if ( u->is_CFG() && !is_visited(u) ) {
5402           bltstack.push(u);
5403         }
5404       }
5405       if ( bltstack.length() == stack_size ) {
5406         // There were no additional children, post visit node now
5407         (void)bltstack.pop(); // Remove node from stack
5408         pre_order = build_loop_tree_impl( n, pre_order );
5409         // Check for bailout
5410         if (C->failing()) {
5411           return;
5412         }
5413         // Check to grow _preorders[] array for the case when
5414         // build_loop_tree_impl() adds new nodes.
5415         check_grow_preorders();
5416       }
5417     }
5418     else {
5419       (void)bltstack.pop(); // Remove post-visited node from stack
5420     }
5421   }
5422   DEBUG_ONLY(verify_regions_in_irreducible_loops();)
5423 }
5424 
5425 //------------------------------build_loop_tree_impl---------------------------
5426 int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
5427   // ---- Post-pass Work ----
  // This node has been pre-walked; do its post-pass work and find its enclosing loop.
5429 
5430   // Tightest enclosing loop for this Node
5431   IdealLoopTree *innermost = nullptr;
5432 
5433   // For all children, see if any edge is a backedge.  If so, make a loop
5434   // for it.  Then find the tightest enclosing loop for the self Node.
5435   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5436     Node* m = n->fast_out(i);   // Child
5437     if( n == m ) continue;      // Ignore control self-cycles
5438     if( !m->is_CFG() ) continue;// Ignore non-CFG edges
5439 
5440     IdealLoopTree *l;           // Child's loop
5441     if( !is_postvisited(m) ) {  // Child visited but not post-visited?
5442       // Found a backedge
5443       assert( get_preorder(m) < pre_order, "should be backedge" );
5444       // Check for the RootNode, which is already a LoopNode and is allowed
5445       // to have multiple "backedges".
5446       if( m == C->root()) {     // Found the root?
5447         l = _ltree_root;        // Root is the outermost LoopNode
5448       } else {                  // Else found a nested loop
5449         // Insert a LoopNode to mark this loop.
5450         l = new IdealLoopTree(this, m, n);
5451       } // End of Else found a nested loop
5452       if( !has_loop(m) )        // If 'm' does not already have a loop set
5453         set_loop(m, l);         // Set loop header to loop now
5454 
5455     } else {                    // Else not a nested loop
5456       if (!_loop_or_ctrl[m->_idx]) continue; // Dead code has no loop
5457       l = get_loop(m);          // Get previously determined loop
5458       // If successor is header of a loop (nest), move up-loop till it
5459       // is a member of some outer enclosing loop.  Since there are no
5460       // shared headers (I've split them already) I only need to go up
5461       // at most 1 level.
5462       while( l && l->_head == m ) // Successor heads loop?
5463         l = l->_parent;         // Move up 1 for me
5464       // If this loop is not properly parented, then this loop
      // has no exit path out, i.e., it's an infinite loop.
5466       if( !l ) {
5467         // Make loop "reachable" from root so the CFG is reachable.  Basically
5468         // insert a bogus loop exit that is never taken.  'm', the loop head,
5469         // points to 'n', one (of possibly many) fall-in paths.  There may be
5470         // many backedges as well.
5471 
5472         // Here I set the loop to be the root loop.  I could have, after
5473         // inserting a bogus loop exit, restarted the recursion and found my
5474         // new loop exit.  This would make the infinite loop a first-class
5475         // loop and it would then get properly optimized.  What's the use of
5476         // optimizing an infinite loop?
5477         l = _ltree_root;        // Oops, found infinite loop
5478 
5479         if (!_verify_only) {
          // Insert the NeverBranch between 'm' and its control user.
5481           NeverBranchNode *iff = new NeverBranchNode( m );
5482           _igvn.register_new_node_with_optimizer(iff);
5483           set_loop(iff, l);
5484           Node *if_t = new CProjNode( iff, 0 );
5485           _igvn.register_new_node_with_optimizer(if_t);
5486           set_loop(if_t, l);
5487 
5488           Node* cfg = nullptr;       // Find the One True Control User of m
5489           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
5490             Node* x = m->fast_out(j);
5491             if (x->is_CFG() && x != m && x != iff)
5492               { cfg = x; break; }
5493           }
5494           assert(cfg != nullptr, "must find the control user of m");
5495           uint k = 0;             // Probably cfg->in(0)
5496           while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
5497           _igvn.replace_input_of(cfg, k, if_t); // Now point to NeverBranch
5498 
5499           // Now create the never-taken loop exit
5500           Node *if_f = new CProjNode( iff, 1 );
5501           _igvn.register_new_node_with_optimizer(if_f);
5502           set_loop(if_f, l);
5503           // Find frame ptr for Halt.  Relies on the optimizer
5504           // V-N'ing.  Easier and quicker than searching through
5505           // the program structure.
5506           Node *frame = new ParmNode( C->start(), TypeFunc::FramePtr );
5507           _igvn.register_new_node_with_optimizer(frame);
5508           // Halt & Catch Fire
5509           Node* halt = new HaltNode(if_f, frame, "never-taken loop exit reached");
5510           _igvn.register_new_node_with_optimizer(halt);
5511           set_loop(halt, l);
5512           _igvn.add_input_to(C->root(), halt);
5513         }
5514         set_loop(C->root(), _ltree_root);
5515       }
5516     }
5517     if (is_postvisited(l->_head)) {
5518       // We are currently visiting l, but its head has already been post-visited.
5519       // l is irreducible: we just found a second entry m.
5520       _has_irreducible_loops = true;
5521       RegionNode* secondary_entry = m->as_Region();
5522       DEBUG_ONLY(secondary_entry->verify_can_be_irreducible_entry();)
5523 
      // Walk up the loop-tree, marking all loops that are already post-visited as
      // irreducible, since m is a secondary entry to them all.
5526       while( is_postvisited(l->_head) ) {
5527         l->_irreducible = 1; // = true
5528         RegionNode* head = l->_head->as_Region();
5529         DEBUG_ONLY(head->verify_can_be_irreducible_entry();)
5530         l = l->_parent;
        // Check for bad CFG here to prevent a crash, and bail out of the compile
5532         if (l == nullptr) {
5533 #ifndef PRODUCT
5534           if (TraceLoopOpts) {
5535             tty->print_cr("bailout: unhandled CFG: infinite irreducible loop");
5536             m->dump();
5537           }
5538 #endif
5539           // This is a rare case that we do not want to handle in C2.
5540           C->record_method_not_compilable("unhandled CFG detected during loop optimization");
5541           return pre_order;
5542         }
5543       }
5544     }
5545     if (!_verify_only) {
5546       C->set_has_irreducible_loop(_has_irreducible_loops);
5547     }
5548 
    // This Node might be a decision point for loops.  It is one only if
    // its children belong to several different loops.  The sort call
5551     // does a trivial amount of work if there is only 1 child or all
5552     // children belong to the same loop.  If however, the children
5553     // belong to different loops, the sort call will properly set the
5554     // _parent pointers to show how the loops nest.
5555     //
5556     // In any case, it returns the tightest enclosing loop.
5557     innermost = sort( l, innermost );
5558   }
5559 
5560   // Def-use info will have some dead stuff; dead stuff will have no
5561   // loop decided on.
5562 
5563   // Am I a loop header?  If so fix up my parent's child and next ptrs.
5564   if( innermost && innermost->_head == n ) {
5565     assert( get_loop(n) == innermost, "" );
5566     IdealLoopTree *p = innermost->_parent;
5567     IdealLoopTree *l = innermost;
5568     while( p && l->_head == n ) {
      l->_next = p->_child;     // Put self on parent's 'next child' list
      p->_child = l;            // Make self the first child of parent
5571       l = p;                    // Now walk up the parent chain
5572       p = l->_parent;
5573     }
5574   } else {
5575     // Note that it is possible for a LoopNode to reach here, if the
5576     // backedge has been made unreachable (hence the LoopNode no longer
5577     // denotes a Loop, and will eventually be removed).
5578 
5579     // Record tightest enclosing loop for self.  Mark as post-visited.
5580     set_loop(n, innermost);
5581     // Also record has_call flag early on
5582     if( innermost ) {
5583       if( n->is_Call() && !n->is_CallLeaf() && !n->is_macro() ) {
5584         // Do not count uncommon calls
5585         if( !n->is_CallStaticJava() || !n->as_CallStaticJava()->_name ) {
5586           Node *iff = n->in(0)->in(0);
          // Don't allow any calls in vectorized loops.
5588           if (C->do_superword() ||
5589               !iff->is_If() ||
5590               (n->in(0)->Opcode() == Op_IfFalse && (1.0 - iff->as_If()->_prob) >= 0.01) ||
5591               iff->as_If()->_prob >= 0.01) {
5592             innermost->_has_call = 1;
5593           }
5594         }
5595       } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) {
5596         // Disable loop optimizations if the loop has a scalar replaceable
        // allocation. This disabling may cause a performance loss
5598         // if the allocation is not eliminated for some reason.
5599         innermost->_allow_optimizations = false;
5600         innermost->_has_call = 1; // = true
5601       } else if (n->Opcode() == Op_SafePoint) {
5602         // Record all safepoints in this loop.
5603         if (innermost->_safepts == nullptr) innermost->_safepts = new Node_List();
5604         innermost->_safepts->push(n);
5605       }
5606     }
5607   }
5608 
5609   // Flag as post-visited now
5610   set_postvisited(n);
5611   return pre_order;
5612 }
5613 
5614 #ifdef ASSERT
5615 //--------------------------verify_regions_in_irreducible_loops----------------
// Iterate down from Root through the CFG and verify, for every region:
// if it is in an irreducible loop, it must be marked as such.
5618 void PhaseIdealLoop::verify_regions_in_irreducible_loops() {
5619   ResourceMark rm;
5620   if (!_has_irreducible_loops) {
    // The last build_loop_tree did not find any irreducible loops,
    // hence no region has to be marked as being in an irreducible loop.
5623     return;
5624   }
5625 
5626   RootNode* root = C->root();
5627   Unique_Node_List worklist; // visit all nodes once
5628   worklist.push(root);
5629   bool failure = false;
5630   for (uint i = 0; i < worklist.size(); i++) {
5631     Node* n = worklist.at(i);
5632     if (n->is_Region()) {
5633       RegionNode* region = n->as_Region();
5634       if (is_in_irreducible_loop(region) &&
5635           region->loop_status() == RegionNode::LoopStatus::Reducible) {
5636         failure = true;
5637         tty->print("irreducible! ");
5638         region->dump();
5639       }
5640     }
5641     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
5642       Node* use = n->fast_out(j);
5643       if (use->is_CFG()) {
5644         worklist.push(use); // push if was not pushed before
5645       }
5646     }
5647   }
5648   assert(!failure, "region in irreducible loop was marked as reducible");
5649 }
5650 
5651 //---------------------------is_in_irreducible_loop-------------------------
5652 // Analogous to ciTypeFlow::Block::is_in_irreducible_loop
5653 bool PhaseIdealLoop::is_in_irreducible_loop(RegionNode* region) {
5654   if (!_has_irreducible_loops) {
5655     return false; // no irreducible loop in graph
5656   }
5657   IdealLoopTree* l = get_loop(region); // l: innermost loop that contains region
5658   do {
5659     if (l->_irreducible) {
5660       return true; // found it
5661     }
5662     if (l == _ltree_root) {
      return false; // reached root, terminate
5664     }
5665     l = l->_parent;
5666   } while (l != nullptr);
5667   assert(region->is_in_infinite_subgraph(), "must be in infinite subgraph");
5668   // We have "l->_parent == nullptr", which happens only for infinite loops,
5669   // where no parent is attached to the loop. We did not find any irreducible
  // loop from this region out to l. Thus l only has one entry, and no exit
5671   // (it is infinite and reducible). We can always rewrite an infinite loop
5672   // that is nested inside other loops:
5673   // while(condition) { infinite_loop; }
5674   // with an equivalent program where the infinite loop is an outermost loop
5675   // that is not nested in any loop:
5676   // while(condition) { break; } infinite_loop;
  // Thus, we can understand l as an outermost loop, and can terminate and
  // conclude: this region is in no irreducible loop.
5679   return false;
5680 }
5681 #endif
5682 
5683 //------------------------------build_loop_early-------------------------------
5684 // Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
5685 // First pass computes the earliest controlling node possible.  This is the
5686 // controlling input with the deepest dominating depth.
5687 void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
5688   while (worklist.size() != 0) {
5689     // Use local variables nstack_top_n & nstack_top_i to cache values
5690     // on nstack's top.
5691     Node *nstack_top_n = worklist.pop();
5692     uint  nstack_top_i = 0;
5693 //while_nstack_nonempty:
5694     while (true) {
5695       // Get parent node and next input's index from stack's top.
5696       Node  *n = nstack_top_n;
5697       uint   i = nstack_top_i;
5698       uint cnt = n->req(); // Count of inputs
5699       if (i == 0) {        // Pre-process the node.
5700         if( has_node(n) &&            // Have either loop or control already?
5701             !has_ctrl(n) ) {          // Have loop picked out already?
5702           // During "merge_many_backedges" we fold up several nested loops
5703           // into a single loop.  This makes the members of the original
          // loop bodies point to dead loops; they need to move up
          // to the new UNION'd larger loop.  I set the _head field of these
          // dead loops to null and point the _parent field to the owning
5707           // loop.  Shades of UNION-FIND algorithm.
5708           IdealLoopTree *ilt;
5709           while( !(ilt = get_loop(n))->_head ) {
5710             // Normally I would use a set_loop here.  But in this one special
5711             // case, it is legal (and expected) to change what loop a Node
5712             // belongs to.
5713             _loop_or_ctrl.map(n->_idx, (Node*)(ilt->_parent));
5714           }
5715           // Remove safepoints ONLY if I've already seen I don't need one.
5716           // (the old code here would yank a 2nd safepoint after seeing a
5717           // first one, even though the 1st did not dominate in the loop body
5718           // and thus could be avoided indefinitely)
5719           if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
5720               is_deleteable_safept(n)) {
5721             Node *in = n->in(TypeFunc::Control);
5722             lazy_replace(n,in);       // Pull safepoint now
5723             if (ilt->_safepts != nullptr) {
5724               ilt->_safepts->yank(n);
5725             }
5726             // Carry on with the recursion "as if" we are walking
5727             // only the control input
5728             if( !visited.test_set( in->_idx ) ) {
5729               worklist.push(in);      // Visit this guy later, using worklist
5730             }
5731             // Get next node from nstack:
5732             // - skip n's inputs processing by setting i > cnt;
5733             // - we also will not call set_early_ctrl(n) since
5734             //   has_node(n) == true (see the condition above).
5735             i = cnt + 1;
5736           }
5737         }
5738       } // if (i == 0)
5739 
5740       // Visit all inputs
5741       bool done = true;       // Assume all n's inputs will be processed
5742       while (i < cnt) {
5743         Node *in = n->in(i);
5744         ++i;
5745         if (in == nullptr) continue;
5746         if (in->pinned() && !in->is_CFG())
5747           set_ctrl(in, in->in(0));
5748         int is_visited = visited.test_set( in->_idx );
5749         if (!has_node(in)) {  // No controlling input yet?
5750           assert( !in->is_CFG(), "CFG Node with no controlling input?" );
5751           assert( !is_visited, "visit only once" );
5752           nstack.push(n, i);  // Save parent node and next input's index.
5753           nstack_top_n = in;  // Process current input now.
5754           nstack_top_i = 0;
5755           done = false;       // Not all n's inputs processed.
5756           break; // continue while_nstack_nonempty;
5757         } else if (!is_visited) {
5758           // This guy has a location picked out for him, but has not yet
5759           // been visited.  Happens to all CFG nodes, for instance.
5760           // Visit him using the worklist instead of recursion, to break
5761           // cycles.  Since he has a location already we do not need to
5762           // find his location before proceeding with the current Node.
5763           worklist.push(in);  // Visit this guy later, using worklist
5764         }
5765       }
5766       if (done) {
5767         // All of n's inputs have been processed, complete post-processing.
5768 
5769         // Compute earliest point this Node can go.
5770         // CFG, Phi, pinned nodes already know their controlling input.
5771         if (!has_node(n)) {
5772           // Record earliest legal location
5773           set_early_ctrl(n, false);
5774         }
5775         if (nstack.is_empty()) {
5776           // Finished all nodes on stack.
5777           // Process next node on the worklist.
5778           break;
5779         }
5780         // Get saved parent node and next input's index.
5781         nstack_top_n = nstack.node();
5782         nstack_top_i = nstack.index();
5783         nstack.pop();
5784       }
5785     } // while (true)
5786   }
5787 }
5788 
5789 //------------------------------dom_lca_internal--------------------------------
5790 // Pair-wise LCA
5791 Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const {
5792   if( !n1 ) return n2;          // Handle null original LCA
5793   assert( n1->is_CFG(), "" );
5794   assert( n2->is_CFG(), "" );
5795   // find LCA of all uses
5796   uint d1 = dom_depth(n1);
5797   uint d2 = dom_depth(n2);
5798   while (n1 != n2) {
5799     if (d1 > d2) {
5800       n1 =      idom(n1);
5801       d1 = dom_depth(n1);
5802     } else if (d1 < d2) {
5803       n2 =      idom(n2);
5804       d2 = dom_depth(n2);
5805     } else {
5806       // Here d1 == d2.  Due to edits of the dominator-tree, sections
5807       // of the tree might have the same depth.  These sections have
5808       // to be searched more carefully.
5809 
5810       // Scan up all the n1's with equal depth, looking for n2.
5811       Node *t1 = idom(n1);
5812       while (dom_depth(t1) == d1) {
5813         if (t1 == n2)  return n2;
5814         t1 = idom(t1);
5815       }
5816       // Scan up all the n2's with equal depth, looking for n1.
5817       Node *t2 = idom(n2);
5818       while (dom_depth(t2) == d2) {
5819         if (t2 == n1)  return n1;
5820         t2 = idom(t2);
5821       }
5822       // Move up to a new dominator-depth value as well as up the dom-tree.
5823       n1 = t1;
5824       n2 = t2;
5825       d1 = dom_depth(n1);
5826       d2 = dom_depth(n2);
5827     }
5828   }
5829   return n1;
5830 }
5831 
5832 //------------------------------compute_idom-----------------------------------
5833 // Locally compute IDOM using dom_lca call.  Correct only if the incoming
5834 // IDOMs are correct.
5835 Node *PhaseIdealLoop::compute_idom( Node *region ) const {
5836   assert( region->is_Region(), "" );
5837   Node *LCA = nullptr;
5838   for( uint i = 1; i < region->req(); i++ ) {
5839     if( region->in(i) != C->top() )
5840       LCA = dom_lca( LCA, region->in(i) );
5841   }
5842   return LCA;
5843 }
5844 
5845 bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) {
5846   bool had_error = false;
5847 #ifdef ASSERT
5848   if (early != C->root()) {
5849     // Make sure that there's a dominance path from LCA to early
5850     Node* d = LCA;
5851     while (d != early) {
5852       if (d == C->root()) {
5853         dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA);
5854         tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx);
5855         had_error = true;
5856         break;
5857       }
5858       d = idom(d);
5859     }
5860   }
5861 #endif
5862   return had_error;
5863 }
5864 
5865 
5866 Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) {
5867   // Compute LCA over list of uses
5868   bool had_error = false;
5869   Node *LCA = nullptr;
5870   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) {
5871     Node* c = n->fast_out(i);
5872     if (_loop_or_ctrl[c->_idx] == nullptr)
5873       continue;                 // Skip the occasional dead node
5874     if( c->is_Phi() ) {         // For Phis, we must land above on the path
5875       for( uint j=1; j<c->req(); j++ ) {// For all inputs
5876         if( c->in(j) == n ) {   // Found matching input?
5877           Node *use = c->in(0)->in(j);
5878           if (_verify_only && use->is_top()) continue;
5879           LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
5880           if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
5881         }
5882       }
5883     } else {
5884       // For CFG data-users, use is in the block just prior
5885       Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0);
5886       LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
5887       if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
5888     }
5889   }
5890   assert(!had_error, "bad dominance");
5891   return LCA;
5892 }
5893 
5894 // Check the shape of the graph at the loop entry. In some cases,
5895 // the shape of the graph does not match the shape outlined below.
5896 // That is caused by the Opaque1 node "protecting" the shape of
5897 // the graph being removed by, for example, the IGVN performed
5898 // in PhaseIdealLoop::build_and_optimize().
5899 //
5900 // After the Opaque1 node has been removed, optimizations (e.g., split-if,
5901 // loop unswitching, and IGVN, or a combination of them) can freely change
5902 // the graph's shape. As a result, the graph shape outlined below cannot
5903 // be guaranteed anymore.
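//
// A sketch of the shape checked below (reconstructed from the checks in this
// method; assertion predicates sit between the guard projection and the loop):
//
//      OpaqueZeroTripGuard  (Cmp input 2 for main loops, input 1 for post loops)
//            |
//           Cmp
//            |
//           Bool
//            |
//            If                          <- zero-trip guard
//            |
//      IfTrue/IfFalse
//            |
//   assertion predicates (with Halt)
//            |
//    main/post CountedLoop (this)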
5904 Node* CountedLoopNode::is_canonical_loop_entry() {
5905   if (!is_main_loop() && !is_post_loop()) {
5906     return nullptr;
5907   }
5908   Node* ctrl = skip_assertion_predicates_with_halt();
5909 
5910   if (ctrl == nullptr || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) {
5911     return nullptr;
5912   }
5913   Node* iffm = ctrl->in(0);
5914   if (iffm == nullptr || iffm->Opcode() != Op_If) {
5915     return nullptr;
5916   }
5917   Node* bolzm = iffm->in(1);
5918   if (bolzm == nullptr || !bolzm->is_Bool()) {
5919     return nullptr;
5920   }
5921   Node* cmpzm = bolzm->in(1);
5922   if (cmpzm == nullptr || !cmpzm->is_Cmp()) {
5923     return nullptr;
5924   }
5925 
5926   uint input = is_main_loop() ? 2 : 1;
5927   if (input >= cmpzm->req() || cmpzm->in(input) == nullptr) {
5928     return nullptr;
5929   }
5930   bool res = cmpzm->in(input)->Opcode() == Op_OpaqueZeroTripGuard;
5931 #ifdef ASSERT
5932   bool found_opaque = false;
5933   for (uint i = 1; i < cmpzm->req(); i++) {
5934     Node* opnd = cmpzm->in(i);
5935     if (opnd && opnd->is_Opaque1()) {
5936       found_opaque = true;
5937       break;
5938     }
5939   }
5940   assert(found_opaque == res, "wrong pattern");
5941 #endif
5942   return res ? cmpzm->in(input) : nullptr;
5943 }
5944 
5945 // Find pre loop end from main loop. Returns nullptr if none.
5946 CountedLoopEndNode* CountedLoopNode::find_pre_loop_end() {
5947   assert(is_main_loop(), "Can only find pre-loop from main-loop");
5948   // The loop cannot be optimized if the graph shape at the loop entry is
5949   // inappropriate.
5950   if (is_canonical_loop_entry() == nullptr) {
5951     return nullptr;
5952   }
5953 
5954   Node* p_f = skip_assertion_predicates_with_halt()->in(0)->in(0);
5955   if (!p_f->is_IfFalse() || !p_f->in(0)->is_CountedLoopEnd()) {
5956     return nullptr;
5957   }
5958   CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd();
5959   CountedLoopNode* loop_node = pre_end->loopnode();
5960   if (loop_node == nullptr || !loop_node->is_pre_loop()) {
5961     return nullptr;
5962   }
5963   return pre_end;
5964 }
5965 
CountedLoopNode* CountedLoopNode::pre_loop_head() const {
  assert(is_main_loop(), "Only main loop has pre loop");
  assert(_pre_loop_end != nullptr && _pre_loop_end->loopnode() != nullptr,
         "should find head from pre loop end");
  return _pre_loop_end->loopnode();
}

CountedLoopEndNode* CountedLoopNode::pre_loop_end() {
#ifdef ASSERT
  assert(is_main_loop(), "Only main loop has pre loop");
  assert(_pre_loop_end != nullptr, "should be set when fetched");
  Node* found_pre_end = find_pre_loop_end();
  assert(_pre_loop_end == found_pre_end && _pre_loop_end == pre_loop_head()->loopexit(),
         "should find the pre loop end and must be the same result");
#endif
  return _pre_loop_end;
}

void CountedLoopNode::set_pre_loop_end(CountedLoopEndNode* pre_loop_end) {
  assert(is_main_loop(), "Only main loop has pre loop");
  assert(pre_loop_end, "must be valid");
  _pre_loop_end = pre_loop_end;
}
5989 
5990 //------------------------------get_late_ctrl----------------------------------
5991 // Compute latest legal control.
5992 Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
5993   assert(early != nullptr, "early control should not be null");
5994 
5995   Node* LCA = compute_lca_of_uses(n, early);
5996 #ifdef ASSERT
5997   if (LCA == C->root() && LCA != early) {
5998     // def doesn't dominate uses so print some useful debugging output
5999     compute_lca_of_uses(n, early, true);
6000   }
6001 #endif
6002 
6003   if (n->is_Load() && LCA != early) {
6004     LCA = get_late_ctrl_with_anti_dep(n->as_Load(), early, LCA);
6005   }
6006 
6007   assert(LCA == find_non_split_ctrl(LCA), "unexpected late control");
6008   return LCA;
6009 }
6010 
// If this is a load, check for anti-dependent stores.
// We use a conservative algorithm to identify potentially interfering
// instructions and to reschedule the load.  The users of the memory
6014 // input of this load are examined.  Any use which is not a load and is
6015 // dominated by early is considered a potentially interfering store.
6016 // This can produce false positives.
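// For example (an illustration of the folding below, not an exhaustive rule):
// if the load's memory state also reaches a store to a potentially aliasing
// address whose control is dominated by 'early', that store's control is
// folded into the LCA via dom_lca_for_get_late_ctrl(), so the load is not
// scheduled past the store.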
6017 Node* PhaseIdealLoop::get_late_ctrl_with_anti_dep(LoadNode* n, Node* early, Node* LCA) {
6018   int load_alias_idx = C->get_alias_index(n->adr_type());
6019   if (C->alias_type(load_alias_idx)->is_rewritable()) {
6020     Unique_Node_List worklist;
6021 
6022     Node* mem = n->in(MemNode::Memory);
6023     for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
6024       Node* s = mem->fast_out(i);
6025       worklist.push(s);
6026     }
6027     for (uint i = 0; i < worklist.size() && LCA != early; i++) {
6028       Node* s = worklist.at(i);
6029       if (s->is_Load() || s->Opcode() == Op_SafePoint ||
6030           (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0) ||
6031           s->is_Phi()) {
6032         continue;
6033       } else if (s->is_MergeMem()) {
6034         for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
6035           Node* s1 = s->fast_out(i);
6036           worklist.push(s1);
6037         }
6038       } else {
6039         Node* sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0);
6040         assert(sctrl != nullptr || !s->is_reachable_from_root(), "must have control");
6041         if (sctrl != nullptr && !sctrl->is_top() && is_dominator(early, sctrl)) {
6042           const TypePtr* adr_type = s->adr_type();
6043           if (s->is_ArrayCopy()) {
6044             // Copy to known instance needs destination type to test for aliasing
6045             const TypePtr* dest_type = s->as_ArrayCopy()->_dest_type;
6046             if (dest_type != TypeOopPtr::BOTTOM) {
6047               adr_type = dest_type;
6048             }
6049           }
6050           if (C->can_alias(adr_type, load_alias_idx)) {
6051             LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n);
6052           } else if (s->is_CFG() && s->is_Multi()) {
6053             // Look for the memory use of s (that is the use of its memory projection)
6054             for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) {
6055               Node* s1 = s->fast_out(i);
6056               assert(s1->is_Proj(), "projection expected");
6057               if (_igvn.type(s1) == Type::MEMORY) {
6058                 for (DUIterator_Fast jmax, j = s1->fast_outs(jmax); j < jmax; j++) {
6059                   Node* s2 = s1->fast_out(j);
6060                   worklist.push(s2);
6061                 }
6062               }
6063             }
6064           }
6065         }
6066       }
6067     }
6068     // For Phis only consider Region's inputs that were reached by following the memory edges
6069     if (LCA != early) {
6070       for (uint i = 0; i < worklist.size(); i++) {
6071         Node* s = worklist.at(i);
6072         if (s->is_Phi() && C->can_alias(s->adr_type(), load_alias_idx)) {
6073           Node* r = s->in(0);
6074           for (uint j = 1; j < s->req(); j++) {
6075             Node* in = s->in(j);
6076             Node* r_in = r->in(j);
6077             // We can't reach any node from a Phi because we don't enqueue Phi's uses above
6078             if (((worklist.member(in) && !in->is_Phi()) || in == mem) && is_dominator(early, r_in)) {
6079               LCA = dom_lca_for_get_late_ctrl(LCA, r_in, n);
6080             }
6081           }
6082         }
6083       }
6084     }
6085   }
6086   return LCA;
6087 }
6088 
6089 // true if CFG node d dominates CFG node n
6090 bool PhaseIdealLoop::is_dominator(Node *d, Node *n) {
6091   if (d == n)
6092     return true;
6093   assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes");
6094   uint dd = dom_depth(d);
6095   while (dom_depth(n) >= dd) {
6096     if (n == d)
6097       return true;
6098     n = idom(n);
6099   }
6100   return false;
6101 }
6102 
6103 //------------------------------dom_lca_for_get_late_ctrl_internal-------------
6104 // Pair-wise LCA with tags.
6105 // Tag each index with the node 'tag' currently being processed
6106 // before advancing up the dominator chain using idom().
6107 // Later calls that find a match to 'tag' know that this path has already
6108 // been considered in the current LCA (which is input 'n1' by convention).
6109 // Since get_late_ctrl() is only called once for each node, the tag array
6110 // does not need to be cleared between calls to get_late_ctrl().
// The algorithm trades a larger constant factor for better asymptotic behavior.
6112 //
6113 Node *PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal(Node *n1, Node *n2, Node *tag_node) {
6114   uint d1 = dom_depth(n1);
6115   uint d2 = dom_depth(n2);
6116   jlong tag = tag_node->_idx | (((jlong)_dom_lca_tags_round) << 32);
6117 
6118   do {
6119     if (d1 > d2) {
6120       // current lca is deeper than n2
6121       _dom_lca_tags.at_put_grow(n1->_idx, tag);
6122       n1 =      idom(n1);
6123       d1 = dom_depth(n1);
6124     } else if (d1 < d2) {
6125       // n2 is deeper than current lca
6126       jlong memo = _dom_lca_tags.at_grow(n2->_idx, 0);
6127       if (memo == tag) {
6128         return n1;    // Return the current LCA
6129       }
6130       _dom_lca_tags.at_put_grow(n2->_idx, tag);
6131       n2 =      idom(n2);
6132       d2 = dom_depth(n2);
6133     } else {
6134       // Here d1 == d2.  Due to edits of the dominator-tree, sections
6135       // of the tree might have the same depth.  These sections have
6136       // to be searched more carefully.
6137 
6138       // Scan up all the n1's with equal depth, looking for n2.
6139       _dom_lca_tags.at_put_grow(n1->_idx, tag);
6140       Node *t1 = idom(n1);
6141       while (dom_depth(t1) == d1) {
6142         if (t1 == n2)  return n2;
6143         _dom_lca_tags.at_put_grow(t1->_idx, tag);
6144         t1 = idom(t1);
6145       }
6146       // Scan up all the n2's with equal depth, looking for n1.
6147       _dom_lca_tags.at_put_grow(n2->_idx, tag);
6148       Node *t2 = idom(n2);
6149       while (dom_depth(t2) == d2) {
6150         if (t2 == n1)  return n1;
6151         _dom_lca_tags.at_put_grow(t2->_idx, tag);
6152         t2 = idom(t2);
6153       }
6154       // Move up to a new dominator-depth value as well as up the dom-tree.
6155       n1 = t1;
6156       n2 = t2;
6157       d1 = dom_depth(n1);
6158       d2 = dom_depth(n2);
6159     }
6160   } while (n1 != n2);
6161   return n1;
6162 }
6163 
6164 //------------------------------init_dom_lca_tags------------------------------
// The tag could be a node's integer index, 32 bits instead of 64 bits in some cases.
// The intended use does not involve any growth for the array, so it could
6167 // be of fixed size.
6168 void PhaseIdealLoop::init_dom_lca_tags() {
6169   uint limit = C->unique() + 1;
6170   _dom_lca_tags.at_grow(limit, 0);
6171   _dom_lca_tags_round = 0;
6172 #ifdef ASSERT
6173   for (uint i = 0; i < limit; ++i) {
6174     assert(_dom_lca_tags.at(i) == 0, "Must be distinct from each node pointer");
6175   }
6176 #endif // ASSERT
6177 }
6178 
6179 //------------------------------build_loop_late--------------------------------
6180 // Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
6181 // Second pass finds latest legal placement, and ideal loop placement.
6182 void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
6183   while (worklist.size() != 0) {
6184     Node *n = worklist.pop();
6185     // Only visit once
6186     if (visited.test_set(n->_idx)) continue;
6187     uint cnt = n->outcnt();
6188     uint   i = 0;
6189     while (true) {
6190       assert(_loop_or_ctrl[n->_idx], "no dead nodes");
6191       // Visit all children
6192       if (i < cnt) {
6193         Node* use = n->raw_out(i);
6194         ++i;
6195         // Check for dead uses.  Aggressively prune such junk.  It might be
6196         // dead in the global sense, but still have local uses so I cannot
6197         // easily call 'remove_dead_node'.
6198         if (_loop_or_ctrl[use->_idx] != nullptr || use->is_top()) { // Not dead?
6199           // Due to cycles, we might not hit the same fixed point in the verify
6200           // pass as we do in the regular pass.  Instead, visit such phis as
6201           // simple uses of the loop head.
6202           if( use->in(0) && (use->is_CFG() || use->is_Phi()) ) {
6203             if( !visited.test(use->_idx) )
6204               worklist.push(use);
6205           } else if( !visited.test_set(use->_idx) ) {
6206             nstack.push(n, i); // Save parent and next use's index.
6207             n   = use;         // Process all children of current use.
6208             cnt = use->outcnt();
6209             i   = 0;
6210           }
6211         } else {
6212           // Do not visit around the backedge of loops via data edges.
6213           // push dead code onto a worklist
6214           _deadlist.push(use);
6215         }
6216       } else {
6217         // All of n's children have been processed, complete post-processing.
6218         build_loop_late_post(n);
6219         if (C->failing()) { return; }
6220         if (nstack.is_empty()) {
6221           // Finished all nodes on stack.
6222           // Process next node on the worklist.
6223           break;
6224         }
6225         // Get saved parent node and next use's index. Visit the rest of uses.
6226         n   = nstack.node();
6227         cnt = n->outcnt();
6228         i   = nstack.index();
6229         nstack.pop();
6230       }
6231     }
6232   }
6233 }
6234 
6235 // Verify that no data node is scheduled in the outer loop of a strip
6236 // mined loop.
6237 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) {
6238 #ifdef ASSERT
6239   if (get_loop(least)->_nest == 0) {
6240     return;
6241   }
6242   IdealLoopTree* loop = get_loop(least);
6243   Node* head = loop->_head;
6244   if (head->is_OuterStripMinedLoop() &&
6245       // Verification can't be applied to fully built strip mined loops
6246       head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) {
6247     Node* sfpt = head->as_Loop()->outer_safepoint();
6248     ResourceMark rm;
6249     Unique_Node_List wq;
6250     wq.push(sfpt);
6251     for (uint i = 0; i < wq.size(); i++) {
6252       Node *m = wq.at(i);
6253       for (uint i = 1; i < m->req(); i++) {
6254         Node* nn = m->in(i);
6255         if (nn == n) {
6256           return;
6257         }
6258         if (nn != nullptr && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) {
6259           wq.push(nn);
6260         }
6261       }
6262     }
6263     ShouldNotReachHere();
6264   }
6265 #endif
6266 }
6267 
6268 
6269 //------------------------------build_loop_late_post---------------------------
6270 // Put Data nodes into some loop nest, by setting the _loop_or_ctrl[]->loop mapping.
6271 // Second pass finds latest legal placement, and ideal loop placement.
6272 void PhaseIdealLoop::build_loop_late_post(Node *n) {
6273   build_loop_late_post_work(n, true);
6274 }
6275 
6276 void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
6277 
6278   if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) {
6279     _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
6280   }
6281 
6282 #ifdef ASSERT
6283   if (_verify_only && !n->is_CFG()) {
6284     // Check def-use domination.
6285     compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
6286   }
6287 #endif
6288 
6289   // CFG and pinned nodes already handled
6290   if( n->in(0) ) {
6291     if( n->in(0)->is_top() ) return; // Dead?
6292 
6293     // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads
6294     // _must_ be pinned (they have to observe their control edge of course).
6295     // Unlike Stores (which modify an unallocable resource, the memory
6296     // state), Mods/Loads can float around.  So free them up.
6297     switch( n->Opcode() ) {
6298     case Op_DivI:
6299     case Op_DivF:
6300     case Op_DivD:
6301     case Op_ModI:
6302     case Op_ModF:
6303     case Op_ModD:
6304     case Op_LoadB:              // Same with Loads; they can sink
6305     case Op_LoadUB:             // during loop optimizations.
6306     case Op_LoadUS:
6307     case Op_LoadD:
6308     case Op_LoadF:
6309     case Op_LoadI:
6310     case Op_LoadKlass:
6311     case Op_LoadNKlass:
6312     case Op_LoadL:
6313     case Op_LoadS:
6314     case Op_LoadP:
6315     case Op_LoadN:
6316     case Op_LoadRange:
6317     case Op_LoadD_unaligned:
6318     case Op_LoadL_unaligned:
6319     case Op_StrComp:            // Does a bunch of load-like effects
6320     case Op_StrEquals:
6321     case Op_StrIndexOf:
6322     case Op_StrIndexOfChar:
6323     case Op_AryEq:
6324     case Op_VectorizedHashCode:
6325     case Op_CountPositives:
6326       pinned = false;
6327     }
6328     if (n->is_CMove() || n->is_ConstraintCast()) {
6329       pinned = false;
6330     }
6331     if( pinned ) {
6332       IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n));
6333       if( !chosen_loop->_child )       // Inner loop?
6334         chosen_loop->_body.push(n); // Collect inner loops
6335       return;
6336     }
6337   } else {                      // No slot zero
6338     if( n->is_CFG() ) {         // CFG with no slot 0 is dead
6339       _loop_or_ctrl.map(n->_idx,0);    // No block setting, it's globally dead
6340       return;
6341     }
6342     assert(!n->is_CFG() || n->outcnt() == 0, "");
6343   }
6344 
6345   // Do I have a "safe range" I can select over?
6346   Node *early = get_ctrl(n);// Early location already computed
6347 
6348   // Compute latest point this Node can go
6349   Node *LCA = get_late_ctrl( n, early );
6350   // LCA is null due to uses being dead
6351   if( LCA == nullptr ) {
6352 #ifdef ASSERT
6353     for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) {
6354       assert(_loop_or_ctrl[n->out(i1)->_idx] == nullptr, "all uses must also be dead");
6355     }
6356 #endif
6357     _loop_or_ctrl.map(n->_idx, 0);     // This node is useless
6358     _deadlist.push(n);
6359     return;
6360   }
6361   assert(LCA != nullptr && !LCA->is_top(), "no dead nodes");
6362 
6363   Node *legal = LCA;            // Walk 'legal' up the IDOM chain
6364   Node *least = legal;          // Best legal position so far
6365   while( early != legal ) {     // While not at earliest legal
6366     if (legal->is_Start() && !early->is_Root()) {
6367 #ifdef ASSERT
6368       // Bad graph. Print idom path and fail.
6369       dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA);
6370       assert(false, "Bad graph detected in build_loop_late");
6371 #endif
6372       C->record_method_not_compilable("Bad graph detected in build_loop_late");
6373       return;
6374     }
6375     // Find least loop nesting depth
6376     legal = idom(legal);        // Bump up the IDOM tree
6377     // Check for lower nesting depth
6378     if( get_loop(legal)->_nest < get_loop(least)->_nest )
6379       least = legal;
6380   }
6381   assert(early == legal || legal != C->root(), "bad dominance of inputs");
6382 
6383   if (least != early) {
6384     // Move the node above predicates as far up as possible so a
6385     // following pass of loop predication doesn't hoist a predicate
6386     // that depends on it above that node.
6387     Node* new_ctrl = least;
6388     for (;;) {
6389       if (!new_ctrl->is_Proj()) {
6390         break;
6391       }
6392       CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern();
6393       if (call == nullptr) {
6394         break;
6395       }
6396       int req = call->uncommon_trap_request();
6397       Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
6398       if (trap_reason != Deoptimization::Reason_loop_limit_check &&
6399           trap_reason != Deoptimization::Reason_predicate &&
6400           trap_reason != Deoptimization::Reason_profile_predicate) {
6401         break;
6402       }
6403       Node* c = new_ctrl->in(0)->in(0);
6404       if (is_dominator(c, early) && c != early) {
6405         break;
6406       }
6407       new_ctrl = c;
6408     }
6409     least = new_ctrl;
6410   }
6411   // Try not to place code on a loop entry projection
6412   // which can inhibit range check elimination.
6413   if (least != early && !BarrierSet::barrier_set()->barrier_set_c2()->is_gc_specific_loop_opts_pass(_mode)) {
6414     Node* ctrl_out = least->unique_ctrl_out_or_null();
6415     if (ctrl_out != nullptr && ctrl_out->is_Loop() &&
6416         least == ctrl_out->in(LoopNode::EntryControl) &&
6417         (ctrl_out->is_CountedLoop() || ctrl_out->is_OuterStripMinedLoop())) {
6418       Node* least_dom = idom(least);
6419       if (get_loop(least_dom)->is_member(get_loop(least))) {
6420         least = least_dom;
6421       }
6422     }
6423   }
6424   // Don't extend live ranges of raw oops
6425   if (least != early && n->is_ConstraintCast() && n->in(1)->bottom_type()->isa_rawptr() &&
6426       !n->bottom_type()->isa_rawptr()) {
6427     least = early;
6428   }
6429 
6430 #ifdef ASSERT
6431   // Broken part of VerifyLoopOptimizations (F)
6432   // Reason:
  //   _verify_me->get_ctrl_no_update(n) seems to return a wrong result
6434   /*
6435   // If verifying, verify that 'verify_me' has a legal location
6436   // and choose it as our location.
6437   if( _verify_me ) {
6438     Node *v_ctrl = _verify_me->get_ctrl_no_update(n);
6439     Node *legal = LCA;
6440     while( early != legal ) {   // While not at earliest legal
6441       if( legal == v_ctrl ) break;  // Check for prior good location
6442       legal = idom(legal)      ;// Bump up the IDOM tree
6443     }
6444     // Check for prior good location
6445     if( legal == v_ctrl ) least = legal; // Keep prior if found
6446   }
6447   */
6448 #endif
6449 
6450   // Assign discovered "here or above" point
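       // find_non_split_ctrl() picks a control at or above 'least' so that n is not pinned directly on a
       // splitting (multi-way branch) control node.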
6451   least = find_non_split_ctrl(least);
6452   verify_strip_mined_scheduling(n, least);
6453   set_ctrl(n, least);
6454 
6455   // Collect inner loop bodies
6456   IdealLoopTree *chosen_loop = get_loop(least);
6457   if (!chosen_loop->_child)    // Inner loop?
6458     chosen_loop->_body.push(n); // Collect the body of the inner loop
6459 
6460   if (!_verify_only && n->Opcode() == Op_OpaqueZeroTripGuard) {
6461     _zero_trip_guard_opaque_nodes.push(n);
6462   }
6463 
6464 }
6465 
6466 #ifdef ASSERT
6467 void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) {
6468   tty->print_cr("%s", msg);
6469   tty->print("n: "); n->dump();
6470   tty->print("early(n): "); early->dump();
6471   if (n->in(0) != nullptr  && !n->in(0)->is_top() &&
6472       n->in(0) != early && !n->in(0)->is_Root()) {
6473     tty->print("n->in(0): "); n->in(0)->dump();
6474   }
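       // Dump the inputs of n (and one more level of their inputs) together with their early controls.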
6475   for (uint i = 1; i < n->req(); i++) {
6476     Node* in1 = n->in(i);
6477     if (in1 != nullptr && in1 != n && !in1->is_top()) {
6478       tty->print("n->in(%d): ", i); in1->dump();
6479       Node* in1_early = get_ctrl(in1);
6480       tty->print("early(n->in(%d)): ", i); in1_early->dump();
6481       if (in1->in(0) != nullptr     && !in1->in(0)->is_top() &&
6482           in1->in(0) != in1_early && !in1->in(0)->is_Root()) {
6483         tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump();
6484       }
6485       for (uint j = 1; j < in1->req(); j++) {
6486         Node* in2 = in1->in(j);
6487         if (in2 != nullptr && in2 != n && in2 != in1 && !in2->is_top()) {
6488           tty->print("n->in(%d)->in(%d): ", i, j); in2->dump();
6489           Node* in2_early = get_ctrl(in2);
6490           tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump();
6491           if (in2->in(0) != nullptr     && !in2->in(0)->is_top() &&
6492               in2->in(0) != in2_early && !in2->in(0)->is_Root()) {
6493             tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump();
6494           }
6495         }
6496       }
6497     }
6498   }
6499   tty->cr();
6500   tty->print("LCA(n): "); LCA->dump();
6501   for (uint i = 0; i < n->outcnt(); i++) {
6502     Node* u1 = n->raw_out(i);
6503     if (u1 == n)
6504       continue;
6505     tty->print("n->out(%d): ", i); u1->dump();
6506     if (u1->is_CFG()) {
6507       for (uint j = 0; j < u1->outcnt(); j++) {
6508         Node* u2 = u1->raw_out(j);
6509         if (u2 != u1 && u2 != n && u2->is_CFG()) {
6510           tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
6511         }
6512       }
6513     } else {
6514       Node* u1_later = get_ctrl(u1);
6515       tty->print("later(n->out(%d)): ", i); u1_later->dump();
6516       if (u1->in(0) != nullptr     && !u1->in(0)->is_top() &&
6517           u1->in(0) != u1_later && !u1->in(0)->is_Root()) {
6518         tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump();
6519       }
6520       for (uint j = 0; j < u1->outcnt(); j++) {
6521         Node* u2 = u1->raw_out(j);
6522         if (u2 == n || u2 == u1)
6523           continue;
6524         tty->print("n->out(%d)->out(%d): ", i, j); u2->dump();
6525         if (!u2->is_CFG()) {
6526           Node* u2_later = get_ctrl(u2);
6527           tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump();
6528           if (u2->in(0) != nullptr     && !u2->in(0)->is_top() &&
6529               u2->in(0) != u2_later && !u2->in(0)->is_Root()) {
6530             tty->print("n->out(%d)->in(0): ", i); u2->in(0)->dump();
6531           }
6532         }
6533       }
6534     }
6535   }
6536   dump_idoms(early, LCA);
6537   tty->cr();
6538 }
6539 
6540 // Class to compute the real LCA given an early node and a wrong LCA in a bad graph.
6541 class RealLCA {
6542   const PhaseIdealLoop* _phase;
6543   Node* _early;
6544   Node* _wrong_lca;
6545   uint _early_index;
6546   int _wrong_lca_index;
6547 
6548   // Given the idom chains of early and wrong LCA: Walk through the idoms, starting at the StartNode, and find the
6549   // first node at which they differ. The previously visited node must then be the real LCA.
6550   // The node lists also contain _early and _wrong_lca, respectively.
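       // Hypothetical example: early_with_idoms = [e, a, b, Start] and wrong_lca_with_idoms = [w, c, b, Start].
       // Walking both lists from the Start end, they first differ at 'a' vs. 'c', so the real LCA is 'b'.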
6551   Node* find_real_lca(Unique_Node_List& early_with_idoms, Unique_Node_List& wrong_lca_with_idoms) {
6552     int early_index = early_with_idoms.size() - 1;
6553     int wrong_lca_index = wrong_lca_with_idoms.size() - 1;
6554     bool found_difference = false;
6555     do {
6556       if (early_with_idoms[early_index] != wrong_lca_with_idoms[wrong_lca_index]) {
6557         // First time early and wrong LCA idoms differ. Real LCA must be at the previous index.
6558         found_difference = true;
6559         break;
6560       }
6561       early_index--;
6562       wrong_lca_index--;
6563     } while (wrong_lca_index >= 0);
6564 
6565     assert(early_index >= 0, "must always find an LCA - cannot be early");
6566     _early_index = early_index;
6567     _wrong_lca_index = wrong_lca_index;
6568     Node* real_lca = early_with_idoms[_early_index + 1]; // Plus one to skip _early.
6569     assert(found_difference || real_lca == _wrong_lca, "wrong LCA dominates early and is therefore the real LCA");
6570     return real_lca;
6571   }
6572 
6573   void dump(Node* real_lca) {
6574     tty->cr();
6575     tty->print_cr("idoms of early \"%d %s\":", _early->_idx, _early->Name());
6576     _phase->dump_idom(_early, _early_index + 1);
6577 
6578     tty->cr();
6579     tty->print_cr("idoms of (wrong) LCA \"%d %s\":", _wrong_lca->_idx, _wrong_lca->Name());
6580     _phase->dump_idom(_wrong_lca, _wrong_lca_index + 1);
6581 
6582     tty->cr();
6583     tty->print("Real LCA of early \"%d %s\" (idom[%d]) and wrong LCA \"%d %s\"",
6584                _early->_idx, _early->Name(), _early_index, _wrong_lca->_idx, _wrong_lca->Name());
6585     if (_wrong_lca_index >= 0) {
6586       tty->print(" (idom[%d])", _wrong_lca_index);
6587     }
6588     tty->print_cr(":");
6589     real_lca->dump();
6590   }
6591 
6592  public:
6593   RealLCA(const PhaseIdealLoop* phase, Node* early, Node* wrong_lca)
6594       : _phase(phase), _early(early), _wrong_lca(wrong_lca), _early_index(0), _wrong_lca_index(0) {
6595     assert(!wrong_lca->is_Start(), "StartNode is always a common dominator");
6596   }
6597 
6598   void compute_and_dump() {
6599     ResourceMark rm;
6600     Unique_Node_List early_with_idoms;
6601     Unique_Node_List wrong_lca_with_idoms;
6602     early_with_idoms.push(_early);
6603     wrong_lca_with_idoms.push(_wrong_lca);
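         // 10000 is only an upper bound on how many idoms are collected for each chain.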
6604     _phase->get_idoms(_early, 10000, early_with_idoms);
6605     _phase->get_idoms(_wrong_lca, 10000, wrong_lca_with_idoms);
6606     Node* real_lca = find_real_lca(early_with_idoms, wrong_lca_with_idoms);
6607     dump(real_lca);
6608   }
6609 };
6610 
6611 // Dump the idom chain of early and of the wrong LCA, and also dump the real LCA of early and the wrong LCA.
6612 void PhaseIdealLoop::dump_idoms(Node* early, Node* wrong_lca) {
6613   assert(!is_dominator(early, wrong_lca), "sanity check that early does not dominate wrong lca");
6614   assert(!has_ctrl(early) && !has_ctrl(wrong_lca), "sanity check, no data nodes");
6615 
6616   RealLCA real_lca(this, early, wrong_lca);
6617   real_lca.compute_and_dump();
6618 }
6619 #endif // ASSERT
6620 
6621 #ifndef PRODUCT
6622 //------------------------------dump-------------------------------------------
6623 void PhaseIdealLoop::dump() const {
6624   ResourceMark rm;
6625   Node_Stack stack(C->live_nodes() >> 2);
6626   Node_List rpo_list;
6627   VectorSet visited;
6628   visited.set(C->top()->_idx);
6629   rpo(C->root(), stack, visited, rpo_list);
6630   // Dump the root loop; rpo_list is in post-order, so it is indexed starting from its last element
6631   dump(_ltree_root, rpo_list.size(), rpo_list);
6632 }
6633 
6634 void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List &rpo_list) const {
6635   loop->dump_head();
6636 
6637   // Now scan for CFG nodes in the same loop
6638   for (uint j = idx; j > 0; j--) {
6639     Node* n = rpo_list[j-1];
6640     if (!_loop_or_ctrl[n->_idx])      // Skip dead nodes
6641       continue;
6642 
6643     if (get_loop(n) != loop) { // Wrong loop nest
6644       if (get_loop(n)->_head == n &&    // Found nested loop?
6645           get_loop(n)->_parent == loop)
6646         dump(get_loop(n), rpo_list.size(), rpo_list);     // Print it nested one level deeper
6647       continue;
6648     }
6649 
6650     // Dump controlling node
6651     tty->sp(2 * loop->_nest);
6652     tty->print("C");
6653     if (n == C->root()) {
6654       n->dump();
6655     } else {
6656       Node* cached_idom   = idom_no_update(n);
6657       Node* computed_idom = n->in(0);
6658       if (n->is_Region()) {
6659         computed_idom = compute_idom(n);
6660         // compute_idom() will return n->in(0) when idom(n) is an IfNode (or
6661         // any MultiBranch ctrl node), so apply a similar transform to
6662         // the cached idom returned from idom_no_update.
6663         cached_idom = find_non_split_ctrl(cached_idom);
6664       }
6665       tty->print(" ID:%d", computed_idom->_idx);
6666       n->dump();
6667       if (cached_idom != computed_idom) {
6668         tty->print_cr("*** BROKEN IDOM!  Computed as: %d, cached as: %d",
6669                       computed_idom->_idx, cached_idom->_idx);
6670       }
6671     }
6672     // Dump nodes it controls
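         // For data nodes, _loop_or_ctrl caches the ctrl node with its low bit set, hence the comparison
         // below against n with the tag bit added.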
6673     for (uint k = 0; k < _loop_or_ctrl.max(); k++) {
6674       // (k < C->unique() && get_ctrl(find(k)) == n)
6675       if (k < C->unique() && _loop_or_ctrl[k] == (Node*)((intptr_t)n + 1)) {
6676         Node* m = C->root()->find(k);
6677         if (m && m->outcnt() > 0) {
6678           if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) {
6679             tty->print_cr("*** BROKEN CTRL ACCESSOR!  _loop_or_ctrl[k] is %p, ctrl is %p",
6680                           _loop_or_ctrl[k], has_ctrl(m) ? get_ctrl_no_update(m) : nullptr);
6681           }
6682           tty->sp(2 * loop->_nest + 1);
6683           m->dump();
6684         }
6685       }
6686     }
6687   }
6688 }
6689 
6690 void PhaseIdealLoop::dump_idom(Node* n, const uint count) const {
6691   if (has_ctrl(n)) {
6692     tty->print_cr("No idom for data nodes");
6693   } else {
6694     ResourceMark rm;
6695     Unique_Node_List idoms;
6696     get_idoms(n, count, idoms);
6697     dump_idoms_in_reverse(n, idoms);
6698   }
6699 }
6700 
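     // Walk up the idom chain of n (excluding n itself) and collect up to 'count' dominators;
     // the walk stops early once the StartNode has been collected.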
6701 void PhaseIdealLoop::get_idoms(Node* n, const uint count, Unique_Node_List& idoms) const {
6702   Node* next = n;
6703   for (uint i = 0; !next->is_Start() && i < count; i++) {
6704     next = idom(next);
6705     assert(!idoms.member(next), "duplicated idom is not possible");
6706     idoms.push(next);
6707   }
6708 }
6709 
6710 void PhaseIdealLoop::dump_idoms_in_reverse(const Node* n, const Node_List& idom_list) const {
6711   Node* next;
6712   uint padding = 3;
6713   uint node_index_padding_width = static_cast<int>(log10(static_cast<double>(C->unique()))) + 1;
6714   for (int i = idom_list.size() - 1; i >= 0; i--) {
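         // The printed index loses a digit when it drops below 10 (and again below 100);
         // widen the padding so the node columns stay aligned.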
6715     if (i == 9 || i == 99) {
6716       padding++;
6717     }
6718     next = idom_list[i];
6719     tty->print_cr("idom[%d]:%*c%*d  %s", i, padding, ' ', node_index_padding_width, next->_idx, next->Name());
6720   }
6721   tty->print_cr("n:      %*c%*d  %s", padding, ' ', node_index_padding_width, n->_idx, n->Name());
6722 }
6723 #endif // NOT PRODUCT
6724 
6725 // Collect a reverse post-order (RPO) for the whole CFG.
6726 // The result list is in post-order (scan it backwards for the RPO).
6727 void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const {
6728   stk.push(start, 0);
6729   visited.set(start->_idx);
6730 
6731   while (stk.is_nonempty()) {
6732     Node* m   = stk.node();
6733     uint  idx = stk.index();
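         // While m still has unprocessed outputs, push any not-yet-visited CFG successor;
         // once all outputs have been processed, emit m in post-order.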
6734     if (idx < m->outcnt()) {
6735       stk.set_index(idx + 1);
6736       Node* n = m->raw_out(idx);
6737       if (n->is_CFG() && !visited.test_set(n->_idx)) {
6738         stk.push(n, 0);
6739       }
6740     } else {
6741       rpo_list.push(m);
6742       stk.pop();
6743     }
6744   }
6745 }
6746 
6747 
6748 //=============================================================================
6749 //------------------------------LoopTreeIterator-------------------------------
6750 
6751 // Advance to next loop tree using a preorder, left-to-right traversal.
6752 void LoopTreeIterator::next() {
6753   assert(!done(), "must not be done.");
6754   if (_curnt->_child != nullptr) {
6755     _curnt = _curnt->_child;
6756   } else if (_curnt->_next != nullptr) {
6757     _curnt = _curnt->_next;
6758   } else {
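         // Neither a child nor a next sibling: climb up until an ancestor with a next sibling is found,
         // or until the root is reached (then the traversal is done).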
6759     while (_curnt != _root && _curnt->_next == nullptr) {
6760       _curnt = _curnt->_parent;
6761     }
6762     if (_curnt == _root) {
6763       _curnt = nullptr;
6764       assert(done(), "must be done.");
6765     } else {
6766       assert(_curnt->_next != nullptr, "must be more to do");
6767       _curnt = _curnt->_next;
6768     }
6769   }
6770 }