1 /*
   2  * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciTypeFlow.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "opto/addnode.hpp"
  29 #include "opto/castnode.hpp"
  30 #include "opto/cfgnode.hpp"
  31 #include "opto/connode.hpp"
  32 #include "opto/loopnode.hpp"
  33 #include "opto/phaseX.hpp"
  34 #include "opto/predicates_enums.hpp"
  35 #include "opto/rootnode.hpp"
  36 #include "opto/runtime.hpp"
  37 #include "opto/subnode.hpp"
  38 #include "opto/subtypenode.hpp"
  39 
  40 // Portions of code courtesy of Clifford Click
  41 
  42 // Optimization - Graph Style
  43 
  44 
  45 #ifndef PRODUCT
  46 extern uint explicit_null_checks_elided;
  47 #endif
  48 
// Construct an If with the given control input and Bool condition.
// 'p' is the taken-branch probability and 'fcnt' the profiled frequency count.
// This variant is not part of an Assertion Predicate.
IfNode::IfNode(Node* control, Node* bol, float p, float fcnt)
    : MultiBranchNode(2),
      _prob(p),
      _fcnt(fcnt),
      _assertion_predicate_type(AssertionPredicateType::None) {
  init_node(control, bol);
}
  56 
// Construct an If that is part of an Assertion Predicate of the given type.
// Otherwise identical to the plain constructor above.
IfNode::IfNode(Node* control, Node* bol, float p, float fcnt, AssertionPredicateType assertion_predicate_type)
    : MultiBranchNode(2),
      _prob(p),
      _fcnt(fcnt),
      _assertion_predicate_type(assertion_predicate_type) {
  init_node(control, bol);
}
  64 
  65 //=============================================================================
  66 //------------------------------Value------------------------------------------
  67 // Return a tuple for whichever arm of the IF is reachable
  68 const Type* IfNode::Value(PhaseGVN* phase) const {
  69   if( !in(0) ) return Type::TOP;
  70   if( phase->type(in(0)) == Type::TOP )
  71     return Type::TOP;
  72   const Type *t = phase->type(in(1));
  73   if( t == Type::TOP )          // data is undefined
  74     return TypeTuple::IFNEITHER; // unreachable altogether
  75   if( t == TypeInt::ZERO )      // zero, or false
  76     return TypeTuple::IFFALSE;  // only false branch is reachable
  77   if( t == TypeInt::ONE )       // 1, or true
  78     return TypeTuple::IFTRUE;   // only true branch is reachable
  79   assert( t == TypeInt::BOOL, "expected boolean type" );
  80 
  81   return TypeTuple::IFBOTH;     // No progress
  82 }
  83 
// An If produces no register value; both of its outputs are control projections.
const RegMask &IfNode::out_RegMask() const {
  return RegMask::EMPTY;
}
  87 
  88 //------------------------------split_if---------------------------------------
  89 // Look for places where we merge constants, then test on the merged value.
  90 // If the IF test will be constant folded on the path with the constant, we
  91 // win by splitting the IF to before the merge point.
static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
  // I could be a lot more general here, but I'm trying to squeeze this
  // in before the Christmas '98 break so I'm gonna be kinda restrictive
  // on the patterns I accept.  CNC

  // Look for a compare of a constant and a merged value
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *b = i1->as_Bool();
  Node *cmp = b->in(1);
  if( !cmp->is_Cmp() ) return nullptr;
  i1 = cmp->in(1);
  if( i1 == nullptr || !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  Node *con2 = cmp->in(2);
  if( !con2->is_Con() ) return nullptr;
  // See that the merge point contains some constants
  // (scan the Phi inputs for the first constant or not-null pointer; it
  //  becomes 'con1', the value routed to the constant-side region below)
  Node *con1=nullptr;
  uint i4;
  RegionNode* phi_region = phi->region();
  for (i4 = 1; i4 < phi->req(); i4++ ) {
    con1 = phi->in(i4);
    // Do not optimize partially collapsed merges
    if (con1 == nullptr || phi_region->in(i4) == nullptr || igvn->type(phi_region->in(i4)) == Type::TOP) {
      igvn->_worklist.push(iff);
      return nullptr;
    }
    if( con1->is_Con() ) break; // Found a constant
    // Also allow null-vs-not-null checks
    const TypePtr *tp = igvn->type(con1)->isa_ptr();
    if( tp && tp->_ptr == TypePtr::NotNull )
      break;
  }
  if( i4 >= phi->req() ) return nullptr; // Found no constants

  igvn->C->set_has_split_ifs(true); // Has chance for split-if

  // Make sure that the compare can be constant folded away
  // (clone it with the constant substituted and check Value() is a singleton)
  Node *cmp2 = cmp->clone();
  cmp2->set_req(1,con1);
  cmp2->set_req(2,con2);
  const Type *t = cmp2->Value(igvn);
  // This compare is dead, so whack it!
  igvn->remove_dead_node(cmp2, PhaseIterGVN::NodeOrigin::Speculative);
  if( !t->singleton() ) return nullptr;

  // No intervening control, like a simple Call
  Node* r = iff->in(0);
  if (!r->is_Region() || r->is_Loop() || phi_region != r || r->as_Region()->is_copy()) {
    return nullptr;
  }

  // No other users of the cmp/bool
  if (b->outcnt() != 1 || cmp->outcnt() != 1) {
    //tty->print_cr("many users of cmp/bool");
    return nullptr;
  }

  // Make sure we can determine where all the uses of merged values go
  for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
    Node* u = r->fast_out(j);
    if( u == r ) continue;
    if( u == iff ) continue;
    if( u->outcnt() == 0 ) continue; // use is dead & ignorable
    if( !u->is_Phi() ) {
      /*
      if( u->is_Start() ) {
        tty->print_cr("Region has inlined start use");
      } else {
        tty->print_cr("Region has odd use");
        u->dump(2);
      }*/
      return nullptr;
    }
    if( u != phi ) {
      // CNC - do not allow any other merged value
      //tty->print_cr("Merging another value");
      //u->dump(2);
      return nullptr;
    }
    // Make sure we can account for all Phi uses
    for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
      Node* v = u->fast_out(k); // User of the phi
      // CNC - Allow only really simple patterns.
      // In particular I disallow AddP of the Phi, a fairly common pattern
      if (v == cmp) continue;  // The compare is OK
      if (v->is_ConstraintCast()) {
        // If the cast is derived from data flow edges, it may not have a control edge.
        // If so, it should be safe to split. But follow-up code can not deal with
        // this (l. 359). So skip.
        if (v->in(0) == nullptr) {
          return nullptr;
        }
        if (v->in(0)->in(0) == iff) {
          continue;               // CastPP/II of the IfNode is OK
        }
      }
      // Disabled following code because I cannot tell if exactly one
      // path dominates without a real dominator check. CNC 9/9/1999
      //uint vop = v->Opcode();
      //if( vop == Op_Phi ) {        // Phi from another merge point might be OK
      //  Node *r = v->in(0);        // Get controlling point
      //  if( !r ) return nullptr;   // Degraded to a copy
      //  // Find exactly one path in (either True or False doms, but not IFF)
      //  int cnt = 0;
      //  for( uint i = 1; i < r->req(); i++ )
      //    if( r->in(i) && r->in(i)->in(0) == iff )
      //      cnt++;
      //  if( cnt == 1 ) continue; // Exactly one of True or False guards Phi
      //}
      if( !v->is_Call() ) {
        /*
        if( v->Opcode() == Op_AddP ) {
          tty->print_cr("Phi has AddP use");
        } else if( v->Opcode() == Op_CastPP ) {
          tty->print_cr("Phi has CastPP use");
        } else if( v->Opcode() == Op_CastII ) {
          tty->print_cr("Phi has CastII use");
        } else {
          tty->print_cr("Phi has use I can't be bothered with");
        }
        */
      }
      return nullptr;

      /* CNC - Cut out all the fancy acceptance tests
      // Can we clone this use when doing the transformation?
      // If all uses are from Phis at this merge or constants, then YES.
      if( !v->in(0) && v != cmp ) {
        tty->print_cr("Phi has free-floating use");
        v->dump(2);
        return nullptr;
      }
      for( uint l = 1; l < v->req(); l++ ) {
        if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) &&
            !v->in(l)->is_Con() ) {
          tty->print_cr("Phi has use");
          v->dump(2);
          return nullptr;
        } // End of if Phi-use input is neither Phi nor Constant
      } // End of for all inputs to Phi-use
      */
    } // End of for all uses of Phi
  } // End of for all uses of Region

  // Only do this if the IF node is in a sane state
  if (iff->outcnt() != 2)
    return nullptr;

  // Got a hit!  Do the Mondo Hack!
  //
  //ABC  a1c   def   ghi            B     1     e     h   A C   a c   d f   g i
  // R - Phi - Phi - Phi            Rc - Phi - Phi - Phi   Rx - Phi - Phi - Phi
  //     cmp - 2                         cmp - 2               cmp - 2
  //       bool                            bool_c                bool_x
  //       if                               if_c                  if_x
  //      T  F                              T  F                  T  F
  // ..s..    ..t ..                   ..s..    ..t..        ..s..    ..t..
  //
  // Split the paths coming into the merge point into 2 separate groups of
  // merges.  On the left will be all the paths feeding constants into the
  // Cmp's Phi.  On the right will be the remaining paths.  The Cmp's Phi
  // will fold up into a constant; this will let the Cmp fold up as well as
  // all the control flow.  Below the original IF we have 2 control
  // dependent regions, 's' and 't'.  Now we will merge the two paths
  // just prior to 's' and 't' from the two IFs.  At least 1 path (and quite
  // likely 2 or more) will promptly constant fold away.
  PhaseGVN *phase = igvn;

  // Make a region merging constants and a region merging the rest
  // (req_c counts how many Region/Phi inputs carry the constant con1)
  uint req_c = 0;
  for (uint ii = 1; ii < r->req(); ii++) {
    if (phi->in(ii) == con1) {
      req_c++;
    }
    if (Node::may_be_loop_entry(r->in(ii))) {
      // Bail out if splitting through a region with a Parse Predicate input (could
      // also be a loop header before loop opts creates a LoopNode for it).
      return nullptr;
    }
  }

  // If all the defs of the phi are the same constant, we already have the desired end state.
  // Skip the split that would create empty phi and region nodes.
  if ((r->req() - req_c) == 1) {
    return nullptr;
  }

  // At this point we know that we can apply the split if optimization. If the region is still on the worklist,
  // we should wait until it is processed. The region might be removed which makes this optimization redundant.
  // This also avoids the creation of dead data loops when rewiring data nodes below when a region is dying.
  if (igvn->_worklist.member(r)) {
    igvn->_worklist.push(iff); // retry split if later again
    return nullptr;
  }

  // region_c merges the paths feeding the constant con1; region_x merges all
  // remaining paths, with phi_x carrying their (non-constant) values.
  Node *region_c = new RegionNode(req_c + 1);
  Node *phi_c    = con1;
  uint  len      = r->req();
  Node *region_x = new RegionNode(len - req_c);
  Node *phi_x    = PhiNode::make_blank(region_x, phi);
  for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) {
    if (phi->in(i) == con1) {
      region_c->init_req( i_c++, r  ->in(i) );
    } else {
      region_x->init_req( i_x,   r  ->in(i) );
      phi_x   ->init_req( i_x++, phi->in(i) );
    }
  }

  // Register the new RegionNodes but do not transform them.  Cannot
  // transform until the entire Region/Phi conglomerate has been hacked
  // as a single huge transform.
  igvn->register_new_node_with_optimizer( region_c );
  igvn->register_new_node_with_optimizer( region_x );
  // Prevent the untimely death of phi_x.  Currently he has no uses.  He is
  // about to get one.  If this only use goes away, then phi_x will look dead.
  // However, he will be picking up some more uses down below.
  Node *hook = new Node(4);
  hook->init_req(0, phi_x);
  hook->init_req(1, phi_c);
  phi_x = phase->transform( phi_x );

  // Make the compare
  // (cmp_c folds to the constant 't' computed above; cmp_x stays symbolic)
  Node *cmp_c = phase->makecon(t);
  Node *cmp_x = cmp->clone();
  cmp_x->set_req(1,phi_x);
  cmp_x->set_req(2,con2);
  cmp_x = phase->transform(cmp_x);
  // Make the bool
  Node *b_c = phase->transform(new BoolNode(cmp_c,b->_test._test));
  Node *b_x = phase->transform(new BoolNode(cmp_x,b->_test._test));
  // Make the IfNode
  IfNode* iff_c = iff->clone()->as_If();
  iff_c->set_req(0, region_c);
  iff_c->set_req(1, b_c);
  igvn->set_type_bottom(iff_c);
  igvn->_worklist.push(iff_c);
  hook->init_req(2, iff_c);

  IfNode* iff_x = iff->clone()->as_If();
  iff_x->set_req(0, region_x);
  iff_x->set_req(1, b_x);
  igvn->set_type_bottom(iff_x);
  igvn->_worklist.push(iff_x);
  hook->init_req(3, iff_x);

  // Make the true/false arms
  Node *iff_c_t = phase->transform(new IfTrueNode (iff_c));
  Node *iff_c_f = phase->transform(new IfFalseNode(iff_c));
  Node *iff_x_t = phase->transform(new IfTrueNode (iff_x));
  Node *iff_x_f = phase->transform(new IfFalseNode(iff_x));

  // Merge the TRUE paths
  Node *region_s = new RegionNode(3);
  igvn->_worklist.push(region_s);
  region_s->init_req(1, iff_c_t);
  region_s->init_req(2, iff_x_t);
  igvn->register_new_node_with_optimizer( region_s );

  // Merge the FALSE paths
  Node *region_f = new RegionNode(3);
  igvn->_worklist.push(region_f);
  region_f->init_req(1, iff_c_f);
  region_f->init_req(2, iff_x_f);
  igvn->register_new_node_with_optimizer( region_f );

  igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
  cmp->set_req(1,nullptr);  // Whack the inputs to cmp because it will be dead
  cmp->set_req(2,nullptr);
  // Check for all uses of the Phi and give them a new home.
  // The 'cmp' got cloned, but CastPP/IIs need to be moved.
  Node *phi_s = nullptr;     // do not construct unless needed
  Node *phi_f = nullptr;     // do not construct unless needed
  for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
    Node* v = phi->last_out(i2);// User of the phi
    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
    uint vop = v->Opcode();
    Node *proj = nullptr;
    if( vop == Op_Phi ) {       // Remote merge point
      Node *r = v->in(0);
      for (uint i3 = 1; i3 < r->req(); i3++)
        if (r->in(i3) && r->in(i3)->in(0) == iff) {
          proj = r->in(i3);
          break;
        }
    } else if( v->is_ConstraintCast() ) {
      proj = v->in(0);          // Controlling projection
    } else {
      assert( 0, "do not know how to handle this guy" );
    }
    guarantee(proj != nullptr, "sanity");

    // Route the use to the true-side (phi_s/region_s) or false-side
    // (phi_f/region_f) merge, depending on which projection guarded it.
    Node *proj_path_data, *proj_path_ctrl;
    if( proj->Opcode() == Op_IfTrue ) {
      if( phi_s == nullptr ) {
        // Only construct phi_s if needed, otherwise provides
        // interfering use.
        phi_s = PhiNode::make_blank(region_s,phi);
        phi_s->init_req( 1, phi_c );
        phi_s->init_req( 2, phi_x );
        hook->add_req(phi_s);
        phi_s = phase->transform(phi_s);
      }
      proj_path_data = phi_s;
      proj_path_ctrl = region_s;
    } else {
      if( phi_f == nullptr ) {
        // Only construct phi_f if needed, otherwise provides
        // interfering use.
        phi_f = PhiNode::make_blank(region_f,phi);
        phi_f->init_req( 1, phi_c );
        phi_f->init_req( 2, phi_x );
        hook->add_req(phi_f);
        phi_f = phase->transform(phi_f);
      }
      proj_path_data = phi_f;
      proj_path_ctrl = region_f;
    }

    // Fixup 'v' for for the split
    if( vop == Op_Phi ) {       // Remote merge point
      uint i;
      for( i = 1; i < v->req(); i++ )
        if( v->in(i) == phi )
          break;
      v->set_req(i, proj_path_data );
    } else if( v->is_ConstraintCast() ) {
      v->set_req(0, proj_path_ctrl );
      v->set_req(1, proj_path_data );
    } else
      ShouldNotReachHere();
  }

  // Now replace the original iff's True/False with region_s/region_t.
  // This makes the original iff go dead.
  for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
    Node* p = iff->last_out(i3);
    assert( p->Opcode() == Op_IfTrue || p->Opcode() == Op_IfFalse, "" );
    Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
    // Replace p with u
    igvn->add_users_to_worklist(p);
    for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
      Node* x = p->last_out(l);
      igvn->hash_delete(x);
      uint uses_found = 0;
      for( uint j = 0; j < x->req(); j++ ) {
        if( x->in(j) == p ) {
          x->set_req(j, u);
          uses_found++;
        }
      }
      l -= uses_found;    // we deleted 1 or more copies of this edge
    }
    igvn->remove_dead_node(p, PhaseIterGVN::NodeOrigin::Graph);
  }

  // Force the original merge dead
  igvn->hash_delete(r);
  // First, remove region's dead users.
  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
    Node* u = r->last_out(l);
    if( u == r ) {
      r->set_req(0, nullptr);
    } else {
      assert(u->outcnt() == 0, "only dead users");
      igvn->remove_dead_node(u, PhaseIterGVN::NodeOrigin::Graph);
    }
    l -= 1;
  }
  igvn->remove_dead_node(r, PhaseIterGVN::NodeOrigin::Graph);

  // Now remove the bogus extra edges used to keep things alive
  igvn->remove_dead_node(hook, PhaseIterGVN::NodeOrigin::Speculative);

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}
 471 
 472 IfNode* IfNode::make_with_same_profile(IfNode* if_node_profile, Node* ctrl, Node* bol) {
 473   // Assert here that we only try to create a clone from an If node with the same profiling if that actually makes sense.
 474   // Some If node subtypes should not be cloned in this way. In theory, we should not clone BaseCountedLoopEndNodes.
 475   // But they can end up being used as normal If nodes when peeling a loop - they serve as zero-trip guard.
 476   // Allow them as well.
 477   assert(if_node_profile->Opcode() == Op_If || if_node_profile->is_RangeCheck()
 478          || if_node_profile->is_BaseCountedLoopEnd(), "should not clone other nodes");
 479   if (if_node_profile->is_RangeCheck()) {
 480     // RangeCheck nodes could be further optimized.
 481     return new RangeCheckNode(ctrl, bol, if_node_profile->_prob, if_node_profile->_fcnt);
 482   } else {
 483     // Not a RangeCheckNode? Fall back to IfNode.
 484     return new IfNode(ctrl, bol, if_node_profile->_prob, if_node_profile->_fcnt);
 485   }
 486 }
 487 
 488 // if this IfNode follows a range check pattern return the projection
 489 // for the failed path
 490 IfProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) const {
 491   if (outcnt() != 2) {
 492     return nullptr;
 493   }
 494   Node* b = in(1);
 495   if (b == nullptr || !b->is_Bool())  return nullptr;
 496   BoolNode* bn = b->as_Bool();
 497   Node* cmp = bn->in(1);
 498   if (cmp == nullptr)  return nullptr;
 499   if (cmp->Opcode() != Op_CmpU)  return nullptr;
 500 
 501   l = cmp->in(1);
 502   r = cmp->in(2);
 503   flip_test = 1;
 504   if (bn->_test._test == BoolTest::le) {
 505     l = cmp->in(2);
 506     r = cmp->in(1);
 507     flip_test = 2;
 508   } else if (bn->_test._test != BoolTest::lt) {
 509     return nullptr;
 510   }
 511   if (l->is_top())  return nullptr;   // Top input means dead test
 512   if (r->Opcode() != Op_LoadRange && !is_RangeCheck())  return nullptr;
 513 
 514   // We have recognized one of these forms:
 515   //  Flip 1:  If (Bool[<] CmpU(l, LoadRange)) ...
 516   //  Flip 2:  If (Bool[<=] CmpU(LoadRange, l)) ...
 517 
 518   if (flip_test == 2) {
 519     return true_proj_or_null();
 520   }
 521   return false_proj_or_null();
 522 }
 523 
 524 
 525 //------------------------------is_range_check---------------------------------
 526 // Return 0 if not a range check.  Return 1 if a range check and set index and
 527 // offset.  Return 2 if we had to negate the test.  Index is null if the check
 528 // is versus a constant.
 529 int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
 530   int flip_test = 0;
 531   Node* l = nullptr;
 532   Node* r = nullptr;
 533   IfProjNode* iftrap = range_check_trap_proj(flip_test, l, r);
 534 
 535   if (iftrap == nullptr) {
 536     return 0;
 537   }
 538 
 539   // Make sure it's a real range check by requiring an uncommon trap
 540   // along the OOB path.  Otherwise, it's possible that the user wrote
 541   // something which optimized to look like a range check but behaves
 542   // in some other way.
 543   if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == nullptr) {
 544     return 0;
 545   }
 546 
 547   // Look for index+offset form
 548   Node* ind = l;
 549   jint  off = 0;
 550   if (l->is_top()) {
 551     return 0;
 552   } else if (l->Opcode() == Op_AddI) {
 553     if ((off = l->in(1)->find_int_con(0)) != 0) {
 554       ind = l->in(2)->uncast();
 555     } else if ((off = l->in(2)->find_int_con(0)) != 0) {
 556       ind = l->in(1)->uncast();
 557     }
 558   } else if ((off = l->find_int_con(-1)) >= 0) {
 559     // constant offset with no variable index
 560     ind = nullptr;
 561   } else {
 562     // variable index with no constant offset (or dead negative index)
 563     off = 0;
 564   }
 565 
 566   // Return all the values:
 567   index  = ind;
 568   offset = off;
 569   range  = r;
 570   return flip_test;
 571 }
 572 
 573 //------------------------------adjust_check-----------------------------------
 574 // Adjust (widen) a prior range check
 575 static void adjust_check(IfProjNode* proj, Node* range, Node* index,
 576                          int flip, jint off_lo, PhaseIterGVN* igvn) {
 577   PhaseGVN *gvn = igvn;
 578   // Break apart the old check
 579   Node *iff = proj->in(0);
 580   Node *bol = iff->in(1);
 581   if( bol->is_top() ) return;   // In case a partially dead range check appears
 582   // bail (or bomb[ASSERT/DEBUG]) if NOT projection-->IfNode-->BoolNode
 583   DEBUG_ONLY( if (!bol->is_Bool()) { proj->dump(3); fatal("Expect projection-->IfNode-->BoolNode"); } )
 584   if (!bol->is_Bool()) return;
 585 
 586   Node *cmp = bol->in(1);
 587   // Compute a new check
 588   Node *new_add = gvn->intcon(off_lo);
 589   if (index) {
 590     new_add = off_lo ? gvn->transform(new AddINode(index, new_add)) : index;
 591   }
 592   Node *new_cmp = (flip == 1)
 593     ? new CmpUNode(new_add, range)
 594     : new CmpUNode(range, new_add);
 595   new_cmp = gvn->transform(new_cmp);
 596   // See if no need to adjust the existing check
 597   if (new_cmp == cmp) return;
 598   // Else, adjust existing check
 599   Node* new_bol = gvn->transform(new BoolNode(new_cmp, bol->as_Bool()->_test._test));
 600   igvn->rehash_node_delayed(iff);
 601   iff->set_req_X(1, new_bol, igvn);
 602   // As part of range check smearing, this range check is widened. Loads and range check Cast nodes that are control
 603   // dependent on this range check now depend on multiple dominating range checks. These control dependent nodes end up
 604   // at the lowest/nearest dominating check in the graph. To ensure that these Loads/Casts do not float above any of the
 605   // dominating checks (even when the lowest dominating check is later replaced by yet another dominating check), we
 606   // need to pin them at the lowest dominating check.
 607   proj->pin_dependent_nodes(igvn);
 608 }
 609 
 610 //------------------------------up_one_dom-------------------------------------
 611 // Walk up the dominator tree one step.  Return null at root or true
 612 // complex merges.  Skips through small diamonds.
 613 Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
 614   Node *dom = curr->in(0);
 615   if( !dom )                    // Found a Region degraded to a copy?
 616     return curr->nonnull_req(); // Skip thru it
 617 
 618   if( curr != dom )             // Normal walk up one step?
 619     return dom;
 620 
 621   // Use linear_only if we are still parsing, since we cannot
 622   // trust the regions to be fully filled in.
 623   if (linear_only)
 624     return nullptr;
 625 
 626   if( dom->is_Root() )
 627     return nullptr;
 628 
 629   // Else hit a Region.  Check for a loop header
 630   if( dom->is_Loop() )
 631     return dom->in(1);          // Skip up thru loops
 632 
 633   // Check for small diamonds
 634   Node *din1, *din2, *din3, *din4;
 635   if( dom->req() == 3 &&        // 2-path merge point
 636       (din1 = dom ->in(1)) &&   // Left  path exists
 637       (din2 = dom ->in(2)) &&   // Right path exists
 638       (din3 = din1->in(0)) &&   // Left  path up one
 639       (din4 = din2->in(0)) ) {  // Right path up one
 640     if( din3->is_Call() &&      // Handle a slow-path call on either arm
 641         (din3 = din3->in(0)) )
 642       din3 = din3->in(0);
 643     if( din4->is_Call() &&      // Handle a slow-path call on either arm
 644         (din4 = din4->in(0)) )
 645       din4 = din4->in(0);
 646     if (din3 != nullptr && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
 647       return din3;              // Skip around diamonds
 648   }
 649 
 650   // Give up the search at true merges
 651   return nullptr;                  // Dead loop?  Or hit root?
 652 }
 653 
 654 
 655 //------------------------------filtered_int_type--------------------------------
 656 // Return a possibly more restrictive type for val based on condition control flow for an if
const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj) {
  assert(if_proj &&
         (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
  // Only handle the shape IfProj <- If <- Bool <- Cmp with 'val' as the
  // first compare operand and an int-typed second operand.
  if (if_proj->in(0) && if_proj->in(0)->is_If()) {
    IfNode* iff = if_proj->in(0)->as_If();
    if (iff->in(1) && iff->in(1)->is_Bool()) {
      BoolNode* bol = iff->in(1)->as_Bool();
      if (bol->in(1) && bol->in(1)->is_Cmp()) {
        const CmpNode* cmp  = bol->in(1)->as_Cmp();
        if (cmp->in(1) == val) {
          const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
          if (cmp2_t != nullptr) {
            jint lo = cmp2_t->_lo;
            jint hi = cmp2_t->_hi;
            // Negate the test when we are on the false projection, so 'msk'
            // is the condition known to hold on this path.
            BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
            switch (msk) {
            case BoolTest::ne: {
              // If val is compared to its lower or upper bound, we can narrow the type
              const TypeInt* val_t = gvn->type(val)->isa_int();
              if (val_t != nullptr && !val_t->singleton() && cmp2_t->is_con()) {
                if (val_t->_lo == lo) {
                  return TypeInt::make(val_t->_lo + 1, val_t->_hi, val_t->_widen);
                } else if (val_t->_hi == hi) {
                  return TypeInt::make(val_t->_lo, val_t->_hi - 1, val_t->_widen);
                }
              }
              // Can't refine type
              return nullptr;
            }
            case BoolTest::eq:
              // val equals the compared value: adopt its type outright.
              return cmp2_t;
            case BoolTest::lt:
              // val < [lo, hi]: val is at most hi-1 (guard against underflow).
              lo = TypeInt::INT->_lo;
              if (hi != min_jint) {
                hi = hi - 1;
              }
              break;
            case BoolTest::le:
              // val <= [lo, hi]: upper bound hi, no lower bound.
              lo = TypeInt::INT->_lo;
              break;
            case BoolTest::gt:
              // val > [lo, hi]: val is at least lo+1 (guard against overflow).
              if (lo != max_jint) {
                lo = lo + 1;
              }
              hi = TypeInt::INT->_hi;
              break;
            case BoolTest::ge:
              // lo unchanged
              hi = TypeInt::INT->_hi;
              break;
            default:
              break;
            }
            const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
            return rtn_t;
          }
        }
      }
    }
  }
  // Pattern not matched: no refinement possible.
  return nullptr;
}
 719 
 720 //------------------------------fold_compares----------------------------
 721 // See if a pair of CmpIs can be converted into a CmpU.  In some cases
 722 // the direction of this if is determined by the preceding if so it
 723 // can be eliminate entirely.
 724 //
 725 // Given an if testing (CmpI n v) check for an immediately control
 726 // dependent if that is testing (CmpI n v2) and has one projection
 727 // leading to this if and the other projection leading to a region
 728 // that merges one of this ifs control projections.
 729 //
 730 //                   If
 731 //                  / |
 732 //                 /  |
 733 //                /   |
 734 //              If    |
 735 //              /\    |
 736 //             /  \   |
 737 //            /    \  |
 738 //           /    Region
 739 //
 740 // Or given an if testing (CmpI n v) check for a dominating if that is
 741 // testing (CmpI n v2), both having one projection leading to an
 742 // uncommon trap. Allow Another independent guard in between to cover
 743 // an explicit range check:
 744 // if (index < 0 || index >= array.length) {
 745 // which may need a null check to guard the LoadRange
 746 //
 747 //                   If
 748 //                  / \
 749 //                 /   \
 750 //                /     \
 751 //              If      unc
 752 //              /\
 753 //             /  \
 754 //            /    \
 755 //           /      unc
 756 //
 757 
 758 // Is the comparison for this If suitable for folding?
 759 bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
 760   return in(1) != nullptr &&
 761     in(1)->is_Bool() &&
 762     in(1)->in(1) != nullptr &&
 763     in(1)->in(1)->Opcode() == Op_CmpI &&
 764     in(1)->in(1)->in(2) != nullptr &&
 765     in(1)->in(1)->in(2) != igvn->C->top() &&
 766     (in(1)->as_Bool()->_test.is_less() ||
 767      in(1)->as_Bool()->_test.is_greater() ||
 768      (fold_ne && in(1)->as_Bool()->_test._test == BoolTest::ne));
 769 }
 770 
 771 // Is a dominating control suitable for folding with this if?
 772 bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
 773   return ctrl != nullptr &&
 774     ctrl->is_IfProj() &&
 775     ctrl->outcnt() == 1 && // No side-effects
 776     ctrl->in(0) != nullptr &&
 777     ctrl->in(0)->Opcode() == Op_If &&
 778     ctrl->in(0)->outcnt() == 2 &&
 779     ctrl->in(0)->as_If()->cmpi_folds(igvn, true) &&
 780     // Must compare same value
 781     ctrl->in(0)->in(1)->in(1)->in(1) != nullptr &&
 782     ctrl->in(0)->in(1)->in(1)->in(1) != igvn->C->top() &&
 783     ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
 784 }
 785 
 786 // Do this If and the dominating If share a region?
// On success, 'success' is set to the projection of this If that merges into
// the region reached by the dominating If's other projection, and 'fail' to
// the remaining projection. Both are cleared to null on failure.
bool IfNode::has_shared_region(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail) const {
  IfProjNode* otherproj = proj->other_if_proj();
  Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null();
  // The other projection of the dominating If must feed a Region...
  RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : nullptr;
  success = nullptr;
  fail = nullptr;

  // ...be its only use, and the region must not merge any values (no phis).
  if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) {
    for (int i = 0; i < 2; i++) {
      IfProjNode* next_proj = proj_out(i)->as_IfProj();
      if (success == nullptr && next_proj->outcnt() == 1 && next_proj->unique_out() == region) {
        // First projection that flows (only) into the shared region.
        success = next_proj;
      } else if (fail == nullptr) {
        fail = next_proj;
      } else {
        // Both projections landed in the 'fail' slot: no shared region.
        success = nullptr;
        fail = nullptr;
      }
    }
  }
  return success != nullptr && fail != nullptr;
}
 809 
 810 bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) {
 811   // Different methods and methods containing jsrs are not supported.
 812   ciMethod* method = unc->jvms()->method();
 813   ciMethod* dom_method = dom_unc->jvms()->method();
 814   if (method != dom_method || method->has_jsrs()) {
 815     return false;
 816   }
 817   // Check that both traps are in the same activation of the method (instead
 818   // of two activations being inlined through different call sites) by verifying
 819   // that the call stacks are equal for both JVMStates.
 820   JVMState* dom_caller = dom_unc->jvms()->caller();
 821   JVMState* caller = unc->jvms()->caller();
 822   if ((dom_caller == nullptr) != (caller == nullptr)) {
 823     // The current method must either be inlined into both dom_caller and
 824     // caller or must not be inlined at all (top method). Bail out otherwise.
 825     return false;
 826   } else if (dom_caller != nullptr && !dom_caller->same_calls_as(caller)) {
 827     return false;
 828   }
 829   // Check that the bci of the dominating uncommon trap dominates the bci
 830   // of the dominated uncommon trap. Otherwise we may not re-execute
 831   // the dominated check after deoptimization from the merged uncommon trap.
 832   ciTypeFlow* flow = dom_method->get_flow_analysis();
 833   int bci = unc->jvms()->bci();
 834   int dom_bci = dom_unc->jvms()->bci();
 835   if (!flow->is_dominated_by(bci, dom_bci)) {
 836     return false;
 837   }
 838 
 839   return true;
 840 }
 841 
 842 // Return projection that leads to an uncommon trap if any
 843 ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call, Deoptimization::DeoptReason reason) const {
 844   for (int i = 0; i < 2; i++) {
 845     call = proj_out(i)->is_uncommon_trap_proj(reason);
 846     if (call != nullptr) {
 847       return proj_out(i);
 848     }
 849   }
 850   return nullptr;
 851 }
 852 
 853 // Do this If and the dominating If both branch out to an uncommon trap
bool IfNode::has_only_uncommon_traps(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail, PhaseIterGVN* igvn) const {
  IfProjNode* otherproj = proj->other_if_proj();
  // The dominating If must branch to an uncommon trap on its other projection.
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj();

  if (otherproj->outcnt() == 1 && dom_unc != nullptr) {
    // We need to re-execute the folded Ifs after deoptimization from the merged traps
    if (!dom_unc->jvms()->should_reexecute()) {
      return false;
    }

    // Find this If's own trapping projection (any deopt reason).
    CallStaticJavaNode* unc = nullptr;
    ProjNode* unc_proj = uncommon_trap_proj(unc);
    if (unc_proj != nullptr && unc_proj->outcnt() == 1) {
      if (dom_unc == unc) {
        // Allow the uncommon trap to be shared through a region
        RegionNode* r = unc->in(0)->as_Region();
        if (r->outcnt() != 2 || r->req() != 3 || r->find_edge(otherproj) == -1 || r->find_edge(unc_proj) == -1) {
          return false;
        }
        assert(r->has_phi() == nullptr, "simple region shouldn't have a phi");
      } else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) {
        // Otherwise both projections must feed their traps directly.
        return false;
      }

      if (!is_dominator_unc(dom_unc, unc)) {
        return false;
      }

      // See merge_uncommon_traps: the reason of the uncommon trap
      // will be changed and the state of the dominating If will be
      // used. Checked that we didn't apply this transformation in a
      // previous compilation and it didn't cause too many traps
      ciMethod* dom_method = dom_unc->jvms()->method();
      int dom_bci = dom_unc->jvms()->bci();
      if (!igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_unstable_fused_if) &&
          !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check) &&
          // Return true if c2 manages to reconcile with UnstableIf optimization. See the comments for it.
          igvn->C->remove_unstable_if_trap(dom_unc, true/*yield*/)) {
        // 'success' is this If's trapping projection, 'fail' the other one.
        success = unc_proj->as_IfProj();
        fail = unc_proj->as_IfProj()->other_if_proj();
        return true;
      }
    }
  }
  return false;
}
 900 
// Check that the 2 CmpI can be folded into a single CmpU and proceed with the folding
bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn) {
  Node* this_cmp = in(1)->in(1);
  BoolNode* this_bool = in(1)->as_Bool();
  IfNode* dom_iff = proj->in(0)->as_If();
  BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
  Node* lo = dom_iff->in(1)->in(1)->in(2); // bound tested by the dominating If ("a"/"b" below)
  Node* orig_lo = lo;                      // remembered for cleanup if we bail out
  Node* hi = this_cmp->in(2);              // bound tested by this If
  Node* n = this_cmp->in(1);               // the value ("x") both Ifs compare
  IfProjNode* otherproj = proj->other_if_proj();

  const TypeInt* lo_type = IfNode::filtered_int_type(igvn, n, otherproj);
  const TypeInt* hi_type = IfNode::filtered_int_type(igvn, n, success);

  BoolTest::mask lo_test = dom_bool->_test._test;
  BoolTest::mask hi_test = this_bool->_test._test;
  BoolTest::mask cond = hi_test;

  // Nodes created below are speculative: unless commit() is reached the
  // guard reports no IGVN progress for this attempt.
  PhaseTransform::SpeculativeProgressGuard progress_guard(igvn);
  // convert:
  //
  //          dom_bool = x {<,<=,>,>=} a
  //                           / \
  //     proj = {True,False}  /   \ otherproj = {False,True}
  //                         /
  //        this_bool = x {<,<=} b
  //                       / \
  //  fail = {True,False} /   \ success = {False,True}
  //                     /
  //
  // (Second test guaranteed canonicalized, first one may not have
  // been canonicalized yet)
  //
  // into:
  //
  // cond = (x - lo) {<u,<=u,>u,>=u} adjusted_lim
  //                       / \
  //                 fail /   \ success
  //                     /
  //

  // Figure out which of the two tests sets the upper bound and which
  // sets the lower bound if any.
  Node* adjusted_lim = nullptr;
  if (lo_type != nullptr && hi_type != nullptr && hi_type->_lo > lo_type->_hi &&
      hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) {
    assert((dom_bool->_test.is_less() && !proj->_con) ||
           (dom_bool->_test.is_greater() && proj->_con), "incorrect test");

    // this_bool = <
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b[ on the fail (= True) projection, b > a-1 (because of hi_type->_lo > lo_type->_hi test above):
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <u
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b[ on the fail (= True) projection, b > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <u
    // this_bool = <=
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b] on the fail (= True) projection, b+1 > a-1:
    //     lo = a, hi = b, adjusted_lim = b-a+1, cond = <u
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <=u doesn't work because b = a - 1 is possible, then b-a = -1
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b] on the fail (= True) projection b+1 > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a, cond = <u
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <=u doesn't work because a = b is possible, then b-a-1 = -1

    if (hi_test == BoolTest::lt) {
      if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
      }
    } else if (hi_test == BoolTest::le) {
      if (lo_test == BoolTest::ge || lo_test == BoolTest::lt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else {
        assert(false, "unhandled lo_test: %d", lo_test);
        return false;
      }
    } else {
      // Only reachable when this If's test is about to be re-canonicalized
      // by pending IGVN work; bail out and let that happen first.
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled hi_test: %d", hi_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && fail->_con, "incorrect test");
  } else if (lo_type != nullptr && hi_type != nullptr && lo_type->_lo > hi_type->_hi &&
             lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) {

    // this_bool = <
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in [b, a[ on the fail (= False) projection, a > b-1 (because of lo_type->_lo > hi_type->_hi above):
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in [b, a] on the fail (= False) projection, a+1 > b-1:
    //     lo = b, hi = a, adjusted_lim = a-b+1, cond = >=u
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >u doesn't work because a = b - 1 is possible, then b-a = -1
    // this_bool = <=
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in ]b, a[ on the fail (= False) projection, a > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in ]b, a] on the fail (= False) projection, a+1 > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b, cond = >=u
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >u doesn't work because a = b is possible, then b-a-1 = -1

    // Here the dominating If supplies the upper bound: swap roles.
    swap(lo, hi);
    swap(lo_type, hi_type);
    swap(lo_test, hi_test);

    assert((dom_bool->_test.is_less() && proj->_con) ||
           (dom_bool->_test.is_greater() && !proj->_con), "incorrect test");

    cond = (hi_test == BoolTest::le || hi_test == BoolTest::gt) ? BoolTest::gt : BoolTest::ge;

    if (lo_test == BoolTest::lt) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else if (lo_test == BoolTest::le) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else {
      // Only reachable when this If's test is about to be re-canonicalized
      // by pending IGVN work; bail out and let that happen first.
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled lo_test: %d", lo_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && !fail->_con, "incorrect test");
  } else {
    // The ranges don't describe a single interval check.
    const TypeInt* failtype = filtered_int_type(igvn, n, proj);
    if (failtype != nullptr) {
      const TypeInt* type2 = filtered_int_type(igvn, n, fail);
      if (type2 != nullptr) {
        if (failtype->filter(type2) == Type::TOP) {
          // previous if determines the result of this if so
          // replace Bool with constant
          igvn->replace_input_of(this, 1, igvn->intcon(success->_con));
          progress_guard.commit();
          return true;
        }
      }
    }
    return false;
  }

  assert(lo != nullptr && hi != nullptr, "sanity");
  Node* hook = new Node(lo); // Add a use to lo to prevent him from dying
  // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo))
  Node* adjusted_val = igvn->transform(new SubINode(n,  lo));
  if (adjusted_lim == nullptr) {
    adjusted_lim = igvn->transform(new SubINode(hi, lo));
  }
  hook->destruct(igvn);

  if (adjusted_val->is_top() || adjusted_lim->is_top()) {
    return false;
  }

  if (igvn->type(adjusted_lim)->is_int()->_lo < 0 &&
      !igvn->C->post_loop_opts_phase()) {
    // If range check elimination applies to this comparison, it includes code to protect from overflows that may
    // cause the main loop to be skipped entirely. Delay this transformation.
    // Example:
    // for (int i = 0; i < limit; i++) {
    //   if (i < max_jint && i > min_jint) {...
    // }
    // Comparisons folded as:
    // i - min_jint - 1 <u -2
    // when RC applies, main loop limit becomes:
    // min(limit, max(-2 + min_jint + 1, min_jint))
    // = min(limit, min_jint)
    // = min_jint
    // Clean up the speculative nodes we created before bailing out.
    if (lo != orig_lo && lo->outcnt() == 0) {
      igvn->remove_dead_node(lo, PhaseIterGVN::NodeOrigin::Speculative);
    }
    if (adjusted_val->outcnt() == 0) {
      igvn->remove_dead_node(adjusted_val, PhaseIterGVN::NodeOrigin::Speculative);
    }
    if (adjusted_lim->outcnt() == 0) {
      igvn->remove_dead_node(adjusted_lim, PhaseIterGVN::NodeOrigin::Speculative);
    }
    igvn->C->record_for_post_loop_opts_igvn(this);
    return false;
  }

  Node* newcmp = igvn->transform(new CmpUNode(adjusted_val, adjusted_lim));
  Node* newbool = igvn->transform(new BoolNode(newcmp, cond));

  // Kill the dominating If (its condition becomes the taken projection's
  // constant) and install the merged unsigned test on this If.
  igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
  igvn->replace_input_of(this, 1, newbool);

  progress_guard.commit();
  return true;
}
1115 
1116 // Merge the branches that trap for this If and the dominating If into
1117 // a single region that branches to the uncommon trap for the
1118 // dominating If
Node* IfNode::merge_uncommon_traps(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn) {
  Node* res = this;
  assert(success->in(0) == this, "bad projection");

  IfProjNode* otherproj = proj->other_if_proj();

  CallStaticJavaNode* unc = success->is_uncommon_trap_proj();
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj();

  if (unc != dom_unc) {
    // The two Ifs trap at different calls: merge both trapping paths into a
    // region feeding the dominating trap.
    Node* r = new RegionNode(3);

    r->set_req(1, otherproj);
    r->set_req(2, success);
    r = igvn->transform(r);
    assert(r->is_Region(), "can't go away");

    // Make both If trap at the state of the first If: once the CmpI
    // nodes are merged, if we trap we don't know which of the CmpI
    // nodes would have caused the trap so we have to restart
    // execution at the first one
    igvn->replace_input_of(dom_unc, 0, r);
    igvn->replace_input_of(unc, 0, igvn->C->top()); // disconnect this If's own trap
  }
  int trap_request = dom_unc->uncommon_trap_request();
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);

  int flip_test = 0;
  Node* l = nullptr;
  Node* r = nullptr;

  if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != nullptr) {
    // If this looks like a range check, change the trap to
    // Reason_range_check so the compiler recognizes it as a range
    // check and applies the corresponding optimizations
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_range_check, action);

    improve_address_types(l, r, fail, igvn);

    // Replace this If by a RangeCheckNode with the same test and profile.
    res = igvn->transform(new RangeCheckNode(in(0), in(1), _prob, _fcnt));
  } else if (unc != dom_unc) {
    // If we trap we won't know what CmpI would have caused the trap
    // so use a special trap reason to mark this pair of CmpI nodes as
    // bad candidate for folding. On recompilation we won't fold them
    // and we may trap again but this time we'll know what branch
    // traps
    trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
  }
  igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
  return res;
}
1171 
1172 // If we are turning 2 CmpI nodes into a CmpU that follows the pattern
1173 // of a rangecheck on index i, on 64 bit the compares may be followed
1174 // by memory accesses using i as index. In that case, the CmpU tells
1175 // us something about the values taken by i that can help the compiler
1176 // (see Compile::conv_I2X_index())
void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn) {
#ifdef _LP64
  ResourceMark rm;
  Node_Stack stack(2);

  assert(r->Opcode() == Op_LoadRange, "unexpected range check");
  const TypeInt* array_size = igvn->type(r)->is_int();

  // Depth-first walk over the uses of index l, looking for ConvI2L nodes
  // (and the long/pointer address computations built on them) that feed
  // memory accesses dominated by the range check.
  stack.push(l, 0);

  while(stack.size() > 0) {
    Node* n = stack.node();
    uint start = stack.index();

    uint i = start;
    for (; i < n->outcnt(); i++) {
      Node* use = n->raw_out(i);
      if (stack.size() == 1) {
        if (use->Opcode() == Op_ConvI2L) {
          const TypeLong* bounds = use->as_Type()->type()->is_long();
          // Only bother if the ConvI2L's type is strictly wider than the
          // array size; otherwise it cannot be improved.
          if (bounds->_lo <= array_size->_lo && bounds->_hi >= array_size->_hi &&
              (bounds->_lo != array_size->_lo || bounds->_hi != array_size->_hi)) {
            stack.set_index(i+1);
            stack.push(use, 0);
            break;
          }
        }
      } else if (use->is_Mem()) {
        // Bounded walk up the dominators from the access's control
        // (note: this 'i' intentionally shadows the use iterator).
        Node* ctrl = use->in(0);
        for (int i = 0; i < 10 && ctrl != nullptr && ctrl != fail; i++) {
          ctrl = up_one_dom(ctrl);
        }
        if (ctrl == fail) {
          Node* init_n = stack.node_at(1);
          assert(init_n->Opcode() == Op_ConvI2L, "unexpected first node");
          // Create a new narrow ConvI2L node that is dependent on the range check
          Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);

          // The type of the ConvI2L may be widened and so the new
          // ConvI2L may not be better than an existing ConvI2L
          if (new_n != init_n) {
            // Re-clone the chain of nodes between the ConvI2L and this
            // memory access so it uses the narrowed ConvI2L.
            for (uint j = 2; j < stack.size(); j++) {
              Node* n = stack.node_at(j);
              Node* clone = n->clone();
              int rep = clone->replace_edge(init_n, new_n, igvn);
              assert(rep > 0, "can't find expected node?");
              clone = igvn->transform(clone);
              init_n = n;
              new_n = clone;
            }
            igvn->hash_delete(use);
            int rep = use->replace_edge(init_n, new_n, igvn);
            assert(rep > 0, "can't find expected node?");
            igvn->transform(use);
            if (init_n->outcnt() == 0) {
              igvn->_worklist.push(init_n);
            }
          }
        }
      } else if (use->in(0) == nullptr && (igvn->type(use)->isa_long() ||
                                        igvn->type(use)->isa_ptr())) {
        // Floating long/pointer computation: keep following the chain.
        stack.set_index(i+1);
        stack.push(use, 0);
        break;
      }
    }
    if (i == n->outcnt()) {
      stack.pop();
    }
  }
#endif
}
1249 
1250 bool IfNode::is_cmp_with_loadrange(IfProjNode* proj) const {
1251   if (in(1) != nullptr &&
1252       in(1)->in(1) != nullptr &&
1253       in(1)->in(1)->in(2) != nullptr) {
1254     Node* other = in(1)->in(1)->in(2);
1255     if (other->Opcode() == Op_LoadRange &&
1256         ((other->in(0) != nullptr && other->in(0) == proj) ||
1257          (other->in(0) == nullptr &&
1258           other->in(2) != nullptr &&
1259           other->in(2)->is_AddP() &&
1260           other->in(2)->in(1) != nullptr &&
1261           other->in(2)->in(1)->Opcode() == Op_CastPP &&
1262           other->in(2)->in(1)->in(0) == proj))) {
1263       return true;
1264     }
1265   }
1266   return false;
1267 }
1268 
1269 bool IfNode::is_null_check(IfProjNode* proj, PhaseIterGVN* igvn) const {
1270   Node* other = in(1)->in(1)->in(2);
1271   if (other->in(MemNode::Address) != nullptr &&
1272       proj->in(0)->in(1) != nullptr &&
1273       proj->in(0)->in(1)->is_Bool() &&
1274       proj->in(0)->in(1)->in(1) != nullptr &&
1275       proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1276       proj->in(0)->in(1)->in(1)->in(2) != nullptr &&
1277       proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
1278       igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
1279     return true;
1280   }
1281   return false;
1282 }
1283 
1284 // Check that the If that is in between the 2 integer comparisons has
1285 // no side effect
1286 bool IfNode::is_side_effect_free_test(IfProjNode* proj, PhaseIterGVN* igvn) const {
1287   if (proj == nullptr) {
1288     return false;
1289   }
1290   CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
1291   if (unc != nullptr && proj->outcnt() <= 2) {
1292     if (proj->outcnt() == 1 ||
1293         // Allow simple null check from LoadRange
1294         (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
1295       CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
1296       CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern();
1297       assert(dom_unc != nullptr, "is_uncommon_trap_if_pattern returned null");
1298 
1299       // reroute_side_effect_free_unc changes the state of this
1300       // uncommon trap to restart execution at the previous
1301       // CmpI. Check that this change in a previous compilation didn't
1302       // cause too many traps.
1303       int trap_request = unc->uncommon_trap_request();
1304       Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
1305 
1306       if (igvn->C->too_many_traps(dom_unc->jvms()->method(), dom_unc->jvms()->bci(), reason)) {
1307         return false;
1308       }
1309 
1310       if (!is_dominator_unc(dom_unc, unc)) {
1311         return false;
1312       }
1313 
1314       return true;
1315     }
1316   }
1317   return false;
1318 }
1319 
1320 // Make the If between the 2 integer comparisons trap at the state of
1321 // the first If: the last CmpI is the one replaced by a CmpU and the
1322 // first CmpI is eliminated, so the test between the 2 CmpI nodes
1323 // won't be guarded by the first CmpI anymore. It can trap in cases
1324 // where the first CmpI would have prevented it from executing: on a
1325 // trap, we need to restart execution at the state of the first CmpI
void IfNode::reroute_side_effect_free_unc(IfProjNode* proj, IfProjNode* dom_proj, PhaseIterGVN* igvn) {
  CallStaticJavaNode* dom_unc = dom_proj->is_uncommon_trap_if_pattern();
  IfProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern();
  Node* call_proj = dom_unc->unique_ctrl_out();
  Node* halt = call_proj->unique_ctrl_out(); // presumably the Halt below the trap call

  // Clone the dominating trap call (with its control projection and the node
  // below it) and hang it off a clone of this If's trapping projection,
  // keeping this trap's own request (the Parms input comes from 'unc').
  Node* new_unc = dom_unc->clone();
  call_proj = call_proj->clone();
  halt = halt->clone();
  Node* c = otherproj->clone();

  c = igvn->transform(c);
  new_unc->set_req(TypeFunc::Parms, unc->in(TypeFunc::Parms));
  new_unc->set_req(0, c);
  new_unc = igvn->transform(new_unc);
  call_proj->set_req(0, new_unc);
  call_proj = igvn->transform(call_proj);
  halt->set_req(0, call_proj);
  halt = igvn->transform(halt);

  // Retire the old trapping projection and anchor the cloned trap at the root.
  igvn->replace_node(otherproj, igvn->C->top());
  igvn->C->root()->add_req(halt);
}
1350 
// Entry point: try to fold this If's CmpI with a dominating CmpI on the same
// value. Returns the replacement node on success, nullptr otherwise.
Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
  if (Opcode() != Op_If) return nullptr; // only plain If nodes are folded

  if (cmpi_folds(igvn)) {
    Node* ctrl = in(0);
    if (is_ctrl_folds(ctrl, igvn)) {
      // An integer comparison immediately dominated by another integer
      // comparison
      IfProjNode* success = nullptr;
      IfProjNode* fail = nullptr;
      IfProjNode* dom_cmp = ctrl->as_IfProj();
      if (has_shared_region(dom_cmp, success, fail) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return this;
      }
      if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
      return nullptr;
    } else if (ctrl->in(0) != nullptr &&
               ctrl->in(0)->in(0) != nullptr) {
      IfProjNode* success = nullptr;
      IfProjNode* fail = nullptr;
      Node* dom = ctrl->in(0)->in(0);
      IfProjNode* dom_cmp = dom->isa_IfProj();
      IfProjNode* other_cmp = ctrl->isa_IfProj();

      // Check if it's an integer comparison dominated by another
      // integer comparison with another test in between
      if (is_ctrl_folds(dom, igvn) &&
          has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          is_side_effect_free_test(other_cmp, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
    }
  }
  return nullptr;
}
1395 
1396 //------------------------------remove_useless_bool----------------------------
1397 // Check for people making a useless boolean: things like
1398 // if( (x < y ? true : false) ) { ... }
1399 // Replace with if( x < y ) { ... }
static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *bol = i1->as_Bool();

  Node *cmp = bol->in(1);
  if( cmp->Opcode() != Op_CmpI ) return nullptr;

  // Must be comparing against a bool
  const Type *cmp2_t = phase->type( cmp->in(2) );
  if( cmp2_t != TypeInt::ZERO &&
      cmp2_t != TypeInt::ONE )
    return nullptr;

  // Find a prior merge point merging the boolean
  i1 = cmp->in(1);
  if( !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  if( phase->type( phi ) != TypeInt::BOOL )
    return nullptr;

  // Check for diamond pattern
  int true_path = phi->is_diamond_phi();
  if( true_path == 0 ) return nullptr;

  // Make sure that iff and the control of the phi are different. This
  // should really only happen for dead control flow since it requires
  // an illegal cycle.
  if (phi->in(0)->in(1)->in(0) == iff) return nullptr;

  // phi->region->if_proj->ifnode->bool->cmp
  BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();

  // Now get the 'sense' of the test correct so we can plug in
  // either iff2->in(1) or its complement.
  // flip == 1 means the negation of bol2 must be used.
  int flip = 0;
  if( bol->_test._test == BoolTest::ne ) flip = 1-flip;
  else if( bol->_test._test != BoolTest::eq ) return nullptr;
  if( cmp2_t == TypeInt::ZERO ) flip = 1-flip;

  const Type *phi1_t = phase->type( phi->in(1) );
  const Type *phi2_t = phase->type( phi->in(2) );
  // Check for Phi(0,1) and flip
  if( phi1_t == TypeInt::ZERO ) {
    if( phi2_t != TypeInt::ONE ) return nullptr;
    flip = 1-flip;
  } else {
    // Check for Phi(1,0)
    if( phi1_t != TypeInt::ONE  ) return nullptr;
    if( phi2_t != TypeInt::ZERO ) return nullptr;
  }
  if( true_path == 2 ) {
    flip = 1-flip;
  }

  Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2);
  assert(new_bol != iff->in(1), "must make progress");
  iff->set_req_X(1, new_bol, phase);
  // Intervening diamond probably goes dead
  phase->C->set_major_progress();
  return iff;
}
1462 
1463 static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
1464 
// Describes one range check: its controlling If projection and the constant
// offset of the index being checked (see usage below this view).
struct RangeCheck {
  IfProjNode* ctl; // projection of the range check If
  jint off;        // constant offset added to the checked index
};
1469 
1470 Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
1471   if (remove_dead_region(phase, can_reshape))  return this;
1472   // No Def-Use info?
1473   if (!can_reshape)  return nullptr;
1474 
1475   // Don't bother trying to transform a dead if
1476   if (in(0)->is_top())  return nullptr;
1477   // Don't bother trying to transform an if with a dead test
1478   if (in(1)->is_top())  return nullptr;
1479   // Another variation of a dead test
1480   if (in(1)->is_Con())  return nullptr;
1481   // Another variation of a dead if
1482   if (outcnt() < 2)  return nullptr;
1483 
1484   // Canonicalize the test.
1485   Node* idt_if = idealize_test(phase, this);
1486   if (idt_if != nullptr)  return idt_if;
1487 
1488   // Try to split the IF
1489   PhaseIterGVN *igvn = phase->is_IterGVN();
1490   Node *s = split_if(this, igvn);
1491   if (s != nullptr)  return s;
1492 
1493   return NodeSentinel;
1494 }
1495 
1496 //------------------------------Ideal------------------------------------------
1497 // Return a node which is more "ideal" than the current node.  Strip out
1498 // control copies
1499 Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1500   Node* res = Ideal_common(phase, can_reshape);
1501   if (res != NodeSentinel) {
1502     return res;
1503   }
1504 
1505   // Check for people making a useless boolean: things like
1506   // if( (x < y ? true : false) ) { ... }
1507   // Replace with if( x < y ) { ... }
1508   Node* bol2 = remove_useless_bool(this, phase);
1509   if (bol2) return bol2;
1510 
1511   if (in(0) == nullptr) return nullptr;     // Dead loop?
1512 
1513   PhaseIterGVN* igvn = phase->is_IterGVN();
1514   Node* result = fold_compares(igvn);
1515   if (result != nullptr) {
1516     return result;
1517   }
1518 
1519   // Scan for an equivalent test
1520   int dist = 4;               // Cutoff limit for search
1521   if (is_If() && in(1)->is_Bool()) {
1522     Node* cmp = in(1)->in(1);
1523     if (cmp->Opcode() == Op_CmpP &&
1524         cmp->in(2) != nullptr && // make sure cmp is not already dead
1525         cmp->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1526       dist = 64;              // Limit for null-pointer scans
1527     }
1528   }
1529 
1530   Node* prev_dom = search_identical(dist, igvn);
1531 
1532   if (prev_dom != nullptr) {
1533     // Dominating CountedLoopEnd (left over from some now dead loop) will become the new loop exit. Outer strip mined
1534     // loop will go away. Mark this loop as no longer strip mined.
1535     if (is_CountedLoopEnd()) {
1536       CountedLoopNode* counted_loop_node = as_CountedLoopEnd()->loopnode();
1537       if (counted_loop_node != nullptr) {
1538         counted_loop_node->clear_strip_mined();
1539       }
1540     }
1541     // Replace dominated IfNode
1542     return dominated_by(prev_dom, igvn, false);
1543   }
1544 
1545   return simple_subsuming(igvn);
1546 }
1547 
1548 //------------------------------dominated_by-----------------------------------
// 'prev_dom' is an equivalent test dominating 'this' one. Remove this If and
// reroute all users of its projections:
// - users that only depend on the test move under the matching projection of
//   the dominating If (or to top when the projection senses contradict),
// - all other control users (e.g. Regions) are rerouted to this If's
//   immediate dominator (or to top for the contradicting projection).
// When 'prev_dom_not_imply_this' is set, the dominating test alone does not
// imply this one, so rerouted dependent nodes are additionally pinned to keep
// them from later floating above 'prev_dom'.
// Returns a fresh, unused node so the caller reports progress without
// handing back a dead node (and without returning top, see below).
Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool prev_dom_not_imply_this) {
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print("   Removing IfNode: "); this->dump();
  }
#endif

  igvn->hash_delete(this);      // Remove self to prevent spurious V-N
  Node *idom = in(0);
  // Need opcode to decide which way 'this' test goes
  int prev_op = prev_dom->Opcode();
  Node *top = igvn->C->top(); // Shortcut to top

  // Now walk the current IfNode's projections.
  // Loop ends when 'this' has no more uses.
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
    Node *ifp = last_out(i);     // Get IfTrue/IfFalse
    igvn->add_users_to_worklist(ifp);
    // Check which projection it is and set target.
    // Data-target is either the dominating projection of the same type
    // or TOP if the dominating projection is of opposite type.
    // Data-target will be used as the new control edge for the non-CFG
    // nodes like Casts and Loads.
    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
    // Control-target is just the If's immediate dominator or TOP.
    Node *ctrl_target = (ifp->Opcode() == prev_op) ?     idom : top;

    // For each child of an IfTrue/IfFalse projection, reroute.
    // Loop ends when projection has no more uses.
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
      if (s->depends_only_on_test()) {
        // For control producers
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
        if (prev_dom_not_imply_this && data_target != top) {
          // If prev_dom_not_imply_this, s now depends on multiple tests with prev_dom being the
          // lowest dominating one. As a result, it must be pinned there. Otherwise, it can be
          // incorrectly moved to a dominating test equivalent to the lowest one here.
          Node* clone = s->pin_node_under_control();
          if (clone != nullptr) {
            igvn->register_new_node_with_optimizer(clone, s);
            igvn->replace_node(s, clone);
          }
        }
      } else {
        // Find the control input matching this def-use edge.
        // For Regions it may not be in slot 0.
        uint l;
        for (l = 0; s->in(l) != ifp; l++) { }
        igvn->replace_input_of(s, l, ctrl_target);
      }
    } // End for each child of a projection

    igvn->remove_dead_node(ifp, PhaseIterGVN::NodeOrigin::Graph);
  } // End for each IfTrue/IfFalse child of If

  // Kill the IfNode
  igvn->remove_dead_node(this, PhaseIterGVN::NodeOrigin::Graph);

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}
1612 
1613 Node* IfNode::search_identical(int dist, PhaseIterGVN* igvn) {
1614   // Setup to scan up the CFG looking for a dominating test
1615   Node* dom = in(0);
1616   Node* prev_dom = this;
1617   int op = Opcode();
1618   // Search up the dominator tree for an If with an identical test
1619   while (dom->Opcode() != op ||  // Not same opcode?
1620          !same_condition(dom, igvn) ||  // Not same input 1?
1621          prev_dom->in(0) != dom) {  // One path of test does not dominate?
1622     if (dist < 0) return nullptr;
1623 
1624     dist--;
1625     prev_dom = dom;
1626     dom = up_one_dom(dom);
1627     if (!dom) return nullptr;
1628   }
1629 
1630   // Check that we did not follow a loop back to ourselves
1631   if (this == dom) {
1632     return nullptr;
1633   }
1634 
1635 #ifndef PRODUCT
1636   if (dist > 2) { // Add to count of null checks elided
1637     explicit_null_checks_elided++;
1638   }
1639 #endif
1640 
1641   return prev_dom;
1642 }
1643 
1644 bool IfNode::same_condition(const Node* dom, PhaseIterGVN* igvn) const {
1645   Node* dom_bool = dom->in(1);
1646   Node* this_bool = in(1);
1647   if (dom_bool == this_bool) {
1648     return true;
1649   }
1650 
1651   if (dom_bool == nullptr || !dom_bool->is_Bool() ||
1652       this_bool == nullptr || !this_bool->is_Bool()) {
1653     return false;
1654   }
1655   Node* dom_cmp = dom_bool->in(1);
1656   Node* this_cmp = this_bool->in(1);
1657 
1658   // If the comparison is a subtype check, then SubTypeCheck nodes may have profile data attached to them and may be
1659   // different nodes even-though they perform the same subtype check
1660   if (dom_cmp == nullptr || !dom_cmp->is_SubTypeCheck() ||
1661       this_cmp == nullptr || !this_cmp->is_SubTypeCheck()) {
1662     return false;
1663   }
1664 
1665   if (dom_cmp->in(1) != this_cmp->in(1) ||
1666       dom_cmp->in(2) != this_cmp->in(2) ||
1667       dom_bool->as_Bool()->_test._test != this_bool->as_Bool()->_test._test) {
1668     return false;
1669   }
1670 
1671   return true;
1672 }
1673 
1674 
1675 static int subsuming_bool_test_encode(Node*);
1676 
1677 // Check if dominating test is subsuming 'this' one.
1678 //
1679 //              cmp
1680 //              / \
1681 //     (r1)  bool  \
1682 //            /    bool (r2)
1683 //    (dom) if       \
1684 //            \       )
1685 //    (pre)  if[TF]  /
1686 //               \  /
1687 //                if (this)
1688 //   \r1
1689 //  r2\  eqT  eqF  neT  neF  ltT  ltF  leT  leF  gtT  gtF  geT  geF
1690 //  eq    t    f    f    t    f    -    -    f    f    -    -    f
1691 //  ne    f    t    t    f    t    -    -    t    t    -    -    t
1692 //  lt    f    -    -    f    t    f    -    f    f    -    f    t
1693 //  le    t    -    -    t    t    -    t    f    f    t    -    t
1694 //  gt    f    -    -    f    f    -    f    t    t    f    -    f
1695 //  ge    t    -    -    t    f    t    -    t    t    -    t    f
1696 //
// Replace this test by a constant when a dominating test over the same Cmp
// node already decides its outcome (per the table above). The subsumed If is
// not removed here; its condition is wired to intcon(0)/intcon(1) and IGVN
// folds the branch away later.
Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
  // Table encoding: N/A (na), True-branch (tb), False-branch (fb).
  static enum { na, tb, fb } s_short_circuit_map[6][12] = {
  /*rel: eq+T eq+F ne+T ne+F lt+T lt+F le+T le+F gt+T gt+F ge+T ge+F*/
  /*eq*/{ tb,  fb,  fb,  tb,  fb,  na,  na,  fb,  fb,  na,  na,  fb },
  /*ne*/{ fb,  tb,  tb,  fb,  tb,  na,  na,  tb,  tb,  na,  na,  tb },
  /*lt*/{ fb,  na,  na,  fb,  tb,  fb,  na,  fb,  fb,  na,  fb,  tb },
  /*le*/{ tb,  na,  na,  tb,  tb,  na,  tb,  fb,  fb,  tb,  na,  tb },
  /*gt*/{ fb,  na,  na,  fb,  fb,  na,  fb,  tb,  tb,  fb,  na,  fb },
  /*ge*/{ tb,  na,  na,  tb,  fb,  tb,  na,  tb,  tb,  na,  tb,  fb }};

  // 'this' must hang directly off a projection of a dominating If ...
  Node* pre = in(0);
  if (!pre->is_IfTrue() && !pre->is_IfFalse()) {
    return nullptr;
  }
  Node* dom = pre->in(0);
  if (!dom->is_If()) {
    return nullptr;
  }
  // ... and both tests must be Bools over the very same Cmp node.
  Node* bol = in(1);
  if (!bol->is_Bool()) {
    return nullptr;
  }
  Node* cmp = in(1)->in(1);
  if (!cmp->is_Cmp()) {
    return nullptr;
  }

  if (!dom->in(1)->is_Bool()) {
    return nullptr;
  }
  if (dom->in(1)->in(1) != cmp) {  // Not same cond?
    return nullptr;
  }

  // Table lookup: row is this test's relation, column combines the dominating
  // relation with which branch of the dominating test we sit on.
  int drel = subsuming_bool_test_encode(dom->in(1));
  int trel = subsuming_bool_test_encode(bol);
  int bout = pre->is_IfFalse() ? 1 : 0;

  if (drel < 0 || trel < 0) {
    return nullptr; // Relation not modeled in the table (overflow etc.).
  }
  int br = s_short_circuit_map[trel][2*drel+bout];
  if (br == na) {
    return nullptr; // Dominating test does not decide this one.
  }
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print("   Subsumed IfNode: "); dump();
  }
#endif
  // Replace condition with constant True(1)/False(0).
  bool is_always_true = br == tb;
  set_req(1, igvn->intcon(is_always_true ? 1 : 0));

  // Update any data dependencies to the directly dominating test. This subsumed test is not immediately removed by igvn
  // and therefore subsequent optimizations might miss these data dependencies otherwise. There might be a dead loop
  // ('always_taken_proj' == 'pre') that is cleaned up later. Skip this case to make the iterator work properly.
  Node* always_taken_proj = proj_out(is_always_true);
  if (always_taken_proj != pre) {
    for (DUIterator_Fast imax, i = always_taken_proj->fast_outs(imax); i < imax; i++) {
      Node* u = always_taken_proj->fast_out(i);
      if (!u->is_CFG()) {
        igvn->replace_input_of(u, 0, pre);
        --i;
        --imax;
      }
    }
  }

  if (bol->outcnt() == 0) {
    igvn->remove_dead_node(bol, PhaseIterGVN::NodeOrigin::Graph);    // Kill the BoolNode.
  }
  return this;
}
1772 
1773 // Map BoolTest to local table encoding. The BoolTest (e)numerals
1774 //   { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1 }
1775 // are mapped to table indices, while the remaining (e)numerals in BoolTest
1776 //   { overflow = 2, no_overflow = 6, never = 8, illegal = 9 }
1777 // are ignored (these are not modeled in the table).
1778 //
1779 static int subsuming_bool_test_encode(Node* node) {
1780   precond(node->is_Bool());
1781   BoolTest::mask x = node->as_Bool()->_test._test;
1782   switch (x) {
1783     case BoolTest::eq: return 0;
1784     case BoolTest::ne: return 1;
1785     case BoolTest::lt: return 2;
1786     case BoolTest::le: return 3;
1787     case BoolTest::gt: return 4;
1788     case BoolTest::ge: return 5;
1789     case BoolTest::overflow:
1790     case BoolTest::no_overflow:
1791     case BoolTest::never:
1792     case BoolTest::illegal:
1793     default:
1794       return -1;
1795   }
1796 }
1797 
1798 //------------------------------Identity---------------------------------------
1799 // If the test is constant & we match, then we are the input Control
Node* IfProjNode::Identity(PhaseGVN* phase) {
  // Can only optimize if cannot go the other way
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t == TypeTuple::IFNEITHER || (always_taken(t) &&
       // During parsing (GVN) we don't remove dead code aggressively.
       // Cut off dead branch and let PhaseRemoveUseless take care of it.
      (!phase->is_IterGVN() ||
       // During IGVN, first wait for the dead branch to be killed.
       // Otherwise, the IfNode's control will have two control uses (the IfNode
       // that doesn't go away because it still has uses and this branch of the
       // If) which breaks other optimizations. Node::has_special_unique_user()
       // will cause this node to be reprocessed once the dead branch is killed.
       in(0)->outcnt() == 1))) {
    // IfNode control
    if (in(0)->is_BaseCountedLoopEnd()) {
      // CountedLoopEndNode may be eliminated by if subsuming, replace CountedLoopNode with LoopNode to
      // avoid mismatching between CountedLoopNode and CountedLoopEndNode in the following optimization.
      Node* head = unique_ctrl_out_or_null();
      if (head != nullptr && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) {
        Node* new_head = new LoopNode(head->in(LoopNode::EntryControl), this);
        phase->is_IterGVN()->register_new_node_with_optimizer(new_head);
        phase->is_IterGVN()->replace_node(head, new_head);
      }
    }
    // This projection is always taken: it is equivalent to the If's own
    // control input, so fold it (and eventually the If) away.
    return in(0)->in(0);
  }
  // no progress
  return this;
}
1829 
1830 bool IfNode::is_zero_trip_guard() const {
1831   if (in(1)->is_Bool() && in(1)->in(1)->is_Cmp()) {
1832     return in(1)->in(1)->in(1)->Opcode() == Op_OpaqueZeroTripGuard;
1833   }
1834   return false;
1835 }
1836 
// Pin every user of this projection that merely depends on the test: each such
// node is replaced by a pinned clone so later transformations cannot float it
// above this control.
void IfProjNode::pin_dependent_nodes(PhaseIterGVN* igvn) {
  for (DUIterator i = outs(); has_out(i); i++) {
    Node* u = out(i);
    if (!u->depends_only_on_test()) {
      continue;
    }
    Node* clone = u->pin_node_under_control();
    if (clone != nullptr) {
      igvn->register_new_node_with_optimizer(clone, u);
      igvn->replace_node(u, clone);
      // replace_node removed 'u' from our output list; step back so the
      // iterator does not skip the entry that shifted into this slot.
      --i;
    }
  }
}
1851 
1852 #ifndef PRODUCT
1853 void IfNode::dump_spec(outputStream* st) const {
1854   switch (_assertion_predicate_type) {
1855     case AssertionPredicateType::InitValue:
1856       st->print("#Init Value Assertion Predicate  ");
1857       break;
1858     case AssertionPredicateType::LastValue:
1859       st->print("#Last Value Assertion Predicate  ");
1860       break;
1861     case AssertionPredicateType::FinalIv:
1862       st->print("#Final IV Assertion Predicate  ");
1863       break;
1864     case AssertionPredicateType::None:
1865       // No Assertion Predicate
1866       break;
1867     default:
1868       fatal("Unknown Assertion Predicate type");
1869   }
1870   st->print("P=%f, C=%f", _prob, _fcnt);
1871 }
1872 #endif // NOT PRODUCT
1873 
1874 //------------------------------idealize_test----------------------------------
1875 // Try to canonicalize tests better.  Peek at the Cmp/Bool/If sequence and
1876 // come up with a canonical sequence.  Bools getting 'eq', 'gt' and 'ge' forms
1877 // converted to 'ne', 'le' and 'lt' forms.  IfTrue/IfFalse get swapped as
1878 // needed.
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
  assert(iff->in(0) != nullptr, "If must be live");

  if (iff->outcnt() != 2)  return nullptr; // Malformed projections.
  IfFalseNode* old_if_f = iff->false_proj();
  IfTrueNode* old_if_t = iff->true_proj();

  // CountedLoopEnds want the back-control test to be TRUE, regardless of
  // whether they are testing a 'gt' or 'lt' condition.  The 'gt' condition
  // happens in count-down loops
  if (iff->is_BaseCountedLoopEnd())  return nullptr;
  if (!iff->in(1)->is_Bool())  return nullptr; // Happens for partially optimized IF tests
  BoolNode *b = iff->in(1)->as_Bool();
  BoolTest bt = b->_test;
  // Test already in good order?
  if( bt.is_canonical() )
    return nullptr;

  // Flip test to be canonical.  Requires flipping the IfFalse/IfTrue and
  // cloning the IfNode.
  Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
  if( !new_b->is_Bool() ) return nullptr;
  b = new_b->as_Bool();

  PhaseIterGVN *igvn = phase->is_IterGVN();
  assert( igvn, "Test is not canonical in parser?" );

  // The IF node never really changes, but it needs to be cloned
  iff = iff->clone()->as_If();
  iff->set_req(1, b);
  // Test sense was flipped, so flip the probability too.
  iff->_prob = 1.0-iff->_prob;

  // An equivalent canonical If may already exist in the value-numbering
  // table; reuse it instead of the fresh clone.
  Node *prior = igvn->hash_find_insert(iff);
  if( prior ) {
    igvn->remove_dead_node(iff, PhaseIterGVN::NodeOrigin::Graph);
    iff = (IfNode*)prior;
  } else {
    // Cannot call transform on it just yet
    igvn->set_type_bottom(iff);
  }
  igvn->_worklist.push(iff);

  // Now handle projections.  Cloning not required.
  Node* new_if_f = (Node*)(new IfFalseNode( iff ));
  Node* new_if_t = (Node*)(new IfTrueNode ( iff ));

  igvn->register_new_node_with_optimizer(new_if_f);
  igvn->register_new_node_with_optimizer(new_if_t);
  // Flip test, so flip trailing control
  igvn->replace_node(old_if_f, new_if_t);
  igvn->replace_node(old_if_t, new_if_f);

  // Progress
  return iff;
}
1934 
// Ideal for RangeCheckNode: beyond the common If transformations, try to
// eliminate this range check when it is covered by dominating range checks on
// the same array and index ("range check smearing"), possibly widening up to
// two of the dominating checks so that together they cover this one.
Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) {
    return res;
  }

  PhaseIterGVN *igvn = phase->is_IterGVN();
  // Setup to scan up the CFG looking for a dominating test
  Node* prev_dom = this;

  // Check for range-check vs other kinds of tests.
  // is_range_check() decomposes the test into the checked range, index and
  // constant offset; a zero result means this is not a recognizable range
  // check — presumably fall back to the generic identical-test search below.
  Node* index1;
  Node* range1;
  jint offset1;
  int flip1 = is_range_check(range1, index1, offset1);
  if (flip1) {
    Node* dom = in(0);
    // Try to remove extra range checks.  All 'up_one_dom' gives up at merges
    // so all checks we inspect post-dominate the top-most check we find.
    // If we are going to fail the current check and we reach the top check
    // then we are guaranteed to fail, so just start interpreting there.
    // We 'expand' the top 3 range checks to include all post-dominating
    // checks.
    //
    // Example:
    // a[i+x] // (1) 1 < x < 6
    // a[i+3] // (2)
    // a[i+4] // (3)
    // a[i+6] // max = max of all constants
    // a[i+2]
    // a[i+1] // min = min of all constants
    //
    // If x < 3:
    //   (1) a[i+x]: Leave unchanged
    //   (2) a[i+3]: Replace with a[i+max] = a[i+6]: i+x < i+3 <= i+6  -> (2) is covered
    //   (3) a[i+4]: Replace with a[i+min] = a[i+1]: i+1 < i+4 <= i+6  -> (3) and all following checks are covered
    //   Remove all other a[i+c] checks
    //
    // If x >= 3:
    //   (1) a[i+x]: Leave unchanged
    //   (2) a[i+3]: Replace with a[i+min] = a[i+1]: i+1 < i+3 <= i+x  -> (2) is covered
    //   (3) a[i+4]: Replace with a[i+max] = a[i+6]: i+1 < i+4 <= i+6  -> (3) and all following checks are covered
    //   Remove all other a[i+c] checks
    //
    // We only need the top 2 range checks if x is the min or max of all constants.
    //
    // This, however, only works if the interval [i+min,i+max] is not larger than max_int (i.e. abs(max - min) < max_int):
    // The theoretical max size of an array is max_int with:
    // - Valid index space: [0,max_int-1]
    // - Invalid index space: [max_int,-1] // max_int, min_int, min_int - 1 ..., -1
    //
    // The size of the consecutive valid index space is smaller than the size of the consecutive invalid index space.
    // If we choose min and max in such a way that:
    // - abs(max - min) < max_int
    // - i+max and i+min are inside the valid index space
    // then all indices [i+min,i+max] must be in the valid index space. Otherwise, the invalid index space must be
    // smaller than the valid index space which is never the case for any array size.
    //
    // Choosing a smaller array size only makes the valid index space smaller and the invalid index space larger and
    // the argument above still holds.
    //
    // Note that the same optimization with the same maximal accepted interval size can also be found in C1.
    const jlong maximum_number_of_min_max_interval_indices = (jlong)max_jint;

    // The top 3 range checks seen
    const int NRC = 3;
    RangeCheck prev_checks[NRC];
    int nb_checks = 0;

    // Low and high offsets seen so far
    jint off_lo = offset1;
    jint off_hi = offset1;

    bool found_immediate_dominator = false;

    // Scan for the top checks and collect range of offsets
    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
      if (dom->Opcode() == Op_RangeCheck &&  // Not same opcode?
          prev_dom->in(0) == dom) { // One path of test does dominate?
        if (dom == this) return nullptr; // dead loop
        // See if this is a range check
        Node* index2;
        Node* range2;
        jint offset2;
        int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
        // See if this is a _matching_ range check, checking against
        // the same array bounds.
        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
            dom->outcnt() == 2) {
          if (nb_checks == 0 && dom->in(1) == in(1)) {
            // Found an immediately dominating test at the same offset.
            // This kind of back-to-back test can be eliminated locally,
            // and there is no need to search further for dominating tests.
            assert(offset2 == offset1, "Same test but different offsets");
            found_immediate_dominator = true;
            break;
          }

          // "x - y" -> must add one to the difference for number of elements in [x,y]
          const jlong diff = (jlong)MIN2(offset2, off_lo) - (jlong)MAX2(offset2, off_hi);
          if (ABS(diff) < maximum_number_of_min_max_interval_indices) {
            // Gather expanded bounds
            off_lo = MIN2(off_lo, offset2);
            off_hi = MAX2(off_hi, offset2);
            // Record top NRC range checks in a rolling buffer; the most
            // recent check seen so far sits at (nb_checks - 1) % NRC.
            prev_checks[nb_checks % NRC].ctl = prev_dom->as_IfProj();
            prev_checks[nb_checks % NRC].off = offset2;
            nb_checks++;
          }
        }
      }
      prev_dom = dom;
      dom = up_one_dom(dom);
      if (!dom) break;
    }

    if (!found_immediate_dominator) {
      // Attempt to widen the dominating range check to cover some later
      // ones.  Since range checks "fail" by uncommon-trapping to the
      // interpreter, widening a check can make us speculatively enter
      // the interpreter.  If we see range-check deopt's, do not widen!
      if (!phase->C->allow_range_check_smearing())  return nullptr;

      if (can_reshape && !phase->C->post_loop_opts_phase()) {
        // We are about to perform range check smearing (i.e. remove this RangeCheck if it is dominated by
        // a series of RangeChecks which have a range that covers this RangeCheck). This can cause array access nodes to
        // be pinned. We want to avoid that and first allow range check elimination a chance to remove the RangeChecks
        // from loops. Hence, we delay range check smearing until after loop opts.
        phase->C->record_for_post_loop_opts_igvn(this);
        return nullptr;
      }

      // Didn't find prior covering check, so cannot remove anything.
      if (nb_checks == 0) {
        return nullptr;
      }
      // Constant indices only need to check the upper bound.
      // Non-constant indices must check both low and high.
      int chk0 = (nb_checks - 1) % NRC;
      if (index1) {
        if (nb_checks == 1) {
          return nullptr;
        } else {
          // If the top range check's constant is the min or max of
          // all constants we widen the next one to cover the whole
          // range of constants.
          RangeCheck rc0 = prev_checks[chk0];
          int chk1 = (nb_checks - 2) % NRC;
          RangeCheck rc1 = prev_checks[chk1];
          if (rc0.off == off_lo) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
            prev_dom = rc1.ctl;
          } else if (rc0.off == off_hi) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
            prev_dom = rc1.ctl;
          } else {
            // If the top test's constant is not the min or max of all
            // constants, we need 3 range checks. We must leave the
            // top test unchanged because widening it would allow the
            // accesses it protects to successfully read/write out of
            // bounds.
            if (nb_checks == 2) {
              return nullptr;
            }
            int chk2 = (nb_checks - 3) % NRC;
            RangeCheck rc2 = prev_checks[chk2];
            // The top range check a+i covers interval: -a <= i < length-a
            // The second range check b+i covers interval: -b <= i < length-b
            if (rc1.off <= rc0.off) {
              // if b <= a, we change the second range check to:
              // -min_of_all_constants <= i < length-min_of_all_constants
              // Together top and second range checks now cover:
              // -min_of_all_constants <= i < length-a
              // which is more restrictive than -b <= i < length-b:
              // -b <= -min_of_all_constants <= i < length-a <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
            } else {
              // if b > a, we change the second range check to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // Together top and second range checks now cover:
              // -a <= i < length-max_of_all_constants
              // which is more restrictive than -b <= i < length-b:
              // -b < -a <= i < length-max_of_all_constants <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
            }
            prev_dom = rc2.ctl;
          }
        }
      } else {
        RangeCheck rc0 = prev_checks[chk0];
        // 'Widen' the offset of the 1st and only covering check
        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
        // Test is now covered by prior checks, dominate it out
        prev_dom = rc0.ctl;
      }
      // The last RangeCheck is found to be redundant with a sequence of n (n >= 2) preceding RangeChecks.
      // If an array load is control dependent on the eliminated range check, the array load nodes (CastII and Load)
      // become control dependent on the last range check of the sequence, but they are really dependent on the entire
      // sequence of RangeChecks. If RangeCheck#n is later replaced by a dominating identical check, the array load
      // nodes must not float above the n-1 other RangeCheck in the sequence. We pin the array load nodes here to
      // guarantee it doesn't happen.
      //
      // RangeCheck#1                 RangeCheck#1
      //    |      \                     |      \
      //    |      uncommon trap         |      uncommon trap
      //    ..                           ..
      // RangeCheck#n              -> RangeCheck#n
      //    |      \                     |      \
      //    |      uncommon trap        CastII  uncommon trap
      // RangeCheck                     Load
      //    |      \
      //   CastII  uncommon trap
      //   Load

      return dominated_by(prev_dom, igvn, true);
    }
  } else {
    // Not a recognizable range check: fall back to the generic search for an
    // identical dominating test.
    prev_dom = search_identical(4, igvn);

    if (prev_dom == nullptr) {
      return nullptr;
    }
  }

  // Replace dominated IfNode
  return dominated_by(prev_dom, igvn, false);
}
2172 
// A Parse Predicate starts out as an always-true If (condition intcon(1));
// it is registered with the compilation both as a Parse Predicate and for
// post loop opts IGVN so it can be cleaned up (see Value()) once useless.
ParsePredicateNode::ParsePredicateNode(Node* control, Deoptimization::DeoptReason deopt_reason, PhaseGVN* gvn)
    : IfNode(control, gvn->intcon(1), PROB_MAX, COUNT_UNKNOWN),
      _deopt_reason(deopt_reason),
      _predicate_state(PredicateState::Useful) {
  init_class_id(Class_ParsePredicate);
  gvn->C->add_parse_predicate(this);
  gvn->C->record_for_post_loop_opts_igvn(this);
#ifdef ASSERT
  // Sanity check: only these deoptimization reasons are valid for a Parse Predicate.
  switch (deopt_reason) {
    case Deoptimization::Reason_predicate:
    case Deoptimization::Reason_profile_predicate:
    case Deoptimization::Reason_auto_vectorization_check:
    case Deoptimization::Reason_loop_limit_check:
    case Deoptimization::Reason_short_running_long_loop:
      break;
    default:
      assert(false, "unsupported deoptimization reason for Parse Predicate");
  }
#endif // ASSERT
}
2193 
// Flag this Parse Predicate as useless and enqueue it for IGVN so that
// Value() can fold it away (it then returns TypeTuple::IFTRUE).
void ParsePredicateNode::mark_useless(PhaseIterGVN& igvn) {
  _predicate_state = PredicateState::Useless;
  igvn._worklist.push(this);
}
2198 
2199 Node* ParsePredicateNode::uncommon_trap() const {
2200   ParsePredicateUncommonProj* uncommon_proj = false_proj();
2201   Node* uct_region_or_call = uncommon_proj->unique_ctrl_out();
2202   assert(uct_region_or_call->is_Region() || uct_region_or_call->is_Call(), "must be a region or call uct");
2203   return uct_region_or_call;
2204 }
2205 
2206 // Fold this node away once it becomes useless or at latest in post loop opts IGVN.
2207 const Type* ParsePredicateNode::Value(PhaseGVN* phase) const {
2208   assert(_predicate_state != PredicateState::MaybeUseful, "should only be MaybeUseful when eliminating useless "
2209                                                           "predicates during loop opts");
2210   if (phase->type(in(0)) == Type::TOP) {
2211     return Type::TOP;
2212   }
2213   if (_predicate_state == PredicateState::Useless || phase->C->post_loop_opts_phase()) {
2214     return TypeTuple::IFTRUE;
2215   }
2216   return bottom_type();
2217 }
2218 
#ifndef PRODUCT
// Print the deoptimization reason this Parse Predicate was created for, and
// whether it has already been marked useless.
void ParsePredicateNode::dump_spec(outputStream* st) const {
  st->print(" #");
  switch (_deopt_reason) {
    case Deoptimization::DeoptReason::Reason_predicate:
      st->print("Loop ");
      break;
    case Deoptimization::DeoptReason::Reason_profile_predicate:
      st->print("Profiled_Loop ");
      break;
    case Deoptimization::DeoptReason::Reason_auto_vectorization_check:
      st->print("Auto_Vectorization_Check ");
      break;
    case Deoptimization::DeoptReason::Reason_loop_limit_check:
      st->print("Loop_Limit_Check ");
      break;
    case Deoptimization::DeoptReason::Reason_short_running_long_loop:
      st->print("Short_Running_Long_Loop ");
      break;
    default:
      fatal("unknown kind");
  }
  if (_predicate_state == PredicateState::Useless) {
    st->print("#useless ");
  }
}
#endif // NOT PRODUCT