1 /*
   2  * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciTypeFlow.hpp"
  27 #include "memory/allocation.inline.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/castnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/connode.hpp"
  33 #include "opto/loopnode.hpp"
  34 #include "opto/phaseX.hpp"
  35 #include "opto/runtime.hpp"
  36 #include "opto/rootnode.hpp"
  37 #include "opto/subnode.hpp"
  38 
  39 // Portions of code courtesy of Clifford Click
  40 
  41 // Optimization - Graph Style
  42 
  43 
  44 #ifndef PRODUCT
  45 extern uint explicit_null_checks_elided;
  46 #endif
  47 
  48 //=============================================================================
  49 //------------------------------Value------------------------------------------
  50 // Return a tuple for whichever arm of the IF is reachable
  51 const Type* IfNode::Value(PhaseGVN* phase) const {
  52   if( !in(0) ) return Type::TOP;
  53   if( phase->type(in(0)) == Type::TOP )
  54     return Type::TOP;
  55   const Type *t = phase->type(in(1));
  56   if( t == Type::TOP )          // data is undefined
  57     return TypeTuple::IFNEITHER; // unreachable altogether
  58   if( t == TypeInt::ZERO )      // zero, or false
  59     return TypeTuple::IFFALSE;  // only false branch is reachable
  60   if( t == TypeInt::ONE )       // 1, or true
  61     return TypeTuple::IFTRUE;   // only true branch is reachable
  62   assert( t == TypeInt::BOOL, "expected boolean type" );
  63 
  64   return TypeTuple::IFBOTH;     // No progress
  65 }
  66 
// An IfNode only steers control flow; it produces no value that lives in a
// register, so its output register mask is empty.
const RegMask &IfNode::out_RegMask() const {
  return RegMask::Empty;
}
  70 
  71 //------------------------------split_if---------------------------------------
  72 // Look for places where we merge constants, then test on the merged value.
  73 // If the IF test will be constant folded on the path with the constant, we
  74 // win by splitting the IF to before the merge point.
// Preconditions checked below: iff tests Bool(Cmp(Phi, Con)) where the Phi
// merges at least one constant (or not-null pointer) with other values, the
// Cmp constant-folds on that constant input, the Phi/If share the same
// Region, and all uses of Region/Phi fit a small set of patterns.  On
// success the merge is split into a constant region and a remainder region
// (see the diagram below), the original iff/cmp/bool/phi/region die, and a
// placeholder node is returned to signal progress.  Returns nullptr (no
// change) if any precondition fails.
static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
  // I could be a lot more general here, but I'm trying to squeeze this
  // in before the Christmas '98 break so I'm gonna be kinda restrictive
  // on the patterns I accept.  CNC

  // Look for a compare of a constant and a merged value
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *b = i1->as_Bool();
  Node *cmp = b->in(1);
  if( !cmp->is_Cmp() ) return nullptr;
  i1 = cmp->in(1);
  if( i1 == nullptr || !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  Node *con2 = cmp->in(2);
  if( !con2->is_Con() ) return nullptr;
  // See that the merge point contains some constants
  Node *con1=nullptr;
  uint i4;
  RegionNode* phi_region = phi->region();
  for (i4 = 1; i4 < phi->req(); i4++ ) {
    con1 = phi->in(i4);
    // Do not optimize partially collapsed merges
    if (con1 == nullptr || phi_region->in(i4) == nullptr || igvn->type(phi_region->in(i4)) == Type::TOP) {
      // Retry once the merge has been cleaned up by IGVN.
      igvn->_worklist.push(iff);
      return nullptr;
    }
    if( con1->is_Con() ) break; // Found a constant
    // Also allow null-vs-not-null checks
    const TypePtr *tp = igvn->type(con1)->isa_ptr();
    if( tp && tp->_ptr == TypePtr::NotNull )
      break;
  }
  if( i4 >= phi->req() ) return nullptr; // Found no constants

  igvn->C->set_has_split_ifs(true); // Has chance for split-if

  // Make sure that the compare can be constant folded away
  Node *cmp2 = cmp->clone();
  cmp2->set_req(1,con1);
  cmp2->set_req(2,con2);
  const Type *t = cmp2->Value(igvn);
  // This compare is dead, so whack it!
  igvn->remove_dead_node(cmp2);
  if( !t->singleton() ) return nullptr;

  // No intervening control, like a simple Call
  Node* r = iff->in(0);
  if (!r->is_Region() || r->is_Loop() || phi_region != r || r->as_Region()->is_copy()) {
    return nullptr;
  }

  // No other users of the cmp/bool
  if (b->outcnt() != 1 || cmp->outcnt() != 1) {
    //tty->print_cr("many users of cmp/bool");
    return nullptr;
  }

  // Make sure we can determine where all the uses of merged values go
  for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
    Node* u = r->fast_out(j);
    if( u == r ) continue;
    if( u == iff ) continue;
    if( u->outcnt() == 0 ) continue; // use is dead & ignorable
    if( !u->is_Phi() ) {
      /*
      if( u->is_Start() ) {
        tty->print_cr("Region has inlined start use");
      } else {
        tty->print_cr("Region has odd use");
        u->dump(2);
      }*/
      return nullptr;
    }
    if( u != phi ) {
      // CNC - do not allow any other merged value
      //tty->print_cr("Merging another value");
      //u->dump(2);
      return nullptr;
    }
    // Make sure we can account for all Phi uses
    for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
      Node* v = u->fast_out(k); // User of the phi
      // CNC - Allow only really simple patterns.
      // In particular I disallow AddP of the Phi, a fairly common pattern
      if (v == cmp) continue;  // The compare is OK
      if (v->is_ConstraintCast()) {
        // If the cast is derived from data flow edges, it may not have a control edge.
        // If so, it should be safe to split. But follow-up code can not deal with
        // this (l. 359). So skip.
        if (v->in(0) == nullptr) {
          return nullptr;
        }
        if (v->in(0)->in(0) == iff) {
          continue;               // CastPP/II of the IfNode is OK
        }
      }
      // Disabled following code because I cannot tell if exactly one
      // path dominates without a real dominator check. CNC 9/9/1999
      //uint vop = v->Opcode();
      //if( vop == Op_Phi ) {        // Phi from another merge point might be OK
      //  Node *r = v->in(0);        // Get controlling point
      //  if( !r ) return nullptr;   // Degraded to a copy
      //  // Find exactly one path in (either True or False doms, but not IFF)
      //  int cnt = 0;
      //  for( uint i = 1; i < r->req(); i++ )
      //    if( r->in(i) && r->in(i)->in(0) == iff )
      //      cnt++;
      //  if( cnt == 1 ) continue; // Exactly one of True or False guards Phi
      //}
      if( !v->is_Call() ) {
        /*
        if( v->Opcode() == Op_AddP ) {
          tty->print_cr("Phi has AddP use");
        } else if( v->Opcode() == Op_CastPP ) {
          tty->print_cr("Phi has CastPP use");
        } else if( v->Opcode() == Op_CastII ) {
          tty->print_cr("Phi has CastII use");
        } else {
          tty->print_cr("Phi has use I can't be bothered with");
        }
        */
      }
      return nullptr;

      /* CNC - Cut out all the fancy acceptance tests
      // Can we clone this use when doing the transformation?
      // If all uses are from Phis at this merge or constants, then YES.
      if( !v->in(0) && v != cmp ) {
        tty->print_cr("Phi has free-floating use");
        v->dump(2);
        return nullptr;
      }
      for( uint l = 1; l < v->req(); l++ ) {
        if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) &&
            !v->in(l)->is_Con() ) {
          tty->print_cr("Phi has use");
          v->dump(2);
          return nullptr;
        } // End of if Phi-use input is neither Phi nor Constant
      } // End of for all inputs to Phi-use
      */
    } // End of for all uses of Phi
  } // End of for all uses of Region

  // Only do this if the IF node is in a sane state
  if (iff->outcnt() != 2)
    return nullptr;

  // Got a hit!  Do the Mondo Hack!
  //
  //ABC  a1c   def   ghi            B     1     e     h   A C   a c   d f   g i
  // R - Phi - Phi - Phi            Rc - Phi - Phi - Phi   Rx - Phi - Phi - Phi
  //     cmp - 2                         cmp - 2               cmp - 2
  //       bool                            bool_c                bool_x
  //       if                               if_c                  if_x
  //      T  F                              T  F                  T  F
  // ..s..    ..t ..                   ..s..    ..t..        ..s..    ..t..
  //
  // Split the paths coming into the merge point into 2 separate groups of
  // merges.  On the left will be all the paths feeding constants into the
  // Cmp's Phi.  On the right will be the remaining paths.  The Cmp's Phi
  // will fold up into a constant; this will let the Cmp fold up as well as
  // all the control flow.  Below the original IF we have 2 control
  // dependent regions, 's' and 't'.  Now we will merge the two paths
  // just prior to 's' and 't' from the two IFs.  At least 1 path (and quite
  // likely 2 or more) will promptly constant fold away.
  PhaseGVN *phase = igvn;

  // Make a region merging constants and a region merging the rest
  uint req_c = 0;
  for (uint ii = 1; ii < r->req(); ii++) {
    if (phi->in(ii) == con1) {
      req_c++;
    }
    if (Node::may_be_loop_entry(r->in(ii))) {
      // Bail out if splitting through a region with a Parse Predicate input (could
      // also be a loop header before loop opts creates a LoopNode for it).
      return nullptr;
    }
  }

  // If all the defs of the phi are the same constant, we already have the desired end state.
  // Skip the split that would create empty phi and region nodes.
  if ((r->req() - req_c) == 1) {
    return nullptr;
  }

  // At this point we know that we can apply the split if optimization. If the region is still on the worklist,
  // we should wait until it is processed. The region might be removed which makes this optimization redundant.
  // This also avoids the creation of dead data loops when rewiring data nodes below when a region is dying.
  if (igvn->_worklist.member(r)) {
    igvn->_worklist.push(iff); // retry split if later again
    return nullptr;
  }

  Node *region_c = new RegionNode(req_c + 1);
  Node *phi_c    = con1;
  uint  len      = r->req();
  Node *region_x = new RegionNode(len - req_c);
  Node *phi_x    = PhiNode::make_blank(region_x, phi);
  for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) {
    if (phi->in(i) == con1) {
      region_c->init_req( i_c++, r  ->in(i) );
    } else {
      region_x->init_req( i_x,   r  ->in(i) );
      phi_x   ->init_req( i_x++, phi->in(i) );
    }
  }

  // Register the new RegionNodes but do not transform them.  Cannot
  // transform until the entire Region/Phi conglomerate has been hacked
  // as a single huge transform.
  igvn->register_new_node_with_optimizer( region_c );
  igvn->register_new_node_with_optimizer( region_x );
  // Prevent the untimely death of phi_x.  Currently he has no uses.  He is
  // about to get one.  If this only use goes away, then phi_x will look dead.
  // However, he will be picking up some more uses down below.
  //
  // 'hook' pins phi_x/phi_c/iff_c/iff_x (and later phi_s/phi_f) so IGVN
  // cannot reclaim them in the middle of this multi-step transform.
  Node *hook = new Node(4);
  hook->init_req(0, phi_x);
  hook->init_req(1, phi_c);
  phi_x = phase->transform( phi_x );

  // Make the compare
  Node *cmp_c = phase->makecon(t);
  Node *cmp_x = cmp->clone();
  cmp_x->set_req(1,phi_x);
  cmp_x->set_req(2,con2);
  cmp_x = phase->transform(cmp_x);
  // Make the bool
  Node *b_c = phase->transform(new BoolNode(cmp_c,b->_test._test));
  Node *b_x = phase->transform(new BoolNode(cmp_x,b->_test._test));
  // Make the IfNode
  IfNode* iff_c = iff->clone()->as_If();
  iff_c->set_req(0, region_c);
  iff_c->set_req(1, b_c);
  igvn->set_type_bottom(iff_c);
  igvn->_worklist.push(iff_c);
  hook->init_req(2, iff_c);

  IfNode* iff_x = iff->clone()->as_If();
  iff_x->set_req(0, region_x);
  iff_x->set_req(1, b_x);
  igvn->set_type_bottom(iff_x);
  igvn->_worklist.push(iff_x);
  hook->init_req(3, iff_x);

  // Make the true/false arms
  Node *iff_c_t = phase->transform(new IfTrueNode (iff_c));
  Node *iff_c_f = phase->transform(new IfFalseNode(iff_c));
  Node *iff_x_t = phase->transform(new IfTrueNode (iff_x));
  Node *iff_x_f = phase->transform(new IfFalseNode(iff_x));

  // Merge the TRUE paths
  Node *region_s = new RegionNode(3);
  igvn->_worklist.push(region_s);
  region_s->init_req(1, iff_c_t);
  region_s->init_req(2, iff_x_t);
  igvn->register_new_node_with_optimizer( region_s );

  // Merge the FALSE paths
  Node *region_f = new RegionNode(3);
  igvn->_worklist.push(region_f);
  region_f->init_req(1, iff_c_f);
  region_f->init_req(2, iff_x_f);
  igvn->register_new_node_with_optimizer( region_f );

  igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
  cmp->set_req(1,nullptr);  // Whack the inputs to cmp because it will be dead
  cmp->set_req(2,nullptr);
  // Check for all uses of the Phi and give them a new home.
  // The 'cmp' got cloned, but CastPP/IIs need to be moved.
  Node *phi_s = nullptr;     // do not construct unless needed
  Node *phi_f = nullptr;     // do not construct unless needed
  for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
    Node* v = phi->last_out(i2);// User of the phi
    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
    uint vop = v->Opcode();
    Node *proj = nullptr;
    if( vop == Op_Phi ) {       // Remote merge point
      Node *r = v->in(0);
      for (uint i3 = 1; i3 < r->req(); i3++)
        if (r->in(i3) && r->in(i3)->in(0) == iff) {
          proj = r->in(i3);
          break;
        }
    } else if( v->is_ConstraintCast() ) {
      proj = v->in(0);          // Controlling projection
    } else {
      assert( 0, "do not know how to handle this guy" );
    }
    guarantee(proj != nullptr, "sanity");

    Node *proj_path_data, *proj_path_ctrl;
    if( proj->Opcode() == Op_IfTrue ) {
      if( phi_s == nullptr ) {
        // Only construct phi_s if needed, otherwise provides
        // interfering use.
        phi_s = PhiNode::make_blank(region_s,phi);
        phi_s->init_req( 1, phi_c );
        phi_s->init_req( 2, phi_x );
        hook->add_req(phi_s);
        phi_s = phase->transform(phi_s);
      }
      proj_path_data = phi_s;
      proj_path_ctrl = region_s;
    } else {
      if( phi_f == nullptr ) {
        // Only construct phi_f if needed, otherwise provides
        // interfering use.
        phi_f = PhiNode::make_blank(region_f,phi);
        phi_f->init_req( 1, phi_c );
        phi_f->init_req( 2, phi_x );
        hook->add_req(phi_f);
        phi_f = phase->transform(phi_f);
      }
      proj_path_data = phi_f;
      proj_path_ctrl = region_f;
    }

    // Fixup 'v' for for the split
    if( vop == Op_Phi ) {       // Remote merge point
      uint i;
      for( i = 1; i < v->req(); i++ )
        if( v->in(i) == phi )
          break;
      v->set_req(i, proj_path_data );
    } else if( v->is_ConstraintCast() ) {
      v->set_req(0, proj_path_ctrl );
      v->set_req(1, proj_path_data );
    } else
      ShouldNotReachHere();
  }

  // Now replace the original iff's True/False with region_s/region_t.
  // This makes the original iff go dead.
  for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
    Node* p = iff->last_out(i3);
    assert( p->Opcode() == Op_IfTrue || p->Opcode() == Op_IfFalse, "" );
    Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
    // Replace p with u
    igvn->add_users_to_worklist(p);
    for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
      Node* x = p->last_out(l);
      igvn->hash_delete(x);
      uint uses_found = 0;
      for( uint j = 0; j < x->req(); j++ ) {
        if( x->in(j) == p ) {
          x->set_req(j, u);
          uses_found++;
        }
      }
      l -= uses_found;    // we deleted 1 or more copies of this edge
    }
    igvn->remove_dead_node(p);
  }

  // Force the original merge dead
  igvn->hash_delete(r);
  // First, remove region's dead users.
  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
    Node* u = r->last_out(l);
    if( u == r ) {
      r->set_req(0, nullptr);
    } else {
      assert(u->outcnt() == 0, "only dead users");
      igvn->remove_dead_node(u);
    }
    l -= 1;
  }
  igvn->remove_dead_node(r);

  // Now remove the bogus extra edges used to keep things alive
  igvn->remove_dead_node( hook );

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}
 454 
 455 // if this IfNode follows a range check pattern return the projection
 456 // for the failed path
 457 ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
 458   if (outcnt() != 2) {
 459     return nullptr;
 460   }
 461   Node* b = in(1);
 462   if (b == nullptr || !b->is_Bool())  return nullptr;
 463   BoolNode* bn = b->as_Bool();
 464   Node* cmp = bn->in(1);
 465   if (cmp == nullptr)  return nullptr;
 466   if (cmp->Opcode() != Op_CmpU)  return nullptr;
 467 
 468   l = cmp->in(1);
 469   r = cmp->in(2);
 470   flip_test = 1;
 471   if (bn->_test._test == BoolTest::le) {
 472     l = cmp->in(2);
 473     r = cmp->in(1);
 474     flip_test = 2;
 475   } else if (bn->_test._test != BoolTest::lt) {
 476     return nullptr;
 477   }
 478   if (l->is_top())  return nullptr;   // Top input means dead test
 479   if (r->Opcode() != Op_LoadRange && !is_RangeCheck())  return nullptr;
 480 
 481   // We have recognized one of these forms:
 482   //  Flip 1:  If (Bool[<] CmpU(l, LoadRange)) ...
 483   //  Flip 2:  If (Bool[<=] CmpU(LoadRange, l)) ...
 484 
 485   ProjNode* iftrap = proj_out_or_null(flip_test == 2 ? true : false);
 486   return iftrap;
 487 }
 488 
 489 
 490 //------------------------------is_range_check---------------------------------
 491 // Return 0 if not a range check.  Return 1 if a range check and set index and
 492 // offset.  Return 2 if we had to negate the test.  Index is null if the check
 493 // is versus a constant.
int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
  int flip_test = 0;
  Node* l = nullptr;
  Node* r = nullptr;
  // Recognize the CmpU(l, LoadRange) shape and find the failing projection.
  ProjNode* iftrap = range_check_trap_proj(flip_test, l, r);

  if (iftrap == nullptr) {
    return 0;
  }

  // Make sure it's a real range check by requiring an uncommon trap
  // along the OOB path.  Otherwise, it's possible that the user wrote
  // something which optimized to look like a range check but behaves
  // in some other way.
  if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == nullptr) {
    return 0;
  }

  // Look for index+offset form
  Node* ind = l;
  jint  off = 0;
  if (l->is_top()) {
    return 0;
  } else if (l->Opcode() == Op_AddI) {
    // find_int_con(0) yields 0 when the input is not an int constant, so an
    // AddI with a literal zero offset falls through here with off == 0.
    if ((off = l->in(1)->find_int_con(0)) != 0) {
      ind = l->in(2)->uncast();
    } else if ((off = l->in(2)->find_int_con(0)) != 0) {
      ind = l->in(1)->uncast();
    }
  } else if ((off = l->find_int_con(-1)) >= 0) {
    // constant offset with no variable index
    ind = nullptr;
  } else {
    // variable index with no constant offset (or dead negative index)
    off = 0;
  }

  // Return all the values:
  index  = ind;
  offset = off;
  range  = r;
  return flip_test;
}
 537 
 538 //------------------------------adjust_check-----------------------------------
 539 // Adjust (widen) a prior range check
// Widen the prior range check ending at 'proj' so it also covers
// index+off_lo, by rebuilding its CmpU/Bool against the lower offset.
static void adjust_check(Node* proj, Node* range, Node* index,
                         int flip, jint off_lo, PhaseIterGVN* igvn) {
  PhaseGVN *gvn = igvn;
  // Break apart the old check
  Node *iff = proj->in(0);
  Node *bol = iff->in(1);
  if( bol->is_top() ) return;   // In case a partially dead range check appears
  // bail (or bomb[ASSERT/DEBUG]) if NOT projection-->IfNode-->BoolNode
  DEBUG_ONLY( if( !bol->is_Bool() ) { proj->dump(3); fatal("Expect projection-->IfNode-->BoolNode"); } )
  if( !bol->is_Bool() ) return;

  Node *cmp = bol->in(1);
  // Compute a new check: compare index+off_lo (or just off_lo if there is no
  // variable index, or just index if off_lo is zero) against the range.
  Node *new_add = gvn->intcon(off_lo);
  if( index ) {
    new_add = off_lo ? gvn->transform(new AddINode( index, new_add )) : index;
  }
  // 'flip' selects the operand order recognized by range_check_trap_proj.
  Node *new_cmp = (flip == 1)
    ? new CmpUNode( new_add, range )
    : new CmpUNode( range, new_add );
  new_cmp = gvn->transform(new_cmp);
  // See if no need to adjust the existing check
  if( new_cmp == cmp ) return;
  // Else, adjust existing check
  Node *new_bol = gvn->transform( new BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
  igvn->rehash_node_delayed( iff );
  iff->set_req_X( 1, new_bol, igvn );
}
 568 
 569 //------------------------------up_one_dom-------------------------------------
 570 // Walk up the dominator tree one step.  Return null at root or true
 571 // complex merges.  Skips through small diamonds.
Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
  Node *dom = curr->in(0);
  if( !dom )                    // Found a Region degraded to a copy?
    return curr->nonnull_req(); // Skip thru it

  if( curr != dom )             // Normal walk up one step?
    return dom;

  // curr == dom only for self-looping control (e.g. a Region's own edge),
  // so from here on 'dom' is a Region-like merge.
  // Use linear_only if we are still parsing, since we cannot
  // trust the regions to be fully filled in.
  if (linear_only)
    return nullptr;

  if( dom->is_Root() )
    return nullptr;

  // Else hit a Region.  Check for a loop header
  if( dom->is_Loop() )
    return dom->in(1);          // Skip up thru loops

  // Check for small diamonds.  The embedded assignments double as null
  // checks: all four of din1..din4 must be non-null for the pattern.
  Node *din1, *din2, *din3, *din4;
  if( dom->req() == 3 &&        // 2-path merge point
      (din1 = dom ->in(1)) &&   // Left  path exists
      (din2 = dom ->in(2)) &&   // Right path exists
      (din3 = din1->in(0)) &&   // Left  path up one
      (din4 = din2->in(0)) ) {  // Right path up one
    if( din3->is_Call() &&      // Handle a slow-path call on either arm
        (din3 = din3->in(0)) )
      din3 = din3->in(0);
    if( din4->is_Call() &&      // Handle a slow-path call on either arm
        (din4 = din4->in(0)) )
      din4 = din4->in(0);
    if (din3 != nullptr && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
      return din3;              // Skip around diamonds
  }

  // Give up the search at true merges
  return nullptr;                  // Dead loop?  Or hit root?
}
 612 
 613 
 614 //------------------------------filtered_int_type--------------------------------
 615 // Return a possibly more restrictive type for val based on condition control flow for an if
const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj) {
  assert(if_proj &&
         (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
  if (if_proj->in(0) && if_proj->in(0)->is_If()) {
    IfNode* iff = if_proj->in(0)->as_If();
    if (iff->in(1) && iff->in(1)->is_Bool()) {
      BoolNode* bol = iff->in(1)->as_Bool();
      if (bol->in(1) && bol->in(1)->is_Cmp()) {
        const CmpNode* cmp  = bol->in(1)->as_Cmp();
        if (cmp->in(1) == val) {
          const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
          if (cmp2_t != nullptr) {
            jint lo = cmp2_t->_lo;
            jint hi = cmp2_t->_hi;
            // On the true projection the test holds as written; on the false
            // projection its negation holds.
            BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
            switch (msk) {
            case BoolTest::ne: {
              // If val is compared to its lower or upper bound, we can narrow the type
              const TypeInt* val_t = gvn->type(val)->isa_int();
              if (val_t != nullptr && !val_t->singleton() && cmp2_t->is_con()) {
                if (val_t->_lo == lo) {
                  return TypeInt::make(val_t->_lo + 1, val_t->_hi, val_t->_widen);
                } else if (val_t->_hi == hi) {
                  return TypeInt::make(val_t->_lo, val_t->_hi - 1, val_t->_widen);
                }
              }
              // Can't refine type
              return nullptr;
            }
            case BoolTest::eq:
              // On the equal path val must have the compared value's type.
              return cmp2_t;
            case BoolTest::lt:
              // val < [lo,hi] implies val <= hi-1; guard the min_jint underflow.
              lo = TypeInt::INT->_lo;
              if (hi != min_jint) {
                hi = hi - 1;
              }
              break;
            case BoolTest::le:
              lo = TypeInt::INT->_lo;
              break;
            case BoolTest::gt:
              // val > [lo,hi] implies val >= lo+1; guard the max_jint overflow.
              if (lo != max_jint) {
                lo = lo + 1;
              }
              hi = TypeInt::INT->_hi;
              break;
            case BoolTest::ge:
              // lo unchanged
              hi = TypeInt::INT->_hi;
              break;
            default:
              break;
            }
            const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
            return rtn_t;
          }
        }
      }
    }
  }
  return nullptr;
}
 678 
 679 //------------------------------fold_compares----------------------------
 680 // See if a pair of CmpIs can be converted into a CmpU.  In some cases
 681 // the direction of this if is determined by the preceding if so it
// can be eliminated entirely.
 683 //
 684 // Given an if testing (CmpI n v) check for an immediately control
 685 // dependent if that is testing (CmpI n v2) and has one projection
 686 // leading to this if and the other projection leading to a region
 687 // that merges one of this ifs control projections.
 688 //
 689 //                   If
 690 //                  / |
 691 //                 /  |
 692 //                /   |
 693 //              If    |
 694 //              /\    |
 695 //             /  \   |
 696 //            /    \  |
 697 //           /    Region
 698 //
 699 // Or given an if testing (CmpI n v) check for a dominating if that is
 700 // testing (CmpI n v2), both having one projection leading to an
// uncommon trap. Allow another independent guard in between to cover
 702 // an explicit range check:
 703 // if (index < 0 || index >= array.length) {
 704 // which may need a null check to guard the LoadRange
 705 //
 706 //                   If
 707 //                  / \
 708 //                 /   \
 709 //                /     \
 710 //              If      unc
 711 //              /\
 712 //             /  \
 713 //            /    \
 714 //           /      unc
 715 //
 716 
 717 // Is the comparison for this If suitable for folding?
 718 bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
 719   return in(1) != nullptr &&
 720     in(1)->is_Bool() &&
 721     in(1)->in(1) != nullptr &&
 722     in(1)->in(1)->Opcode() == Op_CmpI &&
 723     in(1)->in(1)->in(2) != nullptr &&
 724     in(1)->in(1)->in(2) != igvn->C->top() &&
 725     (in(1)->as_Bool()->_test.is_less() ||
 726      in(1)->as_Bool()->_test.is_greater() ||
 727      (fold_ne && in(1)->as_Bool()->_test._test == BoolTest::ne));
 728 }
 729 
 730 // Is a dominating control suitable for folding with this if?
 731 bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
 732   return ctrl != nullptr &&
 733     ctrl->is_Proj() &&
 734     ctrl->in(0) != nullptr &&
 735     ctrl->in(0)->Opcode() == Op_If &&
 736     ctrl->in(0)->outcnt() == 2 &&
 737     ctrl->in(0)->as_If()->cmpi_folds(igvn, true) &&
 738     // Must compare same value
 739     ctrl->in(0)->in(1)->in(1)->in(1) != nullptr &&
 740     ctrl->in(0)->in(1)->in(1)->in(1) != igvn->C->top() &&
 741     ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1);
 742 }
 743 
 744 // Do this If and the dominating If share a region?
 745 bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) {
 746   ProjNode* otherproj = proj->other_if_proj();
 747   Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null();
 748   RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : nullptr;
 749   success = nullptr;
 750   fail = nullptr;
 751 
 752   if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) {
 753     for (int i = 0; i < 2; i++) {
 754       ProjNode* proj = proj_out(i);
 755       if (success == nullptr && proj->outcnt() == 1 && proj->unique_out() == region) {
 756         success = proj;
 757       } else if (fail == nullptr) {
 758         fail = proj;
 759       } else {
 760         success = fail = nullptr;
 761       }
 762     }
 763   }
 764   return success != nullptr && fail != nullptr;
 765 }
 766 
 767 bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) {
 768   // Different methods and methods containing jsrs are not supported.
 769   ciMethod* method = unc->jvms()->method();
 770   ciMethod* dom_method = dom_unc->jvms()->method();
 771   if (method != dom_method || method->has_jsrs()) {
 772     return false;
 773   }
 774   // Check that both traps are in the same activation of the method (instead
 775   // of two activations being inlined through different call sites) by verifying
 776   // that the call stacks are equal for both JVMStates.
 777   JVMState* dom_caller = dom_unc->jvms()->caller();
 778   JVMState* caller = unc->jvms()->caller();
 779   if ((dom_caller == nullptr) != (caller == nullptr)) {
 780     // The current method must either be inlined into both dom_caller and
 781     // caller or must not be inlined at all (top method). Bail out otherwise.
 782     return false;
 783   } else if (dom_caller != nullptr && !dom_caller->same_calls_as(caller)) {
 784     return false;
 785   }
 786   // Check that the bci of the dominating uncommon trap dominates the bci
 787   // of the dominated uncommon trap. Otherwise we may not re-execute
 788   // the dominated check after deoptimization from the merged uncommon trap.
 789   ciTypeFlow* flow = dom_method->get_flow_analysis();
 790   int bci = unc->jvms()->bci();
 791   int dom_bci = dom_unc->jvms()->bci();
 792   if (!flow->is_dominated_by(bci, dom_bci)) {
 793     return false;
 794   }
 795 
 796   return true;
 797 }
 798 
 799 // Return projection that leads to an uncommon trap if any
 800 ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call) const {
 801   for (int i = 0; i < 2; i++) {
 802     call = proj_out(i)->is_uncommon_trap_proj(Deoptimization::Reason_none);
 803     if (call != nullptr) {
 804       return proj_out(i);
 805     }
 806   }
 807   return nullptr;
 808 }
 809 
// Do this If and the dominating If both branch out to an uncommon trap
//
// 'proj' is the dominating If's projection on the path to this If. On
// success, 'success' is set to this If's trap projection and 'fail' to its
// other projection.
bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNode*& fail, PhaseIterGVN* igvn) {
  ProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(Deoptimization::Reason_none);

  if (otherproj->outcnt() == 1 && dom_unc != nullptr) {
    // We need to re-execute the folded Ifs after deoptimization from the merged traps
    if (!dom_unc->jvms()->should_reexecute()) {
      return false;
    }

    // Find this If's own trap projection, if any.
    CallStaticJavaNode* unc = nullptr;
    ProjNode* unc_proj = uncommon_trap_proj(unc);
    if (unc_proj != nullptr && unc_proj->outcnt() == 1) {
      if (dom_unc == unc) {
        // Allow the uncommon trap to be shared through a region
        RegionNode* r = unc->in(0)->as_Region();
        // The region must merge exactly the two trap projections and nothing else.
        if (r->outcnt() != 2 || r->req() != 3 || r->find_edge(otherproj) == -1 || r->find_edge(unc_proj) == -1) {
          return false;
        }
        assert(r->has_phi() == nullptr, "simple region shouldn't have a phi");
      } else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) {
        // Distinct traps must each hang directly off their own projection.
        return false;
      }

      // The dominating trap must be able to re-execute this check (same
      // method/activation, dominating bci).
      if (!is_dominator_unc(dom_unc, unc)) {
        return false;
      }

      // See merge_uncommon_traps: the reason of the uncommon trap
      // will be changed and the state of the dominating If will be
      // used. Checked that we didn't apply this transformation in a
      // previous compilation and it didn't cause too many traps
      ciMethod* dom_method = dom_unc->jvms()->method();
      int dom_bci = dom_unc->jvms()->bci();
      if (!igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_unstable_fused_if) &&
          !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check) &&
          // Return true if c2 manages to reconcile with UnstableIf optimization. See the comments for it.
          igvn->C->remove_unstable_if_trap(dom_unc, true/*yield*/)) {
        success = unc_proj;
        fail = unc_proj->other_if_proj();
        return true;
      }
    }
  }
  return false;
}
 857 
// Check that the 2 CmpI can be folded into as single CmpU and proceed with the folding
//
// On success this If's Bool is replaced by a single unsigned range check
// (CmpU (n - lo) adjusted_lim) and the dominating If is constant-folded away.
// Returns true iff the graph was changed.
bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
  Node* this_cmp = in(1)->in(1);
  BoolNode* this_bool = in(1)->as_Bool();
  IfNode* dom_iff = proj->in(0)->as_If();
  BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
  Node* lo = dom_iff->in(1)->in(1)->in(2); // dominating compare's second input
  Node* hi = this_cmp->in(2);              // this compare's second input
  Node* n = this_cmp->in(1);               // value tested by both compares
  ProjNode* otherproj = proj->other_if_proj();

  // Types of n as narrowed by each If's projection on the path to this If.
  const TypeInt* lo_type = IfNode::filtered_int_type(igvn, n, otherproj);
  const TypeInt* hi_type = IfNode::filtered_int_type(igvn, n, success);

  BoolTest::mask lo_test = dom_bool->_test._test;
  BoolTest::mask hi_test = this_bool->_test._test;
  BoolTest::mask cond = hi_test;

  // convert:
  //
  //          dom_bool = x {<,<=,>,>=} a
  //                           / \
  //     proj = {True,False}  /   \ otherproj = {False,True}
  //                         /
  //        this_bool = x {<,<=} b
  //                       / \
  //  fail = {True,False} /   \ success = {False,True}
  //                     /
  //
  // (Second test guaranteed canonicalized, first one may not have
  // been canonicalized yet)
  //
  // into:
  //
  // cond = (x - lo) {<u,<=u,>u,>=u} adjusted_lim
  //                       / \
  //                 fail /   \ success
  //                     /
  //

  // Figure out which of the two tests sets the upper bound and which
  // sets the lower bound if any.
  Node* adjusted_lim = nullptr;
  if (lo_type != nullptr && hi_type != nullptr && hi_type->_lo > lo_type->_hi &&
      hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) {
    // Dominating test sets the lower bound, this test the upper bound.
    assert((dom_bool->_test.is_less() && !proj->_con) ||
           (dom_bool->_test.is_greater() && proj->_con), "incorrect test");

    // this_bool = <
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b[ on the fail (= True) projection, b > a-1 (because of hi_type->_lo > lo_type->_hi test above):
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <u
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b[ on the fail (= True) projection, b > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <u
    // this_bool = <=
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b] on the fail (= True) projection, b+1 > a-1:
    //     lo = a, hi = b, adjusted_lim = b-a+1, cond = <u
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <=u doesn't work because b = a - 1 is possible, then b-a = -1
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b] on the fail (= True) projection b+1 > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a, cond = <u
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <=u doesn't work because a = b is possible, then b-a-1 = -1

    if (hi_test == BoolTest::lt) {
      if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
      }
    } else if (hi_test == BoolTest::le) {
      if (lo_test == BoolTest::ge || lo_test == BoolTest::lt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::lt;
      } else {
        assert(false, "unhandled lo_test: %d", lo_test);
        return false;
      }
    } else {
      // hi_test should only be < or <= here; any other mask is only legal if
      // in(1) is on the igvn worklist with a stale type — bail and retry.
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled hi_test: %d", hi_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && fail->_con, "incorrect test");
  } else if (lo_type != nullptr && hi_type != nullptr && lo_type->_lo > hi_type->_hi &&
             lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) {
    // Mirror case: dominating test sets the upper bound, this test the lower bound.

    // this_bool = <
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in [b, a[ on the fail (= False) projection, a > b-1 (because of lo_type->_lo > hi_type->_hi above):
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in [b, a] on the fail (= False) projection, a+1 > b-1:
    //     lo = b, hi = a, adjusted_lim = a-b+1, cond = >=u
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >u doesn't work because a = b - 1 is possible, then b-a = -1
    // this_bool = <=
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in ]b, a[ on the fail (= False) projection, a > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in ]b, a] on the fail (= False) projection, a+1 > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b, cond = >=u
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >u doesn't work because a = b is possible, then b-a-1 = -1

    swap(lo, hi);
    swap(lo_type, hi_type);
    swap(lo_test, hi_test);

    assert((dom_bool->_test.is_less() && proj->_con) ||
           (dom_bool->_test.is_greater() && !proj->_con), "incorrect test");

    cond = (hi_test == BoolTest::le || hi_test == BoolTest::gt) ? BoolTest::gt : BoolTest::ge;

    if (lo_test == BoolTest::lt) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else if (lo_test == BoolTest::le) {
      if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) {
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
        cond = BoolTest::ge;
      } else {
        assert(false, "unhandled hi_test: %d", hi_test);
        return false;
      }
    } else {
      // Same as above: an unexpected lo_test is only legal with a stale type.
      assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled lo_test: %d", lo_test);
      return false;
    }
    // this test was canonicalized
    assert(this_bool->_test.is_less() && !fail->_con, "incorrect test");
  } else {
    // No range pattern recognized; the two tests may still be redundant.
    const TypeInt* failtype = filtered_int_type(igvn, n, proj);
    if (failtype != nullptr) {
      const TypeInt* type2 = filtered_int_type(igvn, n, fail);
      if (type2 != nullptr) {
        failtype = failtype->join(type2)->is_int();
        if (failtype->_lo > failtype->_hi) {
          // previous if determines the result of this if so
          // replace Bool with constant
          igvn->replace_input_of(this, 1, igvn->intcon(success->_con));
          return true;
        }
      }
    }
    // Not foldable: clear lo/hi so the merge below is skipped.
    lo = nullptr;
    hi = nullptr;
  }

  if (lo && hi) {
    Node* hook = new Node(1);
    hook->init_req(0, lo); // Add a use to lo to prevent him from dying
    // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo))
    Node* adjusted_val = igvn->transform(new SubINode(n,  lo));
    if (adjusted_lim == nullptr) {
      adjusted_lim = igvn->transform(new SubINode(hi, lo));
    }
    hook->destruct(igvn);

    // NOTE: this 'lo' intentionally shadows the Node* 'lo' above, which is
    // no longer needed; here we only care about adjusted_lim's low bound.
    int lo = igvn->type(adjusted_lim)->is_int()->_lo;
    if (lo < 0) {
      // If range check elimination applies to this comparison, it includes code to protect from overflows that may
      // cause the main loop to be skipped entirely. Delay this transformation.
      // Example:
      // for (int i = 0; i < limit; i++) {
      //   if (i < max_jint && i > min_jint) {...
      // }
      // Comparisons folded as:
      // i - min_jint - 1 <u -2
      // when RC applies, main loop limit becomes:
      // min(limit, max(-2 + min_jint + 1, min_jint))
      // = min(limit, min_jint)
      // = min_jint
      if (!igvn->C->post_loop_opts_phase()) {
        // Clean up the speculatively-built nodes before bailing out.
        if (adjusted_val->outcnt() == 0) {
          igvn->remove_dead_node(adjusted_val);
        }
        if (adjusted_lim->outcnt() == 0) {
          igvn->remove_dead_node(adjusted_lim);
        }
        igvn->C->record_for_post_loop_opts_igvn(this);
        return false;
      }
    }

    Node* newcmp = igvn->transform(new CmpUNode(adjusted_val, adjusted_lim));
    Node* newbool = igvn->transform(new BoolNode(newcmp, cond));

    // Constant-fold the dominating If and rewire this If to the new test.
    igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
    igvn->replace_input_of(this, 1, newbool);

    return true;
  }
  return false;
}
1068 
1069 // Merge the branches that trap for this If and the dominating If into
1070 // a single region that branches to the uncommon trap for the
1071 // dominating If
1072 Node* IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
1073   Node* res = this;
1074   assert(success->in(0) == this, "bad projection");
1075 
1076   ProjNode* otherproj = proj->other_if_proj();
1077 
1078   CallStaticJavaNode* unc = success->is_uncommon_trap_proj(Deoptimization::Reason_none);
1079   CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(Deoptimization::Reason_none);
1080 
1081   if (unc != dom_unc) {
1082     Node* r = new RegionNode(3);
1083 
1084     r->set_req(1, otherproj);
1085     r->set_req(2, success);
1086     r = igvn->transform(r);
1087     assert(r->is_Region(), "can't go away");
1088 
1089     // Make both If trap at the state of the first If: once the CmpI
1090     // nodes are merged, if we trap we don't know which of the CmpI
1091     // nodes would have caused the trap so we have to restart
1092     // execution at the first one
1093     igvn->replace_input_of(dom_unc, 0, r);
1094     igvn->replace_input_of(unc, 0, igvn->C->top());
1095   }
1096   int trap_request = dom_unc->uncommon_trap_request();
1097   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
1098   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
1099 
1100   int flip_test = 0;
1101   Node* l = nullptr;
1102   Node* r = nullptr;
1103 
1104   if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != nullptr) {
1105     // If this looks like a range check, change the trap to
1106     // Reason_range_check so the compiler recognizes it as a range
1107     // check and applies the corresponding optimizations
1108     trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_range_check, action);
1109 
1110     improve_address_types(l, r, fail, igvn);
1111 
1112     res = igvn->transform(new RangeCheckNode(in(0), in(1), _prob, _fcnt));
1113   } else if (unc != dom_unc) {
1114     // If we trap we won't know what CmpI would have caused the trap
1115     // so use a special trap reason to mark this pair of CmpI nodes as
1116     // bad candidate for folding. On recompilation we won't fold them
1117     // and we may trap again but this time we'll know what branch
1118     // traps
1119     trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
1120   }
1121   igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
1122   return res;
1123 }
1124 
// If we are turning 2 CmpI nodes into a CmpU that follows the pattern
// of a rangecheck on index i, on 64 bit the compares may be followed
// by memory accesses using i as index. In that case, the CmpU tells
// us something about the values taken by i that can help the compiler
// (see Compile::conv_I2X_index())
//
// 'l' is the index input of the range check, 'r' the LoadRange providing
// the array length, 'fail' the projection the memory accesses must be
// dominated by.
void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn) {
#ifdef _LP64
  ResourceMark rm;
  Node_Stack stack(2);

  assert(r->Opcode() == Op_LoadRange, "unexpected range check");
  const TypeInt* array_size = igvn->type(r)->is_int();

  // Depth-first walk of l's uses, looking for ConvI2L-rooted address
  // computations that feed memory accesses dominated by the range check.
  stack.push(l, 0);

  while(stack.size() > 0) {
    Node* n = stack.node();
    uint start = stack.index();

    uint i = start;
    for (; i < n->outcnt(); i++) {
      Node* use = n->raw_out(i);
      if (stack.size() == 1) {
        // At the root: only descend through a ConvI2L whose type is strictly
        // wider than the array size (a narrower one cannot be improved).
        if (use->Opcode() == Op_ConvI2L) {
          const TypeLong* bounds = use->as_Type()->type()->is_long();
          if (bounds->_lo <= array_size->_lo && bounds->_hi >= array_size->_hi &&
              (bounds->_lo != array_size->_lo || bounds->_hi != array_size->_hi)) {
            stack.set_index(i+1);
            stack.push(use, 0);
            break;
          }
        }
      } else if (use->is_Mem()) {
        // Walk up the dominators (bounded to 10 steps) to check the memory
        // access is dominated by the range check's fail projection.
        Node* ctrl = use->in(0);
        for (int i = 0; i < 10 && ctrl != nullptr && ctrl != fail; i++) {
          ctrl = up_one_dom(ctrl);
        }
        if (ctrl == fail) {
          Node* init_n = stack.node_at(1);
          assert(init_n->Opcode() == Op_ConvI2L, "unexpected first node");
          // Create a new narrow ConvI2L node that is dependent on the range check
          Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);

          // The type of the ConvI2L may be widen and so the new
          // ConvI2L may not be better than an existing ConvI2L
          if (new_n != init_n) {
            // Re-clone the chain of nodes between the ConvI2L and this
            // memory access so it uses the narrowed ConvI2L instead.
            for (uint j = 2; j < stack.size(); j++) {
              Node* n = stack.node_at(j);
              Node* clone = n->clone();
              int rep = clone->replace_edge(init_n, new_n, igvn);
              assert(rep > 0, "can't find expected node?");
              clone = igvn->transform(clone);
              init_n = n;
              new_n = clone;
            }
            igvn->hash_delete(use);
            int rep = use->replace_edge(init_n, new_n, igvn);
            assert(rep > 0, "can't find expected node?");
            igvn->transform(use);
            if (init_n->outcnt() == 0) {
              igvn->_worklist.push(init_n);
            }
          }
        }
      } else if (use->in(0) == nullptr && (igvn->type(use)->isa_long() ||
                                        igvn->type(use)->isa_ptr())) {
        // Floating long/pointer arithmetic: part of an address computation,
        // keep descending through it.
        stack.set_index(i+1);
        stack.push(use, 0);
        break;
      }
    }
    // All uses of n visited: pop and resume in the parent.
    if (i == n->outcnt()) {
      stack.pop();
    }
  }
#endif
}
1202 
1203 bool IfNode::is_cmp_with_loadrange(ProjNode* proj) {
1204   if (in(1) != nullptr &&
1205       in(1)->in(1) != nullptr &&
1206       in(1)->in(1)->in(2) != nullptr) {
1207     Node* other = in(1)->in(1)->in(2);
1208     if (other->Opcode() == Op_LoadRange &&
1209         ((other->in(0) != nullptr && other->in(0) == proj) ||
1210          (other->in(0) == nullptr &&
1211           other->in(2) != nullptr &&
1212           other->in(2)->is_AddP() &&
1213           other->in(2)->in(1) != nullptr &&
1214           other->in(2)->in(1)->Opcode() == Op_CastPP &&
1215           other->in(2)->in(1)->in(0) == proj))) {
1216       return true;
1217     }
1218   }
1219   return false;
1220 }
1221 
1222 bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
1223   Node* other = in(1)->in(1)->in(2);
1224   if (other->in(MemNode::Address) != nullptr &&
1225       proj->in(0)->in(1) != nullptr &&
1226       proj->in(0)->in(1)->is_Bool() &&
1227       proj->in(0)->in(1)->in(1) != nullptr &&
1228       proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1229       proj->in(0)->in(1)->in(1)->in(2) != nullptr &&
1230       proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() &&
1231       igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) {
1232     return true;
1233   }
1234   return false;
1235 }
1236 
1237 // Returns true if this IfNode belongs to a flat array check
1238 // and returns the corresponding array in the 'array' parameter.
1239 bool IfNode::is_flat_array_check(PhaseTransform* phase, Node** array) {
1240   Node* bol = in(1);
1241   if (!bol->is_Bool()) {
1242     return false;
1243   }
1244   Node* cmp = bol->in(1);
1245   if (cmp->isa_FlatArrayCheck()) {
1246     if (array != nullptr) {
1247       *array = cmp->in(FlatArrayCheckNode::ArrayOrKlass);
1248     }
1249     return true;
1250   }
1251   return false;
1252 }
1253 
1254 // Check that the If that is in between the 2 integer comparisons has
1255 // no side effect
1256 bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) {
1257   if (proj == nullptr) {
1258     return false;
1259   }
1260   CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1261   if (unc != nullptr && proj->outcnt() <= 2) {
1262     if (proj->outcnt() == 1 ||
1263         // Allow simple null check from LoadRange
1264         (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) {
1265       CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1266       CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1267       assert(dom_unc != nullptr, "is_uncommon_trap_if_pattern returned null");
1268 
1269       // reroute_side_effect_free_unc changes the state of this
1270       // uncommon trap to restart execution at the previous
1271       // CmpI. Check that this change in a previous compilation didn't
1272       // cause too many traps.
1273       int trap_request = unc->uncommon_trap_request();
1274       Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
1275 
1276       if (igvn->C->too_many_traps(dom_unc->jvms()->method(), dom_unc->jvms()->bci(), reason)) {
1277         return false;
1278       }
1279 
1280       if (!is_dominator_unc(dom_unc, unc)) {
1281         return false;
1282       }
1283 
1284       return true;
1285     }
1286   }
1287   return false;
1288 }
1289 
// Make the If between the 2 integer comparisons trap at the state of
// the first If: the last CmpI is the one replaced by a CmpU and the
// first CmpI is eliminated, so the test between the 2 CmpI nodes
// won't be guarded by the first CmpI anymore. It can trap in cases
// where the first CmpI would have prevented it from executing: on a
// trap, we need to restart execution at the state of the first CmpI
void IfNode::reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, PhaseIterGVN* igvn) {
  CallStaticJavaNode* dom_unc = dom_proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
  ProjNode* otherproj = proj->other_if_proj();
  CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
  // The dominating trap's call projection and the node hanging off it
  // (expected to be a Halt, given it is re-attached to root below).
  Node* call_proj = dom_unc->unique_ctrl_out();
  Node* halt = call_proj->unique_ctrl_out();

  // Clone the dominating trap's call/proj/halt subgraph and hang it off a
  // clone of this If's trapping projection.
  Node* new_unc = dom_unc->clone();
  call_proj = call_proj->clone();
  halt = halt->clone();
  Node* c = otherproj->clone();

  c = igvn->transform(c);
  // The new trap keeps this trap's own request (Parms input) but, being a
  // clone of dom_unc, deoptimizes with the first If's JVM state.
  new_unc->set_req(TypeFunc::Parms, unc->in(TypeFunc::Parms));
  new_unc->set_req(0, c);
  new_unc = igvn->transform(new_unc);
  call_proj->set_req(0, new_unc);
  call_proj = igvn->transform(call_proj);
  halt->set_req(0, call_proj);
  halt = igvn->transform(halt);

  // Kill the original projection and keep the cloned trap alive from root.
  igvn->replace_node(otherproj, igvn->C->top());
  igvn->C->root()->add_req(halt);
}
1320 
// Try to fold this If with a dominating integer comparison. Returns the
// replacement node when the graph was changed, nullptr otherwise.
Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
  // RangeCheckNodes etc. are excluded: only plain Ifs are folded here.
  if (Opcode() != Op_If) return nullptr;

  if (cmpi_folds(igvn)) {
    Node* ctrl = in(0);
    if (is_ctrl_folds(ctrl, igvn) && ctrl->outcnt() == 1) {
      // A integer comparison immediately dominated by another integer
      // comparison
      ProjNode* success = nullptr;
      ProjNode* fail = nullptr;
      ProjNode* dom_cmp = ctrl->as_Proj();
      if (has_shared_region(dom_cmp, success, fail) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return this;
      }
      if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
      return nullptr;
    } else if (ctrl->in(0) != nullptr &&
               ctrl->in(0)->in(0) != nullptr) {
      ProjNode* success = nullptr;
      ProjNode* fail = nullptr;
      // Shape: dominating If --dom_cmp--> intermediate If --other_cmp--> this
      Node* dom = ctrl->in(0)->in(0);
      ProjNode* dom_cmp = dom->isa_Proj();
      ProjNode* other_cmp = ctrl->isa_Proj();

      // Check if it's an integer comparison dominated by another
      // integer comparison with another test in between
      if (is_ctrl_folds(dom, igvn) &&
          has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
          is_side_effect_free_test(other_cmp, igvn) &&
          // Next call modifies graph so must be last
          fold_compares_helper(dom_cmp, success, fail, igvn)) {
        reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
        return merge_uncommon_traps(dom_cmp, success, fail, igvn);
      }
    }
  }
  return nullptr;
}
1365 
//------------------------------remove_useless_bool----------------------------
// Check for people making a useless boolean: things like
// if( (x < y ? true : false) ) { ... }
// Replace with if( x < y ) { ... }
// Returns iff on success, nullptr if the pattern doesn't match.
static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return nullptr;
  BoolNode *bol = i1->as_Bool();

  Node *cmp = bol->in(1);
  if( cmp->Opcode() != Op_CmpI ) return nullptr;

  // Must be comparing against a bool
  const Type *cmp2_t = phase->type( cmp->in(2) );
  if( cmp2_t != TypeInt::ZERO &&
      cmp2_t != TypeInt::ONE )
    return nullptr;

  // Find a prior merge point merging the boolean
  i1 = cmp->in(1);
  if( !i1->is_Phi() ) return nullptr;
  PhiNode *phi = i1->as_Phi();
  if( phase->type( phi ) != TypeInt::BOOL )
    return nullptr;

  // Check for diamond pattern
  int true_path = phi->is_diamond_phi();
  if( true_path == 0 ) return nullptr;

  // Make sure that iff and the control of the phi are different. This
  // should really only happen for dead control flow since it requires
  // an illegal cycle.
  if (phi->in(0)->in(1)->in(0) == iff) return nullptr;

  // phi->region->if_proj->ifnode->bool->cmp
  BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();

  // Now get the 'sense' of the test correct so we can plug in
  // either iff2->in(1) or its complement.
  int flip = 0;
  if( bol->_test._test == BoolTest::ne ) flip = 1-flip;
  else if( bol->_test._test != BoolTest::eq ) return nullptr;
  if( cmp2_t == TypeInt::ZERO ) flip = 1-flip;

  const Type *phi1_t = phase->type( phi->in(1) );
  const Type *phi2_t = phase->type( phi->in(2) );
  // Check for Phi(0,1) and flip
  if( phi1_t == TypeInt::ZERO ) {
    if( phi2_t != TypeInt::ONE ) return nullptr;
    flip = 1-flip;
  } else {
    // Check for Phi(1,0)
    if( phi1_t != TypeInt::ONE  ) return nullptr;
    if( phi2_t != TypeInt::ZERO ) return nullptr;
  }
  // If the true path of the diamond is phi input 2, the sense is
  // inverted once more.
  if( true_path == 2 ) {
    flip = 1-flip;
  }

  // Plug in the inner test (or its negation) directly.
  Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2);
  assert(new_bol != iff->in(1), "must make progress");
  iff->set_req_X(1, new_bol, phase);
  // Intervening diamond probably goes dead
  phase->C->set_major_progress();
  return iff;
}
1432 
1433 static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
1434 
// Pairs a range check's control node with a constant offset.
// NOTE(review): field semantics inferred from names only — the code using
// this struct lies outside this chunk; confirm against it.
struct RangeCheck {
  Node* ctl;  // control node of the range check
  jint off;   // constant offset involved in the check
};
1439 
1440 Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
1441   if (remove_dead_region(phase, can_reshape))  return this;
1442   // No Def-Use info?
1443   if (!can_reshape)  return nullptr;
1444 
1445   // Don't bother trying to transform a dead if
1446   if (in(0)->is_top())  return nullptr;
1447   // Don't bother trying to transform an if with a dead test
1448   if (in(1)->is_top())  return nullptr;
1449   // Another variation of a dead test
1450   if (in(1)->is_Con())  return nullptr;
1451   // Another variation of a dead if
1452   if (outcnt() < 2)  return nullptr;
1453 
1454   // Canonicalize the test.
1455   Node* idt_if = idealize_test(phase, this);
1456   if (idt_if != nullptr)  return idt_if;
1457 
1458   // Try to split the IF
1459   PhaseIterGVN *igvn = phase->is_IterGVN();
1460   Node *s = split_if(this, igvn);
1461   if (s != nullptr)  return s;
1462 
1463   return NodeSentinel;
1464 }
1465 
1466 //------------------------------Ideal------------------------------------------
1467 // Return a node which is more "ideal" than the current node.  Strip out
1468 // control copies
1469 Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1470   Node* res = Ideal_common(phase, can_reshape);
1471   if (res != NodeSentinel) {
1472     return res;
1473   }
1474 
1475   // Check for people making a useless boolean: things like
1476   // if( (x < y ? true : false) ) { ... }
1477   // Replace with if( x < y ) { ... }
1478   Node* bol2 = remove_useless_bool(this, phase);
1479   if (bol2) return bol2;
1480 
1481   if (in(0) == nullptr) return nullptr;     // Dead loop?
1482 
1483   PhaseIterGVN* igvn = phase->is_IterGVN();
1484   Node* result = fold_compares(igvn);
1485   if (result != nullptr) {
1486     return result;
1487   }
1488 
1489   // Scan for an equivalent test
1490   int dist = 4;               // Cutoff limit for search
1491   if (is_If() && in(1)->is_Bool()) {
1492     Node* cmp = in(1)->in(1);
1493     if (cmp->Opcode() == Op_CmpP &&
1494         cmp->in(2) != nullptr && // make sure cmp is not already dead
1495         cmp->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1496       dist = 64;              // Limit for null-pointer scans
1497     }
1498   }
1499 
1500   Node* prev_dom = search_identical(dist);
1501 
1502   if (prev_dom != nullptr) {
1503     // Replace dominated IfNode
1504     return dominated_by(prev_dom, igvn);
1505   }
1506 
1507   return simple_subsuming(igvn);
1508 }
1509 
1510 //------------------------------dominated_by-----------------------------------
Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) {
  // 'this' repeats a test already decided by the dominating projection
  // 'prev_dom'.  Remove this If by rerouting all users of its projections:
  // users on the still-possible path are rewired to the dominating
  // projection (data users) or to our immediate dominator (CFG users);
  // users on the now-impossible path are rewired to top and die.
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print("   Removing IfNode: "); this->dump();
  }
#endif

  igvn->hash_delete(this);      // Remove self to prevent spurious V-N
  Node *idom = in(0);
  // Need opcode to decide which way 'this' test goes
  int prev_op = prev_dom->Opcode();
  Node *top = igvn->C->top(); // Shortcut to top

  // Loop predicates may have depending checks which should not
  // be skipped. For example, range check predicate has two checks
  // for lower and upper bounds.
  ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != nullptr ||
      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != nullptr) {
    // Don't hang data users off a predicate projection; use the immediate
    // dominator instead so dependent predicate checks are not skipped.
    prev_dom = idom;
  }

  // Now walk the current IfNode's projections.
  // Loop ends when 'this' has no more uses.
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
    Node *ifp = last_out(i);     // Get IfTrue/IfFalse
    igvn->add_users_to_worklist(ifp);
    // Check which projection it is and set target.
    // Data-target is either the dominating projection of the same type
    // or TOP if the dominating projection is of opposite type.
    // Data-target will be used as the new control edge for the non-CFG
    // nodes like Casts and Loads.
    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
    // Control-target is just the If's immediate dominator or TOP.
    Node *ctrl_target = (ifp->Opcode() == prev_op) ?     idom : top;

    // For each child of an IfTrue/IfFalse projection, reroute.
    // Loop ends when projection has no more uses.
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
      if (s->depends_only_on_test() && igvn->no_dependent_zero_check(s)) {
        // For control producers.
        // Do not rewire Div and Mod nodes which could have a zero divisor to avoid skipping their zero check.
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
      } else {
        // Find the control input matching this def-use edge.
        // For Regions it may not be in slot 0.
        uint l;
        for (l = 0; s->in(l) != ifp; l++) { }
        igvn->replace_input_of(s, l, ctrl_target);
      }
    } // End for each child of a projection

    igvn->remove_dead_node(ifp);
  } // End for each IfTrue/IfFalse child of If

  // Kill the IfNode
  igvn->remove_dead_node(this);

  // Must return either the original node (now dead) or a new node
  // (Do not return a top here, since that would break the uniqueness of top.)
  return new ConINode(TypeInt::ZERO);
}
1574 
1575 Node* IfNode::search_identical(int dist) {
1576   // Setup to scan up the CFG looking for a dominating test
1577   Node* dom = in(0);
1578   Node* prev_dom = this;
1579   int op = Opcode();
1580   // Search up the dominator tree for an If with an identical test
1581   while (dom->Opcode() != op    ||  // Not same opcode?
1582          dom->in(1)    != in(1) ||  // Not same input 1?
1583          prev_dom->in(0) != dom) {  // One path of test does not dominate?
1584     if (dist < 0) return nullptr;
1585 
1586     dist--;
1587     prev_dom = dom;
1588     dom = up_one_dom(dom);
1589     if (!dom) return nullptr;
1590   }
1591 
1592   // Check that we did not follow a loop back to ourselves
1593   if (this == dom) {
1594     return nullptr;
1595   }
1596 
1597 #ifndef PRODUCT
1598   if (dist > 2) { // Add to count of null checks elided
1599     explicit_null_checks_elided++;
1600   }
1601 #endif
1602 
1603   return prev_dom;
1604 }
1605 
1606 
1607 static int subsuming_bool_test_encode(Node*);
1608 
1609 // Check if dominating test is subsuming 'this' one.
1610 //
1611 //              cmp
1612 //              / \
1613 //     (r1)  bool  \
1614 //            /    bool (r2)
1615 //    (dom) if       \
1616 //            \       )
1617 //    (pre)  if[TF]  /
1618 //               \  /
1619 //                if (this)
1620 //   \r1
1621 //  r2\  eqT  eqF  neT  neF  ltT  ltF  leT  leF  gtT  gtF  geT  geF
1622 //  eq    t    f    f    t    f    -    -    f    f    -    -    f
1623 //  ne    f    t    t    f    t    -    -    t    t    -    -    t
1624 //  lt    f    -    -    f    t    f    -    f    f    -    f    t
1625 //  le    t    -    -    t    t    -    t    f    f    t    -    t
1626 //  gt    f    -    -    f    f    -    f    t    t    f    -    f
1627 //  ge    t    -    -    t    f    t    -    t    t    -    t    f
1628 //
Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
  // Table encoding: N/A (na), True-branch (tb), False-branch (fb).
  // Rows are indexed by this If's relation (trel), columns by the
  // dominating If's relation combined with which of its branches we sit on.
  static enum { na, tb, fb } s_short_circuit_map[6][12] = {
  /*rel: eq+T eq+F ne+T ne+F lt+T lt+F le+T le+F gt+T gt+F ge+T ge+F*/
  /*eq*/{ tb,  fb,  fb,  tb,  fb,  na,  na,  fb,  fb,  na,  na,  fb },
  /*ne*/{ fb,  tb,  tb,  fb,  tb,  na,  na,  tb,  tb,  na,  na,  tb },
  /*lt*/{ fb,  na,  na,  fb,  tb,  fb,  na,  fb,  fb,  na,  fb,  tb },
  /*le*/{ tb,  na,  na,  tb,  tb,  na,  tb,  fb,  fb,  tb,  na,  tb },
  /*gt*/{ fb,  na,  na,  fb,  fb,  na,  fb,  tb,  tb,  fb,  na,  fb },
  /*ge*/{ tb,  na,  na,  tb,  fb,  tb,  na,  tb,  tb,  na,  tb,  fb }};

  // 'this' must hang directly off one projection of a dominating If
  // which tests the very same Cmp (see diagram above).
  Node* pre = in(0);
  if (!pre->is_IfTrue() && !pre->is_IfFalse()) {
    return nullptr;
  }
  Node* dom = pre->in(0);
  if (!dom->is_If()) {
    return nullptr;
  }
  Node* bol = in(1);
  if (!bol->is_Bool()) {
    return nullptr;
  }
  Node* cmp = in(1)->in(1);
  if (!cmp->is_Cmp()) {
    return nullptr;
  }

  if (!dom->in(1)->is_Bool()) {
    return nullptr;
  }
  if (dom->in(1)->in(1) != cmp) {  // Not same cond?
    return nullptr;
  }

  // Encode both relations for the table lookup; 'bout' selects the T/F
  // column pair depending on which branch of the dominating If we are on.
  int drel = subsuming_bool_test_encode(dom->in(1));
  int trel = subsuming_bool_test_encode(bol);
  int bout = pre->is_IfFalse() ? 1 : 0;

  if (drel < 0 || trel < 0) {
    // Relation not modeled in the table (overflow/never/illegal).
    return nullptr;
  }
  int br = s_short_circuit_map[trel][2*drel+bout];
  if (br == na) {
    return nullptr;
  }
#ifndef PRODUCT
  if (TraceIterativeGVN) {
    tty->print("   Subsumed IfNode: "); dump();
  }
#endif
  // Replace condition with constant True(1)/False(0).
  bool is_always_true = br == tb;
  set_req(1, igvn->intcon(is_always_true ? 1 : 0));

  // Update any data dependencies to the directly dominating test. This subsumed test is not immediately removed by igvn
  // and therefore subsequent optimizations might miss these data dependencies otherwise. There might be a dead loop
  // ('always_taken_proj' == 'pre') that is cleaned up later. Skip this case to make the iterator work properly.
  Node* always_taken_proj = proj_out(is_always_true);
  if (always_taken_proj != pre) {
    for (DUIterator_Fast imax, i = always_taken_proj->fast_outs(imax); i < imax; i++) {
      Node* u = always_taken_proj->fast_out(i);
      if (!u->is_CFG()) {
        // replace_input_of() removes an out-edge; back up the iterator
        // so no user is skipped.
        igvn->replace_input_of(u, 0, pre);
        --i;
        --imax;
      }
    }
  }

  if (bol->outcnt() == 0) {
    igvn->remove_dead_node(bol);    // Kill the BoolNode.
  }
  return this;
}
1704 
1705 // Map BoolTest to local table encoding. The BoolTest (e)numerals
1706 //   { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1 }
1707 // are mapped to table indices, while the remaining (e)numerals in BoolTest
1708 //   { overflow = 2, no_overflow = 6, never = 8, illegal = 9 }
1709 // are ignored (these are not modeled in the table).
1710 //
1711 static int subsuming_bool_test_encode(Node* node) {
1712   precond(node->is_Bool());
1713   BoolTest::mask x = node->as_Bool()->_test._test;
1714   switch (x) {
1715     case BoolTest::eq: return 0;
1716     case BoolTest::ne: return 1;
1717     case BoolTest::lt: return 2;
1718     case BoolTest::le: return 3;
1719     case BoolTest::gt: return 4;
1720     case BoolTest::ge: return 5;
1721     case BoolTest::overflow:
1722     case BoolTest::no_overflow:
1723     case BoolTest::never:
1724     case BoolTest::illegal:
1725     default:
1726       return -1;
1727   }
1728 }
1729 
1730 //------------------------------Identity---------------------------------------
1731 // If the test is constant & we match, then we are the input Control
Node* IfProjNode::Identity(PhaseGVN* phase) {
  // Can only optimize if cannot go the other way: either the If's type
  // says neither branch is reachable, or this projection is the one
  // branch that is always taken.
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t == TypeTuple::IFNEITHER || (always_taken(t) &&
       // During parsing (GVN) we don't remove dead code aggressively.
       // Cut off dead branch and let PhaseRemoveUseless take care of it.
      (!phase->is_IterGVN() ||
       // During IGVN, first wait for the dead branch to be killed.
       // Otherwise, the IfNode's control will have two control uses (the IfNode
       // that doesn't go away because it still has uses and this branch of the
       // If) which breaks other optimizations. Node::has_special_unique_user()
       // will cause this node to be reprocessed once the dead branch is killed.
       in(0)->outcnt() == 1))) {
    // IfNode control
    if (in(0)->is_BaseCountedLoopEnd()) {
      // CountedLoopEndNode may be eliminated by if subsuming, replace CountedLoopNode with LoopNode to
      // avoid mismatching between CountedLoopNode and CountedLoopEndNode in the following optimization.
      Node* head = unique_ctrl_out_or_null();
      if (head != nullptr && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) {
        Node* new_head = new LoopNode(head->in(LoopNode::EntryControl), this);
        phase->is_IterGVN()->register_new_node_with_optimizer(new_head);
        phase->is_IterGVN()->replace_node(head, new_head);
      }
    }
    // Skip the If entirely: control flows straight from its control input.
    return in(0)->in(0);
  }
  // no progress
  return this;
}
1761 
1762 #ifndef PRODUCT
1763 //------------------------------dump_spec--------------------------------------
void IfNode::dump_spec(outputStream *st) const {
  // P = probability of taking the true branch, C = profiled execution count.
  st->print("P=%f, C=%f",_prob,_fcnt);
}
1767 #endif
1768 
1769 //------------------------------idealize_test----------------------------------
1770 // Try to canonicalize tests better.  Peek at the Cmp/Bool/If sequence and
1771 // come up with a canonical sequence.  Bools getting 'eq', 'gt' and 'ge' forms
1772 // converted to 'ne', 'le' and 'lt' forms.  IfTrue/IfFalse get swapped as
1773 // needed.
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
  assert(iff->in(0) != nullptr, "If must be live");

  if (iff->outcnt() != 2)  return nullptr; // Malformed projections.
  Node* old_if_f = iff->proj_out(false);
  Node* old_if_t = iff->proj_out(true);

  // CountedLoopEnds want the back-control test to be TRUE, regardless of
  // whether they are testing a 'gt' or 'lt' condition.  The 'gt' condition
  // happens in count-down loops
  if (iff->is_BaseCountedLoopEnd())  return nullptr;
  if (!iff->in(1)->is_Bool())  return nullptr; // Happens for partially optimized IF tests
  BoolNode *b = iff->in(1)->as_Bool();
  BoolTest bt = b->_test;
  // Test already in good order?
  if( bt.is_canonical() )
    return nullptr;

  // Flip test to be canonical.  Requires flipping the IfFalse/IfTrue and
  // cloning the IfNode.
  Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
  if( !new_b->is_Bool() ) return nullptr;
  b = new_b->as_Bool();

  PhaseIterGVN *igvn = phase->is_IterGVN();
  assert( igvn, "Test is not canonical in parser?" );

  // The IF node never really changes, but it needs to be cloned
  iff = iff->clone()->as_If();
  iff->set_req(1, b);
  // Branch probability flips along with the test.
  iff->_prob = 1.0-iff->_prob;

  // Value-number the clone: reuse an existing identical If if there is one.
  Node *prior = igvn->hash_find_insert(iff);
  if( prior ) {
    igvn->remove_dead_node(iff);
    iff = (IfNode*)prior;
  } else {
    // Cannot call transform on it just yet
    igvn->set_type_bottom(iff);
  }
  igvn->_worklist.push(iff);

  // Now handle projections.  Cloning not required.
  Node* new_if_f = (Node*)(new IfFalseNode( iff ));
  Node* new_if_t = (Node*)(new IfTrueNode ( iff ));

  igvn->register_new_node_with_optimizer(new_if_f);
  igvn->register_new_node_with_optimizer(new_if_t);
  // Flip test, so flip trailing control
  igvn->replace_node(old_if_f, new_if_t);
  igvn->replace_node(old_if_t, new_if_f);

  // Progress
  return iff;
}
1829 
// Eliminate or widen redundant range checks.  A RangeCheck dominated by
// matching checks (same array, same index) can be removed once the
// dominating checks have been widened to cover its offset range.
Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = Ideal_common(phase, can_reshape);
  if (res != NodeSentinel) {
    return res;
  }

  PhaseIterGVN *igvn = phase->is_IterGVN();
  // Setup to scan up the CFG looking for a dominating test
  Node* prev_dom = this;

  // Check for range-check vs other kinds of tests
  Node* index1;
  Node* range1;
  jint offset1;
  int flip1 = is_range_check(range1, index1, offset1);
  if (flip1) {
    Node* dom = in(0);
    // Try to remove extra range checks.  All 'up_one_dom' gives up at merges
    // so all checks we inspect post-dominate the top-most check we find.
    // If we are going to fail the current check and we reach the top check
    // then we are guaranteed to fail, so just start interpreting there.
    // We 'expand' the top 3 range checks to include all post-dominating
    // checks.

    // The top 3 range checks seen
    const int NRC = 3;
    RangeCheck prev_checks[NRC];
    int nb_checks = 0;

    // Low and high offsets seen so far
    jint off_lo = offset1;
    jint off_hi = offset1;

    bool found_immediate_dominator = false;

    // Scan for the top checks and collect range of offsets
    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
      if (dom->Opcode() == Op_RangeCheck &&  // A dominating RangeCheck?
          prev_dom->in(0) == dom) { // One path of test does dominate?
        if (dom == this) return nullptr; // dead loop
        // See if this is a range check
        Node* index2;
        Node* range2;
        jint offset2;
        int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
        // See if this is a _matching_ range check, checking against
        // the same array bounds.
        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
            dom->outcnt() == 2) {
          if (nb_checks == 0 && dom->in(1) == in(1)) {
            // Found an immediately dominating test at the same offset.
            // This kind of back-to-back test can be eliminated locally,
            // and there is no need to search further for dominating tests.
            assert(offset2 == offset1, "Same test but different offsets");
            found_immediate_dominator = true;
            break;
          }
          // Gather expanded bounds
          off_lo = MIN2(off_lo,offset2);
          off_hi = MAX2(off_hi,offset2);
          // Record top NRC range checks (ring buffer keeps the topmost NRC)
          prev_checks[nb_checks%NRC].ctl = prev_dom;
          prev_checks[nb_checks%NRC].off = offset2;
          nb_checks++;
        }
      }
      prev_dom = dom;
      dom = up_one_dom(dom);
      if (!dom) break;
    }

    if (!found_immediate_dominator) {
      // Attempt to widen the dominating range check to cover some later
      // ones.  Since range checks "fail" by uncommon-trapping to the
      // interpreter, widening a check can make us speculatively enter
      // the interpreter.  If we see range-check deopt's, do not widen!
      if (!phase->C->allow_range_check_smearing())  return nullptr;

      // Didn't find prior covering check, so cannot remove anything.
      if (nb_checks == 0) {
        return nullptr;
      }
      // Constant indices only need to check the upper bound.
      // Non-constant indices must check both low and high.
      int chk0 = (nb_checks - 1) % NRC;  // Index of the topmost recorded check
      if (index1) {
        if (nb_checks == 1) {
          return nullptr;
        } else {
          // If the top range check's constant is the min or max of
          // all constants we widen the next one to cover the whole
          // range of constants.
          RangeCheck rc0 = prev_checks[chk0];
          int chk1 = (nb_checks - 2) % NRC;
          RangeCheck rc1 = prev_checks[chk1];
          if (rc0.off == off_lo) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
            prev_dom = rc1.ctl;
          } else if (rc0.off == off_hi) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
            prev_dom = rc1.ctl;
          } else {
            // If the top test's constant is not the min or max of all
            // constants, we need 3 range checks. We must leave the
            // top test unchanged because widening it would allow the
            // accesses it protects to successfully read/write out of
            // bounds.
            if (nb_checks == 2) {
              return nullptr;
            }
            int chk2 = (nb_checks - 3) % NRC;
            RangeCheck rc2 = prev_checks[chk2];
            // The top range check a+i covers interval: -a <= i < length-a
            // The second range check b+i covers interval: -b <= i < length-b
            if (rc1.off <= rc0.off) {
              // if b <= a, we change the second range check to:
              // -min_of_all_constants <= i < length-min_of_all_constants
              // Together top and second range checks now cover:
              // -min_of_all_constants <= i < length-a
              // which is more restrictive than -b <= i < length-b:
              // -b <= -min_of_all_constants <= i < length-a <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
            } else {
              // if b > a, we change the second range check to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // Together top and second range checks now cover:
              // -a <= i < length-max_of_all_constants
              // which is more restrictive than -b <= i < length-b:
              // -b < -a <= i < length-max_of_all_constants <= length-b
              // The third check is then changed to:
              // -max_of_all_constants <= i < length-max_of_all_constants
              // so 2nd and 3rd checks restrict allowed values of i to:
              // -min_of_all_constants <= i < length-max_of_all_constants
              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
            }
            prev_dom = rc2.ctl;
          }
        }
      } else {
        RangeCheck rc0 = prev_checks[chk0];
        // 'Widen' the offset of the 1st and only covering check
        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
        // Test is now covered by prior checks, dominate it out
        prev_dom = rc0.ctl;
      }
    }
  } else {
    // Not a range check; fall back to the generic identical-test search.
    prev_dom = search_identical(4);

    if (prev_dom == nullptr) {
      return nullptr;
    }
  }

  // Replace dominated IfNode
  return dominated_by(prev_dom, igvn);
}
1993 
1994 ParsePredicateNode::ParsePredicateNode(Node* control, Node* bol, Deoptimization::DeoptReason deopt_reason)
1995     : IfNode(control, bol, PROB_MAX, COUNT_UNKNOWN),
1996       _deopt_reason(deopt_reason) {
1997   init_class_id(Class_ParsePredicate);
1998   assert(bol->Opcode() == Op_Conv2B && bol->in(1) != nullptr && bol->in(1)->is_Opaque1(), "wrong boolean input");
1999 #ifdef ASSERT
2000   switch (deopt_reason) {
2001     case Deoptimization::Reason_predicate:
2002     case Deoptimization::Reason_profile_predicate:
2003     case Deoptimization::Reason_loop_limit_check:
2004       break;
2005     default:
2006       assert(false, "unsupported deoptimization reason for Parse Predicate");
2007   }
2008 #endif // ASSERT
2009 }
2010 
2011 #ifndef PRODUCT
2012 void ParsePredicateNode::dump_spec(outputStream* st) const {
2013   st->print(" #");
2014   switch (_deopt_reason) {
2015     case Deoptimization::DeoptReason::Reason_predicate:
2016       st->print("Loop ");
2017       break;
2018     case Deoptimization::DeoptReason::Reason_profile_predicate:
2019       st->print("Profiled_Loop ");
2020       break;
2021     case Deoptimization::DeoptReason::Reason_loop_limit_check:
2022       st->print("Loop_Limit_Check ");
2023       break;
2024     default:
2025       fatal("unknown kind");
2026   }
2027 }
2028 #endif // NOT PRODUCT