1 /*
   2  * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/narrowptrnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/regmask.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 #include "opto/vectornode.hpp"
  47 #include "utilities/vmError.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 //------------------------------Value------------------------------------------
  55 // Compute the type of the RegionNode.
  56 const Type* RegionNode::Value(PhaseGVN* phase) const {
  57   for( uint i=1; i<req(); ++i ) {       // For all paths in
  58     Node *n = in(i);            // Get Control source
  59     if( !n ) continue;          // Missing inputs are TOP
  60     if( phase->type(n) == Type::CONTROL )
  61       return Type::CONTROL;
  62   }
  63   return Type::TOP;             // All paths dead?  Then so are we
  64 }
  65 
  66 //------------------------------Identity---------------------------------------
  67 // Check for Region being Identity.
  68 Node* RegionNode::Identity(PhaseGVN* phase) {
  69   // Cannot have Region be an identity, even if it has only 1 input.
  70   // Phi users cannot have their Region input folded away for them,
  71   // since they need to select the proper data input
  72   return this;
  73 }
  74 
  75 //------------------------------merge_region-----------------------------------
  76 // If a Region flows into a Region, merge the two into one big merge.  This is
  77 // not done if Phi users or other extra uses stand in the way.
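     // Sketch of the effect (illustrative, not part of the original comment):
     //   Region2(Region1(A, B), C)  ==>  Region2(top, C, A, B)
     // where Region1's remaining inputs are replaced by top and cleaned up later.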
  78 static Node *merge_region(RegionNode *region, PhaseGVN *phase) {
  79 if( region->Opcode() != Op_Region ) // Do not do this to LoopNodes
  80     return NULL;
  81   Node *progress = NULL;        // Progress flag
  82   PhaseIterGVN *igvn = phase->is_IterGVN();
  83 
  84   uint rreq = region->req();
  85   for( uint i = 1; i < rreq; i++ ) {
  86     Node *r = region->in(i);
  87     if( r && r->Opcode() == Op_Region && // Found a region?
  88         r->in(0) == r &&        // Not already collapsed?
  89         r != region &&          // Avoid stupid situations
  90         r->outcnt() == 2 ) {    // Self user and 'region' user only?
  91       assert(!r->as_Region()->has_phi(), "no phi users");
  92       if( !progress ) {         // No progress
  93         if (region->has_phi()) {
  94           return NULL;        // Only flatten if no Phi users
  95           // igvn->hash_delete( phi );
  96         }
  97         igvn->hash_delete( region );
  98         progress = region;      // Making progress
  99       }
 100       igvn->hash_delete( r );
 101 
 102       // Append inputs to 'r' onto 'region'
 103       for( uint j = 1; j < r->req(); j++ ) {
 104         // Move an input from 'r' to 'region'
 105         region->add_req(r->in(j));
 106         r->set_req(j, phase->C->top());
 107         // Update phis of 'region'
 108         //for( uint k = 0; k < max; k++ ) {
 109         //  Node *phi = region->out(k);
 110         //  if( phi->is_Phi() ) {
 111         //    phi->add_req(phi->in(i));
 112         //  }
 113         //}
 114 
 115         rreq++;                 // One more input to Region
 116       } // Found a region to merge into Region
 117       igvn->_worklist.push(r);
 118       // Clobber pointer to the now dead 'r'
 119       region->set_req(i, phase->C->top());
 120     }
 121   }
 122 
 123   return progress;
 124 }
 125 
 126 
 127 
 128 //--------------------------------has_phi--------------------------------------
 129 // Helper function: Return any PhiNode that uses this region or NULL
 130 PhiNode* RegionNode::has_phi() const {
 131   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 132     Node* phi = fast_out(i);
 133     if (phi->is_Phi()) {   // Check for Phi users
 134       assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
 135       return phi->as_Phi();  // this one is good enough
 136     }
 137   }
 138 
 139   return NULL;
 140 }
 141 
 142 
 143 //-----------------------------has_unique_phi----------------------------------
 144 // Helper function: Return the only PhiNode that uses this region or NULL
 145 PhiNode* RegionNode::has_unique_phi() const {
 146   // Check that only one use is a Phi
 147   PhiNode* only_phi = NULL;
 148   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 149     Node* phi = fast_out(i);
 150     if (phi->is_Phi()) {   // Check for Phi users
 151       assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)");
 152       if (only_phi == NULL) {
 153         only_phi = phi->as_Phi();
 154       } else {
 155         return NULL;  // multiple phis
 156       }
 157     }
 158   }
 159 
 160   return only_phi;
 161 }
 162 
 163 
 164 //------------------------------check_phi_clipping-----------------------------
 165 // Helper function for RegionNode's identification of FP clipping
 166 // Check inputs to the Phi
 167 static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, ConNode * &max, uint &max_idx, Node * &val, uint &val_idx ) {
 168   min     = NULL;
 169   max     = NULL;
 170   val     = NULL;
 171   min_idx = 0;
 172   max_idx = 0;
 173   val_idx = 0;
 174   uint  phi_max = phi->req();
 175   if( phi_max == 4 ) {
 176     for( uint j = 1; j < phi_max; ++j ) {
 177       Node *n = phi->in(j);
 178       int opcode = n->Opcode();
 179       switch( opcode ) {
 180       case Op_ConI:
 181         {
 182           if( min == NULL ) {
 183             min     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
 184             min_idx = j;
 185           } else {
 186             max     = n->Opcode() == Op_ConI ? (ConNode*)n : NULL;
 187             max_idx = j;
 188             if( min->get_int() > max->get_int() ) {
 189               // Swap min and max
 190               ConNode *temp;
 191               uint     temp_idx;
 192               temp     = min;     min     = max;     max     = temp;
 193               temp_idx = min_idx; min_idx = max_idx; max_idx = temp_idx;
 194             }
 195           }
 196         }
 197         break;
 198       default:
 199         {
 200           val = n;
 201           val_idx = j;
 202         }
 203         break;
 204       }
 205     }
 206   }
 207   return ( min && max && val && (min->get_int() <= 0) && (max->get_int() >=0) );
 208 }
 209 
 210 
 211 //------------------------------check_if_clipping------------------------------
 212 // Helper function for RegionNode's identification of FP clipping
 213 // Check that inputs to Region come from two IfNodes,
 214 //
 215 //            If
 216 //      False    True
 217 //       If        |
 218 //  False  True    |
 219 //    |      |     |
 220 //  RegionNode_inputs
 221 //
 222 static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) {
 223   top_if = NULL;
 224   bot_if = NULL;
 225 
 226   // Check control structure above RegionNode for (if  ( if  ) )
 227   Node *in1 = region->in(1);
 228   Node *in2 = region->in(2);
 229   Node *in3 = region->in(3);
 230   // Check that all inputs are projections
 231   if( in1->is_Proj() && in2->is_Proj() && in3->is_Proj() ) {
 232     Node *in10 = in1->in(0);
 233     Node *in20 = in2->in(0);
 234     Node *in30 = in3->in(0);
 235     // Check that #1 and #2 are ifTrue and ifFalse from same If
 236     if( in10 != NULL && in10->is_If() &&
 237         in20 != NULL && in20->is_If() &&
 238         in30 != NULL && in30->is_If() && in10 == in20 &&
 239         (in1->Opcode() != in2->Opcode()) ) {
 240       Node  *in100 = in10->in(0);
 241       Node *in1000 = (in100 != NULL && in100->is_Proj()) ? in100->in(0) : NULL;
 242       // Check that control for in10 comes from other branch of IF from in3
 243       if( in1000 != NULL && in1000->is_If() &&
 244           in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) {
 245         // Control pattern checks
 246         top_if = (IfNode*)in1000;
 247         bot_if = (IfNode*)in10;
 248       }
 249     }
 250   }
 251 
 252   return (top_if != NULL);
 253 }
 254 
 255 
 256 //------------------------------check_convf2i_clipping-------------------------
 257 // Helper function for RegionNode's identification of FP clipping
 258 // Verify that the value input to the phi comes from "ConvF2I; LShift; RShift"
 259 static bool check_convf2i_clipping( PhiNode *phi, uint idx, ConvF2INode * &convf2i, Node *min, Node *max) {
 260   convf2i = NULL;
 261 
 262   // Check for the RShiftNode
 263   Node *rshift = phi->in(idx);
 264   assert( rshift, "Previous checks ensure phi input is present");
 265   if( rshift->Opcode() != Op_RShiftI )  { return false; }
 266 
 267   // Check for the LShiftNode
 268   Node *lshift = rshift->in(1);
 269   assert( lshift, "Previous checks ensure phi input is present");
 270   if( lshift->Opcode() != Op_LShiftI )  { return false; }
 271 
 272   // Check for the ConvF2INode
 273   Node *conv = lshift->in(1);
 274   if( conv->Opcode() != Op_ConvF2I ) { return false; }
 275 
 276   // Check that shift amounts are only to get sign bits set after F2I
 277   jint max_cutoff     = max->get_int();
 278   jint min_cutoff     = min->get_int();
 279   jint left_shift     = lshift->in(2)->get_int();
 280   jint right_shift    = rshift->in(2)->get_int();
 281   jint max_post_shift = nth_bit(BitsPerJavaInteger - left_shift - 1);
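       // For illustration (assumed values, not from the source): with
       // left_shift == right_shift == 16, max_post_shift == nth_bit(32 - 16 - 1) == 32768,
       // so only cutoffs within [-32768, 32768] (e.g. clipping to a 16-bit range) pass
       // the check below.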
 282   if( left_shift != right_shift ||
 283       0 > left_shift || left_shift >= BitsPerJavaInteger ||
 284       max_post_shift < max_cutoff ||
 285       max_post_shift < -min_cutoff ) {
 286     // Shifts are necessary but current transformation eliminates them
 287     return false;
 288   }
 289 
 290   // OK to return the result of ConvF2I without shifting
 291   convf2i = (ConvF2INode*)conv;
 292   return true;
 293 }
 294 
 295 
 296 //------------------------------check_compare_clipping-------------------------
 297 // Helper function for RegionNode's identification of FP clipping
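     // For example (illustration only): with less_than == true and limit == ConI(-32768),
     // the matched test must be "CmpF(input, ConF) le" where ((int)ConF's value) == -32768,
     // i.e. the float constant compared against must correspond to the int clip limit.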
 298 static bool check_compare_clipping( bool less_than, IfNode *iff, ConNode *limit, Node * & input ) {
 299   Node *i1 = iff->in(1);
 300   if ( !i1->is_Bool() ) { return false; }
 301   BoolNode *bool1 = i1->as_Bool();
 302   if(       less_than && bool1->_test._test != BoolTest::le ) { return false; }
 303   else if( !less_than && bool1->_test._test != BoolTest::lt ) { return false; }
 304   const Node *cmpF = bool1->in(1);
 305   if( cmpF->Opcode() != Op_CmpF )      { return false; }
 306   // Test that the float value being compared against
 307   // is equivalent to the int value used as a limit
 308   Node *nodef = cmpF->in(2);
 309   if( nodef->Opcode() != Op_ConF ) { return false; }
 310   jfloat conf = nodef->getf();
 311   jint   coni = limit->get_int();
 312   if( ((int)conf) != coni )        { return false; }
 313   input = cmpF->in(1);
 314   return true;
 315 }
 316 
 317 //------------------------------is_unreachable_region--------------------------
 318 // Check if the RegionNode is part of an unsafe loop and unreachable from root.
 319 bool RegionNode::is_unreachable_region(const PhaseGVN* phase) {
 320   Node* top = phase->C->top();
 321   assert(req() == 2 || (req() == 3 && in(1) != NULL && in(2) == top), "sanity check arguments");
 322   if (_is_unreachable_region) {
 323     // Return cached result from previous evaluation which should still be valid
 324     assert(is_unreachable_from_root(phase), "walk the graph again and check if it's indeed unreachable");
 325     return true;
 326   }
 327 
 328   // First, cut the simple case of fallthrough region when NONE of
 329   // region's phis references itself directly or through a data node.
 330   if (is_possible_unsafe_loop(phase)) {
 331     // If we have a possible unsafe loop, check if the region node is actually unreachable from root.
 332     if (is_unreachable_from_root(phase)) {
 333       _is_unreachable_region = true;
 334       return true;
 335     }
 336   }
 337   return false;
 338 }
 339 
 340 bool RegionNode::is_possible_unsafe_loop(const PhaseGVN* phase) const {
 341   uint max = outcnt();
 342   uint i;
 343   for (i = 0; i < max; i++) {
 344     Node* n = raw_out(i);
 345     if (n != NULL && n->is_Phi()) {
 346       PhiNode* phi = n->as_Phi();
 347       assert(phi->in(0) == this, "sanity check phi");
 348       if (phi->outcnt() == 0) {
 349         continue; // Safe case - no loops
 350       }
 351       if (phi->outcnt() == 1) {
 352         Node* u = phi->raw_out(0);
 353         // Skip if the only use is another Phi or Call or Uncommon trap.
 354         // It is safe to consider this case as fallthrough.
 355         if (u != NULL && (u->is_Phi() || u->is_CFG())) {
 356           continue;
 357         }
 358       }
 359       // Check whether the phi references itself directly or through another node.
 360       if (phi->as_Phi()->simple_data_loop_check(phi->in(1)) >= PhiNode::Unsafe) {
 361         break; // Found possible unsafe data loop.
 362       }
 363     }
 364   }
 365   if (i >= max) {
 366     return false; // An unsafe case was NOT found - don't need graph walk.
 367   }
 368   return true;
 369 }
 370 
 371 bool RegionNode::is_unreachable_from_root(const PhaseGVN* phase) const {
 372   ResourceMark rm;
 373   Node_List nstack;
 374   VectorSet visited;
 375 
 376   // Mark all control nodes reachable from root outputs
 377   Node* n = (Node*)phase->C->root();
 378   nstack.push(n);
 379   visited.set(n->_idx);
 380   while (nstack.size() != 0) {
 381     n = nstack.pop();
 382     uint max = n->outcnt();
 383     for (uint i = 0; i < max; i++) {
 384       Node* m = n->raw_out(i);
 385       if (m != NULL && m->is_CFG()) {
 386         if (m == this) {
 387           return false; // We reached the Region node - it is not dead.
 388         }
 389         if (!visited.test_set(m->_idx))
 390           nstack.push(m);
 391       }
 392     }
 393   }
 394   return true; // The Region node is unreachable - it is dead.
 395 }
 396 
 397 Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) {
 398   // Incremental inlining + PhaseStringOpts sometimes produce:
 399   //
 400   // cmpP with 1 top input
 401   //           |
 402   //          If
 403   //         /  \
 404   //   IfFalse  IfTrue  /- Some Node
 405   //         \  /      /    /
 406   //        Region    / /-MergeMem
 407   //             \---Phi
 408   //
 409   //
 410   // It's expected by PhaseStringOpts that the Region goes away and is
 411   // replaced by If's control input but because there's still a Phi,
 412   // the Region stays in the graph. The top input from the cmpP is
 413   // propagated forward and a subgraph that is useful goes away. The
 414   // code below replaces the Phi with the MergeMem so that the Region
 415   // is simplified.
 416 
 417   if (type() == Type::MEMORY && is_diamond_phi(true)) {
 418     MergeMemNode* m = NULL;
 419     assert(req() == 3, "same as region");
 420     Node* r = in(0);
 421     for (uint i = 1; i < 3; ++i) {
 422       Node *mem = in(i);
 423       if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) {
 424         // Nothing is control-dependent on path #i except the region itself.
 425         m = mem->as_MergeMem();
 426         uint j = 3 - i;
 427         Node* other = in(j);
 428         if (other && other == m->base_memory()) {
 429           // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
 430           // This will allow the diamond to collapse completely.
 431           return m;
 432         }
 433       }
 434     }
 435   }
 436   return NULL;
 437 }
 438 
 439 //------------------------------Ideal------------------------------------------
 440 // Return a node which is more "ideal" than the current node.  Must preserve
 441 // the CFG, but we can still strip out dead paths.
 442 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 443   if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
 444   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 445 
 446   // Check for a RegionNode with no Phi users where two inputs come from opposite
 447   // arms of the same If.  If found, then that control-flow split is useless.
 448   bool has_phis = false;
 449   if (can_reshape) {            // Need DU info to check for Phi users
 450     has_phis = (has_phi() != NULL);       // Cache result
 451     if (has_phis) {
 452       PhiNode* phi = has_unique_phi();
 453       if (phi != NULL) {
 454         Node* m = phi->try_clean_mem_phi(phase);
 455         if (m != NULL) {
 456           phase->is_IterGVN()->replace_node(phi, m);
 457           has_phis = false;
 458         }
 459       }
 460     }
 461 
 462     if (!has_phis) {            // No Phi users?  Nothing merging?
 463       for (uint i = 1; i < req()-1; i++) {
 464         Node *if1 = in(i);
 465         if( !if1 ) continue;
 466         Node *iff = if1->in(0);
 467         if( !iff || !iff->is_If() ) continue;
 468         for( uint j=i+1; j<req(); j++ ) {
 469           if( in(j) && in(j)->in(0) == iff &&
 470               if1->Opcode() != in(j)->Opcode() ) {
 471             // Add the IF Projections to the worklist. They (and the IF itself)
 472             // will be eliminated if dead.
 473             phase->is_IterGVN()->add_users_to_worklist(iff);
 474             set_req(i, iff->in(0));// Skip around the useless IF diamond
 475             set_req(j, NULL);
 476             return this;      // Record progress
 477           }
 478         }
 479       }
 480     }
 481   }
 482 
 483   // Remove TOP or NULL input paths. If only 1 input path remains, this Region
 484   // degrades to a copy.
 485   bool add_to_worklist = true;
 486   bool modified = false;
 487   int cnt = 0;                  // Count of values merging
 488   DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
 489   int del_it = 0;               // The last input path we delete
 490   // For all inputs...
 491   for( uint i=1; i<req(); ++i ){// For all paths in
 492     Node *n = in(i);            // Get the input
 493     if( n != NULL ) {
 494       // Remove useless control copy inputs
 495       if( n->is_Region() && n->as_Region()->is_copy() ) {
 496         set_req(i, n->nonnull_req());
 497         modified = true;
 498         i--;
 499         continue;
 500       }
 501       if( n->is_Proj() ) {      // Remove useless rethrows
 502         Node *call = n->in(0);
 503         if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
 504           set_req(i, call->in(0));
 505           modified = true;
 506           i--;
 507           continue;
 508         }
 509       }
 510       if( phase->type(n) == Type::TOP ) {
 511         set_req_X(i, NULL, phase); // Ignore TOP inputs
 512         modified = true;
 513         i--;
 514         continue;
 515       }
 516       cnt++;                    // One more value merging
 517 
 518     } else if (can_reshape) {   // Else found dead path with DU info
 519       PhaseIterGVN *igvn = phase->is_IterGVN();
 520       del_req(i);               // Yank path from self
 521       del_it = i;
 522       uint max = outcnt();
 523       DUIterator j;
 524       bool progress = true;
 525       while(progress) {         // Need to establish property over all users
 526         progress = false;
 527         for (j = outs(); has_out(j); j++) {
 528           Node *n = out(j);
 529           if( n->req() != req() && n->is_Phi() ) {
 530             assert( n->in(0) == this, "" );
 531             igvn->hash_delete(n); // Yank from hash before hacking edges
 532             n->set_req_X(i,NULL,igvn);// Correct DU info
 533             n->del_req(i);        // Yank path from Phis
 534             if( max != outcnt() ) {
 535               progress = true;
 536               j = refresh_out_pos(j);
 537               max = outcnt();
 538             }
 539           }
 540         }
 541       }
 542       add_to_worklist = false;
 543       phase->is_IterGVN()->add_users_to_worklist(this);
 544       i--;
 545     }
 546   }
 547 
 548   if (can_reshape && cnt == 1) {
 549     // Is it a dead loop?
 550     // If it is a LoopNode it had 2 (+1 itself) inputs and
 551     // one of them was cut. The loop is dead if it was EntryControl.
 552     // Loop node may have only one input because entry path
 553     // is removed in PhaseIdealLoop::Dominators().
 554     assert(!this->is_Loop() || cnt_orig <= 3, "Loop node should have 3 or less inputs");
 555     if ((this->is_Loop() && (del_it == LoopNode::EntryControl ||
 556                              (del_it == 0 && is_unreachable_region(phase)))) ||
 557         (!this->is_Loop() && has_phis && is_unreachable_region(phase))) {
 558       // This region and therefore all nodes on the input control path(s) are unreachable
 559       // from root. To avoid incomplete removal of unreachable subgraphs, walk up the CFG
 560       // and aggressively replace all nodes by top.
 561       PhaseIterGVN* igvn = phase->is_IterGVN();
 562       Node* top = phase->C->top();
 563       ResourceMark rm;
 564       Node_List nstack;
 565       VectorSet visited;
 566       nstack.push(this);
 567       visited.set(_idx);
 568       while (nstack.size() != 0) {
 569         Node* n = nstack.pop();
 570         for (uint i = 0; i < n->req(); ++i) {
 571           Node* m = n->in(i);
 572           assert(m != (Node*)phase->C->root(), "Should be unreachable from root");
 573           if (m != NULL && m->is_CFG() && !visited.test_set(m->_idx)) {
 574             nstack.push(m);
 575           }
 576         }
 577         if (n->is_Region()) {
 578           // Eagerly replace phis with top to avoid regionless phis.
 579           n->set_req(0, NULL);
 580           bool progress = true;
 581           uint max = n->outcnt();
 582           DUIterator j;
 583           while (progress) {
 584             progress = false;
 585             for (j = n->outs(); n->has_out(j); j++) {
 586               Node* u = n->out(j);
 587               if (u->is_Phi()) {
 588                 igvn->replace_node(u, top);
 589                 if (max != n->outcnt()) {
 590                   progress = true;
 591                   j = n->refresh_out_pos(j);
 592                   max = n->outcnt();
 593                 }
 594               }
 595             }
 596           }
 597         }
 598         igvn->replace_node(n, top);
 599       }
 600       return NULL;
 601     }
 602   }
 603 
 604   if( cnt <= 1 ) {              // Only 1 path in?
 605     set_req(0, NULL);           // Null control input for region copy
 606     if( cnt == 0 && !can_reshape) { // Parse phase - leave the node as it is.
 607       // No inputs or all inputs are NULL.
 608       return NULL;
 609     } else if (can_reshape) {   // Optimization phase - remove the node
 610       PhaseIterGVN *igvn = phase->is_IterGVN();
 611       // Strip mined (inner) loop is going away, remove outer loop.
 612       if (is_CountedLoop() &&
 613           as_Loop()->is_strip_mined()) {
 614         Node* outer_sfpt = as_CountedLoop()->outer_safepoint();
 615         Node* outer_out = as_CountedLoop()->outer_loop_exit();
 616         if (outer_sfpt != NULL && outer_out != NULL) {
 617           Node* in = outer_sfpt->in(0);
 618           igvn->replace_node(outer_out, in);
 619           LoopNode* outer = as_CountedLoop()->outer_loop();
 620           igvn->replace_input_of(outer, LoopNode::LoopBackControl, igvn->C->top());
 621         }
 622       }
 623       if (is_CountedLoop()) {
 624         Node* opaq = as_CountedLoop()->is_canonical_loop_entry();
 625         if (opaq != NULL) {
 626           // This is not a loop anymore. No need to keep the Opaque1 node on the test that guards the loop as it won't be
 627           // subject to further loop opts.
 628           assert(opaq->Opcode() == Op_Opaque1, "");
 629           igvn->replace_node(opaq, opaq->in(1));
 630         }
 631       }
 632       Node *parent_ctrl;
 633       if( cnt == 0 ) {
 634         assert( req() == 1, "no inputs expected" );
 635         // During IGVN phase such region will be subsumed by TOP node
 636         // so region's phis will have TOP as control node.
 637         // Kill phis here to avoid it.
 638         // Also set other user's input to top.
 639         parent_ctrl = phase->C->top();
 640       } else {
 641         // The fallthrough case since we already checked dead loops above.
 642         parent_ctrl = in(1);
 643         assert(parent_ctrl != NULL, "Region is a copy of some non-null control");
 644         assert(parent_ctrl != this, "Close dead loop");
 645       }
 646       if (add_to_worklist) {
 647         igvn->add_users_to_worklist(this); // Check for further allowed opts
 648       }
 649       for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
 650         Node* n = last_out(i);
 651         igvn->hash_delete(n); // Remove from worklist before modifying edges
 652         if (n->outcnt() == 0) {
 653           int uses_found = n->replace_edge(this, phase->C->top(), igvn);
 654           if (uses_found > 1) { // (--i) done at the end of the loop.
 655             i -= (uses_found - 1);
 656           }
 657           continue;
 658         }
 659         if( n->is_Phi() ) {   // Collapse all Phis
 660           // Eagerly replace phis to avoid regionless phis.
 661           Node* in;
 662           if( cnt == 0 ) {
 663             assert( n->req() == 1, "No data inputs expected" );
 664             in = parent_ctrl; // replaced by top
 665           } else {
 666             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
 667             in = n->in(1);               // replaced by unique input
 668             if( n->as_Phi()->is_unsafe_data_reference(in) )
 669               in = phase->C->top();      // replaced by top
 670           }
 671           igvn->replace_node(n, in);
 672         }
 673         else if( n->is_Region() ) { // Update all incoming edges
 674           assert(n != this, "Must be removed from DefUse edges");
 675           int uses_found = n->replace_edge(this, parent_ctrl, igvn);
 676           if (uses_found > 1) { // (--i) done at the end of the loop.
 677             i -= (uses_found - 1);
 678           }
 679         }
 680         else {
 681           assert(n->in(0) == this, "Expect RegionNode to be control parent");
 682           n->set_req(0, parent_ctrl);
 683         }
 684 #ifdef ASSERT
 685         for( uint k=0; k < n->req(); k++ ) {
 686           assert(n->in(k) != this, "All uses of RegionNode should be gone");
 687         }
 688 #endif
 689       }
 690       // Remove the RegionNode itself from DefUse info
 691       igvn->remove_dead_node(this);
 692       return NULL;
 693     }
 694     return this;                // Record progress
 695   }
 696 
 697 
 698   // If a Region flows into a Region, merge into one big happy merge.
 699   if (can_reshape) {
 700     Node *m = merge_region(this, phase);
 701     if (m != NULL)  return m;
 702   }
 703 
 704   // Check if this region is the root of a clipping idiom on floats
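       // The pattern matched below roughly corresponds to source code that clamps a float
       // to a fixed [min, max] range and otherwise uses its int conversion narrowed by a
       // matching "LShiftI; RShiftI" pair (e.g. a cast down to a 16-bit value).  This is an
       // illustrative sketch only; the helper routines above define the exact shape.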
 705   if( ConvertFloat2IntClipping && can_reshape && req() == 4 ) {
 706     // Check that only one use is a Phi and that it simplifies to two constants + one other value
 707     PhiNode* phi = has_unique_phi();
 708     if (phi != NULL) {          // One Phi user
 709       // Check inputs to the Phi
 710       ConNode *min;
 711       ConNode *max;
 712       Node    *val;
 713       uint     min_idx;
 714       uint     max_idx;
 715       uint     val_idx;
 716       if( check_phi_clipping( phi, min, min_idx, max, max_idx, val, val_idx )  ) {
 717         IfNode *top_if;
 718         IfNode *bot_if;
 719         if( check_if_clipping( this, bot_if, top_if ) ) {
 720           // Control pattern checks, now verify compares
 721           Node   *top_in = NULL;   // value being compared against
 722           Node   *bot_in = NULL;
 723           if( check_compare_clipping( true,  bot_if, min, bot_in ) &&
 724               check_compare_clipping( false, top_if, max, top_in ) ) {
 725             if( bot_in == top_in ) {
 726               PhaseIterGVN *gvn = phase->is_IterGVN();
 727               assert( gvn != NULL, "Only had DefUse info in IterGVN");
 728               // Only remaining check is that bot_in == top_in == (Phi's val + mods)
 729 
 730               // Check for the ConvF2INode
 731               ConvF2INode *convf2i;
 732               if( check_convf2i_clipping( phi, val_idx, convf2i, min, max ) &&
 733                 convf2i->in(1) == bot_in ) {
 734                 // Matched pattern, including LShiftI; RShiftI, replace with integer compares
 735                 // max test
 736                 Node *cmp   = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, min ));
 737                 Node *boo   = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::lt ));
 738                 IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt ));
 739                 Node *if_min= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
 740                 Node *ifF   = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
 741                 // min test
 742                 cmp         = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, max ));
 743                 boo         = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::gt ));
 744                 iff         = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt ));
 745                 Node *if_max= gvn->register_new_node_with_optimizer(new IfTrueNode (iff));
 746                 ifF         = gvn->register_new_node_with_optimizer(new IfFalseNode(iff));
 747                 // update input edges to region node
 748                 set_req_X( min_idx, if_min, gvn );
 749                 set_req_X( max_idx, if_max, gvn );
 750                 set_req_X( val_idx, ifF,    gvn );
 751                 // remove unnecessary 'LShiftI; RShiftI' idiom
 752                 gvn->hash_delete(phi);
 753                 phi->set_req_X( val_idx, convf2i, gvn );
 754                 gvn->hash_find_insert(phi);
 755                 // Return transformed region node
 756                 return this;
 757               }
 758             }
 759           }
 760         }
 761       }
 762     }
 763   }
 764 
 765   if (can_reshape) {
 766     modified |= optimize_trichotomy(phase->is_IterGVN());
 767   }
 768 
 769   return modified ? this : NULL;
 770 }
 771 
 772 //------------------------------optimize_trichotomy--------------------------
 773 // Optimize nested comparisons of the following kind:
 774 //
 775 // int compare(int a, int b) {
 776 //   return (a < b) ? -1 : (a == b) ? 0 : 1;
 777 // }
 778 //
 779 // Shape 1:
 780 // if (compare(a, b) == 1) { ... } -> if (a > b) { ... }
 781 //
 782 // Shape 2:
 783 // if (compare(a, b) == 0) { ... } -> if (a == b) { ... }
 784 //
 785 // Above code leads to the following IR shapes where both Ifs compare the
 786 // same value and two out of three region inputs idx1 and idx2 map to
 787 // the same value and control flow.
 788 //
 789 // (1)   If                 (2)   If
 790 //      /  \                     /  \
 791 //   Proj  Proj               Proj  Proj
 792 //     |      \                |      \
 793 //     |       If              |      If                      If
 794 //     |      /  \             |     /  \                    /  \
 795 //     |   Proj  Proj          |  Proj  Proj      ==>     Proj  Proj
 796 //     |   /      /            \    |    /                  |    /
 797 //    Region     /              \   |   /                   |   /
 798 //         \    /                \  |  /                    |  /
 799 //         Region                Region                    Region
 800 //
 801 // The method returns true if 'this' is modified and false otherwise.
 802 bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
 803   int idx1 = 1, idx2 = 2;
 804   Node* region = NULL;
 805   if (req() == 3 && in(1) != NULL && in(2) != NULL) {
 806     // Shape 1: Check if one of the inputs is a region that merges two control
 807     // inputs and has no other users (especially no Phi users).
 808     region = in(1)->isa_Region() ? in(1) : in(2)->isa_Region();
 809     if (region == NULL || region->outcnt() != 2 || region->req() != 3) {
 810       return false; // No suitable region input found
 811     }
 812   } else if (req() == 4) {
 813     // Shape 2: Check if two control inputs map to the same value of the unique phi
 814     // user and treat these as if they would come from another region (shape (1)).
 815     PhiNode* phi = has_unique_phi();
 816     if (phi == NULL) {
 817       return false; // No unique phi user
 818     }
 819     if (phi->in(idx1) != phi->in(idx2)) {
 820       idx2 = 3;
 821       if (phi->in(idx1) != phi->in(idx2)) {
 822         idx1 = 2;
 823         if (phi->in(idx1) != phi->in(idx2)) {
 824           return false; // No equal phi inputs found
 825         }
 826       }
 827     }
 828     assert(phi->in(idx1) == phi->in(idx2), "must be"); // Region is merging same value
 829     region = this;
 830   }
 831   if (region == NULL || region->in(idx1) == NULL || region->in(idx2) == NULL) {
 832     return false; // Region does not merge two control inputs
 833   }
 834   // At this point we know that region->in(idx1) and region->in(idx2) map to the same
 835   // value and control flow. Now search for ifs that feed into these region inputs.
 836   ProjNode* proj1 = region->in(idx1)->isa_Proj();
 837   ProjNode* proj2 = region->in(idx2)->isa_Proj();
 838   if (proj1 == NULL || proj1->outcnt() != 1 ||
 839       proj2 == NULL || proj2->outcnt() != 1) {
 840     return false; // No projection inputs with region as unique user found
 841   }
 842   assert(proj1 != proj2, "should be different projections");
 843   IfNode* iff1 = proj1->in(0)->isa_If();
 844   IfNode* iff2 = proj2->in(0)->isa_If();
 845   if (iff1 == NULL || iff1->outcnt() != 2 ||
 846       iff2 == NULL || iff2->outcnt() != 2) {
 847     return false; // No ifs found
 848   }
 849   if (iff1 == iff2) {
 850     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 851     igvn->replace_input_of(region, idx1, iff1->in(0));
 852     igvn->replace_input_of(region, idx2, igvn->C->top());
 853     return (region == this); // Remove useless if (both projections map to the same control/value)
 854   }
 855   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 856   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 857   if (bol1 == NULL || bol2 == NULL) {
 858     return false; // No bool inputs found
 859   }
 860   Node* cmp1 = bol1->in(1);
 861   Node* cmp2 = bol2->in(1);
 862   bool commute = false;
 863   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 864     return false; // No comparison
 865   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 866              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 867              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 868              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 869              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
 870              cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
 871     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 872     // SubTypeCheck is not commutative
 873     return false;
 874   } else if (cmp1 != cmp2) {
 875     if (cmp1->in(1) == cmp2->in(2) &&
 876         cmp1->in(2) == cmp2->in(1)) {
 877       commute = true; // Same but swapped inputs, commute the test
 878     } else {
 879       return false; // Ifs are not comparing the same values
 880     }
 881   }
 882   proj1 = proj1->other_if_proj();
 883   proj2 = proj2->other_if_proj();
 884   if (!((proj1->unique_ctrl_out_or_null() == iff2 &&
 885          proj2->unique_ctrl_out_or_null() == this) ||
 886         (proj2->unique_ctrl_out_or_null() == iff1 &&
 887          proj1->unique_ctrl_out_or_null() == this))) {
 888     return false; // Ifs are not connected through other projs
 889   }
 890   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
 891   // through 'region' and map to the same value. Merge the boolean tests and replace
 892   // the ifs by a single comparison.
 893   BoolTest test1 = (proj1->_con == 1) ? bol1->_test : bol1->_test.negate();
 894   BoolTest test2 = (proj2->_con == 1) ? bol2->_test : bol2->_test.negate();
 895   test1 = commute ? test1.commute() : test1;
 896   // After possibly commuting test1, if we can merge test1 & test2, then proj2/iff2/bol2 are the nodes to refine.
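       // Worked example for the compare() pattern above (one possible orientation, for
       // illustration): iff1 tests "a < b", iff2 tests "a == b", and the projections left
       // on the taken path are the "false" ones, so test1 == ge, test2 == ne and
       // merge(ge, ne) == gt -- shape (1) then reduces to a single "a > b" test.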
 897   BoolTest::mask res = test1.merge(test2);
 898   if (res == BoolTest::illegal) {
 899     return false; // Unable to merge tests
 900   }
 901   // Adjust iff1 to always pass (only iff2 will remain)
 902   igvn->replace_input_of(iff1, 1, igvn->intcon(proj1->_con));
 903   if (res == BoolTest::never) {
 904     // Merged test is always false, adjust iff2 to always fail
 905     igvn->replace_input_of(iff2, 1, igvn->intcon(1 - proj2->_con));
 906   } else {
 907     // Replace bool input of iff2 with merged test
 908     BoolNode* new_bol = new BoolNode(bol2->in(1), res);
 909     igvn->replace_input_of(iff2, 1, igvn->transform((proj2->_con == 1) ? new_bol : new_bol->negate(igvn)));
 910     if (new_bol->outcnt() == 0) {
 911       igvn->remove_dead_node(new_bol);
 912     }
 913   }
 914   return false;
 915 }
 916 
 917 const RegMask &RegionNode::out_RegMask() const {
 918   return RegMask::Empty;
 919 }
 920 
 921 // Find the one non-null required input.  RegionNode only
 922 Node *Node::nonnull_req() const {
 923   assert( is_Region(), "" );
 924   for( uint i = 1; i < _cnt; i++ )
 925     if( in(i) )
 926       return in(i);
 927   ShouldNotReachHere();
 928   return NULL;
 929 }
 930 
 931 
 932 //=============================================================================
 933 // note that these functions assume that the _adr_type field is flattened
 934 uint PhiNode::hash() const {
 935   const Type* at = _adr_type;
 936   return TypeNode::hash() + (at ? at->hash() : 0);
 937 }
 938 bool PhiNode::cmp( const Node &n ) const {
 939   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
 940 }
 941 static inline
 942 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
 943   if (at == NULL || at == TypePtr::BOTTOM)  return at;
 944   return Compile::current()->alias_type(at)->adr_type();
 945 }
 946 
 947 //----------------------------make---------------------------------------------
 948 // create a new phi with edges matching r and set (initially) to x
 949 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
 950   uint preds = r->req();   // Number of predecessor paths
 951   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flattened_accesses_share_alias()), "flatten at");
 952   PhiNode* p = new PhiNode(r, t, at);
 953   for (uint j = 1; j < preds; j++) {
 954     // Fill in all inputs, except those which the region does not yet have
 955     if (r->in(j) != NULL)
 956       p->init_req(j, x);
 957   }
 958   return p;
 959 }
 960 PhiNode* PhiNode::make(Node* r, Node* x) {
 961   const Type*    t  = x->bottom_type();
 962   const TypePtr* at = NULL;
 963   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 964   return make(r, x, t, at);
 965 }
 966 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
 967   const Type*    t  = x->bottom_type();
 968   const TypePtr* at = NULL;
 969   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 970   return new PhiNode(r, t, at);
 971 }
 972 
 973 
 974 //------------------------slice_memory-----------------------------------------
 975 // create a new phi with narrowed memory type
 976 PhiNode* PhiNode::slice_memory(const TypePtr* adr_type) const {
 977   PhiNode* mem = (PhiNode*) clone();
 978   *(const TypePtr**)&mem->_adr_type = adr_type;
 979   // convert self-loops, or else we get a bad graph
 980   for (uint i = 1; i < req(); i++) {
 981     if ((const Node*)in(i) == this)  mem->set_req(i, mem);
 982   }
 983   mem->verify_adr_type();
 984   return mem;
 985 }
 986 
 987 //------------------------split_out_instance-----------------------------------
 988 // Split out an instance type from a bottom phi.
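     // Note (summary of the algorithm below, not from the original source): the nest of
     // TypePtr::BOTTOM memory phis reachable through this phi's inputs is walked with an
     // explicit Node_Stack, each phi is cloned via slice_memory(at) and the clones are
     // wired together, yielding a phi restricted to the known-instance slice 'at'.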
 989 PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const {
 990   const TypeOopPtr *t_oop = at->isa_oopptr();
 991   assert(t_oop != NULL && t_oop->is_known_instance(), "expecting instance oopptr");
 992   const TypePtr *t = adr_type();
 993   assert(type() == Type::MEMORY &&
 994          (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
 995           t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
 996           t->is_oopptr()->cast_to_exactness(true)
 997            ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
 998            ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop),
 999          "bottom or raw memory required");
1000 
1001   // Check if an appropriate node already exists.
1002   Node *region = in(0);
1003   for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
1004     Node* use = region->fast_out(k);
1005     if( use->is_Phi()) {
1006       PhiNode *phi2 = use->as_Phi();
1007       if (phi2->type() == Type::MEMORY && phi2->adr_type() == at) {
1008         return phi2;
1009       }
1010     }
1011   }
1012   Compile *C = igvn->C;
1013   Arena *a = Thread::current()->resource_area();
1014   Node_Array node_map = new Node_Array(a);
1015   Node_Stack stack(a, C->live_nodes() >> 4);
1016   PhiNode *nphi = slice_memory(at);
1017   igvn->register_new_node_with_optimizer( nphi );
1018   node_map.map(_idx, nphi);
1019   stack.push((Node *)this, 1);
1020   while(!stack.is_empty()) {
1021     PhiNode *ophi = stack.node()->as_Phi();
1022     uint i = stack.index();
1023     assert(i >= 1, "not control edge");
1024     stack.pop();
1025     nphi = node_map[ophi->_idx]->as_Phi();
1026     for (; i < ophi->req(); i++) {
1027       Node *in = ophi->in(i);
1028       if (in == NULL || igvn->type(in) == Type::TOP)
1029         continue;
1030       Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn);
1031       PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL;
1032       if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) {
1033         opt = node_map[optphi->_idx];
1034         if (opt == NULL) {
1035           stack.push(ophi, i);
1036           nphi = optphi->slice_memory(at);
1037           igvn->register_new_node_with_optimizer( nphi );
1038           node_map.map(optphi->_idx, nphi);
1039           ophi = optphi;
1040           i = 0; // will get incremented at top of loop
1041           continue;
1042         }
1043       }
1044       nphi->set_req(i, opt);
1045     }
1046   }
1047   return nphi;
1048 }
1049 
1050 //------------------------verify_adr_type--------------------------------------
1051 #ifdef ASSERT
1052 void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const {
1053   if (visited.test_set(_idx))  return;  //already visited
1054 
1055   // recheck constructor invariants:
1056   verify_adr_type(false);
1057 
1058   // recheck local phi/phi consistency:
1059   assert(_adr_type == at || _adr_type == TypePtr::BOTTOM,
1060          "adr_type must be consistent across phi nest");
1061 
1062   // walk around
1063   for (uint i = 1; i < req(); i++) {
1064     Node* n = in(i);
1065     if (n == NULL)  continue;
1066     const Node* np = in(i);
1067     if (np->is_Phi()) {
1068       np->as_Phi()->verify_adr_type(visited, at);
1069     } else if (n->bottom_type() == Type::TOP
1070                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1071       // ignore top inputs
1072     } else {
1073       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1074       // recheck phi/non-phi consistency at leaves:
1075       assert((nat != NULL) == (at != NULL), "");
1076       assert(nat == at || nat == TypePtr::BOTTOM,
1077              "adr_type must be consistent at leaves of phi nest");
1078     }
1079   }
1080 }
1081 
1082 // Verify a whole nest of phis rooted at this one.
1083 void PhiNode::verify_adr_type(bool recursive) const {
1084   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1085   if (Node::in_dump())               return;  // muzzle asserts when printing
1086 
1087   assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
1088   // Flat array elements shouldn't get their own memory slice until flattened_accesses_share_alias is cleared.
1089   // It could be that the graph has no loads/stores and flattened_accesses_share_alias is never cleared. EA could still
1090   // create per-element Phis, but that wouldn't be a problem as there are no memory accesses for that array.
1091   assert(_adr_type == NULL || _adr_type->isa_aryptr() == NULL ||
1092          _adr_type->is_aryptr()->is_known_instance() ||
1093          !_adr_type->is_aryptr()->is_flat() ||
1094          !Compile::current()->flattened_accesses_share_alias() ||
1095          _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");
1096 
1097   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1098 
1099   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1100          "Phi::adr_type must be pre-normalized");
1101 
1102   if (recursive) {
1103     VectorSet visited;
1104     verify_adr_type(visited, _adr_type);
1105   }
1106 }
1107 #endif
1108 
1109 
1110 //------------------------------Value------------------------------------------
1111 // Compute the type of the PhiNode
1112 const Type* PhiNode::Value(PhaseGVN* phase) const {
1113   Node *r = in(0);              // RegionNode
1114   if( !r )                      // Copy or dead
1115     return in(1) ? phase->type(in(1)) : Type::TOP;
1116 
1117   // Note: During parsing, phis are often transformed before their regions.
1118   // This means we have to use type_or_null to defend against untyped regions.
1119   if( phase->type_or_null(r) == Type::TOP )  // Dead code?
1120     return Type::TOP;
1121 
1122   // Check for trip-counted loop.  If so, be smarter.
1123   BaseCountedLoopNode* l = r->is_BaseCountedLoop() ? r->as_BaseCountedLoop() : NULL;
1124   if (l && ((const Node*)l->phi() == this)) { // Trip counted loop!
1125     // protect against init_trip() or limit() returning NULL
1126     if (l->can_be_counted_loop(phase)) {
1127       const Node* init = l->init_trip();
1128       const Node* limit = l->limit();
1129       const Node* stride = l->stride();
1130       if (init != NULL && limit != NULL && stride != NULL) {
1131         const TypeInteger* lo = phase->type(init)->isa_integer(l->bt());
1132         const TypeInteger* hi = phase->type(limit)->isa_integer(l->bt());
1133         const TypeInteger* stride_t = phase->type(stride)->isa_integer(l->bt());
1134         if (lo != NULL && hi != NULL && stride_t != NULL) { // Dying loops might have TOP here
1135           assert(stride_t->is_con(), "bad stride type");
1136           BoolTest::mask bt = l->loopexit()->test_trip();
1137           // If the loop exit condition is "not equal", the condition
1138           // would not trigger if init > limit (if stride > 0) or if
1139           // init < limit (if stride < 0) so we can't deduce bounds
1140           // for the iv from the exit condition.
1141           if (bt != BoolTest::ne) {
1142             jlong stride_con = stride_t->get_con_as_long(l->bt());
1143             if (stride_con < 0) {          // Down-counter loop
1144               swap(lo, hi);
1145               jlong iv_range_lower_limit = lo->lo_as_long();
1146               // Prevent overflow when adding one below
1147               if (iv_range_lower_limit < max_signed_integer(l->bt())) {
1148                 // The loop exit condition is: iv + stride > limit (iv is this Phi). So the loop iterates until
1149                 // iv + stride <= limit
1150                 // We know that: limit >= lo->lo_as_long() and stride <= -1
1151                 // So when the loop exits, iv has to be at least lo->lo_as_long() + 1
1152                 iv_range_lower_limit += 1; // lo is after decrement
1153                 // Exact bounds for the phi can be computed when ABS(stride) is greater than 1 and the bounds are constant.
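                      // Worked example (illustrative values): init == 10, limit == 0, stride == -2
                      // gives uhi == 10, ulo == 0, diff == ((10 - 0 - 1) / 2) * 2 == 8 and
                      // ufirst == 10 - 8 == 2, so the phi is known to stay within [2, 10].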
1154                 if (lo->is_con() && hi->is_con() && hi->lo_as_long() > lo->hi_as_long() && stride_con != -1) {
1155                   julong uhi = static_cast<julong>(hi->lo_as_long());
1156                   julong ulo = static_cast<julong>(lo->hi_as_long());
1157                   julong diff = ((uhi - ulo - 1) / (-stride_con)) * (-stride_con);
1158                   julong ufirst = hi->lo_as_long() - diff;
1159                   iv_range_lower_limit = reinterpret_cast<jlong &>(ufirst);
1160                   assert(iv_range_lower_limit >= lo->lo_as_long() + 1, "should end up with narrower range");
1161                 }
1162               }
1163               return TypeInteger::make(MIN2(iv_range_lower_limit, hi->lo_as_long()), hi->hi_as_long(), 3, l->bt())->filter_speculative(_type);
1164             } else if (stride_con >= 0) {
1165               jlong iv_range_upper_limit = hi->hi_as_long();
1166               // Prevent overflow when subtracting one below
1167               if (iv_range_upper_limit > min_signed_integer(l->bt())) {
1168                 // The loop exit condition is: iv + stride < limit (iv is this Phi). So the loop iterates until
1169                 // iv + stride >= limit
1170                 // We know that: limit <= hi->hi_as_long() and stride >= 1
1171                 // So when the loop exits, iv has to be at most hi->hi_as_long() - 1
1172                 iv_range_upper_limit -= 1;
1173                 // Exact bounds for the phi can be computed when ABS(stride) is greater than 1 and the bounds are constant.
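                      // Symmetric worked example (illustrative values): init == 0, limit == 10,
                      // stride == 2 gives diff == ((10 - 0 - 1) / 2) * 2 == 8 and ulast == 8,
                      // so the phi is known to stay within [0, 8].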
1174                 if (lo->is_con() && hi->is_con() && hi->lo_as_long() > lo->hi_as_long() && stride_con != 1) {
1175                   julong uhi = static_cast<julong>(hi->lo_as_long());
1176                   julong ulo = static_cast<julong>(lo->hi_as_long());
1177                   julong diff = ((uhi - ulo - 1) / stride_con) * stride_con;
1178                   julong ulast = lo->hi_as_long() + diff;
1179                   iv_range_upper_limit = reinterpret_cast<jlong &>(ulast);
1180                   assert(iv_range_upper_limit <= hi->hi_as_long() - 1, "should end up with narrower range");
1181                 }
1182               }
1183               return TypeInteger::make(lo->lo_as_long(), MAX2(lo->hi_as_long(), iv_range_upper_limit), 3, l->bt())->filter_speculative(_type);
1184             }
1185           }
1186         }
1187       }
1188     } else if (l->in(LoopNode::LoopBackControl) != NULL &&
1189                in(LoopNode::EntryControl) != NULL &&
1190                phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
1191       // During CCP, if we saturate the type of a counted loop's Phi
1192       // before the special code for counted loop above has a chance
1193       // to run (that is as long as the type of the backedge's control
1194       // is top), we might end up with non monotonic types
1195       return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
1196     }
1197   }
1198 
1199   // Until we have harmony between classes and interfaces in the type
1200   // lattice, we must tread carefully around phis which implicitly
1201   // convert the one to the other.
1202   const TypePtr* ttp = _type->make_ptr();
1203   const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
1204   const TypeInstKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_instklassptr() : NULL;
1205   bool is_intf = false;
1206   if (ttip != NULL) {
1207     if (ttip->is_interface())
1208       is_intf = true;
1209   }
1210   if (ttkp != NULL) {
1211     if (ttkp->is_interface())
1212       is_intf = true;
1213   }
1214 
1215   // Default case: merge all inputs
1216   const Type *t = Type::TOP;        // Merged type starting value
1217   for (uint i = 1; i < req(); ++i) {// For all paths in
1218     // Reachable control path?
1219     if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
1220       const Type* ti = phase->type(in(i));
1221       // We assume that each input of an interface-valued Phi is a true
1222       // subtype of that interface.  This might not be true of the meet
1223       // of all the input types.  The lattice is not distributive in
1224       // such cases.  Ward off asserts in type.cpp by refusing to do
1225       // meets between interfaces and proper classes.
1226       const TypePtr* tip = ti->make_ptr();
1227       const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
1228       if (tiip) {
1229         bool ti_is_intf = false;
1230         if (tiip->is_interface())
1231           ti_is_intf = true;
1232         if (is_intf != ti_is_intf)
1233           { t = _type; break; }
1234       }
1235       t = t->meet_speculative(ti);
1236     }
1237   }
1238 
1239   // The worst-case type (from ciTypeFlow) should be consistent with "t".
1240   // That is, we expect that "t->higher_equal(_type)" holds true.
1241   // There are various exceptions:
1242   // - Inputs which are phis might in fact be widened unnecessarily.
1243   //   For example, an input might be a widened int while the phi is a short.
1244   // - Inputs might be BotPtrs but this phi is dependent on a null check,
1245   //   and postCCP has removed the cast which encodes the result of the check.
1246   // - The type of this phi is an interface, and the inputs are classes.
1247   // - Value calls on inputs might produce fuzzy results.
1248   //   (Occurrences of this case suggest improvements to Value methods.)
1249   //
1250   // It is not possible to see Type::BOTTOM values as phi inputs,
1251   // because the ciTypeFlow pre-pass produces verifier-quality types.
1252   const Type* ft = t->filter_speculative(_type);  // Worst case type
1253 
1254 #ifdef ASSERT
1255   // The following logic has been moved into TypeOopPtr::filter.
1256   const Type* jt = t->join_speculative(_type);
1257   if (jt->empty()) {           // Emptied out???
1258 
1259     // Check for evil case of 't' being a class and '_type' expecting an
1260     // interface.  This can happen because the bytecodes do not contain
1261     // enough type info to distinguish a Java-level interface variable
1262     // from a Java-level object variable.  If we meet 2 classes which
1263     // both implement interface I, but their meet is at 'j/l/O' which
1264     // doesn't implement I, we have no way to tell if the result should
1265     // be 'I' or 'j/l/O'.  Thus we'll pick 'j/l/O'.  If this then flows
1266     // into a Phi which "knows" it's an Interface type we'll have to
1267     // uplift the type.
1268     if (!t->empty() && ttip && ttip->is_interface()) {
1269       assert(ft == _type, ""); // Uplift to interface
1270     } else if (!t->empty() && ttkp && ttkp->is_interface()) {
1271       assert(ft == _type, ""); // Uplift to interface
1272     } else {
1273       // We also have to handle 'evil cases' of interface- vs. class-arrays
1274       Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
1275       if (!t->empty() && ttip != NULL && ttip->is_interface()) {
1276           assert(ft == _type, "");   // Uplift to array of interface
1277       } else {
1278         // Otherwise it's something stupid like non-overlapping int ranges
1279         // found on dying counted loops.
1280         assert(ft == Type::TOP, ""); // Canonical empty value
1281       }
1282     }
1283   }
1284 
1285   else {
1286 
1287     // If we have an interface-typed Phi and we narrow to a class type, the join
1288     // should report back the class.  However, if we have a J/L/Object
1289     // class-typed Phi and an interface flows in, it's possible that the meet &
1290     // join report an interface back out.  This shouldn't be possible, but it happens
1291     // because the type system doesn't interact well with interfaces.
1292     const TypePtr *jtp = jt->make_ptr();
1293     const TypeInstPtr *jtip = (jtp != NULL) ? jtp->isa_instptr() : NULL;
1294     const TypeInstKlassPtr *jtkp = (jtp != NULL) ? jtp->isa_instklassptr() : NULL;
1295     if (jtip && ttip) {
1296       if (jtip->is_interface() &&
1297           !ttip->is_interface()) {
1298         assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) ||
1299                ft->isa_narrowoop() && ft->make_ptr() == ttip->cast_to_ptr_type(jtip->ptr()), "");
1300         jt = ft;
1301       }
1302     }
1303     if (jtkp && ttkp) {
1304       if (jtkp->is_interface() &&
1305           !jtkp->klass_is_exact() && // Keep exact interface klass (6894807)
1306           ttkp->is_loaded() && !ttkp->is_interface()) {
1307         assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
1308                ft->isa_narrowklass() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
1309         jt = ft;
1310       }
1311     }
1312     if (jt != ft && jt->base() == ft->base()) {
1313       if (jt->isa_int() &&
1314           jt->is_int()->_lo == ft->is_int()->_lo &&
1315           jt->is_int()->_hi == ft->is_int()->_hi)
1316         jt = ft;
1317       if (jt->isa_long() &&
1318           jt->is_long()->_lo == ft->is_long()->_lo &&
1319           jt->is_long()->_hi == ft->is_long()->_hi)
1320         jt = ft;
1321     }
1322     if (jt != ft) {
1323       tty->print("merge type:  "); t->dump(); tty->cr();
1324       tty->print("kill type:   "); _type->dump(); tty->cr();
1325       tty->print("join type:   "); jt->dump(); tty->cr();
1326       tty->print("filter type: "); ft->dump(); tty->cr();
1327     }
1328     assert(jt == ft, "");
1329   }
1330 #endif //ASSERT
1331 
1332   // Deal with conversion problems found in data loops.
1333   ft = phase->saturate(ft, phase->type_or_null(this), _type);
1334 
1335   return ft;
1336 }
1337 
1338 
1339 //------------------------------is_diamond_phi---------------------------------
1340 // Does this Phi represent a simple well-shaped diamond merge?  Return the
1341 // index of the true path or 0 otherwise.
1342 // If check_control_only is true, do not inspect the If node at the
1343 // top, and return -1 (not an edge number) on success.
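     //
     // As a rough illustration, the shape being matched is:
     //
     //              If
     //             /  \
     //        IfTrue  IfFalse
     //             \  /
     //            Region
     //              |
     //             Phi (this)
     //
     // The two projections may arrive in either order; the returned index (1 or 2)
     // says which phi input corresponds to the true path.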
1344 int PhiNode::is_diamond_phi(bool check_control_only) const {
1345   // Check for a 2-path merge
1346   Node *region = in(0);
1347   if( !region ) return 0;
1348   if( region->req() != 3 ) return 0;
1349   if(         req() != 3 ) return 0;
1350   // Check that both paths come from the same If
1351   Node *ifp1 = region->in(1);
1352   Node *ifp2 = region->in(2);
1353   if( !ifp1 || !ifp2 ) return 0;
1354   Node *iff = ifp1->in(0);
1355   if( !iff || !iff->is_If() ) return 0;
1356   if( iff != ifp2->in(0) ) return 0;
1357   if (check_control_only)  return -1;
1358   // Check for a proper bool/cmp
1359   const Node *b = iff->in(1);
1360   if( !b->is_Bool() ) return 0;
1361   const Node *cmp = b->in(1);
1362   if( !cmp->is_Cmp() ) return 0;
1363 
1364   // Check for branching opposite expected
1365   if( ifp2->Opcode() == Op_IfTrue ) {
1366     assert( ifp1->Opcode() == Op_IfFalse, "" );
1367     return 2;
1368   } else {
1369     assert( ifp1->Opcode() == Op_IfTrue, "" );
1370     return 1;
1371   }
1372 }
1373 
1374 //----------------------------check_cmove_id-----------------------------------
1375 // Check for CMove'ing a constant after comparing against the constant.
1376 // Happens all the time now, since if we compare equality vs a constant in
1377 // the parser, we "know" the variable is constant on one path and we force
1378 // it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
1379 // conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
1380 // general in that we don't need constants.  Since CMove's are only inserted
1381 // in very special circumstances, we do it here on generic Phi's.
1382 Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) {
1383   assert(true_path !=0, "only diamond shape graph expected");
1384 
1385   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1386   // phi->region->if_proj->ifnode->bool->cmp
1387   Node*     region = in(0);
1388   Node*     iff    = region->in(1)->in(0);
1389   BoolNode* b      = iff->in(1)->as_Bool();
1390   Node*     cmp    = b->in(1);
1391   Node*     tval   = in(true_path);
1392   Node*     fval   = in(3-true_path);
1393   Node*     id     = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b);
1394   if (id == NULL)
1395     return NULL;
1396 
1397   // Either value might be a cast that depends on a branch of 'iff'.
1398   // Since the 'id' value will float free of the diamond, either
1399   // decast or return failure.
1400   Node* ctl = id->in(0);
1401   if (ctl != NULL && ctl->in(0) == iff) {
1402     if (id->is_ConstraintCast()) {
1403       return id->in(1);
1404     } else {
1405       // Don't know how to disentangle this value.
1406       return NULL;
1407     }
1408   }
1409 
1410   return id;
1411 }
1412 
1413 //------------------------------Identity---------------------------------------
1414 // Check for Region being Identity.
1415 Node* PhiNode::Identity(PhaseGVN* phase) {
1416   // Check for no merging going on
1417   // (There used to be special-case code here when this->region->is_Loop.
1418   // It would check for a tributary phi on the backedge that fed the main phi
1419   // trivially, perhaps with a single cast.  The unique_input method
1420   // does all this and more, by reducing such tributaries to 'this'.)
1421   Node* uin = unique_input(phase, false);
1422   if (uin != NULL) {
1423     return uin;
1424   }
1425 
1426   int true_path = is_diamond_phi();
1427   // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet.
1428   if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) {
1429     Node* id = is_cmove_id(phase, true_path);
1430     if (id != NULL) {
1431       return id;
1432     }
1433   }
1434 
1435   if (phase->is_IterGVN()) {
1436     Node* m = try_clean_mem_phi(phase);
1437     if (m != NULL) {
1438       return m;
1439     }
1440   }
1441 
1442 
1443   // Looking for phis with identical inputs.  If we find one that has
1444   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1445   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1446       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1447     uint phi_len = req();
1448     Node* phi_reg = region();
1449     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1450       Node* u = phi_reg->fast_out(i);
1451       if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1452           u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1453           u->req() == phi_len) {
1454         for (uint j = 1; j < phi_len; j++) {
1455           if (in(j) != u->in(j)) {
1456             u = NULL;
1457             break;
1458           }
1459         }
1460         if (u != NULL) {
1461           return u;
1462         }
1463       }
1464     }
1465   }
1466 
1467   return this;                     // No identity
1468 }
1469 
1470 //-----------------------------unique_input------------------------------------
1471 // Find the unique value, discounting top, self-loops, and casts.
1472 // Return top if there are no live inputs, and NULL if there are multiple distinct inputs.
1473 Node* PhiNode::unique_input(PhaseTransform* phase, bool uncast) {
1474   //  1) One unique direct input,
1475   // or if uncast is true:
1476   //  2) some of the inputs have an intervening ConstraintCast
1477   //  3) an input is a self loop
1478   //
1479   //  1) input   or   2) input     or   3) input __
1480   //     /   \           /   \               \  /  \
1481   //     \   /          |    cast             phi  cast
1482   //      phi            \   /               /  \  /
1483   //                      phi               /    --
1484 
1485   Node* r = in(0);                      // RegionNode
1486   Node* input = NULL; // The unique direct input (maybe uncasted = ConstraintCasts removed)
1487 
1488   for (uint i = 1, cnt = req(); i < cnt; ++i) {
1489     Node* rc = r->in(i);
1490     if (rc == NULL || phase->type(rc) == Type::TOP)
1491       continue;                 // ignore unreachable control path
1492     Node* n = in(i);
1493     if (n == NULL)
1494       continue;
1495     Node* un = n;
1496     if (uncast) {
1497 #ifdef ASSERT
1498       Node* m = un->uncast();
1499 #endif
1500       while (un != NULL && un->req() == 2 && un->is_ConstraintCast()) {
1501         Node* next = un->in(1);
1502         if (phase->type(next)->isa_rawptr() && phase->type(un)->isa_oopptr()) {
1503           // risk exposing raw ptr at safepoint
1504           break;
1505         }
1506         un = next;
1507       }
1508       assert(m == un || un->in(1) == m, "Only expected at CheckCastPP from allocation");
1509     }
1510     if (un == NULL || un == this || phase->type(un) == Type::TOP) {
1511       continue; // ignore if top, or in(i) and "this" are in a data cycle
1512     }
1513     // Check for a unique input (maybe uncasted)
1514     if (input == NULL) {
1515       input = un;
1516     } else if (input != un) {
1517       input = NodeSentinel; // no unique input
1518     }
1519   }
1520   if (input == NULL) {
1521     return phase->C->top();        // no inputs
1522   }
1523 
1524   if (input != NodeSentinel) {
1525     return input;           // one unique direct input
1526   }
1527 
1528   // Nothing.
1529   return NULL;
1530 }
1531 
1532 //------------------------------is_x2logic-------------------------------------
1533 // Check for simple convert-to-boolean pattern
1534 // If:(C Bool) Region:(IfF IfT) Phi:(Region 0 1)
1535 // Convert the Phi to a Conv2B node.
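     //
     // For example, Java source along the lines of "b = (x != 0) ? 1 : 0" parses
     // into this diamond and collapses to Conv2B(x); when the overall sense ends
     // up inverted (an eq test, or swapped 0/1 constants), an XorI(Conv2B(x), 1)
     // flips the result.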
1536 static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) {
1537   assert(true_path !=0, "only diamond shape graph expected");
1538   // Convert the true/false index into an expected 0/1 return.
1539   // Map 2->0 and 1->1.
1540   int flipped = 2-true_path;
1541 
1542   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1543   // phi->region->if_proj->ifnode->bool->cmp
1544   Node *region = phi->in(0);
1545   Node *iff = region->in(1)->in(0);
1546   BoolNode *b = (BoolNode*)iff->in(1);
1547   const CmpNode *cmp = (CmpNode*)b->in(1);
1548 
1549   Node *zero = phi->in(1);
1550   Node *one  = phi->in(2);
1551   const Type *tzero = phase->type( zero );
1552   const Type *tone  = phase->type( one  );
1553 
1554   // Check for compare vs 0
1555   const Type *tcmp = phase->type(cmp->in(2));
1556   if( tcmp != TypeInt::ZERO && tcmp != TypePtr::NULL_PTR ) {
1557     // Allow cmp-vs-1 if the other input is bounded by 0-1
1558     if( !(tcmp == TypeInt::ONE && phase->type(cmp->in(1)) == TypeInt::BOOL) )
1559       return NULL;
1560     flipped = 1-flipped;        // Test is vs 1 instead of 0!
1561   }
1562 
1563   // Check for setting zero/one opposite expected
1564   if( tzero == TypeInt::ZERO ) {
1565     if( tone == TypeInt::ONE ) {
1566     } else return NULL;
1567   } else if( tzero == TypeInt::ONE ) {
1568     if( tone == TypeInt::ZERO ) {
1569       flipped = 1-flipped;
1570     } else return NULL;
1571   } else return NULL;
1572 
1573   // Check for boolean test backwards
1574   if( b->_test._test == BoolTest::ne ) {
1575   } else if( b->_test._test == BoolTest::eq ) {
1576     flipped = 1-flipped;
1577   } else return NULL;
1578 
1579   // Build int->bool conversion
1580   Node *n = new Conv2BNode(cmp->in(1));
1581   if( flipped )
1582     n = new XorINode( phase->transform(n), phase->intcon(1) );
1583 
1584   return n;
1585 }
1586 
1587 //------------------------------is_cond_add------------------------------------
1588 // Check for simple conditional add pattern:  "(P < Q) ? X+Y : X;"
1589 // To be profitable the control flow has to disappear; there can be no other
1590 // values merging here.  We replace the test-and-branch with:
1591 // "((sgn(P-Q))&Y) + X".  Basically, convert "(P < Q)" into 0 or -1 by
1592 // moving the carry bit from (P-Q) into a register with 'sbb EAX,EAX'.
1593 // Then convert Y to 0-or-Y and finally add.
1594 // This is a key transform for SpecJava _201_compress.
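     //
     // As a small worked example: for "r = (p < q) ? x + y : x" the code below
     // emits CmpLTMask(p,q), which is -1 when p < q and 0 otherwise, so
     // (CmpLTMask(p,q) & y) + x computes the same result without a branch.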
1595 static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) {
1596   assert(true_path !=0, "only diamond shape graph expected");
1597 
1598   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1599   // phi->region->if_proj->ifnode->bool->cmp
1600   RegionNode *region = (RegionNode*)phi->in(0);
1601   Node *iff = region->in(1)->in(0);
1602   BoolNode* b = iff->in(1)->as_Bool();
1603   const CmpNode *cmp = (CmpNode*)b->in(1);
1604 
1605   // Make sure only merging this one phi here
1606   if (region->has_unique_phi() != phi)  return NULL;
1607 
1608   // Make sure each arm of the diamond has exactly one output, which we assume
1609   // is the region.  Otherwise, the control flow won't disappear.
1610   if (region->in(1)->outcnt() != 1) return NULL;
1611   if (region->in(2)->outcnt() != 1) return NULL;
1612 
1613   // Check for "(P < Q)" of type signed int
1614   if (b->_test._test != BoolTest::lt)  return NULL;
1615   if (cmp->Opcode() != Op_CmpI)        return NULL;
1616 
1617   Node *p = cmp->in(1);
1618   Node *q = cmp->in(2);
1619   Node *n1 = phi->in(  true_path);
1620   Node *n2 = phi->in(3-true_path);
1621 
1622   int op = n1->Opcode();
1623   if( op != Op_AddI           // Need zero as additive identity
1624       /*&&op != Op_SubI &&
1625       op != Op_AddP &&
1626       op != Op_XorI &&
1627       op != Op_OrI*/ )
1628     return NULL;
1629 
1630   Node *x = n2;
1631   Node *y = NULL;
1632   if( x == n1->in(1) ) {
1633     y = n1->in(2);
1634   } else if( x == n1->in(2) ) {
1635     y = n1->in(1);
1636   } else return NULL;
1637 
1638   // Not so profitable if compare and add are constants
1639   if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() )
1640     return NULL;
1641 
1642   Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) );
1643   Node *j_and   = phase->transform( new AndINode(cmplt,y) );
1644   return new AddINode(j_and,x);
1645 }
1646 
1647 //------------------------------is_absolute------------------------------------
1648 // Check for absolute value.
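     // The idiom recognized below is a diamond that selects between x and (0 - x)
     // based on a compare of x against zero, e.g. roughly "d = (d > 0.0) ? d : 0.0 - d",
     // which becomes AbsD(d) (AbsI/AbsL only when the matcher supports them).  When
     // the zero sits on the other side of the compare, the selected value is -|x|,
     // so the result is re-negated via a subtract from zero (the 'flip' case below).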
1649 static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) {
1650   assert(true_path !=0, "only diamond shape graph expected");
1651 
1652   int  cmp_zero_idx = 0;        // Index of compare input where to look for zero
1653   int  phi_x_idx = 0;           // Index of phi input where to find naked x
1654 
1655   // ABS ends with the merge of 2 control flow paths.
1656   // Find the false path from the true path. With only 2 inputs, 3 - x works nicely.
1657   int false_path = 3 - true_path;
1658 
1659   // is_diamond_phi() has guaranteed the correctness of the nodes sequence:
1660   // phi->region->if_proj->ifnode->bool->cmp
1661   BoolNode *bol = phi_root->in(0)->in(1)->in(0)->in(1)->as_Bool();
1662   Node *cmp = bol->in(1);
1663 
1664   // Check bool sense
1665   if (cmp->Opcode() == Op_CmpF || cmp->Opcode() == Op_CmpD) {
1666     switch (bol->_test._test) {
1667     case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = true_path;  break;
1668     case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
1669     case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = true_path;  break;
1670     case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = false_path; break;
1671     default:           return NULL;                              break;
1672     }
1673   } else if (cmp->Opcode() == Op_CmpI || cmp->Opcode() == Op_CmpL) {
1674     switch (bol->_test._test) {
1675     case BoolTest::lt:
1676     case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break;
1677     case BoolTest::gt:
1678     case BoolTest::ge: cmp_zero_idx = 2; phi_x_idx = true_path;  break;
1679     default:           return NULL;                              break;
1680     }
1681   }
1682 
1683   // Test is next
1684   const Type *tzero = NULL;
1685   switch (cmp->Opcode()) {
1686   case Op_CmpI:    tzero = TypeInt::ZERO; break;  // Integer ABS
1687   case Op_CmpL:    tzero = TypeLong::ZERO; break; // Long ABS
1688   case Op_CmpF:    tzero = TypeF::ZERO; break; // Float ABS
1689   case Op_CmpD:    tzero = TypeD::ZERO; break; // Double ABS
1690   default: return NULL;
1691   }
1692 
1693   // Find zero input of compare; the other input is being abs'd
1694   Node *x = NULL;
1695   bool flip = false;
1696   if( phase->type(cmp->in(cmp_zero_idx)) == tzero ) {
1697     x = cmp->in(3 - cmp_zero_idx);
1698   } else if( phase->type(cmp->in(3 - cmp_zero_idx)) == tzero ) {
1699     // The test is inverted, we should invert the result...
1700     x = cmp->in(cmp_zero_idx);
1701     flip = true;
1702   } else {
1703     return NULL;
1704   }
1705 
1706   // Next get the 2 pieces being selected, one is the original value
1707   // and the other is the negated value.
1708   if( phi_root->in(phi_x_idx) != x ) return NULL;
1709 
1710   // Check other phi input for subtract node
1711   Node *sub = phi_root->in(3 - phi_x_idx);
1712 
1713   bool is_sub = sub->Opcode() == Op_SubF || sub->Opcode() == Op_SubD ||
1714                 sub->Opcode() == Op_SubI || sub->Opcode() == Op_SubL;
1715 
1716   // Allow only Sub(0,X) and fail out for all others; Neg is not OK
1717   if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return NULL;
1718 
1719   if (tzero == TypeF::ZERO) {
1720     x = new AbsFNode(x);
1721     if (flip) {
1722       x = new SubFNode(sub->in(1), phase->transform(x));
1723     }
1724   } else if (tzero == TypeD::ZERO) {
1725     x = new AbsDNode(x);
1726     if (flip) {
1727       x = new SubDNode(sub->in(1), phase->transform(x));
1728     }
1729   } else if (tzero == TypeInt::ZERO && Matcher::match_rule_supported(Op_AbsI)) {
1730     x = new AbsINode(x);
1731     if (flip) {
1732       x = new SubINode(sub->in(1), phase->transform(x));
1733     }
1734   } else if (tzero == TypeLong::ZERO && Matcher::match_rule_supported(Op_AbsL)) {
1735     x = new AbsLNode(x);
1736     if (flip) {
1737       x = new SubLNode(sub->in(1), phase->transform(x));
1738     }
1739   } else return NULL;
1740 
1741   return x;
1742 }
1743 
1744 //------------------------------split_once-------------------------------------
1745 // Helper for split_flow_path
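     // Roughly: every phi input equal to 'val' has the corresponding edge of 'n'
     // (the old Region, or a sibling Phi on it) moved onto 'newn', and 'newn' is
     // then appended as a new input of 'n'.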
1746 static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) {
1747   igvn->hash_delete(n);         // Remove from hash before hacking edges
1748 
1749   uint j = 1;
1750   for (uint i = phi->req()-1; i > 0; i--) {
1751     if (phi->in(i) == val) {   // Found a path with val?
1752       // Add to NEW Region/Phi, no DU info
1753       newn->set_req( j++, n->in(i) );
1754       // Remove from OLD Region/Phi
1755       n->del_req(i);
1756     }
1757   }
1758 
1759   // Register the new node but do not transform it.  Cannot transform until the
1760   // entire Region/Phi conglomerate has been hacked as a single huge transform.
1761   igvn->register_new_node_with_optimizer( newn );
1762 
1763   // Now I can point to the new node.
1764   n->add_req(newn);
1765   igvn->_worklist.push(n);
1766 }
1767 
1768 //------------------------------split_flow_path--------------------------------
1769 // Check for merging identical values and split flow paths
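     //
     // For example, if this phi merges the same constant along several paths,
     // say Phi(R, con, x, con, y), the paths carrying 'con' are peeled off onto a
     // new Region (with matching new Phis for every other Phi on R), leaving this
     // phi as roughly Phi(R', x, y, con) with a single edge for the constant.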
1770 static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) {
1771   BasicType bt = phi->type()->basic_type();
1772   if( bt == T_ILLEGAL || type2size[bt] <= 0 )
1773     return NULL;                // Bail out on funny non-value stuff
1774   if( phi->req() <= 3 )         // Need at least 2 matched inputs and a
1775     return NULL;                // third unequal input to be worth doing
1776 
1777   // Scan for a constant
1778   uint i;
1779   for( i = 1; i < phi->req()-1; i++ ) {
1780     Node *n = phi->in(i);
1781     if( !n ) return NULL;
1782     if( phase->type(n) == Type::TOP ) return NULL;
1783     if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass )
1784       break;
1785   }
1786   if( i >= phi->req() )         // Only split for constants
1787     return NULL;
1788 
1789   Node *val = phi->in(i);       // Constant to split for
1790   uint hit = 0;                 // Number of times it occurs
1791   Node *r = phi->region();
1792 
1793   for( ; i < phi->req(); i++ ){ // Count occurrences of constant
1794     Node *n = phi->in(i);
1795     if( !n ) return NULL;
1796     if( phase->type(n) == Type::TOP ) return NULL;
1797     if( phi->in(i) == val ) {
1798       hit++;
1799       if (PhaseIdealLoop::find_predicate(r->in(i)) != NULL) {
1800         return NULL;            // don't split loop entry path
1801       }
1802     }
1803   }
1804 
1805   if( hit <= 1 ||               // Make sure we find 2 or more
1806       hit == phi->req()-1 )     // and not ALL the same value
1807     return NULL;
1808 
1809   // Now start splitting out the flow paths that merge the same value.
1810   // Split first the RegionNode.
1811   PhaseIterGVN *igvn = phase->is_IterGVN();
1812   RegionNode *newr = new RegionNode(hit+1);
1813   split_once(igvn, phi, val, r, newr);
1814 
1815   // Now split all other Phis than this one
1816   for (DUIterator_Fast kmax, k = r->fast_outs(kmax); k < kmax; k++) {
1817     Node* phi2 = r->fast_out(k);
1818     if( phi2->is_Phi() && phi2->as_Phi() != phi ) {
1819       PhiNode *newphi = PhiNode::make_blank(newr, phi2);
1820       split_once(igvn, phi, val, phi2, newphi);
1821     }
1822   }
1823 
1824   // Clean up this guy
1825   igvn->hash_delete(phi);
1826   for( i = phi->req()-1; i > 0; i-- ) {
1827     if( phi->in(i) == val ) {
1828       phi->del_req(i);
1829     }
1830   }
1831   phi->add_req(val);
1832 
1833   return phi;
1834 }
1835 
1836 //=============================================================================
1837 //------------------------------simple_data_loop_check-------------------------
1838 //  Try to determine whether the phi node is in a simple safe/unsafe data loop.
1839 //  Returns:
1840 // enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop };
1841 // Safe       - safe case when the phi and its inputs reference only safe data
1842 //              nodes;
1843 // Unsafe     - the phi and its inputs reference unsafe data nodes but there
1844 //              is no reference back to the phi - need a graph walk
1845 //              to determine if it is in a loop;
1846 // UnsafeLoop - unsafe case when the phi references itself directly or through
1847 //              an unsafe data node.
1848 //  Note: a safe data node is a node which never (or only harmlessly) references
1849 //  itself during GVN transformations. For now these are Con, Proj, Phi, CastPP, CheckCastPP.
1850 //  I mark Phi nodes as safe nodes not only because they can reference themselves
1851 //  but also to prevent mistaking the fallthrough case inside an outer loop
1852 //  for a dead loop when the phi references itself through another phi.
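     //
     // For example, with in = AddI(this, 1) the check below sees 'this' among the
     // AddI's inputs and reports UnsafeLoop; with in = AddI(LoadI(...), 1) the
     // LoadI is an unsafe node that does not obviously reach back to this phi, so
     // Unsafe is reported and the caller falls back to a full graph walk.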
1853 PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const {
1854   // It is an unsafe loop if the phi node references itself directly.
1855   if (in == (Node*)this)
1856     return UnsafeLoop; // Unsafe loop
1857   // Unsafe loop if the phi node references itself through an unsafe data node.
1858   // Exclude cases with null inputs or data nodes which could reference
1859   // themselves (safe for dead loops).
1860   if (in != NULL && !in->is_dead_loop_safe()) {
1861     // Check the inputs of the phi's inputs also.
1862     // It is much less expensive than a full graph walk.
1863     uint cnt = in->req();
1864     uint i = (in->is_Proj() && !in->is_CFG())  ? 0 : 1;
1865     for (; i < cnt; ++i) {
1866       Node* m = in->in(i);
1867       if (m == (Node*)this)
1868         return UnsafeLoop; // Unsafe loop
1869       if (m != NULL && !m->is_dead_loop_safe()) {
1870         // Check the most common case (about 30% of all cases):
1871         // phi->Load/Store->AddP->(ConP ConP Con)/(Parm Parm Con).
1872         Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : NULL;
1873         if (m1 == (Node*)this)
1874           return UnsafeLoop; // Unsafe loop
1875         if (m1 != NULL && m1 == m->in(2) &&
1876             m1->is_dead_loop_safe() && m->in(3)->is_Con()) {
1877           continue; // Safe case
1878         }
1879         // The phi references an unsafe node - need full analysis.
1880         return Unsafe;
1881       }
1882     }
1883   }
1884   return Safe; // Safe case - we can optimize the phi node.
1885 }
1886 
1887 //------------------------------is_unsafe_data_reference-----------------------
1888 // If the phi can be reached through the data input - it is a data loop.
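     // The walk below is a depth-first traversal over data inputs, starting from
     // the candidate unique input and pruning at dead-loop-safe nodes; it reports
     // true as soon as it reaches this phi again.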
1889 bool PhiNode::is_unsafe_data_reference(Node *in) const {
1890   assert(req() > 1, "");
1891   // First, check simple cases when phi references itself directly or
1892   // through another node.
1893   LoopSafety safety = simple_data_loop_check(in);
1894   if (safety == UnsafeLoop)
1895     return true;  // phi references itself - unsafe loop
1896   else if (safety == Safe)
1897     return false; // Safe case - phi could be replaced with the unique input.
1898 
1899   // Unsafe case when we should go through data graph to determine
1900   // if the phi references itself.
1901 
1902   ResourceMark rm;
1903 
1904   Node_List nstack;
1905   VectorSet visited;
1906 
1907   nstack.push(in); // Start with unique input.
1908   visited.set(in->_idx);
1909   while (nstack.size() != 0) {
1910     Node* n = nstack.pop();
1911     uint cnt = n->req();
1912     uint i = (n->is_Proj() && !n->is_CFG()) ? 0 : 1;
1913     for (; i < cnt; i++) {
1914       Node* m = n->in(i);
1915       if (m == (Node*)this) {
1916         return true;    // Data loop
1917       }
1918       if (m != NULL && !m->is_dead_loop_safe()) { // Only look for unsafe cases.
1919         if (!visited.test_set(m->_idx))
1920           nstack.push(m);
1921       }
1922     }
1923   }
1924   return false; // The phi is not reachable from its inputs
1925 }
1926 
1927 // Is this Phi's region or some inputs to the region enqueued for IGVN
1928 // and so could cause the region to be optimized out?
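     // Roughly: for each region input that is an If projection, check whether the
     // projection, its If, the If's Bool, or the Bool's Cmp is already on the IGVN
     // worklist; if any is, push this phi back onto the worklist and report that
     // the transformation should be delayed.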
1929 bool PhiNode::wait_for_region_igvn(PhaseGVN* phase) {
1930   PhaseIterGVN* igvn = phase->is_IterGVN();
1931   Unique_Node_List& worklist = igvn->_worklist;
1932   bool delay = false;
1933   Node* r = in(0);
1934   for (uint j = 1; j < req(); j++) {
1935     Node* rc = r->in(j);
1936     Node* n = in(j);
1937     if (rc != NULL &&
1938         rc->is_Proj()) {
1939       if (worklist.member(rc)) {
1940         delay = true;
1941       } else if (rc->in(0) != NULL &&
1942                  rc->in(0)->is_If()) {
1943         if (worklist.member(rc->in(0))) {
1944           delay = true;
1945         } else if (rc->in(0)->in(1) != NULL &&
1946                    rc->in(0)->in(1)->is_Bool()) {
1947           if (worklist.member(rc->in(0)->in(1))) {
1948             delay = true;
1949           } else if (rc->in(0)->in(1)->in(1) != NULL &&
1950                      rc->in(0)->in(1)->in(1)->is_Cmp()) {
1951             if (worklist.member(rc->in(0)->in(1)->in(1))) {
1952               delay = true;
1953             }
1954           }
1955         }
1956       }
1957     }
1958   }
1959   if (delay) {
1960     worklist.push(this);
1961   }
1962   return delay;
1963 }
1964 
1965 // Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
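     // Roughly: Phi(Region, InlineType, null, ...) is turned into a single
     // InlineTypeNode whose oop, is_init and field inputs are new phis over the
     // same Region, built with clone_with_phis() and then merged input by input
     // with merge_with().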
1966 InlineTypeNode* PhiNode::push_inline_types_through(PhaseGVN* phase, bool can_reshape, ciInlineKlass* vk, bool is_init) {
1967   InlineTypeNode* vt = InlineTypeNode::make_null(*phase, vk)->clone_with_phis(phase, in(0), is_init);
1968   if (can_reshape) {
1969     // Replace phi right away to be able to use the inline
1970     // type node when reaching the phi again through data loops.
1971     PhaseIterGVN* igvn = phase->is_IterGVN();
1972     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1973       Node* u = fast_out(i);
1974       igvn->rehash_node_delayed(u);
1975       imax -= u->replace_edge(this, vt);
1976       --i;
1977     }
1978     igvn->rehash_node_delayed(this);
1979     assert(outcnt() == 0, "should be dead now");
1980   }
1981   ResourceMark rm;
1982   Node_List casts;
1983   for (uint i = 1; i < req(); ++i) {
1984     Node* n = in(i);
1985     while (n->is_ConstraintCast()) {
1986       casts.push(n);
1987       n = n->in(1);
1988     }
1989     if (phase->type(n)->is_zero_type()) {
1990       n = InlineTypeNode::make_null(*phase, vk);
1991     } else if (n->is_Phi()) {
1992       assert(can_reshape, "can only handle phis during IGVN");
1993       n = phase->transform(n->as_Phi()->push_inline_types_through(phase, can_reshape, vk, is_init));
1994     }
1995     while (casts.size() != 0) {
1996       // Push the cast(s) through the InlineTypeNode
1997       Node* cast = casts.pop()->clone();
1998       cast->set_req_X(1, n->as_InlineType()->get_oop(), phase);
1999       n = n->clone();
2000       n->as_InlineType()->set_oop(phase->transform(cast));
2001       n = phase->transform(n);
2002     }
2003     bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
2004     vt->merge_with(phase, n->as_InlineType(), i, transform);
2005   }
2006   return vt;
2007 }
2008 
2009 //------------------------------Ideal------------------------------------------
2010 // Return a node which is more "ideal" than the current node.  Must preserve
2011 // the CFG, but we can still strip out dead paths.
2012 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2013   Node *r = in(0);              // RegionNode
2014   assert(r != NULL && r->is_Region(), "this phi must have a region");
2015   assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
2016 
2017   // Note: During parsing, phis are often transformed before their regions.
2018   // This means we have to use type_or_null to defend against untyped regions.
2019   if( phase->type_or_null(r) == Type::TOP ) // Dead code?
2020     return NULL;                // No change
2021 
2022   Node *top = phase->C->top();
2023   bool new_phi = (outcnt() == 0); // transforming new Phi
2024   // No change for igvn if new phi is not hooked
2025   if (new_phi && can_reshape)
2026     return NULL;
2027 
2028   // There are 2 situations when only one valid phi input is left
2029   // (in addition to the Region input).
2030   // One: the region is not a loop - replace the phi with this input.
2031   // Two: the region is a loop - replace the phi with top since this data path is dead
2032   //                       and we need to break the dead data loop.
2033   Node* progress = NULL;        // Record if any progress made
2034   for( uint j = 1; j < req(); ++j ){ // For all paths in
2035     // Check unreachable control paths
2036     Node* rc = r->in(j);
2037     Node* n = in(j);            // Get the input
2038     if (rc == NULL || phase->type(rc) == Type::TOP) {
2039       if (n != top) {           // Not already top?
2040         PhaseIterGVN *igvn = phase->is_IterGVN();
2041         if (can_reshape && igvn != NULL) {
2042           igvn->_worklist.push(r);
2043         }
2044         // Nuke it down
2045         set_req_X(j, top, phase);
2046         progress = this;        // Record progress
2047       }
2048     }
2049   }
2050 
2051   if (can_reshape && outcnt() == 0) {
2052     // set_req() above may kill outputs if Phi is referenced
2053     // only by itself on the dead (top) control path.
2054     return top;
2055   }
2056 
2057   bool uncasted = false;
2058   Node* uin = unique_input(phase, false);
2059   if (uin == NULL && can_reshape &&
2060       // If there is a chance that the region can be optimized out do
2061       // not add a cast node that we can't remove yet.
2062       !wait_for_region_igvn(phase)) {
2063     uncasted = true;
2064     uin = unique_input(phase, true);
2065   }
2066   if (uin == top) {             // Simplest case: no alive inputs.
2067     if (can_reshape)            // IGVN transformation
2068       return top;
2069     else
2070       return NULL;              // Identity will return TOP
2071   } else if (uin != NULL) {
2072     // Only one not-NULL unique input path is left.
2073     // Determine if this input is backedge of a loop.
2074     // (Skip new phis which have no uses and dead regions).
2075     if (outcnt() > 0 && r->in(0) != NULL) {
2076       if (is_data_loop(r->as_Region(), uin, phase)) {
2077         // Break this data loop to avoid creation of a dead loop.
2078         if (can_reshape) {
2079           return top;
2080         } else {
2081           // We can't return top if we are in the Parse phase - cut inputs only
2082           // and let Identity handle the case.
2083           replace_edge(uin, top, phase);
2084           return NULL;
2085         }
2086       }
2087     }
2088 
2089     if (uncasted) {
2090       // Add cast nodes between the phi to be removed and its unique input.
2091       // Wait until after parsing for the type information to propagate from the casts.
2092       assert(can_reshape, "Invalid during parsing");
2093       const Type* phi_type = bottom_type();
2094       // Add casts to carry the control dependency of the Phi that is
2095       // going away
2096       Node* cast = NULL;
2097       if (phi_type->isa_ptr()) {
2098         const Type* uin_type = phase->type(uin);
2099         if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) {
2100           cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency);
2101         } else {
2102           // Use a CastPP for a cast to not null and a CheckCastPP for
2103           // a cast to a new klass (and both if both null-ness and
2104           // klass change).
2105 
2106           // If the type of phi is not null but the type of uin may be
2107           // null, uin's type must be cast to not-null
2108           if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() &&
2109               uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) {
2110             cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, TypePtr::NOTNULL, ConstraintCastNode::StrongDependency);
2111           }
2112 
2113           // If the types of phi and uin, both cast to not-null,
2114           // differ, the klass of uin must be (check)cast to match
2115           // that of phi
2116           if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) {
2117             Node* n = uin;
2118             if (cast != NULL) {
2119               cast = phase->transform(cast);
2120               n = cast;
2121             }
2122             cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, ConstraintCastNode::StrongDependency);
2123           }
2124           if (cast == NULL) {
2125             cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency);
2126           }
2127         }
2128       } else {
2129         cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency);
2130       }
2131       assert(cast != NULL, "cast should be set");
2132       cast = phase->transform(cast);
2133       // set all inputs to the new cast(s) so the Phi is removed by Identity
2134       PhaseIterGVN* igvn = phase->is_IterGVN();
2135       for (uint i = 1; i < req(); i++) {
2136         set_req_X(i, cast, igvn);
2137       }
2138       uin = cast;
2139     }
2140 
2141     // One unique input.
2142     debug_only(Node* ident = Identity(phase));
2143     // The unique input must eventually be detected by the Identity call.
2144 #ifdef ASSERT
2145     if (ident != uin && !ident->is_top()) {
2146       // print this output before failing assert
2147       r->dump(3);
2148       this->dump(3);
2149       ident->dump();
2150       uin->dump();
2151     }
2152 #endif
2153     assert(ident == uin || ident->is_top(), "Identity must clean this up");
2154     return NULL;
2155   }
2156 
2157   Node* opt = NULL;
2158   int true_path = is_diamond_phi();
2159   if (true_path != 0 &&
2160       // If one of the diamond's branches is in the process of dying, then the Phi's input for that branch might transform
2161       // to top. If that happens replacing the Phi with an operation that consumes the Phi's inputs will cause the Phi
2162       // to be replaced by top. To prevent that, delay the transformation until the branch has a chance to be removed.
2163       !(can_reshape && wait_for_region_igvn(phase))) {
2164     // Check for CMove'ing identity. If it would be unsafe,
2165     // handle it here. In the safe case, let Identity handle it.
2166     Node* unsafe_id = is_cmove_id(phase, true_path);
2167     if( unsafe_id != NULL && is_unsafe_data_reference(unsafe_id) )
2168       opt = unsafe_id;
2169 
2170     // Check for simple convert-to-boolean pattern
2171     if( opt == NULL )
2172       opt = is_x2logic(phase, this, true_path);
2173 
2174     // Check for absolute value
2175     if( opt == NULL )
2176       opt = is_absolute(phase, this, true_path);
2177 
2178     // Check for conditional add
2179     if( opt == NULL && can_reshape )
2180       opt = is_cond_add(phase, this, true_path);
2181 
2182     // These 4 optimizations could subsume the phi:
2183     // have to check for a dead data loop creation.
2184     if( opt != NULL ) {
2185       if( opt == unsafe_id || is_unsafe_data_reference(opt) ) {
2186         // Found dead loop.
2187         if( can_reshape )
2188           return top;
2189         // We can't return top if we are in Parse phase - cut inputs only
2190         // to stop further optimizations for this phi. Identity will return TOP.
2191         assert(req() == 3, "only diamond merge phi here");
2192         set_req(1, top);
2193         set_req(2, top);
2194         return NULL;
2195       } else {
2196         return opt;
2197       }
2198     }
2199   }
2200 
2201   // Check for merging identical values and split flow paths
2202   if (can_reshape) {
2203     opt = split_flow_path(phase, this);
2204     // This optimization only modifies phi - don't need to check for dead loop.
2205     assert(opt == NULL || opt == this, "do not elide phi");
2206     if (opt != NULL)  return opt;
2207   }
2208 
2209   if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
2210     // Try to undo Phi of AddP:
2211     // (Phi (AddP base address offset) (AddP base2 address2 offset2))
2212     // becomes:
2213     // newbase := (Phi base base2)
2214     // newaddress := (Phi address address2)
2215     // newoffset := (Phi offset offset2)
2216     // (AddP newbase newaddress newoffset)
2217     //
2218     // This occurs as a result of unsuccessful split_thru_phi and
2219     // interferes with taking advantage of addressing modes. See the
2220     // clone_shift_expressions code in matcher.cpp
2221     Node* addp = in(1);
2222     Node* base = addp->in(AddPNode::Base);
2223     Node* address = addp->in(AddPNode::Address);
2224     Node* offset = addp->in(AddPNode::Offset);
2225     if (base != NULL && address != NULL && offset != NULL &&
2226         !base->is_top() && !address->is_top() && !offset->is_top()) {
2227       const Type* base_type = base->bottom_type();
2228       const Type* address_type = address->bottom_type();
2229       // make sure that all the inputs are similar to the first one,
2230       // i.e. AddP with base == address and same offset as first AddP
2231       bool doit = true;
2232       for (uint i = 2; i < req(); i++) {
2233         if (in(i) == NULL ||
2234             in(i)->Opcode() != Op_AddP ||
2235             in(i)->in(AddPNode::Base) == NULL ||
2236             in(i)->in(AddPNode::Address) == NULL ||
2237             in(i)->in(AddPNode::Offset) == NULL ||
2238             in(i)->in(AddPNode::Base)->is_top() ||
2239             in(i)->in(AddPNode::Address)->is_top() ||
2240             in(i)->in(AddPNode::Offset)->is_top()) {
2241           doit = false;
2242           break;
2243         }
2244         if (in(i)->in(AddPNode::Base) != base) {
2245           base = NULL;
2246         }
2247         if (in(i)->in(AddPNode::Offset) != offset) {
2248           offset = NULL;
2249         }
2250         if (in(i)->in(AddPNode::Address) != address) {
2251           address = NULL;
2252         }
2253         // Accumulate type for resulting Phi
2254         base_type = base_type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
2255         address_type = address_type->meet_speculative(in(i)->in(AddPNode::Address)->bottom_type());
2256       }
2257       if (doit && base == NULL) {
2258         // Check for neighboring AddP nodes in a tree.
2259         // If they have a base, use it.
2260         for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
2261           Node* u = this->fast_out(k);
2262           if (u->is_AddP()) {
2263             Node* base2 = u->in(AddPNode::Base);
2264             if (base2 != NULL && !base2->is_top()) {
2265               if (base == NULL)
2266                 base = base2;
2267               else if (base != base2)
2268                 { doit = false; break; }
2269             }
2270           }
2271         }
2272       }
2273       if (doit) {
2274         if (base == NULL) {
2275           base = new PhiNode(in(0), base_type, NULL);
2276           for (uint i = 1; i < req(); i++) {
2277             base->init_req(i, in(i)->in(AddPNode::Base));
2278           }
2279           phase->is_IterGVN()->register_new_node_with_optimizer(base);
2280         }
2281         if (address == NULL) {
2282           address = new PhiNode(in(0), address_type, NULL);
2283           for (uint i = 1; i < req(); i++) {
2284             address->init_req(i, in(i)->in(AddPNode::Address));
2285           }
2286           phase->is_IterGVN()->register_new_node_with_optimizer(address);
2287         }
2288         if (offset == NULL) {
2289           offset = new PhiNode(in(0), TypeX_X, NULL);
2290           for (uint i = 1; i < req(); i++) {
2291             offset->init_req(i, in(i)->in(AddPNode::Offset));
2292           }
2293           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2294         }
2295         return new AddPNode(base, address, offset);
2296       }
2297     }
2298   }
2299 
2300   // Split phis through memory merges, so that the memory merges will go away.
2301   // Piggy-back this transformation on the search for a unique input....
2302   // It will be as if the merged memory is the unique value of the phi.
2303   // (Do not attempt this optimization unless parsing is complete.
2304   // It would make the parser's memory-merge logic sick.)
2305   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2306   if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2307     // see if this phi should be sliced
2308     uint merge_width = 0;
2309     bool saw_self = false;
2310     // TODO revisit this with JDK-8247216
2311     bool mergemem_only = true;
2312     for( uint i=1; i<req(); ++i ) {// For all paths in
2313       Node *ii = in(i);
2314       // TOP inputs should not be counted as safe inputs because if the
2315       // Phi references itself through all other inputs then splitting the
2316       // Phi through memory merges would create a dead loop at a later stage.
2317       if (ii == top) {
2318         return NULL; // Delay optimization until graph is cleaned.
2319       }
2320       if (ii->is_MergeMem()) {
2321         MergeMemNode* n = ii->as_MergeMem();
2322         merge_width = MAX2(merge_width, n->req());
2323         saw_self = saw_self || (n->base_memory() == this);
2324       } else {
2325         mergemem_only = false;
2326       }
2327     }
2328 
2329     // This restriction is temporarily necessary to ensure termination:
2330     if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2331 
2332     if (merge_width > Compile::AliasIdxRaw) {
2333       // found at least one non-empty MergeMem
2334       const TypePtr* at = adr_type();
2335       if (at != TypePtr::BOTTOM) {
2336         // Patch the existing phi to select an input from the merge:
2337         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2338         //     Phi:AT1(...m1...)
2339         int alias_idx = phase->C->get_alias_index(at);
2340         for (uint i=1; i<req(); ++i) {
2341           Node *ii = in(i);
2342           if (ii->is_MergeMem()) {
2343             MergeMemNode* n = ii->as_MergeMem();
2344             // compress paths and change unreachable cycles to TOP
2345             // Otherwise we could keep updating the input indefinitely along a MergeMem cycle
2346             // Equivalent code is in MemNode::Ideal_common
2347             Node *m  = phase->transform(n);
2348             if (outcnt() == 0) {  // Above transform() may kill us!
2349               return top;
2350             }
2351             // If transformed to a MergeMem, get the desired slice
2352             // Otherwise the returned node represents memory for every slice
2353             Node *new_mem = (m->is_MergeMem()) ?
2354                              m->as_MergeMem()->memory_at(alias_idx) : m;
2355             // Update input if it is progress over what we have now
2356             if (new_mem != ii) {
2357               set_req_X(i, new_mem, phase->is_IterGVN());
2358               progress = this;
2359             }
2360           }
2361         }
2362       } else {
2363         // We know that at least one MergeMem->base_memory() == this
2364         // (saw_self == true). If all other inputs also reference this phi
2365         // (directly or through data nodes) - it is a dead loop.
2366         bool saw_safe_input = false;
2367         for (uint j = 1; j < req(); ++j) {
2368           Node* n = in(j);
2369           if (n->is_MergeMem()) {
2370             MergeMemNode* mm = n->as_MergeMem();
2371             if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) {
2372               // Skip this input if it references back to this phi or if the memory path is dead
2373               continue;
2374             }
2375           }
2376           if (!is_unsafe_data_reference(n)) {
2377             saw_safe_input = true; // found safe input
2378             break;
2379           }
2380         }
2381         if (!saw_safe_input) {
2382           // There is a dead loop: All inputs are either dead or reference back to this phi
2383           return top;
2384         }
2385 
2386         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2387         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2388         PhaseIterGVN* igvn = phase->is_IterGVN();
2389         assert(igvn != NULL, "sanity check");
2390         Node* hook = new Node(1);
2391         PhiNode* new_base = (PhiNode*) clone();
2392         // Must eagerly register phis, since they participate in loops.
2393         igvn->register_new_node_with_optimizer(new_base);
2394         hook->add_req(new_base);
2395 
2396         MergeMemNode* result = MergeMemNode::make(new_base);
2397         for (uint i = 1; i < req(); ++i) {
2398           Node *ii = in(i);
2399           if (ii->is_MergeMem()) {
2400             MergeMemNode* n = ii->as_MergeMem();
2401             if (igvn) {
2402               // TODO revisit this with JDK-8247216
2403               // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2404               igvn->_worklist.push(n);
2405             }
2406             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2407               // If we have not seen this slice yet, make a phi for it.
2408               bool made_new_phi = false;
2409               if (mms.is_empty()) {
2410                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2411                 made_new_phi = true;
2412                 igvn->register_new_node_with_optimizer(new_phi);
2413                 hook->add_req(new_phi);
2414                 mms.set_memory(new_phi);
2415               }
2416               Node* phi = mms.memory();
2417               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2418               phi->set_req(i, mms.memory2());
2419             }
2420           }
2421         }
2422         // Distribute all self-loops.
2423         { // (Extra braces to hide mms.)
2424           for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2425             Node* phi = mms.memory();
2426             for (uint i = 1; i < req(); ++i) {
2427               if (phi->in(i) == this)  phi->set_req(i, phi);
2428             }
2429           }
2430         }
2431         // Replace this phi node right away to cut it off from the graph, so it does not interfere with dead loop checks during the
2432         // transformations of the new phi nodes below. Otherwise, we could wrongly conclude that there is no dead loop
2433         // because we are finding this phi node again. Also set the type of the new MergeMem node in case we are also
2434         // visiting it in the transformations below.
2435         igvn->replace_node(this, result);
2436         igvn->set_type(result, result->bottom_type());
2437 
2438         // now transform the new nodes, and return the mergemem
2439         for (MergeMemStream mms(result); mms.next_non_empty(); ) {
2440           Node* phi = mms.memory();
2441           mms.set_memory(phase->transform(phi));
2442         }
2443         hook->destruct(igvn);
2444         // Replace self with the result.
2445         return result;
2446       }
2447     }
2448     //
2449     // Other optimizations on the memory chain
2450     //
2451     const TypePtr* at = adr_type();
2452     for( uint i=1; i<req(); ++i ) {// For all paths in
2453       Node *ii = in(i);
2454       Node *new_in = MemNode::optimize_memory_chain(ii, at, NULL, phase);
2455       if (ii != new_in ) {
2456         set_req(i, new_in);
2457         progress = this;
2458       }
2459     }
2460   }
2461 
2462 #ifdef _LP64
2463   // Push DecodeN/DecodeNKlass down through phi.
2464   // The rest of the phi graph will then transform by splitting EncodeP nodes up through the phis.
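       // For example, Phi(R, DecodeN(a), DecodeN(b)) becomes
       // DecodeN(Phi:narrowoop(R, a, b)); phi inputs that are themselves Phis are
       // wrapped in EncodeP/EncodePKlass so the new narrow phi stays well typed.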
2465   if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
2466     bool may_push = true;
2467     bool has_decodeN = false;
2468     bool is_decodeN = false;
2469     for (uint i=1; i<req(); ++i) {// For all paths in
2470       Node *ii = in(i);
2471       if (ii->is_DecodeNarrowPtr() && ii->bottom_type() == bottom_type()) {
2472         // Do the optimization only if a non-dead path exists.
2473         if (ii->in(1)->bottom_type() != Type::TOP) {
2474           has_decodeN = true;
2475           is_decodeN = ii->is_DecodeN();
2476         }
2477       } else if (!ii->is_Phi()) {
2478         may_push = false;
2479       }
2480     }
2481 
2482     if (has_decodeN && may_push) {
2483       PhaseIterGVN *igvn = phase->is_IterGVN();
2484       // Make narrow type for new phi.
2485       const Type* narrow_t;
2486       if (is_decodeN) {
2487         narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr());
2488       } else {
2489         narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr());
2490       }
2491       PhiNode* new_phi = new PhiNode(r, narrow_t);
2492       uint orig_cnt = req();
2493       for (uint i=1; i<req(); ++i) {// For all paths in
2494         Node *ii = in(i);
2495         Node* new_ii = NULL;
2496         if (ii->is_DecodeNarrowPtr()) {
2497           assert(ii->bottom_type() == bottom_type(), "sanity");
2498           new_ii = ii->in(1);
2499         } else {
2500           assert(ii->is_Phi(), "sanity");
2501           if (ii->as_Phi() == this) {
2502             new_ii = new_phi;
2503           } else {
2504             if (is_decodeN) {
2505               new_ii = new EncodePNode(ii, narrow_t);
2506             } else {
2507               new_ii = new EncodePKlassNode(ii, narrow_t);
2508             }
2509             igvn->register_new_node_with_optimizer(new_ii);
2510           }
2511         }
2512         new_phi->set_req(i, new_ii);
2513       }
2514       igvn->register_new_node_with_optimizer(new_phi, this);
2515       if (is_decodeN) {
2516         progress = new DecodeNNode(new_phi, bottom_type());
2517       } else {
2518         progress = new DecodeNKlassNode(new_phi, bottom_type());
2519       }
2520     }
2521   }
2522 #endif
2523 
2524   // Check recursively if inputs are either an inline type, constant null
2525   // or another Phi (including self references through data loops). If so,
2526   // push the inline types down through the phis to enable folding of loads.
2527   if (EnableValhalla && (_type->isa_ptr() || _type->isa_inlinetype()) && req() > 2) {
2528     ResourceMark rm;
2529     Unique_Node_List worklist;
2530     worklist.push(this);
2531     bool can_optimize = true;
2532     ciInlineKlass* vk = NULL;
2533     // true if all IsInit inputs of all InlineType* nodes are true
2534     bool is_init = true;
2535     Node_List casts;
2536 
2537     // TODO 8284443 We need to prevent endless pushing through
2538     // TODO 8284443 We could revisit the same node over and over again, right?
2539     // TestLWorld -XX:+UseZGC -DScenarios=0 -DTest=test69
2540     // TestLWorld -XX:-TieredCompilation -XX:-DoEscapeAnalysis -XX:+AlwaysIncrementalInline
2541     bool only_phi = (outcnt() != 0);
2542     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2543       Node* n = fast_out(i);
2544       if (n->is_InlineType() && n->in(1) == this) {
2545         can_optimize = false;
2546         break;
2547       }
2548       if (!n->is_Phi()) {
2549         only_phi = false;
2550       }
2551     }
2552     if (only_phi) {
2553       can_optimize = false;
2554     }
2555     for (uint next = 0; next < worklist.size() && can_optimize; next++) {
2556       Node* phi = worklist.at(next);
2557       for (uint i = 1; i < phi->req() && can_optimize; i++) {
2558         Node* n = phi->in(i);
2559         if (n == NULL) {
2560           can_optimize = false;
2561           break;
2562         }
2563         while (n->is_ConstraintCast()) {
2564           if (n->in(0) != NULL && n->in(0)->is_top()) {
2565             // Will die, don't optimize
2566             can_optimize = false;
2567             break;
2568           }
2569           casts.push(n);
2570           n = n->in(1);
2571         }
2572         const Type* t = phase->type(n);
2573         if (n->is_InlineType() && (vk == NULL || vk == t->inline_klass())) {
2574           vk = (vk == NULL) ? t->inline_klass() : vk;
2575           if (phase->find_int_con(n->as_InlineType()->get_is_init(), 0) != 1) {
2576             is_init = false;
2577           }
2578         } else if (n->is_Phi() && can_reshape && (n->bottom_type()->isa_ptr() || n->bottom_type()->isa_inlinetype())) {
2579           worklist.push(n);
2580         } else if (t->is_zero_type()) {
2581           is_init = false;
2582         } else {
2583           can_optimize = false;
2584         }
2585       }
2586     }
2587     // Check if cast nodes can be pushed through
2588     const Type* t = Type::get_const_type(vk);
2589     while (casts.size() != 0 && can_optimize && t != NULL) {
2590       Node* cast = casts.pop();
2591       if (t->filter(cast->bottom_type()) == Type::TOP) {
2592         can_optimize = false;
2593       }
2594     }
2595     if (can_optimize && vk != NULL) {
2596 // TODO 8275400
2597 //      assert(!_type->isa_ptr() || _type->maybe_null() || is_init, "Phi not null but a possible null was seen");
2598       return push_inline_types_through(phase, can_reshape, vk, is_init);
2599     }
2600   }
2601 
2602   // Phi (VB ... VB) => VB (Phi ...) (Phi ...)
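       // That is (a sketch, assuming all leaf inputs are VectorBox nodes of one
       // common box type and vector type):
       //
       //   Phi(Region, VectorBox(box1, vec1), VectorBox(box2, vec2))
       //
       // becomes
       //
       //   VectorBox(Phi(Region, box1, box2), Phi(Region, vec1, vec2))
       //
       // so the box oops and the vector values are merged by separate Phis.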
2603   if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) {
2604     progress = merge_through_phi(this, phase->is_IterGVN());
2605   }
2606 
2607   return progress;              // Return any progress
2608 }
2609 
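     // Clone the Phi structure rooted at root_phi, replacing each VectorBox leaf
     // by its input at index 'c' (VectorBoxNode::Box or VectorBoxNode::Value) and
     // giving every cloned Phi the type 't'. The walk mirrors the shape already
     // validated by merge_through_phi, so the "not optimizable" branch below is
     // not expected to be taken.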
2610 Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) {
2611   Node_Stack stack(1);
2612   VectorSet  visited;
2613   Node_List  node_map;
2614 
2615   stack.push(root_phi, 1); // ignore control
2616   visited.set(root_phi->_idx);
2617 
2618   Node* new_phi = new PhiNode(root_phi->in(0), t);
2619   node_map.map(root_phi->_idx, new_phi);
2620 
2621   while (stack.is_nonempty()) {
2622     Node* n   = stack.node();
2623     uint  idx = stack.index();
2624     assert(n->is_Phi(), "not a phi");
2625     if (idx < n->req()) {
2626       stack.set_index(idx + 1);
2627       Node* def = n->in(idx);
2628       if (def == NULL) {
2629         continue; // ignore dead path
2630       } else if (def->is_Phi()) { // inner node
2631         Node* new_phi = node_map[n->_idx];
2632         if (!visited.test_set(def->_idx)) { // not visited yet
2633           node_map.map(def->_idx, new PhiNode(def->in(0), t));
2634           stack.push(def, 1); // ignore control
2635         }
2636         Node* new_in = node_map[def->_idx];
2637         new_phi->set_req(idx, new_in);
2638       } else if (def->Opcode() == Op_VectorBox) { // leaf
2639         assert(n->is_Phi(), "not a phi");
2640         Node* new_phi = node_map[n->_idx];
2641         new_phi->set_req(idx, def->in(c));
2642       } else {
2643         assert(false, "not optimizable");
2644         return NULL;
2645       }
2646     } else {
2647       Node* new_phi = node_map[n->_idx];
2648       igvn->register_new_node_with_optimizer(new_phi, n);
2649       stack.pop();
2650     }
2651   }
2652   return new_phi;
2653 }
2654 
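     // Check whether every leaf reachable from root_phi through Phi inputs is a
     // VectorBox with one common box type and vector type. If so, build two
     // parallel Phi structures (one merging the box oops, one merging the vector
     // values) and combine them into a single new VectorBox; otherwise return NULL.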
2655 Node* PhiNode::merge_through_phi(Node* root_phi, PhaseIterGVN* igvn) {
2656   Node_Stack stack(1);
2657   VectorSet  visited;
2658 
2659   stack.push(root_phi, 1); // ignore control
2660   visited.set(root_phi->_idx);
2661 
2662   VectorBoxNode* cached_vbox = NULL;
2663   while (stack.is_nonempty()) {
2664     Node* n   = stack.node();
2665     uint  idx = stack.index();
2666     if (idx < n->req()) {
2667       stack.set_index(idx + 1);
2668       Node* in = n->in(idx);
2669       if (in == NULL) {
2670         continue; // ignore dead path
2671       } else if (in->isa_Phi()) {
2672         if (!visited.test_set(in->_idx)) {
2673           stack.push(in, 1); // ignore control
2674         }
2675       } else if (in->Opcode() == Op_VectorBox) {
2676         VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in);
2677         if (cached_vbox == NULL) {
2678           cached_vbox = vbox;
2679         } else if (vbox->vec_type() != cached_vbox->vec_type()) {
2680           // TODO: vector type mismatch can be handled with additional reinterpret casts
2681           assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent");
2682           return NULL; // not optimizable: vector type mismatch
2683         } else if (vbox->box_type() != cached_vbox->box_type()) {
2684           assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent");
2685           return NULL; // not optimizable: box type mismatch
2686         }
2687       } else {
2688         return NULL; // not optimizable: neither Phi nor VectorBox
2689       }
2690     } else {
2691       stack.pop();
2692     }
2693   }
2694 
2695   assert(cached_vbox != NULL, "sanity");
2696   const TypeInstPtr* btype = cached_vbox->box_type();
2697   const TypeVect*    vtype = cached_vbox->vec_type();
2698   Node* new_vbox_phi = clone_through_phi(root_phi, btype, VectorBoxNode::Box,   igvn);
2699   Node* new_vect_phi = clone_through_phi(root_phi, vtype, VectorBoxNode::Value, igvn);
2700   return new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, btype, vtype);
2701 }
2702 
2703 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2704   // First, take the short cut when we know it is a loop and the EntryControl data path is dead.
2705   // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2706   // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
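       //
       // A sketch of the loop case: for a loop phi i = Phi(Loop, init, next), the
       // unique live input 'uin' must be the entry value 'init' (possibly behind
       // casts); if instead the only remaining input comes around the backedge,
       // the phi may only feed itself and is conservatively treated as a data loop.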
2707   assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or fewer inputs");
2708   const bool is_loop = (r->is_Loop() && r->req() == 3);
2709   const Node* top = phase->C->top();
2710   if (is_loop) {
2711     return !uin->eqv_uncast(in(LoopNode::EntryControl));
2712   } else {
2713     // We have a data loop either if there is an unsafe data reference or if the region is unreachable.
2714     return is_unsafe_data_reference(uin)
2715            || (r->req() == 3 && (r->in(1) != top && r->in(2) == top && r->is_unreachable_region(phase)));
2716   }
2717 }
2718 
2719 //------------------------------is_tripcount-----------------------------------
2720 bool PhiNode::is_tripcount(BasicType bt) const {
2721   return (in(0) != NULL && in(0)->is_BaseCountedLoop() &&
2722           in(0)->as_BaseCountedLoop()->bt() == bt &&
2723           in(0)->as_BaseCountedLoop()->phi() == this);
2724 }
2725 
2726 //------------------------------in_RegMask-------------------------------------
2727 const RegMask &PhiNode::in_RegMask(uint i) const {
2728   return i ? out_RegMask() : RegMask::Empty;
2729 }
2730 
2731 const RegMask &PhiNode::out_RegMask() const {
2732   uint ideal_reg = _type->ideal_reg();
2733   assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" );
2734   if( ideal_reg == 0 ) return RegMask::Empty;
2735   assert(ideal_reg != Op_RegFlags, "flags register is not spillable");
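       // A Phi is not matched to a machine instruction, so its value may live in a
       // register or on the stack; hand back the spill-inclusive mask for this
       // ideal register class.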
2736   return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]);
2737 }
2738 
2739 #ifndef PRODUCT
2740 void PhiNode::dump_spec(outputStream *st) const {
2741   TypeNode::dump_spec(st);
2742   if (is_tripcount(T_INT) || is_tripcount(T_LONG)) {
2743     st->print(" #tripcount");
2744   }
2745 }
2746 #endif
2747 
2748 
2749 //=============================================================================
2750 const Type* GotoNode::Value(PhaseGVN* phase) const {
2751   // If the input is reachable, then we are executed.
2752   // If the input is not reachable, then we are not executed.
2753   return phase->type(in(0));
2754 }
2755 
2756 Node* GotoNode::Identity(PhaseGVN* phase) {
2757   return in(0);                // Simple copy of incoming control
2758 }
2759 
2760 const RegMask &GotoNode::out_RegMask() const {
2761   return RegMask::Empty;
2762 }
2763 
2764 //=============================================================================
2765 const RegMask &JumpNode::out_RegMask() const {
2766   return RegMask::Empty;
2767 }
2768 
2769 //=============================================================================
2770 const RegMask &JProjNode::out_RegMask() const {
2771   return RegMask::Empty;
2772 }
2773 
2774 //=============================================================================
2775 const RegMask &CProjNode::out_RegMask() const {
2776   return RegMask::Empty;
2777 }
2778 
2779 
2780 
2781 //=============================================================================
2782 
2783 uint PCTableNode::hash() const { return Node::hash() + _size; }
2784 bool PCTableNode::cmp( const Node &n ) const
2785 { return _size == ((PCTableNode&)n)._size; }
2786 
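     // The type of a PCTable is a tuple with one CONTROL field per jump target;
     // e.g. with _size == 3 the bottom type is {Control, Control, Control}.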
2787 const Type *PCTableNode::bottom_type() const {
2788   const Type** f = TypeTuple::fields(_size);
2789   for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
2790   return TypeTuple::make(_size, f);
2791 }
2792 
2793 //------------------------------Value------------------------------------------
2794 // Compute the type of the PCTableNode.  If reachable, it is a tuple of
2795 // Control; otherwise the table targets are not reachable.
2796 const Type* PCTableNode::Value(PhaseGVN* phase) const {
2797   if( phase->type(in(0)) == Type::CONTROL )
2798     return bottom_type();
2799   return Type::TOP;             // All paths dead?  Then so are we
2800 }
2801 
2802 //------------------------------Ideal------------------------------------------
2803 // Return a node which is more "ideal" than the current node.  Strip out
2804 // control copies
2805 Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2806   return remove_dead_region(phase, can_reshape) ? this : NULL;
2807 }
2808 
2809 //=============================================================================
2810 uint JumpProjNode::hash() const {
2811   return Node::hash() + _dest_bci;
2812 }
2813 
2814 bool JumpProjNode::cmp( const Node &n ) const {
2815   return ProjNode::cmp(n) &&
2816     _dest_bci == ((JumpProjNode&)n)._dest_bci;
2817 }
2818 
2819 #ifndef PRODUCT
2820 void JumpProjNode::dump_spec(outputStream *st) const {
2821   ProjNode::dump_spec(st);
2822   st->print("@bci %d ",_dest_bci);
2823 }
2824 
2825 void JumpProjNode::dump_compact_spec(outputStream *st) const {
2826   ProjNode::dump_compact_spec(st);
2827   st->print("(%d)%d@%d", _switch_val, _proj_no, _dest_bci);
2828 }
2829 #endif
2830 
2831 //=============================================================================
2832 //------------------------------Value------------------------------------------
2833 // Check for being unreachable, or for coming from a Rethrow.  Rethrows cannot
2834 // have the default "fall_through_index" path.
2835 const Type* CatchNode::Value(PhaseGVN* phase) const {
2836   // Unreachable?  Then so are all paths from here.
2837   if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
2838   // First assume all paths are reachable
2839   const Type** f = TypeTuple::fields(_size);
2840   for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL;
2841   // Identify cases that will always throw an exception
2842   // () rethrow call
2843   // () virtual or interface call with NULL receiver
2844   // () call is a check cast with incompatible arguments
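       // For example, a CallDynamicJava whose receiver is known to be null always
       // takes the exceptional path, and an AllocateArray whose ValidLengthTest
       // input is the constant 0 always throws; in both cases the fall-through
       // projection is typed TOP below.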
2845   if( in(1)->is_Proj() ) {
2846     Node *i10 = in(1)->in(0);
2847     if( i10->is_Call() ) {
2848       CallNode *call = i10->as_Call();
2849       // Rethrows always throw exceptions, never return
2850       if (call->entry_point() == OptoRuntime::rethrow_stub()) {
2851         f[CatchProjNode::fall_through_index] = Type::TOP;
2852       } else if (call->is_AllocateArray()) {
2853         Node* klass_node = call->in(AllocateNode::KlassNode);
2854         Node* length = call->in(AllocateNode::ALength);
2855         const Type* length_type = phase->type(length);
2856         const Type* klass_type = phase->type(klass_node);
2857         Node* valid_length_test = call->in(AllocateNode::ValidLengthTest);
2858         const Type* valid_length_test_t = phase->type(valid_length_test);
2859         if (length_type == Type::TOP || klass_type == Type::TOP || valid_length_test_t == Type::TOP ||
2860             valid_length_test_t->is_int()->is_con(0)) {
2861           f[CatchProjNode::fall_through_index] = Type::TOP;
2862         }
2863       } else if( call->req() > TypeFunc::Parms ) {
2864         const Type *arg0 = phase->type( call->in(TypeFunc::Parms) );
2865         // Check for null receiver to virtual or interface calls
2866         if( call->is_CallDynamicJava() &&
2867             arg0->higher_equal(TypePtr::NULL_PTR) ) {
2868           f[CatchProjNode::fall_through_index] = Type::TOP;
2869         }
2870       } // End of if not a runtime stub
2871     } // End of if have call above me
2872   } // End of if slot 1 is a projection
2873   return TypeTuple::make(_size, f);
2874 }
2875 
2876 //=============================================================================
2877 uint CatchProjNode::hash() const {
2878   return Node::hash() + _handler_bci;
2879 }
2880 
2881 
2882 bool CatchProjNode::cmp( const Node &n ) const {
2883   return ProjNode::cmp(n) &&
2884     _handler_bci == ((CatchProjNode&)n)._handler_bci;
2885 }
2886 
2887 
2888 //------------------------------Identity---------------------------------------
2889 // If only 1 target is possible, choose it if it is the main control
2890 Node* CatchProjNode::Identity(PhaseGVN* phase) {
2891   // If my value is control and no other value is, then treat as ID
2892   const TypeTuple *t = phase->type(in(0))->is_tuple();
2893   if (t->field_at(_con) != Type::CONTROL)  return this;
2894   // If we remove the last CatchProj and elide the Catch/CatchProj, then we
2895   // also remove any exception table entry.  Thus we must know the call
2896   // feeding the Catch will not really throw an exception.  This is ok for
2897   // the main fall-thru control (happens when we know a call can never throw
2898   // an exception) or for "rethrow", because a further optimization will
2899   // yank the rethrow (happens when we inline a function that can throw an
2900   // exception and the caller has no handler).  Not legal, e.g., for passing
2901   // a NULL receiver to a v-call, or passing bad types to a slow-check-cast.
2902   // These cases MUST throw an exception via the runtime system, so the VM
2903   // will be looking for a table entry.
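       //
       // A sketch of the simple case: when the call is known not to throw,
       // CatchNode::Value types every path except the fall-through as TOP, and the
       // fall-through CatchProj below collapses to the control projection feeding
       // the Catch, eliding the Catch/CatchProj pair.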
2904   Node *proj = in(0)->in(1);    // Expect a proj feeding CatchNode
2905   CallNode *call;
2906   if (_con != TypeFunc::Control && // Bail out if not the main control.
2907       !(proj->is_Proj() &&      // AND NOT a rethrow
2908         proj->in(0)->is_Call() &&
2909         (call = proj->in(0)->as_Call()) &&
2910         call->entry_point() == OptoRuntime::rethrow_stub()))
2911     return this;
2912 
2913   // Search for any other path being control
2914   for (uint i = 0; i < t->cnt(); i++) {
2915     if (i != _con && t->field_at(i) == Type::CONTROL)
2916       return this;
2917   }
2918   // Only my path is possible; I am identity on control to the jump
2919   return in(0)->in(0);
2920 }
2921 
2922 
2923 #ifndef PRODUCT
2924 void CatchProjNode::dump_spec(outputStream *st) const {
2925   ProjNode::dump_spec(st);
2926   st->print("@bci %d ",_handler_bci);
2927 }
2928 #endif
2929 
2930 //=============================================================================
2931 //------------------------------Identity---------------------------------------
2932 // Check for CreateEx being Identity.
2933 Node* CreateExNode::Identity(PhaseGVN* phase) {
2934   if( phase->type(in(1)) == Type::TOP ) return in(1);
2935   if( phase->type(in(0)) == Type::TOP ) return in(0);
2936   // We only come from CatchProj, unless the CatchProj goes away.
2937   // If the CatchProj is optimized away, then we just carry the
2938   // exception oop through.
2939 
2940   // CheckCastPPNode::Ideal() for inline types reuses the exception
2941   // paths of a call to perform an allocation: we can see a Phi here.
2942   if (in(1)->is_Phi()) {
2943     return this;
2944   }
2945   CallNode *call = in(1)->in(0)->as_Call();
2946 
2947   return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
2948     ? this
2949     : call->in(TypeFunc::Parms);
2950 }
2951 
2952 //=============================================================================
2953 //------------------------------Value------------------------------------------
2954 // Check for being unreachable.
2955 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2956   if (!in(0) || in(0)->is_top()) return Type::TOP;
2957   return bottom_type();
2958 }
2959 
2960 //------------------------------Ideal------------------------------------------
2961 // Check for no longer being part of a loop
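     // A NeverBranch is used to fake an exit out of an infinite loop so the CFG
     // stays connected. Once its input is no longer the loop Region (e.g. the loop
     // has been folded away), the fake exit is unnecessary: wire the fall-through
     // projection to the input control and fold this node to top.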
2962 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2963   if (can_reshape && !in(0)->is_Region()) {
2964     // Dead code elimination can sometimes delete this projection, so
2965     // if it's not there, there's nothing to do.
2966     Node* fallthru = proj_out_or_null(0);
2967     if (fallthru != NULL) {
2968       phase->is_IterGVN()->replace_node(fallthru, in(0));
2969     }
2970     return phase->C->top();
2971   }
2972   return NULL;
2973 }
2974 
2975 #ifndef PRODUCT
2976 void NeverBranchNode::format( PhaseRegAlloc *ra_, outputStream *st) const {
2977   st->print("%s", Name());
2978 }
2979 #endif