src/hotspot/share/opto/cfgnode.cpp (original version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/loopnode.hpp"
  37 #include "opto/machnode.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/narrowptrnode.hpp"
  40 #include "opto/mulnode.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/regmask.hpp"
  43 #include "opto/runtime.hpp"
  44 #include "opto/subnode.hpp"
  45 #include "opto/vectornode.hpp"
  46 #include "utilities/vmError.hpp"
  47 
  48 // Portions of code courtesy of Clifford Click
  49 
  50 // Optimization - Graph Style
  51 
  52 //=============================================================================
  53 //------------------------------Value------------------------------------------
  54 // Compute the type of the RegionNode.
  55 const Type* RegionNode::Value(PhaseGVN* phase) const {

 376   Node *n = (Node*)phase->C->root();
 377   nstack.push(n);
 378   visited.set(n->_idx);
 379   while (nstack.size() != 0) {
 380     n = nstack.pop();
 381     uint max = n->outcnt();
 382     for (uint i = 0; i < max; i++) {
 383       Node* m = n->raw_out(i);
 384       if (m != NULL && m->is_CFG()) {
 385         if (m == this) {
 386           return false; // We reached the Region node - it is not dead.
 387         }
 388         if (!visited.test_set(m->_idx))
 389           nstack.push(m);
 390       }
 391     }
 392   }
 393   return true; // The Region node is unreachable - it is dead.
 394 }
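
The loop above is C2's usual iterative worklist DFS: push the root, pop a node,
scan its CFG successors, and stop early if the Region itself is reached. A
minimal standalone sketch of the same idiom, using STL containers in place of
HotSpot's Node_Stack and VectorSet (the ToyGraph type here is hypothetical):

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // Toy CFG: node i lists its successor ids in succ[i].
    struct ToyGraph {
      std::vector<std::vector<uint32_t>> succ;
    };

    // Returns true when 'target' is NOT reachable from 'root', mirroring
    // the structure of the reachability walk above.
    static bool is_unreachable(const ToyGraph& g, uint32_t root, uint32_t target) {
      std::vector<uint32_t> nstack{root};          // worklist, like Node_Stack
      std::unordered_set<uint32_t> visited{root};  // like VectorSet over _idx
      while (!nstack.empty()) {
        uint32_t n = nstack.back();
        nstack.pop_back();
        for (uint32_t m : g.succ[n]) {
          if (m == target) return false;           // reached it - not dead
          if (visited.insert(m).second)            // test_set analogue
            nstack.push_back(m);
        }
      }
      return true;                                 // never reached - dead
    }
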
 395 
 396 bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) {
 397   // Incremental inlining + PhaseStringOpts sometimes produce:
 398   //
 399   // cmpP with 1 top input
 400   //           |
 401   //          If
 402   //         /  \
 403   //   IfFalse  IfTrue  /- Some Node
 404   //         \  /      /    /
 405   //        Region    / /-MergeMem
 406   //             \---Phi
 407   //
 408   //
 409   // It's expected by PhaseStringOpts that the Region goes away and is
 410   // replaced by the If's control input, but because there's still a Phi,
 411   // the Region stays in the graph. The top input from the cmpP is
 412   // propagated forward, and a useful subgraph goes away. The code
 413   // below replaces the Phi with the MergeMem so that the Region
 414   // is simplified.
 415 
 416   PhiNode* phi = has_unique_phi();
 417   if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) {
 418     MergeMemNode* m = NULL;
 419     assert(phi->req() == 3, "same as region");
 420     for (uint i = 1; i < 3; ++i) {
 421       Node *mem = phi->in(i);
 422       if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) {
 423         // Nothing is control-dependent on path #i except the region itself.
 424         m = mem->as_MergeMem();
 425         uint j = 3 - i;
 426         Node* other = phi->in(j);
 427         if (other && other == m->base_memory()) {
 428           // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
 429           // This will allow the diamond to collapse completely.
 430           phase->is_IterGVN()->replace_node(phi, m);
 431           return true;
 432         }
 433       }
 434     }
 435   }
 436   return false;
 437 }
 438 
 439 //------------------------------Ideal------------------------------------------
 440 // Return a node which is more "ideal" than the current node.  Must preserve
 441 // the CFG, but we can still strip out dead paths.
 442 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 443   if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
 444   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 445 
 446   // Check for a RegionNode with no Phi users where two inputs come from
 447   // opposite arms of the same If.  If found, the control-flow split is useless.
 448   bool has_phis = false;
 449   if (can_reshape) {            // Need DU info to check for Phi users
 450     has_phis = (has_phi() != NULL);       // Cache result
 451     if (has_phis && try_clean_mem_phi(phase)) {
 452       has_phis = false;
 453     }
 454 
 455     if (!has_phis) {            // No Phi users?  Nothing merging?
 456       for (uint i = 1; i < req()-1; i++) {
 457         Node *if1 = in(i);
 458         if( !if1 ) continue;
 459         Node *iff = if1->in(0);
 460         if( !iff || !iff->is_If() ) continue;
 461         for( uint j=i+1; j<req(); j++ ) {
 462           if( in(j) && in(j)->in(0) == iff &&
 463               if1->Opcode() != in(j)->Opcode() ) {
 464             // Add the IF Projections to the worklist. They (and the IF itself)
 465             // will be eliminated if dead.
 466             phase->is_IterGVN()->add_users_to_worklist(iff);
 467             set_req(i, iff->in(0));// Skip around the useless IF diamond
 468             set_req(j, NULL);
 469             return this;      // Record progress
 470           }
 471         }
 472       }

 834   if (iff1 == iff2) {
 835     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 836     igvn->replace_input_of(region, idx1, iff1->in(0));
 837     igvn->replace_input_of(region, idx2, igvn->C->top());
 838     return (region == this); // Remove useless if (both projections map to the same control/value)
 839   }
 840   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 841   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 842   if (bol1 == NULL || bol2 == NULL) {
 843     return false; // No bool inputs found
 844   }
 845   Node* cmp1 = bol1->in(1);
 846   Node* cmp2 = bol2->in(1);
 847   bool commute = false;
 848   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 849     return false; // No comparison
 850   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 851              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 852              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 853              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 854              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
 855     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 856     // SubTypeCheck is not commutative
 857     return false;
 858   } else if (cmp1 != cmp2) {
 859     if (cmp1->in(1) == cmp2->in(2) &&
 860         cmp1->in(2) == cmp2->in(1)) {
 861       commute = true; // Same but swapped inputs, commute the test
 862     } else {
 863       return false; // Ifs are not comparing the same values
 864     }
 865   }
 866   proj1 = proj1->other_if_proj();
 867   proj2 = proj2->other_if_proj();
 868   if (!((proj1->unique_ctrl_out() == iff2 &&
 869          proj2->unique_ctrl_out() == this) ||
 870         (proj2->unique_ctrl_out() == iff1 &&
 871          proj1->unique_ctrl_out() == this))) {
 872     return false; // Ifs are not connected through other projs
 873   }
 874   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged

 915 
 916 //=============================================================================
 917 // note that these functions assume that the _adr_type field is flattened
 918 uint PhiNode::hash() const {
 919   const Type* at = _adr_type;
 920   return TypeNode::hash() + (at ? at->hash() : 0);
 921 }
 922 bool PhiNode::cmp( const Node &n ) const {
 923   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
 924 }
 925 static inline
 926 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
 927   if (at == NULL || at == TypePtr::BOTTOM)  return at;
 928   return Compile::current()->alias_type(at)->adr_type();
 929 }
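
hash() and cmp() together implement GVN's hash-consing contract: two Phis can
only be commoned if they agree on everything cmp() checks, so hash() must fold
in the same fields (here the flattened _adr_type). A standalone toy
illustration of that contract (the ToyPhi type is hypothetical; C2's real
table is NodeHash, not std::unordered_set):

    #include <cstddef>
    #include <unordered_set>

    struct ToyPhi {
      int base_hash;          // stands in for TypeNode::hash()
      const void* adr_type;   // stands in for PhiNode::_adr_type
      bool operator==(const ToyPhi& o) const {    // plays the role of cmp()
        return base_hash == o.base_hash && adr_type == o.adr_type;
      }
    };

    struct ToyPhiHash {       // like PhiNode::hash(): base plus nullable adr_type
      std::size_t operator()(const ToyPhi& p) const {
        return static_cast<std::size_t>(p.base_hash) +
               (p.adr_type ? reinterpret_cast<std::size_t>(p.adr_type) : 0);
      }
    };

    // GVN-style dedup: inserting an equal ToyPhi twice keeps a single entry.
    std::unordered_set<ToyPhi, ToyPhiHash> gvn_table;
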
 930 
 931 //----------------------------make---------------------------------------------
 932 // create a new phi with edges matching r and set (initially) to x
 933 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
 934   uint preds = r->req();   // Number of predecessor paths
 935   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
 936   PhiNode* p = new PhiNode(r, t, at);
 937   for (uint j = 1; j < preds; j++) {
 938     // Fill in all inputs, except those which the region does not yet have
 939     if (r->in(j) != NULL)
 940       p->init_req(j, x);
 941   }
 942   return p;
 943 }
 944 PhiNode* PhiNode::make(Node* r, Node* x) {
 945   const Type*    t  = x->bottom_type();
 946   const TypePtr* at = NULL;
 947   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 948   return make(r, x, t, at);
 949 }
 950 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
 951   const Type*    t  = x->bottom_type();
 952   const TypePtr* at = NULL;
 953   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 954   return new PhiNode(r, t, at);
 955 }
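
A sketch of how these factories are typically used (this assumes an active
Compile/GVN context; 'region' and 'init_val' are hypothetical names): the
two-argument make() derives the type, and for memory phis the flattened
adr_type, from x, so callers usually reach for the four-argument form only
when slicing memory explicitly.

    // Hedged sketch, not a definitive API example: merge the same value
    // on every incoming path and let GVN fold the redundant phi away.
    Node* merge_same_value(PhaseGVN* gvn, RegionNode* region, Node* init_val) {
      PhiNode* phi = PhiNode::make(region, init_val);  // one input per live path
      return gvn->transform(phi);                      // may collapse to init_val
    }
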

1052       np->as_Phi()->verify_adr_type(visited, at);
1053     } else if (n->bottom_type() == Type::TOP
1054                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1055       // ignore top inputs
1056     } else {
1057       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1058       // recheck phi/non-phi consistency at leaves:
1059       assert((nat != NULL) == (at != NULL), "");
1060       assert(nat == at || nat == TypePtr::BOTTOM,
1061              "adr_type must be consistent at leaves of phi nest");
1062     }
1063   }
1064 }
1065 
1066 // Verify a whole nest of phis rooted at this one.
1067 void PhiNode::verify_adr_type(bool recursive) const {
1068   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1069   if (Node::in_dump())               return;  // muzzle asserts when printing
1070 
1071   assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
1072 
1073   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1074 
1075   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1076          "Phi::adr_type must be pre-normalized");
1077 
1078   if (recursive) {
1079     VectorSet visited;
1080     verify_adr_type(visited, _adr_type);
1081   }
1082 }
1083 #endif
1084 
1085 
1086 //------------------------------Value------------------------------------------
1087 // Compute the type of the PhiNode
1088 const Type* PhiNode::Value(PhaseGVN* phase) const {
1089   Node *r = in(0);              // RegionNode
1090   if( !r )                      // Copy or dead
1091     return in(1) ? phase->type(in(1)) : Type::TOP;

1125         }
1126       }
1127     } else if (l->in(LoopNode::LoopBackControl) != NULL &&
1128                in(LoopNode::EntryControl) != NULL &&
1129                phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
1130       // During CCP, if we saturate the type of a counted loop's Phi
1131       // before the special code for counted loop above has a chance
1132       // to run (that is as long as the type of the backedge's control
1133       // is top), we might end up with non-monotonic types
1134       return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
1135     }
1136   }
1137 
1138   // Until we have harmony between classes and interfaces in the type
1139   // lattice, we must tread carefully around phis which implicitly
1140   // convert the one to the other.
1141   const TypePtr* ttp = _type->make_ptr();
1142   const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
1143   const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_instklassptr() : NULL;
1144   bool is_intf = false;
1145   if (ttip != NULL) {
1146     ciKlass* k = ttip->klass();
1147     if (k->is_loaded() && k->is_interface())
1148       is_intf = true;
1149   }
1150   if (ttkp != NULL) {
1151     ciKlass* k = ttkp->klass();
1152     if (k->is_loaded() && k->is_interface())
1153       is_intf = true;
1154   }
1155 
1156   // Default case: merge all inputs
1157   const Type *t = Type::TOP;        // Merged type starting value
1158   for (uint i = 1; i < req(); ++i) {// For all paths in
1159     // Reachable control path?
1160     if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
1161       const Type* ti = phase->type(in(i));
1162       // We assume that each input of an interface-valued Phi is a true
1163       // subtype of that interface.  This might not be true of the meet
1164       // of all the input types.  The lattice is not distributive in
1165       // such cases.  Ward off asserts in type.cpp by refusing to do
1166       // meets between interfaces and proper classes.
1167       const TypePtr* tip = ti->make_ptr();
1168       const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
1169       if (tiip) {
1170         bool ti_is_intf = false;
1171         ciKlass* k = tiip->klass();
1172         if (k->is_loaded() && k->is_interface())
1173           ti_is_intf = true;

1190   //   (Occurrences of this case suggest improvements to Value methods.)
1191   //
1192   // It is not possible to see Type::BOTTOM values as phi inputs,
1193   // because the ciTypeFlow pre-pass produces verifier-quality types.
1194   const Type* ft = t->filter_speculative(_type);  // Worst case type
1195 
1196 #ifdef ASSERT
1197   // The following logic has been moved into TypeOopPtr::filter.
1198   const Type* jt = t->join_speculative(_type);
1199   if (jt->empty()) {           // Emptied out???
1200 
1201     // Check for evil case of 't' being a class and '_type' expecting an
1202     // interface.  This can happen because the bytecodes do not contain
1203     // enough type info to distinguish a Java-level interface variable
1204     // from a Java-level object variable.  If we meet 2 classes which
1205     // both implement interface I, but their meet is at 'j/l/O' which
1206     // doesn't implement I, we have no way to tell if the result should
1207     // be 'I' or 'j/l/O'.  Thus we'll pick 'j/l/O'.  If this then flows
1208     // into a Phi which "knows" it's an Interface type we'll have to
1209     // uplift the type.
1210     if (!t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface()) {
1211       assert(ft == _type, ""); // Uplift to interface
1212     } else if (!t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
1213       assert(ft == _type, ""); // Uplift to interface
1214     } else {
1215       // We also have to handle 'evil cases' of interface- vs. class-arrays
1216       Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
1217       if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1218           assert(ft == _type, "");   // Uplift to array of interface
1219       } else {
1220         // Otherwise it's something stupid like non-overlapping int ranges
1221         // found on dying counted loops.
1222         assert(ft == Type::TOP, ""); // Canonical empty value
1223       }
1224     }
1225   }
1226 
1227   else {
1228 
1229     // If we have an interface-typed Phi and we narrow to a class type, the join
1230     // should report back the class.  However, if we have a J/L/Object
1231     // class-typed Phi and an interface flows in, it's possible that the meet &
1232     // join report an interface back out.  This isn't possible but happens

1354 
1355 //------------------------------Identity---------------------------------------
1356 // Check for Region being Identity.
1357 Node* PhiNode::Identity(PhaseGVN* phase) {
1358   // Check for no merging going on
1359   // (There used to be special-case code here when this->region->is_Loop.
1360   // It would check for a tributary phi on the backedge that trivially
1361   // equaled the main phi, perhaps with a single cast.  The unique_input method
1362   // does all this and more, by reducing such tributaries to 'this'.)
1363   Node* uin = unique_input(phase, false);
1364   if (uin != NULL) {
1365     return uin;
1366   }
1367 
1368   int true_path = is_diamond_phi();
1369   if (true_path != 0) {
1370     Node* id = is_cmove_id(phase, true_path);
1371     if (id != NULL)  return id;
1372   }
1373 
1374   // Looking for phis with identical inputs.  If we find one that has
1375   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1376   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1377       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1378     uint phi_len = req();
1379     Node* phi_reg = region();
1380     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1381       Node* u = phi_reg->fast_out(i);
1382       if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1383           u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1384           u->req() == phi_len) {
1385         for (uint j = 1; j < phi_len; j++) {
1386           if (in(j) != u->in(j)) {
1387             u = NULL;
1388             break;
1389           }
1390         }
1391         if (u != NULL) {
1392           return u;
1393         }

1876         } else if (rc->in(0)->in(1) != NULL &&
1877                    rc->in(0)->in(1)->is_Bool()) {
1878           if (worklist.member(rc->in(0)->in(1))) {
1879             delay = true;
1880           } else if (rc->in(0)->in(1)->in(1) != NULL &&
1881                      rc->in(0)->in(1)->in(1)->is_Cmp()) {
1882             if (worklist.member(rc->in(0)->in(1)->in(1))) {
1883               delay = true;
1884             }
1885           }
1886         }
1887       }
1888     }
1889   }
1890   if (delay) {
1891     worklist.push(this);
1892   }
1893   return delay;
1894 }
1895 
1896 //------------------------------Ideal------------------------------------------
1897 // Return a node which is more "ideal" than the current node.  Must preserve
1898 // the CFG, but we can still strip out dead paths.
1899 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1900   Node *r = in(0);              // RegionNode
1901   assert(r != NULL && r->is_Region(), "this phi must have a region");
1902   assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
1903 
1904   // Note: During parsing, phis are often transformed before their regions.
1905   // This means we have to use type_or_null to defend against untyped regions.
1906   if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1907     return NULL;                // No change
1908 
1909   Node *top = phase->C->top();
1910   bool new_phi = (outcnt() == 0); // transforming new Phi
1911   // No change for igvn if new phi is not hooked
1912   if (new_phi && can_reshape)
1913     return NULL;
1914 
 1915   // There are 2 situations when only one valid phi input is left

2177           for (uint i = 1; i < req(); i++) {
2178             offset->init_req(i, in(i)->in(AddPNode::Offset));
2179           }
2180           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2181         }
2182         return new AddPNode(base, address, offset);
2183       }
2184     }
2185   }
2186 
2187   // Split phis through memory merges, so that the memory merges will go away.
2188   // Piggy-back this transformation on the search for a unique input....
2189   // It will be as if the merged memory is the unique value of the phi.
2190   // (Do not attempt this optimization unless parsing is complete.
2191   // It would make the parser's memory-merge logic sick.)
2192   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2193   if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2194     // see if this phi should be sliced
2195     uint merge_width = 0;
2196     bool saw_self = false;
2197     for( uint i=1; i<req(); ++i ) {// For all paths in
2198       Node *ii = in(i);
2199       // TOP inputs should not be counted as safe inputs because if the
2200       // Phi references itself through all other inputs then splitting the
2201       // Phi through memory merges would create a dead loop at a later stage.
2202       if (ii == top) {
2203         return NULL; // Delay optimization until graph is cleaned.
2204       }
2205       if (ii->is_MergeMem()) {
2206         MergeMemNode* n = ii->as_MergeMem();
2207         merge_width = MAX2(merge_width, n->req());
2208         saw_self = saw_self || (n->base_memory() == this);
2209       }
2210     }
2211 
2212     // This restriction is temporarily necessary to ensure termination:
2213     if (!saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2214 
2215     if (merge_width > Compile::AliasIdxRaw) {
2216       // found at least one non-empty MergeMem
2217       const TypePtr* at = adr_type();
2218       if (at != TypePtr::BOTTOM) {
2219         // Patch the existing phi to select an input from the merge:
2220         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2221         //     Phi:AT1(...m1...)
2222         int alias_idx = phase->C->get_alias_index(at);
2223         for (uint i=1; i<req(); ++i) {
2224           Node *ii = in(i);
2225           if (ii->is_MergeMem()) {
2226             MergeMemNode* n = ii->as_MergeMem();
2227             // Compress paths and change unreachable cycles to TOP.
2228             // Otherwise, we could update the input infinitely along a MergeMem cycle.
2229             // Equivalent code is in MemNode::Ideal_common.
2230             Node *m  = phase->transform(n);
2231             if (outcnt() == 0) {  // Above transform() may kill us!
2232               return top;
2233             }

2264         if (!saw_safe_input) {
2265           // There is a dead loop: All inputs are either dead or reference back to this phi
2266           return top;
2267         }
2268 
2269         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2270         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2271         PhaseIterGVN* igvn = phase->is_IterGVN();
2272         Node* hook = new Node(1);
2273         PhiNode* new_base = (PhiNode*) clone();
2274         // Must eagerly register phis, since they participate in loops.
2275         if (igvn) {
2276           igvn->register_new_node_with_optimizer(new_base);
2277           hook->add_req(new_base);
2278         }
2279         MergeMemNode* result = MergeMemNode::make(new_base);
2280         for (uint i = 1; i < req(); ++i) {
2281           Node *ii = in(i);
2282           if (ii->is_MergeMem()) {
2283             MergeMemNode* n = ii->as_MergeMem();

2284             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2285               // If we have not seen this slice yet, make a phi for it.
2286               bool made_new_phi = false;
2287               if (mms.is_empty()) {
2288                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2289                 made_new_phi = true;
2290                 if (igvn) {
2291                   igvn->register_new_node_with_optimizer(new_phi);
2292                   hook->add_req(new_phi);
2293                 }
2294                 mms.set_memory(new_phi);
2295               }
2296               Node* phi = mms.memory();
2297               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2298               phi->set_req(i, mms.memory2());
2299             }
2300           }
2301         }
2302         // Distribute all self-loops.
2303         { // (Extra braces to hide mms.)

2418           break;
2419         }
2420       }
2421     }
2422 
2423     if (all_inputs_are_equiv_vboxes) {
2424       VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in(1));
2425       PhiNode* new_vbox_phi = new PhiNode(r, vbox->box_type());
2426       PhiNode* new_vect_phi = new PhiNode(r, vbox->vec_type());
2427       for (uint i = 1; i < req(); ++i) {
2428         VectorBoxNode* old_vbox = static_cast<VectorBoxNode*>(in(i));
2429         new_vbox_phi->set_req(i, old_vbox->in(VectorBoxNode::Box));
2430         new_vect_phi->set_req(i, old_vbox->in(VectorBoxNode::Value));
2431       }
2432       igvn->register_new_node_with_optimizer(new_vbox_phi, this);
2433       igvn->register_new_node_with_optimizer(new_vect_phi, this);
2434       progress = new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, vbox->box_type(), vbox->vec_type());
2435     }
2436   }
2437 
2438   return progress;              // Return any progress
2439 }
2440 
2441 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2442   // First, take the shortcut when we know it is a loop and the EntryControl data path is dead.
2443   // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2444   // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
2445   assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
2446   const bool is_loop = (r->is_Loop() && r->req() == 3);
2447   const Node* top = phase->C->top();
2448   if (is_loop) {
2449     return !uin->eqv_uncast(in(LoopNode::EntryControl));
2450   } else {
2451     // We have a data loop either when there is an unsafe data reference or when the region is unreachable.
2452     return is_unsafe_data_reference(uin)
2453            || (r->req() == 3 && (r->in(1) != top && r->in(2) == top && r->is_unreachable_region(phase)));
2454   }
2455 }
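
For reference, the shape the is_loop branch distinguishes (node names below are
hypothetical; the structure follows the checks above):

    // Healthy loop phi:
    //   loop = Loop(entry_ctrl, backedge_ctrl)   // r->req() == 3
    //   phi  = Phi(loop, init, incr)             // in(EntryControl) == init
    //   incr = AddI(phi, ConI(1))                // value circulates via backedge
    // Here uin is init, i.e. phi->in(LoopNode::EntryControl), so there is no
    // data loop. Any other unique input means the phi only defines itself
    // through the backedge, which is reported as a data loop.
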
2456 
2457 //------------------------------is_tripcount-----------------------------------

2682   return in(0)->in(0);
2683 }
2684 
2685 
2686 #ifndef PRODUCT
2687 void CatchProjNode::dump_spec(outputStream *st) const {
2688   ProjNode::dump_spec(st);
2689   st->print("@bci %d ",_handler_bci);
2690 }
2691 #endif
2692 
2693 //=============================================================================
2694 //------------------------------Identity---------------------------------------
2695 // Check for CreateEx being Identity.
2696 Node* CreateExNode::Identity(PhaseGVN* phase) {
2697   if( phase->type(in(1)) == Type::TOP ) return in(1);
2698   if( phase->type(in(0)) == Type::TOP ) return in(0);
2699   // We only come from CatchProj, unless the CatchProj goes away.
2700   // If the CatchProj is optimized away, then we just carry the
2701   // exception oop through.
2702   CallNode *call = in(1)->in(0)->as_Call();
2703 
2704   return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
2705     ? this
2706     : call->in(TypeFunc::Parms);
2707 }
2708 
2709 //=============================================================================
2710 //------------------------------Value------------------------------------------
2711 // Check for being unreachable.
2712 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2713   if (!in(0) || in(0)->is_top()) return Type::TOP;
2714   return bottom_type();
2715 }
2716 
2717 //------------------------------Ideal------------------------------------------
2718 // Check for no longer being part of a loop
2719 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2720   if (can_reshape && !in(0)->is_Region()) {
2721     // Dead code elimination can sometimes delete this projection so

src/hotspot/share/opto/cfgnode.cpp (patched version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "memory/allocation.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/objArrayKlass.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/cfgnode.hpp"
  34 #include "opto/connode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/loopnode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/narrowptrnode.hpp"
  41 #include "opto/mulnode.hpp"
  42 #include "opto/phaseX.hpp"
  43 #include "opto/regmask.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/subnode.hpp"
  46 #include "opto/vectornode.hpp"
  47 #include "utilities/vmError.hpp"
  48 
  49 // Portions of code courtesy of Clifford Click
  50 
  51 // Optimization - Graph Style
  52 
  53 //=============================================================================
  54 //------------------------------Value------------------------------------------
  55 // Compute the type of the RegionNode.
  56 const Type* RegionNode::Value(PhaseGVN* phase) const {

 377   Node *n = (Node*)phase->C->root();
 378   nstack.push(n);
 379   visited.set(n->_idx);
 380   while (nstack.size() != 0) {
 381     n = nstack.pop();
 382     uint max = n->outcnt();
 383     for (uint i = 0; i < max; i++) {
 384       Node* m = n->raw_out(i);
 385       if (m != NULL && m->is_CFG()) {
 386         if (m == this) {
 387           return false; // We reached the Region node - it is not dead.
 388         }
 389         if (!visited.test_set(m->_idx))
 390           nstack.push(m);
 391       }
 392     }
 393   }
 394   return true; // The Region node is unreachable - it is dead.
 395 }
 396 
 397 Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) {
 398   // Incremental inlining + PhaseStringOpts sometimes produce:
 399   //
 400   // cmpP with 1 top input
 401   //           |
 402   //          If
 403   //         /  \
 404   //   IfFalse  IfTrue  /- Some Node
 405   //         \  /      /    /
 406   //        Region    / /-MergeMem
 407   //             \---Phi
 408   //
 409   //
 410   // It's expected by PhaseStringOpts that the Region goes away and is
 411   // replaced by the If's control input, but because there's still a Phi,
 412   // the Region stays in the graph. The top input from the cmpP is
 413   // propagated forward, and a useful subgraph goes away. The code
 414   // below replaces the Phi with the MergeMem so that the Region
 415   // is simplified.
 416 
 417   if (type() == Type::MEMORY && is_diamond_phi(true)) {
 418     MergeMemNode* m = NULL;
 419     assert(req() == 3, "same as region");
 420     Node* r = in(0);
 421     for (uint i = 1; i < 3; ++i) {
 422       Node *mem = in(i);
 423       if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) {
 424         // Nothing is control-dependent on path #i except the region itself.
 425         m = mem->as_MergeMem();
 426         uint j = 3 - i;
 427         Node* other = in(j);
 428         if (other && other == m->base_memory()) {
 429           // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
 430           // This will allow the diamond to collapse completely.
 431           return m;
 432         }
 433       }
 434     }
 435   }
 436   return NULL;
 437 }
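
The helper now lives on PhiNode and only locates the replacement rather than
performing it, so both call sites can share the pattern match. Condensed from
the two versions of this change:

    // Old shape (RegionNode::try_clean_mem_phi mutated the graph itself):
    //   if (has_phis && try_clean_mem_phi(phase)) { has_phis = false; }
    // New shape (callers decide what to do with the returned MergeMem):
    //   Node* m = phi->try_clean_mem_phi(phase);  // NULL if no match
    //   if (m != NULL) { igvn->replace_node(phi, m); has_phis = false; }
    // PhiNode::Identity can simply return m, with no IGVN mutation needed.
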
 438 
 439 //------------------------------Ideal------------------------------------------
 440 // Return a node which is more "ideal" than the current node.  Must preserve
 441 // the CFG, but we can still strip out dead paths.
 442 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 443   if( !can_reshape && !in(0) ) return NULL;     // Already degraded to a Copy
 444   assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
 445 
 446   // Check for a RegionNode with no Phi users where two inputs come from
 447   // opposite arms of the same If.  If found, the control-flow split is useless.
 448   bool has_phis = false;
 449   if (can_reshape) {            // Need DU info to check for Phi users
 450     has_phis = (has_phi() != NULL);       // Cache result
 451     if (has_phis) {
 452       PhiNode* phi = has_unique_phi();
 453       if (phi != NULL) {
 454         Node* m = phi->try_clean_mem_phi(phase);
 455         if (m != NULL) {
 456           phase->is_IterGVN()->replace_node(phi, m);
 457           has_phis = false;
 458         }
 459       }
 460     }
 461 
 462     if (!has_phis) {            // No Phi users?  Nothing merging?
 463       for (uint i = 1; i < req()-1; i++) {
 464         Node *if1 = in(i);
 465         if( !if1 ) continue;
 466         Node *iff = if1->in(0);
 467         if( !iff || !iff->is_If() ) continue;
 468         for( uint j=i+1; j<req(); j++ ) {
 469           if( in(j) && in(j)->in(0) == iff &&
 470               if1->Opcode() != in(j)->Opcode() ) {
 471             // Add the IF Projections to the worklist. They (and the IF itself)
 472             // will be eliminated if dead.
 473             phase->is_IterGVN()->add_users_to_worklist(iff);
 474             set_req(i, iff->in(0));// Skip around the useless IF diamond
 475             set_req(j, NULL);
 476             return this;      // Record progress
 477           }
 478         }
 479       }

 841   if (iff1 == iff2) {
 842     igvn->add_users_to_worklist(iff1); // Make sure dead if is eliminated
 843     igvn->replace_input_of(region, idx1, iff1->in(0));
 844     igvn->replace_input_of(region, idx2, igvn->C->top());
 845     return (region == this); // Remove useless if (both projections map to the same control/value)
 846   }
 847   BoolNode* bol1 = iff1->in(1)->isa_Bool();
 848   BoolNode* bol2 = iff2->in(1)->isa_Bool();
 849   if (bol1 == NULL || bol2 == NULL) {
 850     return false; // No bool inputs found
 851   }
 852   Node* cmp1 = bol1->in(1);
 853   Node* cmp2 = bol2->in(1);
 854   bool commute = false;
 855   if (!cmp1->is_Cmp() || !cmp2->is_Cmp()) {
 856     return false; // No comparison
 857   } else if (cmp1->Opcode() == Op_CmpF || cmp1->Opcode() == Op_CmpD ||
 858              cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
 859              cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
 860              cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
 861              cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
 862              cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
 863     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
 864   // SubTypeCheck and FlatArrayCheck are not commutative
 865     return false;
 866   } else if (cmp1 != cmp2) {
 867     if (cmp1->in(1) == cmp2->in(2) &&
 868         cmp1->in(2) == cmp2->in(1)) {
 869       commute = true; // Same but swapped inputs, commute the test
 870     } else {
 871       return false; // Ifs are not comparing the same values
 872     }
 873   }
 874   proj1 = proj1->other_if_proj();
 875   proj2 = proj2->other_if_proj();
 876   if (!((proj1->unique_ctrl_out() == iff2 &&
 877          proj2->unique_ctrl_out() == this) ||
 878         (proj2->unique_ctrl_out() == iff1 &&
 879          proj1->unique_ctrl_out() == this))) {
 880     return false; // Ifs are not connected through other projs
 881   }
 882   // Found 'iff -> proj -> iff -> proj -> this' shape where all other projs are merged
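
The trichotomy caveat above is easy to demonstrate outside the compiler: every
comparison against an IEEE-754 NaN is false, so "swap the inputs and commute
the test" is not a sound rewrite for CmpF/CmpD the way it is for integer
compares. A standalone check:

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = std::nan(""), b = 1.0;
      // For integers exactly one of <, ==, > holds. NaN fails all three,
      // which is why the code above refuses to transform these tests.
      std::printf("a<b:%d a==b:%d a>b:%d\n", a < b, a == b, a > b);  // 0 0 0
      return 0;
    }
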

 923 
 924 //=============================================================================
 925 // note that these functions assume that the _adr_type field is flattened
 926 uint PhiNode::hash() const {
 927   const Type* at = _adr_type;
 928   return TypeNode::hash() + (at ? at->hash() : 0);
 929 }
 930 bool PhiNode::cmp( const Node &n ) const {
 931   return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
 932 }
 933 static inline
 934 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
 935   if (at == NULL || at == TypePtr::BOTTOM)  return at;
 936   return Compile::current()->alias_type(at)->adr_type();
 937 }
 938 
 939 //----------------------------make---------------------------------------------
 940 // create a new phi with edges matching r and set (initially) to x
 941 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
 942   uint preds = r->req();   // Number of predecessor paths
 943   assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flattened_accesses_share_alias()), "flatten at");
 944   PhiNode* p = new PhiNode(r, t, at);
 945   for (uint j = 1; j < preds; j++) {
 946     // Fill in all inputs, except those which the region does not yet have
 947     if (r->in(j) != NULL)
 948       p->init_req(j, x);
 949   }
 950   return p;
 951 }
 952 PhiNode* PhiNode::make(Node* r, Node* x) {
 953   const Type*    t  = x->bottom_type();
 954   const TypePtr* at = NULL;
 955   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 956   return make(r, x, t, at);
 957 }
 958 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
 959   const Type*    t  = x->bottom_type();
 960   const TypePtr* at = NULL;
 961   if (t == Type::MEMORY)  at = flatten_phi_adr_type(x->adr_type());
 962   return new PhiNode(r, t, at);
 963 }
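
Contrasting the two factories (a sketch with hypothetical names, assuming
compiler context): make() seeds every existing region path with x, while
make_blank() leaves all inputs NULL for the caller to wire up one path at a
time.

    // PhiNode* filled = PhiNode::make(region, x);       // in(j) == x on live paths
    // PhiNode* blank  = PhiNode::make_blank(region, x); // type from x, inputs NULL
    // blank->init_req(1, value_on_path_1);              // caller fills each path
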

1060       np->as_Phi()->verify_adr_type(visited, at);
1061     } else if (n->bottom_type() == Type::TOP
1062                || (n->is_Mem() && n->in(MemNode::Address)->bottom_type() == Type::TOP)) {
1063       // ignore top inputs
1064     } else {
1065       const TypePtr* nat = flatten_phi_adr_type(n->adr_type());
1066       // recheck phi/non-phi consistency at leaves:
1067       assert((nat != NULL) == (at != NULL), "");
1068       assert(nat == at || nat == TypePtr::BOTTOM,
1069              "adr_type must be consistent at leaves of phi nest");
1070     }
1071   }
1072 }
1073 
1074 // Verify a whole nest of phis rooted at this one.
1075 void PhiNode::verify_adr_type(bool recursive) const {
1076   if (VMError::is_error_reported())  return;  // muzzle asserts when debugging an error
1077   if (Node::in_dump())               return;  // muzzle asserts when printing
1078 
1079   assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only");
1080   // Flat array elements shouldn't get their own memory slice until flattened_accesses_share_alias is cleared.
1081   // It could be that the graph has no loads/stores and flattened_accesses_share_alias is never cleared. EA could
1082   // still create per-element Phis, but that wouldn't be a problem as there are no memory accesses for that array.
1083   assert(_adr_type == NULL || _adr_type->isa_aryptr() == NULL ||
1084          _adr_type->is_aryptr()->is_known_instance() ||
1085          !_adr_type->is_aryptr()->is_flat() ||
1086          !Compile::current()->flattened_accesses_share_alias() ||
1087          _adr_type == TypeAryPtr::INLINES, "flat array element shouldn't get its own slice yet");
1088 
1089   if (!VerifyAliases)       return;  // verify thoroughly only if requested
1090 
1091   assert(_adr_type == flatten_phi_adr_type(_adr_type),
1092          "Phi::adr_type must be pre-normalized");
1093 
1094   if (recursive) {
1095     VectorSet visited;
1096     verify_adr_type(visited, _adr_type);
1097   }
1098 }
1099 #endif
1100 
1101 
1102 //------------------------------Value------------------------------------------
1103 // Compute the type of the PhiNode
1104 const Type* PhiNode::Value(PhaseGVN* phase) const {
1105   Node *r = in(0);              // RegionNode
1106   if( !r )                      // Copy or dead
1107     return in(1) ? phase->type(in(1)) : Type::TOP;

1141         }
1142       }
1143     } else if (l->in(LoopNode::LoopBackControl) != NULL &&
1144                in(LoopNode::EntryControl) != NULL &&
1145                phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
1146       // During CCP, if we saturate the type of a counted loop's Phi
1147       // before the special code for counted loop above has a chance
1148       // to run (that is as long as the type of the backedge's control
1149       // is top), we might end up with non-monotonic types
1150       return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
1151     }
1152   }
1153 
1154   // Until we have harmony between classes and interfaces in the type
1155   // lattice, we must tread carefully around phis which implicitly
1156   // convert the one to the other.
1157   const TypePtr* ttp = _type->make_ptr();
1158   const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
1159   const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_instklassptr() : NULL;
1160   bool is_intf = false;
1161   if (ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1162     is_intf = true;
1163   } else if (ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
1164     is_intf = true;
1165   }
1166 
1167   // Default case: merge all inputs
1168   const Type *t = Type::TOP;        // Merged type starting value
1169   for (uint i = 1; i < req(); ++i) {// For all paths in
1170     // Reachable control path?
1171     if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
1172       const Type* ti = phase->type(in(i));
1173       // We assume that each input of an interface-valued Phi is a true
1174       // subtype of that interface.  This might not be true of the meet
1175       // of all the input types.  The lattice is not distributive in
1176       // such cases.  Ward off asserts in type.cpp by refusing to do
1177       // meets between interfaces and proper classes.
1178       const TypePtr* tip = ti->make_ptr();
1179       const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
1180       if (tiip) {
1181         bool ti_is_intf = false;
1182         ciKlass* k = tiip->klass();
1183         if (k->is_loaded() && k->is_interface())
1184           ti_is_intf = true;

1201   //   (Occurrences of this case suggest improvements to Value methods.)
1202   //
1203   // It is not possible to see Type::BOTTOM values as phi inputs,
1204   // because the ciTypeFlow pre-pass produces verifier-quality types.
1205   const Type* ft = t->filter_speculative(_type);  // Worst case type
1206 
1207 #ifdef ASSERT
1208   // The following logic has been moved into TypeOopPtr::filter.
1209   const Type* jt = t->join_speculative(_type);
1210   if (jt->empty()) {           // Emptied out???
1211 
1212     // Check for evil case of 't' being a class and '_type' expecting an
1213     // interface.  This can happen because the bytecodes do not contain
1214     // enough type info to distinguish a Java-level interface variable
1215     // from a Java-level object variable.  If we meet 2 classes which
1216     // both implement interface I, but their meet is at 'j/l/O' which
1217     // doesn't implement I, we have no way to tell if the result should
1218     // be 'I' or 'j/l/O'.  Thus we'll pick 'j/l/O'.  If this then flows
1219     // into a Phi which "knows" it's an Interface type we'll have to
1220     // uplift the type.
1221     if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1222       assert(ft == _type, ""); // Uplift to interface
1223     } else if (!t->empty() && ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
1224       assert(ft == _type, ""); // Uplift to interface
1225     } else {
1226       // We also have to handle 'evil cases' of interface- vs. class-arrays
1227       Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
1228       if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1229           assert(ft == _type, "");   // Uplift to array of interface
1230       } else {
1231         // Otherwise it's something stupid like non-overlapping int ranges
1232         // found on dying counted loops.
1233         assert(ft == Type::TOP, ""); // Canonical empty value
1234       }
1235     }
1236   }
1237 
1238   else {
1239 
1240     // If we have an interface-typed Phi and we narrow to a class type, the join
1241     // should report back the class.  However, if we have a J/L/Object
1242     // class-typed Phi and an interface flows in, it's possible that the meet &
1243     // join report an interface back out.  This isn't possible but happens

1365 
1366 //------------------------------Identity---------------------------------------
1367 // Check for Region being Identity.
1368 Node* PhiNode::Identity(PhaseGVN* phase) {
1369   // Check for no merging going on
1370   // (There used to be special-case code here when this->region->is_Loop.
1371   // It would check for a tributary phi on the backedge that trivially
1372   // equaled the main phi, perhaps with a single cast.  The unique_input method
1373   // does all this and more, by reducing such tributaries to 'this'.)
1374   Node* uin = unique_input(phase, false);
1375   if (uin != NULL) {
1376     return uin;
1377   }
1378 
1379   int true_path = is_diamond_phi();
1380   if (true_path != 0) {
1381     Node* id = is_cmove_id(phase, true_path);
1382     if (id != NULL)  return id;
1383   }
1384 
1385   if (phase->is_IterGVN()) {
1386     Node* m = try_clean_mem_phi(phase);
1387     if (m != NULL) {
1388       return m;
1389     }
1390   }
1391 
1392 
1393   // Looking for phis with identical inputs.  If we find one that has
1394   // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1395   if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1396       TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1397     uint phi_len = req();
1398     Node* phi_reg = region();
1399     for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1400       Node* u = phi_reg->fast_out(i);
1401       if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1402           u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1403           u->req() == phi_len) {
1404         for (uint j = 1; j < phi_len; j++) {
1405           if (in(j) != u->in(j)) {
1406             u = NULL;
1407             break;
1408           }
1409         }
1410         if (u != NULL) {
1411           return u;
1412         }

1895         } else if (rc->in(0)->in(1) != NULL &&
1896                    rc->in(0)->in(1)->is_Bool()) {
1897           if (worklist.member(rc->in(0)->in(1))) {
1898             delay = true;
1899           } else if (rc->in(0)->in(1)->in(1) != NULL &&
1900                      rc->in(0)->in(1)->in(1)->is_Cmp()) {
1901             if (worklist.member(rc->in(0)->in(1)->in(1))) {
1902               delay = true;
1903             }
1904           }
1905         }
1906       }
1907     }
1908   }
1909   if (delay) {
1910     worklist.push(this);
1911   }
1912   return delay;
1913 }
1914 
1915 // Push inline type input nodes (and null) down through the phi recursively (can handle data loops).
1916 InlineTypeBaseNode* PhiNode::push_inline_types_through(PhaseGVN* phase, bool can_reshape, ciInlineKlass* vk) {
1917   InlineTypeBaseNode* vt = NULL;
1918   if (_type->isa_ptr()) {
1919     vt = InlineTypePtrNode::make_null(*phase, vk)->clone_with_phis(phase, in(0));
1920   } else {
1921     vt = InlineTypeNode::make_null(*phase, vk)->clone_with_phis(phase, in(0));
1922   }
1923   if (can_reshape) {
1924     // Replace phi right away to be able to use the inline
1925     // type node when reaching the phi again through data loops.
1926     PhaseIterGVN* igvn = phase->is_IterGVN();
1927     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1928       Node* u = fast_out(i);
1929       igvn->rehash_node_delayed(u);
1930       imax -= u->replace_edge(this, vt);
1931       --i;
1932     }
1933     assert(outcnt() == 0, "should be dead now");
1934   }
1935   for (uint i = 1; i < req(); ++i) {
1936     Node* n = in(i)->uncast();
1937     Node* other = NULL;
1938     if (n->is_InlineTypeBase()) {
1939       other = n;
1940     } else if (phase->type(n)->is_zero_type()) {
1941       other = InlineTypePtrNode::make_null(*phase, vk);
1942     } else {
1943       assert(can_reshape, "can only handle phis during IGVN");
1944       other = phase->transform(n->as_Phi()->push_inline_types_through(phase, can_reshape, vk));
1945     }
1946     bool transform = !can_reshape && (i == (req()-1)); // Transform phis on last merge
1947     vt->merge_with(phase, other->as_InlineTypeBase(), i, transform);
1948   }
1949   return vt;
1950 }
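
Conceptually (the field names and exact node shapes below are illustrative
only), the transformation trades one oop phi for per-field phis plus an
is-init phi, so that loads from the merged value can fold:

    // Before:  p  = Phi(R, InlineType(x: f1, y: f2), null)
    // After:   vt = InlineTypePtr(is_init: Phi(R, 1, 0),
    //                             x:       Phi(R, f1, default),
    //                             y:       Phi(R, f2, default))
    // merge_with() fills row i of each per-field phi from input i; a null
    // input contributes default field values with is_init == 0.
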
1951 
1952 //------------------------------Ideal------------------------------------------
1953 // Return a node which is more "ideal" than the current node.  Must preserve
1954 // the CFG, but we can still strip out dead paths.
1955 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1956   Node *r = in(0);              // RegionNode
1957   assert(r != NULL && r->is_Region(), "this phi must have a region");
1958   assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
1959 
1960   // Note: During parsing, phis are often transformed before their regions.
1961   // This means we have to use type_or_null to defend against untyped regions.
1962   if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1963     return NULL;                // No change
1964 
1965   Node *top = phase->C->top();
1966   bool new_phi = (outcnt() == 0); // transforming new Phi
1967   // No change for igvn if new phi is not hooked
1968   if (new_phi && can_reshape)
1969     return NULL;
1970 
1971   // There are 2 situations when only one valid phi input is left

2233           for (uint i = 1; i < req(); i++) {
2234             offset->init_req(i, in(i)->in(AddPNode::Offset));
2235           }
2236           phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2237         }
2238         return new AddPNode(base, address, offset);
2239       }
2240     }
2241   }
2242 
2243   // Split phis through memory merges, so that the memory merges will go away.
2244   // Piggy-back this transformation on the search for a unique input....
2245   // It will be as if the merged memory is the unique value of the phi.
2246   // (Do not attempt this optimization unless parsing is complete.
2247   // It would make the parser's memory-merge logic sick.)
2248   // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2249   if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2250     // see if this phi should be sliced
2251     uint merge_width = 0;
2252     bool saw_self = false;
2253     // TODO revisit this with JDK-8247216
2254     bool mergemem_only = true;
2255     for( uint i=1; i<req(); ++i ) {// For all paths in
2256       Node *ii = in(i);
2257       // TOP inputs should not be counted as safe inputs because if the
2258       // Phi references itself through all other inputs then splitting the
2259       // Phi through memory merges would create a dead loop at a later stage.
2260       if (ii == top) {
2261         return NULL; // Delay optimization until graph is cleaned.
2262       }
2263       if (ii->is_MergeMem()) {
2264         MergeMemNode* n = ii->as_MergeMem();
2265         merge_width = MAX2(merge_width, n->req());
2266         saw_self = saw_self || (n->base_memory() == this);
2267       } else {
2268         mergemem_only = false;
2269       }
2270     }
2271 
2272     // This restriction is temporarily necessary to ensure termination:
2273     if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM)  merge_width = 0;
2274 
2275     if (merge_width > Compile::AliasIdxRaw) {
2276       // found at least one non-empty MergeMem
2277       const TypePtr* at = adr_type();
2278       if (at != TypePtr::BOTTOM) {
2279         // Patch the existing phi to select an input from the merge:
2280         // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2281         //     Phi:AT1(...m1...)
2282         int alias_idx = phase->C->get_alias_index(at);
2283         for (uint i=1; i<req(); ++i) {
2284           Node *ii = in(i);
2285           if (ii->is_MergeMem()) {
2286             MergeMemNode* n = ii->as_MergeMem();
2287             // Compress paths and change unreachable cycles to TOP.
2288             // Otherwise, we could update the input infinitely along a MergeMem cycle.
2289             // Equivalent code is in MemNode::Ideal_common.
2290             Node *m  = phase->transform(n);
2291             if (outcnt() == 0) {  // Above transform() may kill us!
2292               return top;
2293             }

2324         if (!saw_safe_input) {
2325           // There is a dead loop: All inputs are either dead or reference back to this phi
2326           return top;
2327         }
2328 
2329         // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into
2330         //     MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...))
2331         PhaseIterGVN* igvn = phase->is_IterGVN();
2332         Node* hook = new Node(1);
2333         PhiNode* new_base = (PhiNode*) clone();
2334         // Must eagerly register phis, since they participate in loops.
2335         if (igvn) {
2336           igvn->register_new_node_with_optimizer(new_base);
2337           hook->add_req(new_base);
2338         }
2339         MergeMemNode* result = MergeMemNode::make(new_base);
2340         for (uint i = 1; i < req(); ++i) {
2341           Node *ii = in(i);
2342           if (ii->is_MergeMem()) {
2343             MergeMemNode* n = ii->as_MergeMem();
2344             if (igvn) {
2345               // TODO revisit this with JDK-8247216
2346               // Put 'n' on the worklist because it might be modified by MergeMemStream::iteration_setup
2347               igvn->_worklist.push(n);
2348             }
2349             for (MergeMemStream mms(result, n); mms.next_non_empty2(); ) {
2350               // If we have not seen this slice yet, make a phi for it.
2351               bool made_new_phi = false;
2352               if (mms.is_empty()) {
2353                 Node* new_phi = new_base->slice_memory(mms.adr_type(phase->C));
2354                 made_new_phi = true;
2355                 if (igvn) {
2356                   igvn->register_new_node_with_optimizer(new_phi);
2357                   hook->add_req(new_phi);
2358                 }
2359                 mms.set_memory(new_phi);
2360               }
2361               Node* phi = mms.memory();
2362               assert(made_new_phi || phi->in(i) == n, "replace the i-th merge by a slice");
2363               phi->set_req(i, mms.memory2());
2364             }
2365           }
2366         }
2367         // Distribute all self-loops.
2368         { // (Extra braces to hide mms.)
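
The rewrite above is a distribution law: a phi of memory merges becomes a
merge of per-slice phis. A self-contained toy analogue, with two array slots
standing in for alias slices (purely illustrative):

    #include <array>
    #include <cstdio>

    using Merge = std::array<int, 2>;  // a memory state with two alias slices

    // Phi over whole merges: pick one complete memory state per path.
    static Merge phi(bool take_first, const Merge& a, const Merge& b) {
      return take_first ? a : b;
    }

    // Distributed form: one phi per slice, as built with MergeMemStream above.
    static Merge distributed(bool take_first, const Merge& a, const Merge& b) {
      return { take_first ? a[0] : b[0],
               take_first ? a[1] : b[1] };
    }

    int main() {
      Merge a{1, 2}, b{3, 4};
      for (int p = 0; p < 2; ++p) {
        Merge m1 = phi(p, a, b), m2 = distributed(p, a, b);
        std::printf("path %d: (%d,%d) == (%d,%d)\n", p, m1[0], m1[1], m2[0], m2[1]);
      }
      return 0;
    }
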

2483           break;
2484         }
2485       }
2486     }
2487 
2488     if (all_inputs_are_equiv_vboxes) {
2489       VectorBoxNode* vbox = static_cast<VectorBoxNode*>(in(1));
2490       PhiNode* new_vbox_phi = new PhiNode(r, vbox->box_type());
2491       PhiNode* new_vect_phi = new PhiNode(r, vbox->vec_type());
2492       for (uint i = 1; i < req(); ++i) {
2493         VectorBoxNode* old_vbox = static_cast<VectorBoxNode*>(in(i));
2494         new_vbox_phi->set_req(i, old_vbox->in(VectorBoxNode::Box));
2495         new_vect_phi->set_req(i, old_vbox->in(VectorBoxNode::Value));
2496       }
2497       igvn->register_new_node_with_optimizer(new_vbox_phi, this);
2498       igvn->register_new_node_with_optimizer(new_vect_phi, this);
2499       progress = new VectorBoxNode(igvn->C, new_vbox_phi, new_vect_phi, vbox->box_type(), vbox->vec_type());
2500     }
2501   }
2502 
2503   // Check recursively if inputs are either an inline type, constant null,
2504   // or another Phi (including self-references through data loops). If so,
2505   // push the inline types down through the phis to enable folding of loads.
2506   if (EnableValhalla && req() > 2 && progress == NULL) {
2507     ResourceMark rm;
2508     Unique_Node_List worklist;
2509     worklist.push(this);
2510     bool can_optimize = true;
2511     ciInlineKlass* vk = NULL;
2512 
2513     for (uint next = 0; next < worklist.size() && can_optimize; next++) {
2514       Node* phi = worklist.at(next);
2515       for (uint i = 1; i < phi->req() && can_optimize; i++) {
2516         Node* n = phi->in(i);
2517         if (n == NULL) {
2518           can_optimize = false;
2519           break;
2520         }
2521         n = n->uncast();
2522         const Type* t = phase->type(n);
2523         if (n->is_InlineTypeBase() && n->as_InlineTypeBase()->can_merge() &&
2524             (vk == NULL || vk == t->inline_klass())) {
2525           vk = (vk == NULL) ? t->inline_klass() : vk;
2526         } else if (n->is_Phi() && can_reshape) {
2527           worklist.push(n);
2528         } else if (!t->is_zero_type()) {
2529           can_optimize = false;
2530         }
2531       }
2532     }
2533     if (can_optimize && vk != NULL) {
2534       progress = push_inline_types_through(phase, can_reshape, vk);
2535     }
2536   }
2537 
2538   return progress;              // Return any progress
2539 }
2540 
2541 bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) {
2542   // First, take the shortcut when we know it is a loop and the EntryControl data path is dead.
2543   // The loop node may only have one input because the entry path was removed in PhaseIdealLoop::Dominators().
2544   // Then, check if there is a data loop when the phi references itself directly or through other data nodes.
2545   assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
2546   const bool is_loop = (r->is_Loop() && r->req() == 3);
2547   const Node* top = phase->C->top();
2548   if (is_loop) {
2549     return !uin->eqv_uncast(in(LoopNode::EntryControl));
2550   } else {
2551     // We have a data loop either when there is an unsafe data reference or when the region is unreachable.
2552     return is_unsafe_data_reference(uin)
2553            || (r->req() == 3 && (r->in(1) != top && r->in(2) == top && r->is_unreachable_region(phase)));
2554   }
2555 }
2556 
2557 //------------------------------is_tripcount-----------------------------------

2782   return in(0)->in(0);
2783 }
2784 
2785 
2786 #ifndef PRODUCT
2787 void CatchProjNode::dump_spec(outputStream *st) const {
2788   ProjNode::dump_spec(st);
2789   st->print("@bci %d ",_handler_bci);
2790 }
2791 #endif
2792 
2793 //=============================================================================
2794 //------------------------------Identity---------------------------------------
2795 // Check for CreateEx being Identity.
2796 Node* CreateExNode::Identity(PhaseGVN* phase) {
2797   if( phase->type(in(1)) == Type::TOP ) return in(1);
2798   if( phase->type(in(0)) == Type::TOP ) return in(0);
2799   // We only come from CatchProj, unless the CatchProj goes away.
2800   // If the CatchProj is optimized away, then we just carry the
2801   // exception oop through.
2802 
2803   // CheckCastPPNode::Ideal() for inline types reuses the exception
2804   // paths of a call to perform an allocation: we can see a Phi here.
2805   if (in(1)->is_Phi()) {
2806     return this;
2807   }
2808   CallNode *call = in(1)->in(0)->as_Call();
2809 
2810   return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
2811     ? this
2812     : call->in(TypeFunc::Parms);
2813 }
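
The shape being matched, condensed (projection details vary; this is a sketch):

    // call      = Call...(...)                 // e.g. a rethrow call
    // ioproj    = Proj(call, TypeFunc::I_O)
    // catch     = Catch(ctrlproj, ioproj)
    // catchproj = CatchProj(catch)             // this->in(0)
    // createx   = CreateEx(catchproj, ioproj)  // so in(1)->in(0) == call
    // The test in(0)->in(0)->in(1) == in(1) checks that the CatchProj still
    // hangs off the same call as our I/O edge; once the CatchProj is gone,
    // the node is just the call's incoming exception oop,
    // call->in(TypeFunc::Parms). The new Phi guard bails out when inline
    // type allocation reuses a call's exception path and a Phi appears at in(1).
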
2814 
2815 //=============================================================================
2816 //------------------------------Value------------------------------------------
2817 // Check for being unreachable.
2818 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2819   if (!in(0) || in(0)->is_top()) return Type::TOP;
2820   return bottom_type();
2821 }
2822 
2823 //------------------------------Ideal------------------------------------------
2824 // Check for no longer being part of a loop
2825 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2826   if (can_reshape && !in(0)->is_Region()) {
2827     // Dead code elimination can sometimes delete this projection so